update comments

Daan Leijen 2024-03-25 07:35:49 -07:00
parent 7c17c3d33e
commit cc8d89a085
3 changed files with 46 additions and 33 deletions


@@ -13,9 +13,12 @@ terms of the MIT license. A copy of the license can be found in the file
 // mi_heap_t      : all data for a thread-local heap, contains
 //                  lists of all managed heap pages.
 // mi_segment_t   : a larger chunk of memory (32GiB) from where pages
-//                  are allocated.
-// mi_page_t      : a mimalloc page (usually 64KiB or 512KiB) from
+//                  are allocated. A segment is divided into slices (64KiB) from
+//                  which pages are allocated.
+// mi_page_t      : a "mimalloc" page (usually 64KiB or 512KiB) from
 //                  where objects are allocated.
+// Note: we always explicitly use "OS page" to refer to OS pages
+// and just use "page" to refer to mimalloc pages (`mi_page_t`).
 // --------------------------------------------------------------------------
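Since segments are always allocated at MI_SEGMENT_SIZE-aligned addresses, the segment that owns any block can be recovered by masking its pointer. A minimal sketch of that relationship, simplified from what `_mi_ptr_segment` in `internal.h` does (assumes the mimalloc internal headers are in scope):

    // Sketch: segments are MI_SEGMENT_SIZE-aligned, so masking the low bits
    // of any block pointer yields the owning segment; the page (and from it
    // the block metadata) is then found via the segment's slice table.
    static inline mi_segment_t* ptr_segment_sketch(const void* p) {
      return (mi_segment_t*)((uintptr_t)p & ~((uintptr_t)MI_SEGMENT_SIZE - 1));
    }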
@@ -192,15 +195,15 @@ typedef int32_t mi_ssize_t;
 #error "mimalloc internal: define more bins"
 #endif
 
-// Maximum slice offset (15)
-#define MI_MAX_SLICE_OFFSET ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
-
 // blocks up to this size are always allocated aligned
 #define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
 
 // Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
 #define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
 
+// Maximum slice count (255) for which we can find the page for interior pointers
+#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
 
 // ------------------------------------------------------
 // Mimalloc pages contain allocated blocks
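The renamed macro's value follows directly from the constants above. A quick check of the arithmetic behind the "(255)" comment, assuming the usual 64-bit defaults of a 32 MiB MI_SEGMENT_SIZE and a 64 KiB MI_SEGMENT_SLICE_SIZE (both sizes are assumptions for illustration, not taken from this diff):

    // Assumed: MI_SEGMENT_SIZE = 32 MiB, MI_SEGMENT_SLICE_SIZE = 64 KiB.
    // Then MI_BLOCK_ALIGNMENT_MAX = 32 MiB >> 1 = 16 MiB, and
    //   (16 MiB / 64 KiB) - 1 = 256 - 1 = 255,
    // matching the "(255)" in the new comment.
    _Static_assert((((32u*1024*1024) >> 1) / (64u*1024)) - 1 == 255,
                   "MI_MAX_SLICE_OFFSET_COUNT == 255 under these sizes");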
@@ -285,9 +288,9 @@ typedef struct mi_page_s {
   // "owned" by the segment
   uint32_t slice_count;       // slices in this page (0 if not a page)
   uint32_t slice_offset;      // distance from the actual page data slice (0 if a page)
-  uint8_t is_committed : 1;   // `true` if the page virtual memory is committed
-  uint8_t is_zero_init : 1;   // `true` if the page was initially zero initialized
-  uint8_t is_huge:1;          // `true` if the page is in a huge segment
+  uint8_t is_committed:1;     // `true` if the page virtual memory is committed
+  uint8_t is_zero_init:1;     // `true` if the page was initially zero initialized
+  uint8_t is_huge:1;          // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`)
 
   // padding
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
@@ -328,12 +331,13 @@ typedef enum mi_page_kind_e {
   MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
   MI_PAGE_MEDIUM,   // medium blocks go into medium pages inside a segment
   MI_PAGE_LARGE,    // larger blocks go into a page of just one block
-  MI_PAGE_HUGE,     // huge blocks (> 16 MiB) are put into a single page in a single segment.
+  MI_PAGE_HUGE,     // huge blocks (> `MI_LARGE_OBJ_SIZE_MAX`) or with alignment > `MI_BLOCK_ALIGNMENT_MAX`
+                    // are put into a single page in a single `MI_SEGMENT_HUGE` segment.
 } mi_page_kind_t;
 
 typedef enum mi_segment_kind_e {
   MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
-  MI_SEGMENT_HUGE,   // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
+  MI_SEGMENT_HUGE,   // segment with just one huge page inside.
 } mi_segment_kind_t;
 
 // ------------------------------------------------------
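The two enums encode the same size/alignment classification from both sides. A hypothetical helper that makes the mapping in the comments explicit (the helper itself and the small/medium cutoff constants `MI_SMALL_OBJ_SIZE_MAX`/`MI_MEDIUM_OBJ_SIZE_MAX` are assumptions for illustration, not code from this commit):

    // Hypothetical sketch of the classification the enum comments describe.
    static mi_page_kind_t page_kind_for(size_t size, size_t alignment) {
      if (alignment > MI_BLOCK_ALIGNMENT_MAX) return MI_PAGE_HUGE;  // dedicated MI_SEGMENT_HUGE segment
      if (size > MI_LARGE_OBJ_SIZE_MAX)       return MI_PAGE_HUGE;
      if (size > MI_MEDIUM_OBJ_SIZE_MAX)      return MI_PAGE_LARGE; // one block per page
      if (size > MI_SMALL_OBJ_SIZE_MAX)       return MI_PAGE_MEDIUM;
      return MI_PAGE_SMALL;                                         // 64KiB page
    }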
@@ -404,39 +408,48 @@ typedef struct mi_memid_s {
 } mi_memid_t;
 
-// Segments are large allocated memory blocks (8mb on 64 bit) from
-// the OS. Inside segments we allocated fixed size _pages_ that
-// contain blocks.
+// Segments are large allocated memory blocks (8MiB on 64-bit) from arenas or the OS.
+//
+// Inside segments we allocate fixed-size mimalloc pages (`mi_page_t`) that contain blocks.
+// The start of a segment is this structure with a fixed number of slice entries (`slices`),
+// usually followed by a guard OS page and the actual allocation area with pages.
+// While a page is not allocated, we view its data as a `mi_slice_t` (instead of a `mi_page_t`).
+// Of any free area, the first slice has the info and `slice_offset == 0`; for any subsequent
+// slices part of the area, the `slice_offset` is the byte offset back to the first slice
+// (so we can quickly find the page info on a free, `internal.h:_mi_segment_page_of`).
+// For slices, the `block_size` field is repurposed to signify whether a slice is used (`1`) or not (`0`).
+// Small and medium pages use a fixed number of slices to reduce slice fragmentation, while
+// large and huge pages span a variable number of slices.
 typedef struct mi_segment_s {
   // constant fields
-  mi_memid_t        memid;              // memory id for arena allocation
-  bool              allow_decommit;
-  bool              allow_purge;
+  mi_memid_t        memid;              // memory id for arena/OS allocation
+  bool              allow_decommit;     // can we decommit the memory
+  bool              allow_purge;        // can we purge the memory (reset or decommit)
   size_t            segment_size;
 
   // segment fields
-  mi_msecs_t        purge_expire;
-  mi_commit_mask_t  purge_mask;
-  mi_commit_mask_t  commit_mask;
+  mi_msecs_t        purge_expire;       // purge slices in the `purge_mask` after this time
+  mi_commit_mask_t  purge_mask;         // slices that can be purged
+  mi_commit_mask_t  commit_mask;        // slices that are currently committed
 
   // from here is zero initialized
   struct mi_segment_s* next;            // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
   bool              was_reclaimed;      // true if it was reclaimed (used to limit on-free reclamation)
 
   size_t            abandoned;          // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
-  size_t            abandoned_visits;   // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
+  size_t            abandoned_visits;   // count how often this segment is visited during abandoned reclamation (to force reclaim if it takes too long)
   size_t            used;               // count of pages in use
   uintptr_t         cookie;             // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
 
   size_t            segment_slices;       // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
-  size_t            segment_info_slices;  // initial slices we are using segment info and possible guard pages.
+  size_t            segment_info_slices;  // initial count of slices that we are using for segment info and possible guard pages.
 
   // layout like this to optimize access in `mi_free`
   mi_segment_kind_t kind;
   size_t            slice_entries;      // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
   _Atomic(mi_threadid_t) thread_id;     // unique id of the thread owning this segment
 
-  mi_slice_t        slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
+  mi_slice_t        slices[MI_SLICES_PER_SEGMENT+1];  // one extra final entry for huge blocks with large alignment
 } mi_segment_t;
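The back-offset scheme in the new comment is what makes freeing an interior pointer cheap. A simplified sketch of the lookup it enables, in the spirit of `internal.h:_mi_segment_page_of` but not the exact implementation (assumes `MI_SEGMENT_SLICE_SHIFT`, the log2 of the 64KiB slice size, and the mimalloc headers are in scope):

    // Sketch of the slice back-offset walk described above: locate the slice
    // containing `p`, then step back `slice_offset` bytes to the first slice
    // of the page, which is where the page metadata lives.
    static mi_page_t* segment_page_of_sketch(mi_segment_t* segment, void* p) {
      ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
      size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;          // slice index of `p`
      mi_slice_t* slice = &segment->slices[idx];
      slice = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset); // back to the first slice
      return (mi_page_t*)slice;                                     // page info starts here
    }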


@@ -455,7 +455,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-      page->retire_expire = 1+(bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+      page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
       mi_heap_t* heap = mi_page_heap(page);
       mi_assert_internal(pq >= heap->pages);
       const size_t index = pq - heap->pages;
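For context on the dropped `1+`: `retire_expire` is a countdown that later heap collections decrement before the page is actually freed, so a page that is quickly re-used escapes the retire/re-allocate churn. A hypothetical sketch of that countdown (the real logic lives in `page.c:_mi_heap_collect_retired`; this helper is illustrative only):

    // Hypothetical sketch: each heap collection ticks a retired page down
    // and only really frees it once the countdown reaches zero.
    static void tick_retired_sketch(mi_page_t* page, bool force) {
      if (page->retire_expire == 0) return;          // not marked for retirement
      if (force || --page->retire_expire == 0) {
        /* free the page back to its segment here */
      }
    }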


@@ -11,7 +11,11 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <string.h>  // memset
 #include <stdio.h>
 
-#define MI_PAGE_HUGE_ALIGN   (256*1024)
+// -------------------------------------------------------------------
+// Segments
+// mimalloc pages reside in segments. See `mi_segment_is_valid` for invariants.
+// -------------------------------------------------------------------
 
 static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats);
@@ -146,10 +150,6 @@ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) {
 
 /* --------------------------------------------------------------------------------
    Segment allocation
-
-   If a thread ends, it "abandons" pages with used blocks
-   and there is an abandoned segment list whose segments can
-   be reclaimed by still running threads, much like work-stealing.
 -------------------------------------------------------------------------------- */
@ -268,10 +268,10 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(slice->slice_offset == 0); mi_assert_internal(slice->slice_offset == 0);
size_t index = mi_slice_index(slice); size_t index = mi_slice_index(slice);
size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1; size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET valid back offsets if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET_COUNT valid back offsets
used_count++; used_count++;
if (segment->kind == MI_SEGMENT_HUGE) { mi_assert_internal(slice->is_huge); } mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE));
for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET && index + i <= maxindex; i++) { for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) {
mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t)); mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0); mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1); mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1);
@@ -720,9 +720,9 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   mi_page_t* page = mi_slice_to_page(slice);
   mi_assert_internal(mi_page_block_size(page) == bsize);
 
-  // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries
+  // set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries
   size_t extra = slice_count-1;
-  if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET;
+  if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT;
   if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices
   mi_slice_t* slice_next = slice + 1;