diff --git a/include/mimalloc.h b/include/mimalloc.h
index b87e8db2..ba426488 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -379,7 +379,7 @@ typedef enum mi_option_e {
   mi_option_guarded_sample_rate,          // 1 out of N allocations in the min/max range will be guarded (=1000)
   mi_option_guarded_sample_seed,          // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
   mi_option_target_segments_per_thread,   // experimental (=0)
-  mi_option_eager_abandon,                // eagerly abandon pages from the heap if suitable (to reduce memory footprint in multi-threaded code)
+  mi_option_full_page_retain,             // retain N full pages per size class (=4, lower it to reduce memory footprint in multi-threaded applications)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index 3d83e27a..348e2aa9 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -305,7 +305,7 @@ typedef struct mi_page_s {
   #endif
   _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
-
+
   mi_heap_t*            heap;              // heap this threads belong to.
   struct mi_page_s*     next;              // next page owned by the heap with the same `block_size`
   struct mi_page_s*     prev;              // previous page owned by the heap with the same `block_size`
@@ -417,7 +417,7 @@ struct mi_heap_s {
   size_t                page_retired_max;  // largest retired index into the `pages` array.
   mi_heap_t*            next;              // list of heaps per thread
   bool                  no_reclaim;        // `true` if this heap should not reclaim abandoned pages
-  bool                  eager_abandon;     // `true` if this heap can abandon pages to reduce memory footprint
+  bool                  allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint
   uint8_t               tag;               // custom tag, can be used for separating heaps based on the object types
   #if MI_GUARDED
   size_t                guarded_size_min;  // minimal size for guarded objects
diff --git a/src/heap.c b/src/heap.c
index 96342907..833af278 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -206,7 +206,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool
   heap->thread_id = _mi_thread_id();
   heap->arena_id = arena_id;
   heap->no_reclaim = noreclaim;
-  heap->eager_abandon = (!noreclaim && mi_option_is_enabled(mi_option_eager_abandon));
+  heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_full_page_retain) >= 0);
   heap->tag = tag;
   if (heap == tld->heap_backing) {
     _mi_random_init(&heap->random);
diff --git a/src/options.c b/src/options.c
index 1b326cc3..a6d42c58 100644
--- a/src/options.c
+++ b/src/options.c
@@ -143,7 +143,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) },      // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
   { 10,  UNINIT, MI_OPTION(arena_purge_mult) },                        // purge delay multiplier for arena's
   { 1,   UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
-  { 0,   UNINIT, MI_OPTION(abandoned_reclaim_on_free) },               // reclaim an abandoned segment on a free
+  { 1,   UNINIT, MI_OPTION(abandoned_reclaim_on_free) },               // reclaim an abandoned segment on a free
   { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) },  // 1 = do not use arena's for allocation (except if using specific arena id's)
   { 400, UNINIT, MI_OPTION(retry_on_oom) },                            // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
   #if defined(MI_VISIT_ABANDONED)
@@ -158,7 +158,7 @@ static mi_option_desc_t options[_mi_option_last] =
          UNINIT, MI_OPTION(guarded_sample_rate)},                      // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,   UNINIT, MI_OPTION(guarded_sample_seed)},
   { 0,   UNINIT, MI_OPTION(target_segments_per_thread) },              // abandon segments beyond this point, or 0 to disable.
-  { 1,   UNINIT, MI_OPTION(eager_abandon) },
+  { 2,   UNINIT, MI_OPTION(full_page_retain) },
 };

 static void mi_option_init(mi_option_desc_t* desc);
diff --git a/src/page.c b/src/page.c
index faef2f48..9b35a4db 100644
--- a/src/page.c
+++ b/src/page.c
@@ -212,7 +212,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
   mi_assert_internal(page!=NULL);

   // collect the thread free list
-  _mi_page_thread_free_collect(page);
+  _mi_page_thread_free_collect(page);

   // and the local free list
   if (page->local_free != NULL) {
@@ -264,7 +264,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
 */

 // called from `mi_free` on a reclaim, and fresh_alloc if we get an abandoned page
-void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page)
+void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page)
 {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
@@ -381,7 +381,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_is_in_full(page));
-  mi_assert_internal(!mi_page_heap(page)->eager_abandon);
+  mi_assert_internal(!mi_page_heap(page)->allow_page_abandon);
   if (!mi_page_is_in_full(page)) return;

   mi_heap_t* heap = mi_page_heap(page);
@@ -398,7 +398,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(!mi_page_is_in_full(page));

   mi_heap_t* heap = mi_page_heap(page);
-  if (heap->eager_abandon) {
+  if (heap->allow_page_abandon) {
     // abandon full pages
     _mi_page_abandon(page, pq);
   }
@@ -761,9 +761,10 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {

 // search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
 #define MI_MAX_CANDIDATE_SEARCH  (8)
+#define MI_MAX_FULL_PAGES_PER_QUEUE  (4)

 // Find a page with free blocks of `page->block_size`.
-static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
+static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
 {
   // search through the pages in "next fit" order
   #if MI_STAT
@@ -772,6 +773,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   #if MI_MAX_CANDIDATE_SEARCH > 1
   size_t candidate_count = 0; // we reset this on the first candidate to limit the search
   #endif
+  size_t full_page_count = 0;
   mi_page_t* page_candidate = NULL; // a page with free space
   mi_page_t* page = pq->first;
@@ -797,8 +799,11 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     // if the page is completely full, move it to the `mi_pages_full`
     // queue so we don't visit long-lived pages too often.
     if (!immediate_available && !mi_page_is_expandable(page)) {
-      mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
-      mi_page_to_full(page, pq);
+      full_page_count++;
+      if (full_page_count > MI_MAX_FULL_PAGES_PER_QUEUE) {
+        mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+        mi_page_to_full(page, pq);
+      }
     }
     else {
       // the page has free space, make it a candidate
@@ -807,8 +812,8 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
         page_candidate = page;
         candidate_count = 0;
       }
-      else if (mi_page_all_free(page_candidate)) {
-        _mi_page_free(page_candidate, pq);
+      else if (mi_page_all_free(page_candidate)) {
+        _mi_page_free(page_candidate, pq);
         page_candidate = page;
       }
       else if (page->used >= page_candidate->used) { // && !mi_page_is_mostly_used(page)) {
@@ -1000,7 +1005,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_heap_is_initialized(heap));

   // call potential deferred free routines
-  _mi_deferred_free(heap, false);
+  // _mi_deferred_free(heap, false);

   // free delayed frees from other threads (but skip contended ones)
   // _mi_heap_delayed_free_partial(heap);
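
A minimal usage sketch for the renamed option, assuming this branch: it only uses the existing public API from mimalloc.h (mi_option_set, mi_option_get, mi_malloc, mi_free); the value 0 is purely illustrative (the options.c initializer above uses 2, while the new header comment documents =4), and the negative-value note follows from the `>= 0` check added in _mi_heap_init.

// usage sketch, not part of the diff: tuning full_page_retain from an embedding application
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // retain fewer full pages per size class to favor a smaller footprint
  // in multi-threaded workloads; set before the first allocation so that
  // heaps created later pick it up
  mi_option_set(mi_option_full_page_retain, 0);

  // per _mi_heap_init in this diff, a negative value would also clear
  // heap->allow_page_abandon, since the check is `mi_option_get(...) >= 0`
  // mi_option_set(mi_option_full_page_retain, -1);

  void* p = mi_malloc(64);
  printf("full_page_retain = %ld\n", mi_option_get(mi_option_full_page_retain));
  mi_free(p);
  return 0;
}

Equivalently, assuming mimalloc's usual MIMALLOC_<OPTION> environment-variable mapping, MIMALLOC_FULL_PAGE_RETAIN=0 should have the same effect without code changes.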