From 061ef80de7c6240c756c6786f73dfbfeba2e006c Mon Sep 17 00:00:00 2001
From: Daan Leijen
Date: Tue, 7 Jan 2025 21:39:11 -0800
Subject: [PATCH 1/2] clarify allow_destroy

---
 src/free.c |  4 ++--
 src/heap.c | 14 +++++++-------
 src/init.c |  1 +
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/src/free.c b/src/free.c
index ed1b830e..5d9628f0 100644
--- a/src/free.c
+++ b/src/free.c
@@ -250,8 +250,8 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) mi_attr_noe
 
   // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
   if (!mi_page_is_used_at_frac(page,8) &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
-      !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
-      _mi_arenas_page_try_reabandon_to_mapped(page))
+      !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
+      _mi_arenas_page_try_reabandon_to_mapped(page))
   {
     return;
   }
diff --git a/src/heap.c b/src/heap.c
index b744c153..6d5e328e 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -167,7 +167,7 @@ mi_heap_t* mi_heap_get_backing(void) {
 }
 
 // todo: make order of parameters consistent (but would that break compat with CPython?)
-void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint8_t heap_tag, mi_tld_t* tld)
+void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy, uint8_t heap_tag, mi_tld_t* tld)
 {
   mi_assert_internal(heap!=NULL);
   mi_memid_t memid = heap->memid;
@@ -175,15 +175,15 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint
   heap->memid = memid;
   heap->tld = tld;  // avoid reading the thread-local tld during initialization
   heap->exclusive_arena = _mi_arena_from_id(arena_id);
-  heap->allow_page_reclaim = !noreclaim;
-  heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_page_full_retain) >= 0);
+  heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_reclaim_on_free));
+  heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0);
   heap->full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
   heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
-    // (but abandoning is good in this case)
     heap->allow_page_reclaim = false;
-    // and halve the full page retain (possibly to 0)
+    // .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
+    // (so blocked threads do not hold on to too much memory)
     if (heap->full_page_retain >= 0) {
       heap->full_page_retain = heap->full_page_retain / 4;
     }
@@ -236,12 +236,12 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi
 }
 
 mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
-  return mi_heap_new_ex(0 /* default heap tag */, false /* don't allow `mi_heap_destroy` */, arena_id);
+  return mi_heap_new_ex(0 /* default heap tag */, false /* allow destroy? */, arena_id);
 }
 
 mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
   // don't reclaim abandoned memory or otherwise destroy is unsafe
-  return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
+  return mi_heap_new_ex(0 /* default heap tag */, true /* allow destroy? */, _mi_arena_id_none());
 }
 
 bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
diff --git a/src/init.c b/src/init.c
index 1d352248..40d6143f 100644
--- a/src/init.c
+++ b/src/init.c
@@ -259,6 +259,7 @@ static void mi_heap_main_init(void) {
     //heap_main.keys[0] = _mi_heap_random_next(&heap_main);
     //heap_main.keys[1] = _mi_heap_random_next(&heap_main);
     _mi_heap_guarded_init(&heap_main);
+    heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_reclaim_on_free);
    heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0);
    heap_main.full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
  }

From 57eee51f46b4a5710468259c3251157900f9abcd Mon Sep 17 00:00:00 2001
From: Daan Leijen
Date: Tue, 7 Jan 2025 21:42:30 -0800
Subject: [PATCH 2/2] rename full_page_retain to page_full_retain for consistency with the option

---
 include/mimalloc/types.h | 2 +-
 src/heap.c               | 6 +++---
 src/init.c               | 2 +-
 src/page.c               | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index e45da9a7..c61b0498 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -426,7 +426,7 @@ struct mi_heap_s {
   size_t page_retired_max;     // largest retired index into the `pages` array.
   size_t generic_count;        // how often is mimalloc_generic invoked?
   mi_heap_t* next;             // list of heaps per thread
-  long full_page_retain;       // how many full pages can be retained per queue (before abondoning them)
+  long page_full_retain;       // how many full pages can be retained per queue (before abondoning them)
   bool allow_page_reclaim;     // `true` if this heap should not reclaim abandoned pages
   bool allow_page_abandon;     // `true` if this heap can abandon pages to reduce memory footprint
   uint8_t tag;                 // custom tag, can be used for separating heaps based on the object types
diff --git a/src/heap.c b/src/heap.c
index 6d5e328e..82ca05cb 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -177,15 +177,15 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   heap->exclusive_arena = _mi_arena_from_id(arena_id);
   heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_reclaim_on_free));
   heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0);
-  heap->full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
+  heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
   heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
     heap->allow_page_reclaim = false;
     // .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
     // (so blocked threads do not hold on to too much memory)
-    if (heap->full_page_retain >= 0) {
-      heap->full_page_retain = heap->full_page_retain / 4;
+    if (heap->page_full_retain >= 0) {
+      heap->page_full_retain = heap->page_full_retain / 4;
     }
   }
diff --git a/src/init.c b/src/init.c
index 40d6143f..ac49d292 100644
--- a/src/init.c
+++ b/src/init.c
@@ -261,7 +261,7 @@ static void mi_heap_main_init(void) {
     _mi_heap_guarded_init(&heap_main);
     heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_reclaim_on_free);
     heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0);
-    heap_main.full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
+    heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
   }
 }
diff --git a/src/page.c b/src/page.c
index 7e52d68f..d2d6a854 100644
--- a/src/page.c
+++ b/src/page.c
@@ -680,7 +680,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
   size_t count = 0;
   #endif
   long candidate_limit = 0;            // we reset this on the first candidate to limit the search
-  long full_page_retain = heap->full_page_retain;
+  long page_full_retain = heap->page_full_retain;
   mi_page_t* page_candidate = NULL;    // a page with free space
   mi_page_t* page = pq->first;
 
@@ -703,8 +703,8 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
       // if the page is completely full, move it to the `mi_pages_full`
       // queue so we don't visit long-lived pages too often.
       if (!immediate_available && !mi_page_is_expandable(page)) {
-        full_page_retain--;
-        if (full_page_retain < 0) {
+        page_full_retain--;
+        if (page_full_retain < 0) {
           mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
           mi_page_to_full(page, pq);
         }
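
The behavioural effect of the first patch can be illustrated with a small sketch against the public mimalloc API. This is not part of the patches: it assumes the dev branch headers these commits target, where `mi_heap_new` now passes `allow_destroy = true` (the heap neither reclaims nor abandons pages, so `mi_heap_destroy` stays safe), `mi_heap_new_in_arena` passes `false` (prefer `mi_heap_delete`), and `mi_option_reclaim_on_free` / `mi_option_page_full_retain` are the options read by `_mi_heap_init` and `mi_heap_main_init` above.

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Options read by _mi_heap_init / mi_heap_main_init in the patches above.
  mi_option_set(mi_option_reclaim_on_free, 1);    // let non-destroyable heaps reclaim abandoned pages on free
  mi_option_set(mi_option_page_full_retain, 4);   // retain up to 4 full pages per page queue

  // mi_heap_new: allow_destroy = true, so the heap never reclaims or abandons
  // pages and mi_heap_destroy (which frees every block at once) stays safe.
  mi_heap_t* scratch = mi_heap_new();
  void* p = mi_heap_malloc(scratch, 128);
  printf("scratch block at %p\n", p);
  mi_heap_destroy(scratch);                       // frees p and all other blocks in one go

  // mi_heap_new_in_arena: allow_destroy = false, so use mi_heap_delete instead;
  // outstanding blocks stay valid and can still be freed individually.
  mi_arena_id_t arena_id;
  if (mi_reserve_os_memory_ex(64 * 1024 * 1024, false /* commit */, true /* allow_large */,
                              true /* exclusive */, &arena_id) == 0) {
    mi_heap_t* arena_heap = mi_heap_new_in_arena(arena_id);
    void* q = mi_heap_malloc(arena_heap, 256);
    mi_heap_delete(arena_heap);                   // do not mi_heap_destroy this heap
    mi_free(q);
  }
  return 0;
}

Note that with `mi_option_reclaim_on_free` disabled, even a heap created with `allow_destroy = false` keeps `allow_page_reclaim` false; that option gate is the new condition the first patch adds to `_mi_heap_init` and `mi_heap_main_init`.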