diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h index e45da9a7..c61b0498 100644 --- a/include/mimalloc/types.h +++ b/include/mimalloc/types.h @@ -426,7 +426,7 @@ struct mi_heap_s { size_t page_retired_max; // largest retired index into the `pages` array. size_t generic_count; // how often is mimalloc_generic invoked? mi_heap_t* next; // list of heaps per thread - long full_page_retain; // how many full pages can be retained per queue (before abondoning them) + long page_full_retain; // how many full pages can be retained per queue (before abandoning them) bool allow_page_reclaim; // `true` if this heap should not reclaim abandoned pages bool allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint uint8_t tag; // custom tag, can be used for separating heaps based on the object types diff --git a/src/heap.c b/src/heap.c index 6d5e328e..82ca05cb 100644 --- a/src/heap.c +++ b/src/heap.c @@ -177,15 +177,15 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy, heap->exclusive_arena = _mi_arena_from_id(arena_id); heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_reclaim_on_free)); heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0); - heap->full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); + heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); heap->tag = heap_tag; if (heap->tld->is_in_threadpool) { // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap. heap->allow_page_reclaim = false; // .. 
but abandoning is good in this case: quarter the full page retain (possibly to 0) // (so blocked threads do not hold on to too much memory) - if (heap->full_page_retain >= 0) { - heap->full_page_retain = heap->full_page_retain / 4; + if (heap->page_full_retain >= 0) { + heap->page_full_retain = heap->page_full_retain / 4; } } diff --git a/src/init.c b/src/init.c index 40d6143f..ac49d292 100644 --- a/src/init.c +++ b/src/init.c @@ -261,7 +261,7 @@ static void mi_heap_main_init(void) { _mi_heap_guarded_init(&heap_main); heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_reclaim_on_free); heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0); - heap_main.full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); + heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); } } diff --git a/src/page.c b/src/page.c index 7e52d68f..d2d6a854 100644 --- a/src/page.c +++ b/src/page.c @@ -680,7 +680,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m size_t count = 0; #endif long candidate_limit = 0; // we reset this on the first candidate to limit the search - long full_page_retain = heap->full_page_retain; + long page_full_retain = heap->page_full_retain; mi_page_t* page_candidate = NULL; // a page with free space mi_page_t* page = pq->first; @@ -703,8 +703,8 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m // if the page is completely full, move it to the `mi_pages_full` // queue so we don't visit long-lived pages too often. if (!immediate_available && !mi_page_is_expandable(page)) { - full_page_retain--; - if (full_page_retain < 0) { + page_full_retain--; + if (page_full_retain < 0) { mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); mi_page_to_full(page, pq); }