use frac 8 for reclaim_on_free and reabandon; halve full_page_retain if running in a threadpool

daanx 2024-12-11 16:26:39 -08:00
parent 1c8d15abac
commit ccf5e36e6b
5 changed files with 13 additions and 3 deletions

@@ -409,6 +409,7 @@ struct mi_heap_s {
   size_t page_retired_max;    // largest retired index into the `pages` array.
   mi_heap_t* next;            // list of heaps per thread
   mi_memid_t memid;           // provenance of the heap struct itself (meta or os)
+  long full_page_retain;      // how many full pages can be retained per queue (before abandoning them)
   bool allow_page_reclaim;    // `true` if this heap can reclaim abandoned pages
   bool allow_page_abandon;    // `true` if this heap can abandon pages to reduce the memory footprint
   uint8_t tag;                // custom tag, can be used for separating heaps based on the object types
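
The new `full_page_retain` field turns the retain budget into a per-heap property instead of a global option read. A minimal sketch of the value semantics this diff implies (a hypothetical helper, not mimalloc code): -1 disables abandoning full pages entirely, matching the `allow_page_abandon = (... >= 0)` checks below, 0 abandons immediately, and N > 0 retains up to N full pages per page queue.

  #include <stdbool.h>

  // Hypothetical sketch of the assumed value semantics; not mimalloc code.
  static bool sketch_should_abandon_full_page(long full_page_retain, long retained_in_queue) {
    if (full_page_retain < 0) return false;           // -1: never abandon full pages
    return (retained_in_queue >= full_page_retain);   // abandon once the per-queue budget is used up
  }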

@@ -219,7 +219,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
   // 2. if the page is not too full, we can try to reclaim it for ourselves
   // note: this seems a bad idea but it speeds up some benchmarks (like `larson`) quite a bit.
   if (_mi_option_get_fast(mi_option_reclaim_on_free) != 0 &&
-      !mi_page_is_used_at_frac(page,4)
+      !mi_page_is_used_at_frac(page,8)
       // && !mi_page_is_abandoned_mapped(page)
      )
   {
@@ -250,7 +250,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
   }
   // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
-  if (!mi_page_is_used_at_frac(page,4) &&   // only reabandon once a full page has enough free blocks, to prevent an immediate re-abandon
+  if (!mi_page_is_used_at_frac(page,8) &&   // only reabandon once a full page has enough free blocks, to prevent an immediate re-abandon
       !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
       _mi_arena_page_try_reabandon_to_mapped(page))
   {
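
Both call sites key off `mi_page_is_used_at_frac(page, n)`. A hedged sketch of the assumed semantics (inferred from the call sites, not copied from mimalloc): a page counts as "used at 1/n" when at most `reserved/n` of its blocks are free. Under that reading, moving n from 4 to 8 shrinks the free-block threshold, so reclaim and reabandon now fire once more than one eighth of the blocks are free, instead of waiting for more than a quarter.

  #include <stdbool.h>
  #include <stdint.h>

  // Assumed semantics of the fraction test; a sketch, not the actual implementation.
  static bool sketch_page_is_used_at_frac(uint16_t reserved, uint16_t used, uint16_t n) {
    const uint16_t frac = reserved / n;
    return (reserved - used <= frac);   // true when at most 1/n of the blocks are free
  }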

@@ -194,11 +194,16 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint
   heap->arena_id = arena_id;
   heap->allow_page_reclaim = !noreclaim;
   heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_full_page_retain) >= 0);
+  heap->full_page_retain = mi_option_get_clamp(mi_option_full_page_retain, -1, 32);
   heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
     // (but abandoning is good in this case)
     heap->allow_page_reclaim = false;
+    // and halve the full page retain (possibly to 0)
+    if (heap->full_page_retain >= 0) {
+      heap->full_page_retain = heap->full_page_retain / 2;
+    }
   }
   if (heap->tld->heap_backing == NULL) {
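
The `>= 0` guard around the halving is load-bearing: C division truncates toward zero, so an unguarded `-1 / 2` would yield 0 and silently turn "never abandon" into "abandon immediately". A standalone illustration (not mimalloc code):

  #include <stdio.h>

  int main(void) {
    const long values[] = { -1, 0, 1, 2, 32 };
    for (int i = 0; i < 5; i++) {
      const long v = values[i];
      const long guarded = (v >= 0 ? v / 2 : v);   // the guarded halving from _mi_heap_init
      printf("retain %3ld -> guarded %3ld (unguarded %3ld)\n", v, guarded, v / 2);
    }
    return 0;
  }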

@@ -109,6 +109,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   MI_BIN_FULL, 0,    // page retired min/max
   NULL,              // next
   MI_MEMID_STATIC,   // memid
+  0,                 // full page retain
   false,             // can reclaim
   true,              // can eager abandon
   0,                 // tag
@@ -155,6 +156,7 @@ mi_decl_cache_align mi_heap_t _mi_heap_main = {
   MI_BIN_FULL, 0,    // page retired min/max
   NULL,              // next heap
   MI_MEMID_STATIC,   // memid
+  2,                 // full page retain
   true,              // allow page reclaim
   true,              // allow page abandon
   0,                 // tag
@@ -224,6 +226,8 @@ static void mi_heap_main_init(void) {
     mi_lock_init(&mi_subproc_default.abandoned_os_lock);
     mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock);
     _mi_heap_guarded_init(&_mi_heap_main);
+    _mi_heap_main.allow_page_abandon = (mi_option_get(mi_option_full_page_retain) >= 0);
+    _mi_heap_main.full_page_retain = mi_option_get_clamp(mi_option_full_page_retain, -1, 32);
   }
 }
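
The statically initialized 2 for `_mi_heap_main` only covers allocations made before options are read; `mi_heap_main_init` then overwrites it from `mi_option_full_page_retain`, clamped to [-1, 32]. A hedged sketch of the clamp helper's likely behavior (an assumption about `mi_option_get_clamp`, written as a hypothetical standalone function):

  static long sketch_get_clamp(long value, long min, long max) {
    if (value < min) return min;   // e.g. anything below -1 becomes -1
    if (value > max) return max;   // e.g. anything above 32 becomes 32
    return value;
  }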

@@ -642,7 +642,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
   size_t count = 0;
   #endif
   long candidate_limit = 0;          // we reset this on the first candidate to limit the search
-  long full_page_retain = _mi_option_get_fast(mi_option_full_page_retain);
+  long full_page_retain = heap->full_page_retain;
   mi_page_t* page_candidate = NULL;  // a page with free space
   mi_page_t* page = pq->first;
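
Reading `heap->full_page_retain` instead of `_mi_option_get_fast(...)` lets the threadpool halving from `_mi_heap_init` actually take effect on this path, and drops an option lookup from the allocation slow path. An illustrative sketch (an assumption, not the actual search loop) of how such a budget can be consumed while walking a page queue:

  #include <stdbool.h>
  #include <stddef.h>

  typedef struct sketch_page_s {
    struct sketch_page_s* next;
    bool full;
  } sketch_page_t;

  // Walk the queue: retain the first `full_page_retain` full pages, abandon the rest.
  static void sketch_scan_queue(sketch_page_t* first, long full_page_retain) {
    for (sketch_page_t* p = first; p != NULL; p = p->next) {
      if (!p->full) continue;                // only full pages count against the budget
      if (full_page_retain < 0) continue;    // -1: retain all full pages
      if (full_page_retain > 0) { full_page_retain--; continue; }  // retain this one
      /* budget exhausted: abandon `p` here */
    }
  }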