diff --git a/src/free.c b/src/free.c
index 266faad8..3fdb35aa 100644
--- a/src/free.c
+++ b/src/free.c
@@ -220,7 +220,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   if (mi_page_all_free(page)) {
     // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
-    _mi_arenas_page_unabandon(page);
+    _mi_arenas_page_unabandon(page);
     // we can free the page directly
     _mi_arenas_page_free(page);
     return;
   }
@@ -244,8 +244,9 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   // can we reclaim?
   if (heap != NULL && heap->allow_page_reclaim) {
     if (heap == page->heap ||                                  // only reclaim if we were the originating heap,
-        (reclaim_on_free == 1 &&                               // OR if the reclaim option across heaps is enabled
+        (reclaim_on_free == 1 &&                               // OR if reclaim across heaps is allowed
          !mi_page_is_used_at_frac(page, 8) &&                  // and the page is not too full
+         !heap->tld->is_in_threadpool &&                       // and we are not part of a threadpool
          _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
        )
     {
diff --git a/src/heap.c b/src/heap.c
index 5ac79996..daad8afc 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -181,10 +181,10 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
-    heap->allow_page_reclaim = false;
-    // .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
+    // this is checked in `free.c:mi_free_try_collect_mt`
+    // .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
     // (so blocked threads do not hold on to too much memory)
-    if (heap->page_full_retain >= 0) {
+    if (heap->page_full_retain > 0) {
       heap->page_full_retain = heap->page_full_retain / 4;
     }
   }
diff --git a/src/page.c b/src/page.c
index 2a51bea6..b3dabb41 100644
--- a/src/page.c
+++ b/src/page.c
@@ -279,7 +279,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   else {
     mi_page_queue_remove(pq, page);
     mi_heap_t* heap = page->heap;
-    mi_page_set_heap(page, NULL);
+    mi_page_set_heap(page, NULL); page->heap = heap;  // don't set the heap to NULL so we can reclaim_on_free within the same heap
     _mi_arenas_page_abandon(page, heap->tld);
     _mi_arenas_collect(false, false, heap->tld);  // allow purging
   }
@@ -358,11 +358,11 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_heap_t* heap = mi_page_heap(page);
   if (heap->allow_page_abandon) {
-    // abandon full pages
+    // abandon full pages (the usual case, as it allows memory to be shared between heaps)
     _mi_page_abandon(page, pq);
   }
   else if (!mi_page_is_in_full(page)) {
-    // put full pages in a heap local queue
+    // put full pages in a heap-local queue (for heaps that cannot abandon, for example if the heap can be destroyed)
     mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
     _mi_page_free_collect(page, false);  // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
   }
 }
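
Taken together, the patch keeps `page->heap` valid after abandonment so that a later free can still recognize the originating heap, while the new `!heap->tld->is_in_threadpool` guard restricts only the cross-heap reclaim path (previously, `allow_page_reclaim` was forced to false for threadpool threads, which blocked both paths). Below is a minimal standalone sketch of the resulting decision logic; the `sketch_*` types and the `can_reclaim` helper are hypothetical simplifications for illustration, not mimalloc's API, and the arena-suitability check is elided:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for mimalloc's heap and page state. */
typedef struct sketch_heap_s {
  bool allow_page_reclaim;   /* stands in for heap->allow_page_reclaim    */
  bool is_in_threadpool;     /* stands in for heap->tld->is_in_threadpool */
} sketch_heap_t;

typedef struct sketch_page_s {
  sketch_heap_t* heap;       /* kept non-NULL after abandon by this patch */
  int used;                  /* blocks currently in use                   */
  int reserved;              /* total blocks in the page                  */
} sketch_page_t;

/* Mirrors the reclaim condition in free.c:mi_free_try_collect_mt after this
   patch: reclaim into the originating heap is always allowed; cross-heap
   reclaim additionally requires the option to be enabled, the page to not be
   nearly full, and the thread to not run inside a threadpool. */
static bool can_reclaim(const sketch_heap_t* heap, const sketch_page_t* page,
                        int reclaim_on_free) {
  if (heap == NULL || !heap->allow_page_reclaim) return false;
  /* roughly mi_page_is_used_at_frac(page, 8): at most ~1/8 of blocks free */
  const bool nearly_full = (page->reserved - page->used) <= (page->reserved / 8);
  return (heap == page->heap ||          /* originating heap: always ok   */
          (reclaim_on_free == 1 &&       /* cross-heap reclaim enabled    */
           !nearly_full &&               /* page is not too full          */
           !heap->is_in_threadpool));    /* the new threadpool guard      */
}

int main(void) {
  sketch_heap_t origin = { .allow_page_reclaim = true, .is_in_threadpool = true };
  sketch_heap_t other  = { .allow_page_reclaim = true, .is_in_threadpool = true };
  sketch_page_t page   = { .heap = &origin, .used = 4, .reserved = 64 };

  /* within a threadpool the originating heap may still reclaim its page ... */
  printf("origin reclaims: %d\n", can_reclaim(&origin, &page, 1));  /* 1 */
  /* ... but a foreign heap may not (under the old behavior both were denied) */
  printf("other reclaims:  %d\n", can_reclaim(&other,  &page, 1));  /* 0 */
  return 0;
}

Moving the check from heap initialization into the free path is what makes the first case possible: the per-heap flag could only deny reclaim wholesale, whereas the condition at free time can distinguish the originating heap from a foreign one.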