mirror of
https://github.com/microsoft/mimalloc.git
synced 2025-05-06 23:39:31 +03:00
clarify control flow and comments in page reclaim_on_free
This commit is contained in:
parent
5aa679cdee
commit
1657bfb453
1 changed file with 18 additions and 18 deletions
36
src/free.c
36
src/free.c
|
@ -217,17 +217,13 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. if the page is not too full, we can try to reclaim it for ourselves
|
// 2. we can try to reclaim the page for ourselves
|
||||||
// note:
|
// note: we only reclaim if the page originated from our heap (the heap field is preserved on abandonment)
|
||||||
// we only reclaim if the page originated from our heap (the heap field is preserved on abandonment)
|
// to avoid claiming arbitrary object sizes and limit indefinite expansion. This helps benchmarks like `larson`
|
||||||
// to avoid claiming arbitrary object sizes and limit indefinite expansion.
|
|
||||||
// this helps benchmarks like `larson`
|
|
||||||
const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
|
const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
|
||||||
if (reclaim_on_free >= 0 && page->block_size <= MI_SMALL_MAX_OBJ_SIZE) // only for small sized blocks
|
if (reclaim_on_free >= 0 && page->block_size <= MI_SMALL_MAX_OBJ_SIZE) // only for small sized blocks
|
||||||
{
|
{
|
||||||
// the page has still some blocks in use (but not too many)
|
// get our heap (with the right tag)
|
||||||
// reclaim in our heap if compatible, or otherwise abandon again
|
|
||||||
// todo: optimize this check further?
|
|
||||||
// note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
|
// note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
|
||||||
// not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
|
// not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
|
||||||
mi_heap_t* heap = mi_prim_get_default_heap();
|
mi_heap_t* heap = mi_prim_get_default_heap();
|
||||||
|
@ -236,16 +232,20 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
|
||||||
heap = _mi_heap_by_tag(heap, page->heap_tag);
|
heap = _mi_heap_by_tag(heap, page->heap_tag);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (heap != NULL && heap->allow_page_reclaim &&
|
// can we reclaim?
|
||||||
(heap == page->heap || (reclaim_on_free == 1 && !mi_page_is_used_at_frac(page, 8))) && // only reclaim if we were the originating heap, or if reclaim_on_free == 1 and the pages is not too full
|
if (heap != NULL && heap->allow_page_reclaim) {
|
||||||
_mi_arena_memid_is_suitable(page->memid,heap->exclusive_arena) // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
|
if (heap == page->heap || // only reclaim if we were the originating heap,
|
||||||
)
|
(reclaim_on_free == 1 && // OR if the reclaim option across heaps is enabled
|
||||||
{
|
!mi_page_is_used_at_frac(page, 8) && // and the page is not too full
|
||||||
// first remove it from the abandoned pages in the arena -- this waits for any readers to finish
|
_mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable
|
||||||
_mi_arenas_page_unabandon(page);
|
)
|
||||||
_mi_heap_page_reclaim(heap, page);
|
{
|
||||||
mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
|
// first remove it from the abandoned pages in the arena -- this waits for any readers to finish
|
||||||
return;
|
_mi_arenas_page_unabandon(page);
|
||||||
|
_mi_heap_page_reclaim(heap, page);
|
||||||
|
mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue