mirror of https://github.com/microsoft/mimalloc.git
allow page reclaim on free to the originating heap also within a threadpool
commit 4c562f392a (parent 3d767ebef6)
3 changed files with 9 additions and 8 deletions
src/free.c

@@ -220,7 +220,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   if (mi_page_all_free(page))
   {
     // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
-    _mi_arenas_page_unabandon(page);
+    _mi_arenas_page_unabandon(page);
     // we can free the page directly
     _mi_arenas_page_free(page);
     return;
@@ -244,8 +244,9 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   // can we reclaim?
   if (heap != NULL && heap->allow_page_reclaim) {
-    if ((reclaim_on_free == 1 &&                       // OR if the reclaim option across heaps is enabled
+    if (heap == page->heap ||                          // only reclaim if we were the originating heap,
+        (reclaim_on_free == 1 &&                       // OR if the reclaim across heaps is allowed
         !mi_page_is_used_at_frac(page, 8) &&           // and the page is not too full
         !heap->tld->is_in_threadpool &&                // and not part of a threadpool
         _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
        )
     {
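Note: the free.c change reworks when a thread that frees a block in an abandoned page may take ("reclaim") that page for its own heap: a free arriving from the page's originating heap now passes the inner test directly, while the fullness, threadpool, and memory-suitability restrictions gate only cross-heap reclaim. The following is a minimal, self-contained sketch of that decision; the types and fields (tld_t, heap_t, page_t, nearly_full, memid_suitable) are illustrative stand-ins, not mimalloc's real definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for mimalloc's internal types. */
typedef struct tld_s  { bool is_in_threadpool; } tld_t;
typedef struct heap_s { bool allow_page_reclaim; tld_t* tld; } heap_t;
typedef struct page_s {
  heap_t* heap;           /* originating heap, kept even while abandoned */
  bool    nearly_full;    /* stand-in for mi_page_is_used_at_frac(page, 8) */
  bool    memid_suitable; /* stand-in for _mi_arena_memid_is_suitable(...) */
} page_t;

/* Sketch of the reworked condition in mi_free_try_collect_mt. */
static bool may_reclaim_on_free(heap_t* heap, page_t* page, long reclaim_on_free) {
  if (heap == NULL || !heap->allow_page_reclaim) return false;
  return heap == page->heap ||             /* originating heap: ok even in a threadpool */
         (reclaim_on_free == 1 &&          /* cross-heap reclaim must be enabled,       */
          !page->nearly_full &&            /* the page must not be too full,            */
          !heap->tld->is_in_threadpool &&  /* the thread must not be in a threadpool,   */
          page->memid_suitable);           /* and the memory must be suitable           */
}

int main(void) {
  tld_t  pool  = { .is_in_threadpool = true };
  heap_t mine  = { .allow_page_reclaim = true, .tld = &pool };
  heap_t other = { .allow_page_reclaim = true, .tld = &pool };
  page_t page  = { .heap = &mine, .nearly_full = false, .memid_suitable = true };
  printf("originating heap: %d\n", may_reclaim_on_free(&mine,  &page, 1)); /* 1 */
  printf("foreign heap:     %d\n", may_reclaim_on_free(&other, &page, 1)); /* 0: blocked by the threadpool check */
  return 0;
}

Compiled as-is this prints 1 then 0: under these illustrative settings the originating heap may take its page back even on a threadpool thread, while a foreign heap may not.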
src/heap.c

@@ -181,10 +181,10 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
     heap->allow_page_reclaim = false;
-    // .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
-    // this is checked in `free.c:mi_free_try_collect_mt`
-    if (heap->page_full_retain >= 0) {
+    // .. but abandoning is good in this case: halve the full page retain (possibly to 0)
+    // (so blocked threads do not hold on to too much memory)
+    if (heap->page_full_retain > 0) {
       heap->page_full_retain = heap->page_full_retain / 4;
     }
   }
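Note: the retain adjustment is simple arithmetic. The division line is unchanged by this commit, so a threadpool heap keeps a quarter of the usual number of full pages, and the new `> 0` guard skips the pointless `0 / 4` store while leaving non-positive values (e.g. a "retain nothing" or sentinel setting) untouched. A small sketch under those assumptions; the function name and sample values are illustrative, not mimalloc's actual defaults:

#include <stdio.h>

/* Sketch of the threadpool adjustment in _mi_heap_init. */
static int adjusted_page_full_retain(int page_full_retain, int in_threadpool) {
  if (in_threadpool && page_full_retain > 0) {
    page_full_retain = page_full_retain / 4; /* quarter it, possibly down to 0 */
  }
  return page_full_retain;
}

int main(void) {
  printf("%d\n", adjusted_page_full_retain(8, 1));  /*  2: keep fewer full pages   */
  printf("%d\n", adjusted_page_full_retain(2, 1));  /*  0: may retain none at all  */
  printf("%d\n", adjusted_page_full_retain(-1, 1)); /* -1: negative value left as-is */
  return 0;
}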
src/page.c

@@ -279,7 +279,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   else {
     mi_page_queue_remove(pq, page);
     mi_heap_t* heap = page->heap;
-    mi_page_set_heap(page, NULL);
+    mi_page_set_heap(page, NULL); page->heap = heap; // dont set heap to NULL so we can reclaim_on_free within the same heap
     _mi_arenas_page_abandon(page, heap->tld);
     _mi_arenas_collect(false, false, heap->tld); // allow purging
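Note: this one-line change in `_mi_page_abandon` is what makes the `heap == page->heap` test in free.c possible at all: abandonment still dissociates the page from its thread, but the heap pointer is restored so an abandoned page remembers where it came from. A stand-in sketch of the idea; the atomic thread-id field and the helper below are simplified stand-ins for mimalloc's actual bookkeeping:

#include <stdatomic.h>
#include <stddef.h>

typedef struct heap_s { unsigned long thread_id; } heap_t; /* stand-in */
typedef struct page_s {
  _Atomic unsigned long xthread_id; /* 0 marks the page abandoned (stand-in scheme) */
  heap_t* heap;                     /* originating heap */
} page_t;

/* Stand-in for mi_page_set_heap(page, NULL): dissociate the page from any thread. */
static void page_set_heap_null(page_t* page) {
  page->heap = NULL;
  atomic_store(&page->xthread_id, 0);
}

/* Sketch of the new abandon path: dissociate, then restore the owner pointer
   so a later free on this page can still recognize its originating heap. */
static void page_abandon(page_t* page) {
  heap_t* heap = page->heap;
  page_set_heap_null(page);
  page->heap = heap; /* dont set heap to NULL so we can reclaim_on_free within the same heap */
}

int main(void) {
  heap_t h = { .thread_id = 1 };
  page_t p = { .heap = &h };
  atomic_store(&p.xthread_id, h.thread_id);
  page_abandon(&p);
  return (p.heap == &h) ? 0 : 1; /* owner pointer survives abandonment */
}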
@@ -358,11 +358,11 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {

   mi_heap_t* heap = mi_page_heap(page);
   if (heap->allow_page_abandon) {
-    // abandon full pages
+    // abandon full pages (this is the usual case in order to allow for sharing of memory between heaps)
     _mi_page_abandon(page, pq);
   }
   else if (!mi_page_is_in_full(page)) {
-    // put full pages in a heap local queue
+    // put full pages in a heap local queue (this is for heaps that cannot abandon, for example, if the heap can be destroyed)
     mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
     _mi_page_free_collect(page, false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
   }
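Note: the extended comments spell out the full-page policy: abandoning is the normal path because abandoned pages can later be adopted by other heaps, while heaps that must not abandon (for example, heaps that can be destroyed wholesale) park full pages in their own MI_BIN_FULL queue. A compact stand-in sketch of that branch; the types and queue operations are illustrative, not mimalloc's real ones:

#include <stdbool.h>
#include <stdio.h>

typedef struct page_s { bool in_full_queue; } page_t;      /* stand-in */
typedef struct heap_s { bool allow_page_abandon; } heap_t; /* stand-in */

static void abandon_page(page_t* p) { (void)p; printf("abandon page (shareable with other heaps)\n"); }
static void enqueue_full(page_t* p) { p->in_full_queue = true; printf("park page in heap-local FULL queue\n"); }

/* Sketch of the policy in mi_page_to_full. */
static void page_to_full(heap_t* heap, page_t* page) {
  if (heap->allow_page_abandon) {
    abandon_page(page);   /* the usual case: allows sharing of memory between heaps */
  }
  else if (!page->in_full_queue) {
    enqueue_full(page);   /* e.g. a destroyable heap cannot abandon its pages */
  }
}

int main(void) {
  heap_t normal = { .allow_page_abandon = true };
  heap_t destroyable = { .allow_page_abandon = false };
  page_t a = { false }, b = { false };
  page_to_full(&normal, &a);
  page_to_full(&destroyable, &b);
  return 0;
}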