From d7d626cbfae73e22ab85d92a12feb76b9bf8f981 Mon Sep 17 00:00:00 2001
From: daanx
Date: Sat, 21 Dec 2024 16:24:56 -0800
Subject: [PATCH] enable collecting from the full page queue

---
 src/heap.c | 23 -----------------------
 src/page.c | 39 ++++++++++++++++++++++++++++++++++-----
 2 files changed, 34 insertions(+), 28 deletions(-)

diff --git a/src/heap.c b/src/heap.c
index 03030b47..412c6465 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -102,14 +102,6 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   return true; // don't break
 }
 
-//static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
-//  MI_UNUSED(arg1);
-//  MI_UNUSED(arg2);
-//  MI_UNUSED(heap);
-//  MI_UNUSED(pq);
-//  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
-//  return true; // don't break
-//}
 
 static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
@@ -121,21 +113,6 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // python/cpython#112532: we may be called from a thread that is not the owner of the heap
   // const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
 
-  // note: never reclaim on collect but leave it to threads that need storage to reclaim
-  //if (
-  //#ifdef NDEBUG
-  //    collect == MI_FORCE
-  //#else
-  //    collect >= MI_FORCE
-  //#endif
-  //  && is_main_thread && mi_heap_is_backing(heap) && heap->allow_page_reclaim)
-  //{
-  //  // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
-  //  // if all memory is freed by now, all segments should be freed.
-  //  // note: this only collects in the current subprocess
-  //  _mi_arena_reclaim_all_abandoned(heap);
-  //}
-
   // collect retired pages
   _mi_heap_collect_retired(heap, force);
 
diff --git a/src/page.c b/src/page.c
index c366439e..200cdaa9 100644
--- a/src/page.c
+++ b/src/page.c
@@ -433,6 +433,36 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
 }
 
 
+static void mi_heap_collect_full_pages(mi_heap_t* heap) {
+  // note: normally full pages get immediately abandoned and the full queue is always empty
+  // this path is only used if abandoning is disabled due to a destroy-able heap or options
+  // set by the user.
+  mi_page_queue_t* pq = &heap->pages[MI_BIN_FULL];
+  for (mi_page_t* page = pq->first; page != NULL; ) {
+    mi_page_t* next = page->next;        // get next in case we free the page
+    _mi_page_free_collect(page, false);  // register concurrent free's
+    // no longer full?
+    if (!mi_page_is_full(page)) {
+      if (mi_page_all_free(page)) {
+        _mi_page_free(page, pq);
+      }
+      else {
+        _mi_page_unfull(page);
+      }
+    }
+    page = next;
+  }
+}
+
+static mi_decl_noinline void mi_heap_generic_collect(mi_heap_t* heap) {
+  // call potential deferred free routines
+  _mi_deferred_free(heap, false);
+  // collect retired pages
+  _mi_heap_collect_retired(heap, false);
+  // collect full pages that had concurrent free's
+  mi_heap_collect_full_pages(heap);
+}
+
 /* -----------------------------------------------------------
   Initialize the initial free list in a page.
   In secure mode we initialize a randomized list by
@@ -857,6 +887,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
   }
 }
 
+
 // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
 // The `huge_alignment` is normally 0 but is set to a multiple of MI_SLICE_SIZE for
@@ -873,17 +904,15 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_heap_is_initialized(heap));
 
   // collect every N generic mallocs
-  if (heap->generic_count++ > 10000) {
+  if mi_unlikely(heap->generic_count++ > 10000) {
     heap->generic_count = 0;
-    // call potential deferred free routines
-    _mi_deferred_free(heap, false);
-    // collect retired pages
-    _mi_heap_collect_retired(heap, false);
+    mi_heap_generic_collect(heap);
   }
 
   // find (or allocate) a page of the right size
   mi_page_t* page = mi_find_page(heap, size, huge_alignment);
   if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
+    mi_heap_generic_collect(heap);
     mi_heap_collect(heap, true /* force */);
     page = mi_find_page(heap, size, huge_alignment);
   }
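For context (not part of the patch), below is a minimal sketch of how this path can be exercised from the public mimalloc API. It uses only documented entry points (mi_malloc, mi_free, mi_collect) plus pthreads; whether the full queue is actually populated depends on the heap configuration, since full pages are normally abandoned immediately, as the comment in mi_heap_collect_full_pages notes. The cross-thread frees below are only registered on their (full) pages and are reclaimed either by the periodic mi_heap_generic_collect inside _mi_malloc_generic (roughly every 10000 generic allocations, per heap->generic_count) or by an explicit forced collect as shown.

/* sketch.c -- illustrative only; build with: cc sketch.c -lmimalloc -lpthread */
#include <mimalloc.h>
#include <pthread.h>
#include <stddef.h>

#define N 10000

static void* blocks[N];

/* Worker thread: frees blocks that were allocated by the main thread. These
   are concurrent free's, so they are only queued on the owning pages until
   the owner runs a collect (_mi_page_free_collect). */
static void* free_all(void* arg) {
  (void)arg;
  for (size_t i = 0; i < N; i++) mi_free(blocks[i]);
  return NULL;
}

int main(void) {
  /* Allocate enough small blocks that several pages fill up. */
  for (size_t i = 0; i < N; i++) blocks[i] = mi_malloc(48);

  pthread_t t;
  pthread_create(&t, NULL, &free_all, NULL);
  pthread_join(&t, NULL);

  /* Normally the periodic collect in _mi_malloc_generic picks these free's up
     after enough further allocations; force a collect here so formerly-full
     pages are freed or moved back to their regular size bins. */
  mi_collect(true /* force */);
  return 0;
}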