mirror of https://github.com/microsoft/mimalloc.git
enable collecting from the full page queue
commit d7d626cbfa
parent da17a59bdb
2 changed files with 34 additions and 28 deletions
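
This change makes the allocator's periodic collection (run from the generic allocation slow path) also sweep the heap's full-page queue, so pages that received concurrent frees can be freed or returned to the regular queues. For orientation, a minimal usage sketch that exercises allocation, frees, and an explicit collection through the existing public API (standard mimalloc.h; none of these calls are new in this commit):

#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p[256];
  for (int i = 0; i < 256; i++) p[i] = mi_heap_malloc(heap, 64);
  for (int i = 0; i < 256; i++) mi_free(p[i]);
  // force a collection; whether the full queue is ever populated depends on
  // heap flags/options (see the "destroy-able heap" comment in the diff below)
  mi_heap_collect(heap, true /* force */);
  mi_heap_destroy(heap);
  return 0;
}
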
src/heap.c (23 changes)
@@ -102,14 +102,6 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   return true; // don't break
 }
 
-//static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
-//  MI_UNUSED(arg1);
-//  MI_UNUSED(arg2);
-//  MI_UNUSED(heap);
-//  MI_UNUSED(pq);
-//  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
-//  return true; // don't break
-//}
 
 static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
@@ -121,21 +113,6 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // python/cpython#112532: we may be called from a thread that is not the owner of the heap
   // const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
 
-  // note: never reclaim on collect but leave it to threads that need storage to reclaim
-  //if (
-  //#ifdef NDEBUG
-  //  collect == MI_FORCE
-  //#else
-  //  collect >= MI_FORCE
-  //#endif
-  //  && is_main_thread && mi_heap_is_backing(heap) && heap->allow_page_reclaim)
-  //{
-  //  // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
-  //  // if all memory is freed by now, all segments should be freed.
-  //  // note: this only collects in the current subprocess
-  //  _mi_arena_reclaim_all_abandoned(heap);
-  //}
-
   // collect retired pages
   _mi_heap_collect_retired(heap, force);
 
src/page.c (39 changes)
@@ -433,6 +433,36 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
 }
 
+
+static void mi_heap_collect_full_pages(mi_heap_t* heap) {
+  // note: normally full pages get immediately abandoned and the full queue is always empty
+  // this path is only used if abandoning is disabled due to a destroy-able heap or options
+  // set by the user.
+  mi_page_queue_t* pq = &heap->pages[MI_BIN_FULL];
+  for (mi_page_t* page = pq->first; page != NULL; ) {
+    mi_page_t* next = page->next; // get next in case we free the page
+    _mi_page_free_collect(page, false); // register concurrent free's
+    // no longer full?
+    if (!mi_page_is_full(page)) {
+      if (mi_page_all_free(page)) {
+        _mi_page_free(page, pq);
+      }
+      else {
+        _mi_page_unfull(page);
+      }
+    }
+    page = next;
+  }
+}
+
+static mi_decl_noinline void mi_heap_generic_collect(mi_heap_t* heap) {
+  // call potential deferred free routines
+  _mi_deferred_free(heap, false);
+  // collect retired pages
+  _mi_heap_collect_retired(heap, false);
+  // collect full pages that had concurrent free's
+  mi_heap_collect_full_pages(heap);
+}
 
 /* -----------------------------------------------------------
   Initialize the initial free list in a page.
   In secure mode we initialize a randomized list by
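The key detail in mi_heap_collect_full_pages is capturing page->next before _mi_page_free_collect and _mi_page_free may free or relink the current page. Below is a small self-contained model of that intrusive-queue sweep; every type and helper here (toy_page_t, toy_queue_t, sweep_full_queue) is invented for illustration and simplifies away mimalloc's real page bookkeeping:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

typedef struct toy_page_s {
  struct toy_page_s* next;
  struct toy_page_s* prev;
  int used;      // blocks still in use
  int capacity;  // total blocks in the page
} toy_page_t;

typedef struct toy_queue_s {
  toy_page_t* first;
} toy_queue_t;

static bool page_is_full(const toy_page_t* p)  { return p->used == p->capacity; }
static bool page_all_free(const toy_page_t* p) { return p->used == 0; }

static void queue_remove(toy_queue_t* q, toy_page_t* p) {
  if (p->prev != NULL) p->prev->next = p->next; else q->first = p->next;
  if (p->next != NULL) p->next->prev = p->prev;
  p->next = p->prev = NULL;
}

static void queue_push(toy_queue_t* q, toy_page_t* p) {
  p->prev = NULL;
  p->next = q->first;
  if (q->first != NULL) q->first->prev = p;
  q->first = p;
}

// same shape as the sweep above: save `next` first, because the current
// node may be freed or moved to another queue before we advance
static void sweep_full_queue(toy_queue_t* full, toy_queue_t* avail) {
  for (toy_page_t* page = full->first; page != NULL; ) {
    toy_page_t* next = page->next;   // get next in case we free the page
    if (!page_is_full(page)) {       // concurrent frees made room?
      queue_remove(full, page);
      if (page_all_free(page)) {
        free(page);                  // fully free: release the page
      } else {
        queue_push(avail, page);     // partially free: back to an available queue
      }
    }
    page = next;
  }
}

int main(void) {
  toy_queue_t full = { NULL }, avail = { NULL };
  int used[] = { 4, 0, 2 };          // 4/4 = still full, 0/4 = all free, 2/4 = unfull
  for (int i = 0; i < 3; i++) {
    toy_page_t* p = calloc(1, sizeof(toy_page_t));
    p->used = used[i]; p->capacity = 4;
    queue_push(&full, p);
  }
  sweep_full_queue(&full, &avail);
  printf("full: %s, avail: %s\n",
         full.first ? "non-empty" : "empty",
         avail.first ? "non-empty" : "empty");
  for (toy_page_t* p = full.first; p != NULL; )  { toy_page_t* n = p->next; free(p); p = n; }
  for (toy_page_t* p = avail.first; p != NULL; ) { toy_page_t* n = p->next; free(p); p = n; }
  return 0;
}
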
@@ -857,6 +887,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment)
   }
 }
 
+
 // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
 // The `huge_alignment` is normally 0 but is set to a multiple of MI_SLICE_SIZE for
@@ -873,17 +904,15 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment)
   mi_assert_internal(mi_heap_is_initialized(heap));
 
   // collect every N generic mallocs
-  if (heap->generic_count++ > 10000) {
+  if mi_unlikely(heap->generic_count++ > 10000) {
     heap->generic_count = 0;
-    // call potential deferred free routines
-    _mi_deferred_free(heap, false);
-    // collect retired pages
-    _mi_heap_collect_retired(heap, false);
+    mi_heap_generic_collect(heap);
   }
 
   // find (or allocate) a page of the right size
   mi_page_t* page = mi_find_page(heap, size, huge_alignment);
   if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
+    mi_heap_generic_collect(heap);
     mi_heap_collect(heap, true /* force */);
     page = mi_find_page(heap, size, huge_alignment);
   }
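The generic_count check amortizes collection cost: a counter bump per slow-path call is nearly free, while the heavier mi_heap_generic_collect runs only once every ~10000 calls (plus once more on a first out-of-memory, before the forced collect-and-retry). A schematic of that throttling pattern with hypothetical names (my_heap_t, COLLECT_EVERY); this is not mimalloc code:

#include <stddef.h>
#include <stdio.h>

typedef struct my_heap_s {
  size_t generic_count;   // slow-path calls since the last periodic collect
} my_heap_t;

static int collections = 0;

static void my_generic_collect(my_heap_t* heap) {
  (void)heap;
  collections++;          // stands in for: deferred frees, retired pages, full queue
}

#define COLLECT_EVERY 10000

static void my_malloc_slow_path(my_heap_t* heap) {
  // amortized maintenance: cheap counter bump on every call, the O(#pages)
  // collection only once per COLLECT_EVERY calls
  if (heap->generic_count++ > COLLECT_EVERY) {
    heap->generic_count = 0;
    my_generic_collect(heap);
  }
  /* ... the actual page search / allocation would follow here ... */
}

int main(void) {
  my_heap_t heap = { 0 };
  for (int i = 0; i < 25000; i++) my_malloc_slow_path(&heap);
  printf("collections after 25000 slow-path calls: %d\n", collections); // prints 2
  return 0;
}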