use standard heap_collect every 10k generic allocations, disable reclaim_on_free by default

Daan Leijen 2025-01-07 17:42:42 -08:00
parent b2cdf81e8e
commit dd4b4a36b1
3 changed files with 7 additions and 15 deletions

@@ -123,7 +123,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // collect arenas (this is program wide so don't force purges on abandonment of threads)
   //mi_atomic_storei64_release(&heap->tld->subproc->purge_expire, 1);
-  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, true /* visit all? */, heap->tld);
+  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, collect >= MI_FORCE /* visit all? */, heap->tld);
 }
 
 void _mi_heap_collect_abandon(mi_heap_t* heap) {

@@ -144,7 +144,7 @@ static mi_option_desc_t options[_mi_option_last] =
 #else
   { 1,    UNINIT, MI_OPTION(eager_commit_delay) },                 // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
 #endif
-  { 0,    UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) },     // purge delay in milli-seconds
+  { 2500, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) },     // purge delay in milli-seconds
   { 0,    UNINIT, MI_OPTION(use_numa_nodes) },                     // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0,    UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100,  UNINIT, MI_OPTION(os_tag) },                             // only apple specific for now but might serve more or less related purpose
@@ -169,7 +169,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 4000, UNINIT, MI_OPTION(guarded_sample_rate)},                 // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,    UNINIT, MI_OPTION(guarded_sample_seed)},
   { 0,    UNINIT, MI_OPTION(target_segments_per_thread) },         // abandon segments beyond this point, or 0 to disable.
-  { 1,    UNINIT, MI_OPTION_LEGACY(reclaim_on_free, abandoned_reclaim_on_free) }, // reclaim an abandoned segment on a free
+  { 0,    UNINIT, MI_OPTION_LEGACY(reclaim_on_free, abandoned_reclaim_on_free) }, // reclaim an abandoned segment on a free
   { 2,    UNINIT, MI_OPTION(page_full_retain) },
   { 4,    UNINIT, MI_OPTION(page_max_candidates) },
   { 0,    UNINIT, MI_OPTION(max_vabits) },
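
This hunk changes two defaults in the option table: purge_delay goes from 0 to 2500 milliseconds and reclaim_on_free is now off by default, matching the commit message. The following is a minimal sketch, not part of the commit, of how a program could restore the old reclaim behavior at startup; it assumes the option enums follow the table entry names (mi_option_reclaim_on_free, mi_option_purge_delay) and that the matching environment variables are MIMALLOC_RECLAIM_ON_FREE and MIMALLOC_PURGE_DELAY.

#include <mimalloc.h>

int main(void) {
  // assumption: the enum name matches the table entry `reclaim_on_free`
  mi_option_set(mi_option_reclaim_on_free, 1);  // reclaim abandoned segments on free again (old default)
  mi_option_set(mi_option_purge_delay, 0);      // purge unused OS memory immediately instead of after 2500ms
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}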

@@ -436,7 +436,7 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
   heap->page_retired_max = max;
 }
-
+/*
 static void mi_heap_collect_full_pages(mi_heap_t* heap) {
   // note: normally full pages get immediately abandoned and the full queue is always empty
   // this path is only used if abandoning is disabled due to a destroy-able heap or options
@@ -457,15 +457,8 @@ static void mi_heap_collect_full_pages(mi_heap_t* heap) {
     page = next;
   }
 }
+*/
-static mi_decl_noinline void mi_heap_generic_collect(mi_heap_t* heap) {
-  // call potential deferred free routines
-  _mi_deferred_free(heap, false);
-  // collect retired pages
-  _mi_heap_collect_retired(heap, false);
-  // collect full pages that had concurrent free's
-  mi_heap_collect_full_pages(heap);
-}
 
 /* -----------------------------------------------------------
   Initialize the initial free list in a page.
@@ -921,14 +914,13 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment)
   // collect every N generic mallocs
   if mi_unlikely(heap->generic_count++ > 10000) {
     heap->generic_count = 0;
-    mi_heap_generic_collect(heap);
+    mi_heap_collect(heap, false /* force? */);
   }
 
   // find (or allocate) a page of the right size
   mi_page_t* page = mi_find_page(heap, size, huge_alignment);
   if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
-    mi_heap_generic_collect(heap);
-    mi_heap_collect(heap, true /* force */);
+    mi_heap_collect(heap, true /* force? */);
     page = mi_find_page(heap, size, huge_alignment);
   }
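
With this last hunk, _mi_malloc_generic calls the standard mi_heap_collect(heap, false) roughly once every 10,000 generic allocations instead of the removed mi_heap_generic_collect helper, and when no page can be found it does one forced collect before retrying. Below is a rough sketch of the same collect-and-retry pattern at the application level using only the public API (mi_malloc, mi_collect); it is an illustration, not code from the commit.

#include <mimalloc.h>
#include <stddef.h>

static void* alloc_with_retry(size_t size) {
  void* p = mi_malloc(size);
  if (p == NULL) {
    mi_collect(true /* force */);  // free up memory by collecting the heap
    p = mi_malloc(size);           // then retry the allocation once
  }
  return p;
}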