change default page_reclaim_max and change the reclamation test to potentially address a perf regression in mimalloc v3 with respect to v2 -- see also https://github.com/leanprover/lean4/pull/7786

Daan Leijen 2025-05-12 18:02:42 -07:00
parent f788746143
commit 36e1cbfdbc
3 changed files with 23 additions and 15 deletions
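Not part of the commit: a minimal sketch of how client code could tune the renamed option at runtime, assuming the public mi_option_set / mi_option_get API from mimalloc.h; the same value can normally also be set through the MIMALLOC_PAGE_MAX_RECLAIM environment variable. The value 16 below is only an example (it restores the old v2-era cap), not a recommendation from the commit.

// Illustrative sketch only -- the option name is taken from this commit, the value is an example.
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Cap reclamation at 16 owned pages per size class (the old default of page_reclaim_max);
  // a negative value disables the cap entirely in the new code.
  mi_option_set(mi_option_page_max_reclaim, 16);

  void* p = mi_malloc(64);
  mi_free(p);

  printf("page_max_reclaim = %ld\n", mi_option_get(mi_option_page_max_reclaim));
  return 0;
}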

include/mimalloc.h

@@ -411,7 +411,7 @@ typedef enum mi_option_e {
   mi_option_max_vabits,              // max user space virtual address bits to consider (=48)
   mi_option_pagemap_commit,          // commit the full pagemap (to always catch invalid pointer uses) (=0)
   mi_option_page_commit_on_demand,   // commit page memory on-demand
-  mi_option_page_reclaim_max,        // don't reclaim pages if we already own N pages (in that size class) (=16)
+  mi_option_page_max_reclaim,        // don't reclaim pages if we already own N pages (in that size class) (=16)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,

src/free.c

@@ -219,7 +219,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   // 1. free if the page is free now (this is updated by `_mi_page_free_collect_partly`)
   if (mi_page_all_free(page))
   {
-    // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
+    // first remove it from the abandoned pages in the arena (if mapped, this might wait for any readers to finish)
     _mi_arenas_page_unabandon(page);
     // we can free the page directly
     _mi_arenas_page_free(page);
@@ -243,16 +243,19 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
       }
     }
     // can we reclaim into this heap?
-    if (heap != NULL && heap->allow_page_reclaim) {
-      const long reclaim_max = _mi_option_get_fast(mi_option_page_reclaim_max);
-      if ((heap == page->heap && mi_page_queue_len_is_atmost(heap, page->block_size, reclaim_max)) ||  // only reclaim if we were the originating heap, and we have at most N pages already
-          (reclaim_on_free == 1 &&              // OR if the reclaim across heaps is allowed
-           !mi_page_is_used_at_frac(page, 8) && // and the page is not too full
-           !heap->tld->is_in_threadpool &&      // and not part of a threadpool
-           _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
+    const long max_reclaim = _mi_option_get_fast(mi_option_page_max_reclaim);
+    if (heap != NULL && heap->allow_page_reclaim &&
+        (max_reclaim < 0 || mi_page_queue_len_is_atmost(heap, page->block_size, max_reclaim)))  // we have less than N pages already
+    {
+      if ((heap == page->heap)                  // always reclaim if we were the originating heap,
+           ||                                   // OR:
+          (reclaim_on_free == 1 &&              // reclaim across heaps is allowed
+           !mi_page_is_used_at_frac(page,8) &&  // and the page is not too full
+           !heap->tld->is_in_threadpool &&      // and not part of a threadpool
+           _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
          )
       {
-        // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
+        // first remove it from the abandoned pages in the arena -- this might wait for any readers to finish
         _mi_arenas_page_unabandon(page);
         _mi_heap_page_reclaim(heap, page);
         mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
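To make the control-flow change easier to follow, here is a hypothetical stand-alone restatement of the new gating logic (simplified field and function names, not mimalloc's internal API): the page-count cap is now checked up front for both reclaim paths, a negative cap disables the limit, and only the cross-heap path keeps the extra fullness/threadpool/suitability checks.

// Sketch of the reclaim decision after this commit; all names below are made up
// for illustration and do not exist in mimalloc itself.
#include <stdbool.h>
#include <stddef.h>

typedef struct sketch_heap_s { bool allow_page_reclaim; bool in_threadpool; } sketch_heap_t;
typedef struct sketch_page_s {
  const sketch_heap_t* origin_heap;  // heap that originally allocated the page
  bool mostly_free;                  // page is used at less than 1/8th (not too full)
  bool memid_suitable;               // page memory is suitable for this heap's arena
} sketch_page_t;

bool sketch_should_reclaim(const sketch_heap_t* heap, const sketch_page_t* page,
                           long max_reclaim, long pages_owned_in_class, int reclaim_on_free) {
  if (heap == NULL || !heap->allow_page_reclaim) return false;
  // New in this commit: the cap applies to both paths below, and a negative cap means "no limit".
  if (max_reclaim >= 0 && pages_owned_in_class > max_reclaim) return false;
  if (page->origin_heap == heap) return true;       // always reclaim into the originating heap
  return reclaim_on_free == 1 &&                     // cross-heap reclaim allowed
         page->mostly_free &&                        // and the page is not too full
         !heap->in_threadpool &&                     // and not part of a threadpool
         page->memid_suitable;                       // and the memory is suitable
}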

src/options.c

@@ -98,6 +98,10 @@ int mi_version(void) mi_attr_noexcept {
 #endif
 #endif
+#ifndef MI_DEFAULT_PAGE_MAX_RECLAIM
+#define MI_DEFAULT_PAGE_MAX_RECLAIM 4096
+#endif
 // Static options
 static mi_option_desc_t mi_options[_mi_option_last] =
 {
@@ -157,14 +161,15 @@ static mi_option_desc_t mi_options[_mi_option_last] =
            MI_OPTION_UNINIT, MI_OPTION(guarded_sample_rate)},   // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,     MI_OPTION_UNINIT, MI_OPTION(guarded_sample_seed)},
   { 10000, MI_OPTION_UNINIT, MI_OPTION(generic_collect) },       // collect heaps every N (=10000) generic allocation calls
-  { 0,     MI_OPTION_UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) }, // reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
+  { 0,     MI_OPTION_UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) }, // reclaim abandoned (small) pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
   { 2,     MI_OPTION_UNINIT, MI_OPTION(page_full_retain) },       // number of (small) pages to retain in the free page queues
   { 4,     MI_OPTION_UNINIT, MI_OPTION(page_max_candidates) },    // max search to find a best page candidate
   { 0,     MI_OPTION_UNINIT, MI_OPTION(max_vabits) },             // max virtual address space bits
   { MI_DEFAULT_PAGEMAP_COMMIT,
            MI_OPTION_UNINIT, MI_OPTION(pagemap_commit) },         // commit the full pagemap upfront?
   { 0,     MI_OPTION_UNINIT, MI_OPTION(page_commit_on_demand) },  // commit pages on-demand (2 disables this only on overcommit systems (like Linux))
-  { 16,    MI_OPTION_UNINIT, MI_OPTION(page_reclaim_max) },       // don't reclaim pages if we already own N pages (in that size class)
+  { MI_DEFAULT_PAGE_MAX_RECLAIM,
+           MI_OPTION_UNINIT, MI_OPTION(page_max_reclaim) },       // don't reclaim (small) pages if we already own N pages in that size class
 };
 static void mi_option_init(mi_option_desc_t* desc);
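Because the new default is wrapped in an #ifndef guard, an embedding project can replace it at build time without patching options.c. The snippet below only mimics how that guard resolves; the -D flag and the value 16 are illustrative, not something the commit prescribes.

// Compile with e.g.:  cc -DMI_DEFAULT_PAGE_MAX_RECLAIM=16 demo.c
// Without the -D flag the guard falls back to the commit's built-in default of 4096.
#include <stdio.h>

#ifndef MI_DEFAULT_PAGE_MAX_RECLAIM
#define MI_DEFAULT_PAGE_MAX_RECLAIM 4096
#endif

int main(void) {
  printf("effective default page_max_reclaim: %d\n", (int)MI_DEFAULT_PAGE_MAX_RECLAIM);
  return 0;
}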