change default page_reclaim_max and change reclamation test to potentially address perf regression in mimalloc v3 with respect to v2 -- see also https://github.com/leanprover/lean4/pull/7786

Daan Leijen 2025-05-12 18:02:42 -07:00
parent f788746143
commit 36e1cbfdbc
3 changed files with 23 additions and 15 deletions

include/mimalloc.h

@@ -411,7 +411,7 @@ typedef enum mi_option_e {
   mi_option_max_vabits,               // max user space virtual address bits to consider (=48)
   mi_option_pagemap_commit,           // commit the full pagemap (to always catch invalid pointer uses) (=0)
   mi_option_page_commit_on_demand,    // commit page memory on-demand
-  mi_option_page_reclaim_max,         // don't reclaim pages if we already own N pages (in that size class) (=16)
+  mi_option_page_max_reclaim,         // don't reclaim pages if we already own N pages (in that size class) (=16)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
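
Note that the enumerator is renamed rather than aliased (no entry is added under the legacy names), so existing callers of mi_option_page_reclaim_max must be updated. A minimal sketch of using the renamed option through the public mi_option_set/mi_option_get API; the value 4096 mirrors the new default introduced below and is purely illustrative:

  #include <mimalloc.h>
  #include <stdio.h>

  int main(void) {
    // Reclaim abandoned pages only while we own at most 4096 pages in the
    // size class; a negative value removes the limit (see the new test in free.c).
    mi_option_set(mi_option_page_max_reclaim, 4096);
    printf("page_max_reclaim = %ld\n", mi_option_get(mi_option_page_max_reclaim));
    return 0;
  }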

src/free.c

@@ -148,7 +148,7 @@ static inline mi_page_t* mi_validate_ptr_page(const void* p, const char* msg)
   }
   mi_page_t* page = _mi_safe_ptr_page(p);
   if (p != NULL && page == NULL) {
     _mi_error_message(EINVAL, "%s: invalid pointer: %p\n", msg, p);
   }
   return page;
   #else
@@ -163,7 +163,7 @@ void mi_free(void* p) mi_attr_noexcept
 {
   mi_page_t* const page = mi_validate_ptr_page(p,"mi_free");
   if mi_unlikely(page==NULL) return;    // page will be NULL if p==NULL
   mi_assert_internal(p!=NULL && page!=NULL);
   const mi_threadid_t xtid = (_mi_prim_thread_id() ^ mi_page_xthread_id(page));
   if mi_likely(xtid == 0) {             // `tid == mi_page_thread_id(page) && mi_page_flags(page) == 0`
     // thread-local, aligned, and not a full page
@@ -219,7 +219,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   // 1. free if the page is free now (this is updated by `_mi_page_free_collect_partly`)
   if (mi_page_all_free(page))
   {
-    // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
+    // first remove it from the abandoned pages in the arena (if mapped, this might wait for any readers to finish)
     _mi_arenas_page_unabandon(page);
     // we can free the page directly
     _mi_arenas_page_free(page);
@@ -243,16 +243,19 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
       }
     }
     // can we reclaim into this heap?
-    if (heap != NULL && heap->allow_page_reclaim) {
-      const long reclaim_max = _mi_option_get_fast(mi_option_page_reclaim_max);
-      if ((heap == page->heap && mi_page_queue_len_is_atmost(heap, page->block_size, reclaim_max)) || // only reclaim if we were the originating heap, and we have at most N pages already
-          (reclaim_on_free == 1 &&               // OR if the reclaim across heaps is allowed
-           !mi_page_is_used_at_frac(page, 8) &&  // and the page is not too full
-           !heap->tld->is_in_threadpool &&       // and not part of a threadpool
-           _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
+    const long max_reclaim = _mi_option_get_fast(mi_option_page_max_reclaim);
+    if (heap != NULL && heap->allow_page_reclaim &&
+        (max_reclaim < 0 || mi_page_queue_len_is_atmost(heap, page->block_size, max_reclaim)))  // we have less than N pages already
+    {
+      if ((heap == page->heap)                   // always reclaim if we were the originating heap,
+          ||                                     // OR:
+          (reclaim_on_free == 1 &&               // reclaim across heaps is allowed
+           !mi_page_is_used_at_frac(page,8) &&   // and the page is not too full
+           !heap->tld->is_in_threadpool &&       // and not part of a threadpool
+           _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
          )
       {
-        // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
+        // first remove it from the abandoned pages in the arena -- this might wait for any readers to finish
         _mi_arenas_page_unabandon(page);
         _mi_heap_page_reclaim(heap, page);
         mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
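
The restructured test reads as: reclaiming is considered at all only if the heap allows it and (for a non-negative max_reclaim) it owns at most max_reclaim pages in the page's size class, per mi_page_queue_len_is_atmost; reclaiming into the originating heap is then unconditional, while cross-heap reclaim keeps the extra suitability checks. A simplified, self-contained model of that predicate (plain stand-in types, not the mimalloc internals):

  #include <stdbool.h>
  #include <stdio.h>

  // Illustrative stand-ins for mimalloc's internal state -- not the real types.
  typedef struct heap_s { bool allow_page_reclaim; bool is_in_threadpool; } heap_t;
  typedef struct page_s { const heap_t* originating_heap; bool too_full; bool memid_suitable; } page_t;

  // Model of the new test: a negative max_reclaim means "no limit"; otherwise
  // reclaim is only considered while the heap owns at most max_reclaim pages
  // in the page's size class.
  static bool may_reclaim_on_free(const heap_t* heap, const page_t* page,
                                  long max_reclaim, int reclaim_on_free,
                                  long pages_owned_in_size_class) {
    if (heap == NULL || !heap->allow_page_reclaim) return false;
    if (max_reclaim >= 0 && pages_owned_in_size_class > max_reclaim) return false;
    if (heap == page->originating_heap) return true;  // always reclaim into the originating heap
    return reclaim_on_free == 1        // cross-heap reclaim is allowed
        && !page->too_full             // and the page is not too full
        && !heap->is_in_threadpool     // and we are not part of a threadpool
        && page->memid_suitable;       // and the memory is suitable for this heap
  }

  int main(void) {
    heap_t h = { .allow_page_reclaim = true, .is_in_threadpool = false };
    page_t p = { .originating_heap = &h, .too_full = false, .memid_suitable = true };
    printf("%d\n", may_reclaim_on_free(&h, &p, 4096, 0, 10)); // 1: originating heap, under limit
    printf("%d\n", may_reclaim_on_free(&h, &p, 8, 0, 10));    // 0: over the per-size-class limit
    return 0;
  }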

src/options.c

@@ -98,7 +98,11 @@ int mi_version(void) mi_attr_noexcept {
 #endif
 #endif
 
+#ifndef MI_DEFAULT_PAGE_MAX_RECLAIM
+#define MI_DEFAULT_PAGE_MAX_RECLAIM  4096
+#endif
+
 // Static options
 static mi_option_desc_t mi_options[_mi_option_last] =
 {
   // stable options
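
The #ifndef guard means the new default of 4096 can be pinned to a different value at build time, e.g. by compiling mimalloc with -DMI_DEFAULT_PAGE_MAX_RECLAIM=16 to restore the previous default. Like other mimalloc options, the resolved value can normally also be overridden at startup through the environment (MIMALLOC_ plus the upper-cased option name), which here would be MIMALLOC_PAGE_MAX_RECLAIM; that mapping is assumed from mimalloc's usual option handling rather than shown in this diff.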
@@ -157,14 +161,15 @@ static mi_option_desc_t mi_options[_mi_option_last] =
                MI_OPTION_UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,         MI_OPTION_UNINIT, MI_OPTION(guarded_sample_seed)},
   { 10000,     MI_OPTION_UNINIT, MI_OPTION(generic_collect) },     // collect heaps every N (=10000) generic allocation calls
-  { 0,         MI_OPTION_UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) }, // reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
+  { 0,         MI_OPTION_UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) }, // reclaim abandoned (small) pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
   { 2,         MI_OPTION_UNINIT, MI_OPTION(page_full_retain) },    // number of (small) pages to retain in the free page queues
   { 4,         MI_OPTION_UNINIT, MI_OPTION(page_max_candidates) }, // max search to find a best page candidate
   { 0,         MI_OPTION_UNINIT, MI_OPTION(max_vabits) },          // max virtual address space bits
   { MI_DEFAULT_PAGEMAP_COMMIT,
                MI_OPTION_UNINIT, MI_OPTION(pagemap_commit) },      // commit the full pagemap upfront?
   { 0,         MI_OPTION_UNINIT, MI_OPTION(page_commit_on_demand) }, // commit pages on-demand (2 disables this only on overcommit systems (like Linux))
-  { 16,        MI_OPTION_UNINIT, MI_OPTION(page_reclaim_max) },    // don't reclaim pages if we already own N pages (in that size class)
+  { MI_DEFAULT_PAGE_MAX_RECLAIM,
+               MI_OPTION_UNINIT, MI_OPTION(page_max_reclaim) },    // don't reclaim (small) pages if we already own N pages in that size class
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
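
To verify the new default end-to-end, the resolved value can be inspected at run time; a small sketch (mi_options_print is in mimalloc's public API in recent versions, and its output goes to mimalloc's registered output handler):

  #include <mimalloc.h>
  #include <stdio.h>

  int main(void) {
    // Expect 4096 (MI_DEFAULT_PAGE_MAX_RECLAIM) unless overridden at build
    // time or via the environment as described above.
    printf("page_max_reclaim = %ld\n", mi_option_get(mi_option_page_max_reclaim));
    mi_options_print();   // dump all resolved option values
    return 0;
  }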