add page_cross_thread_max_reclaim option

daanx 2025-05-13 14:12:22 -07:00
parent 3ef6784455
commit a92f86dc73
3 changed files with 18 additions and 10 deletions

@@ -411,7 +411,8 @@ typedef enum mi_option_e {
   mi_option_max_vabits,                     // max user space virtual address bits to consider (=48)
   mi_option_pagemap_commit,                 // commit the full pagemap (to always catch invalid pointer uses) (=0)
   mi_option_page_commit_on_demand,          // commit page memory on-demand
-  mi_option_page_max_reclaim,               // don't reclaim pages if we already own N pages (in that size class) (=16)
+  mi_option_page_max_reclaim,               // don't reclaim pages of the same originating heap if we already own N pages (in that size class) (=-1 (unlimited))
+  mi_option_page_cross_thread_max_reclaim,  // don't reclaim pages across threads if we already own N pages (in that size class) (=16)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
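As a point of reference, a minimal sketch of tuning the new limit at runtime; `mi_option_set` is part of mimalloc's existing public API, and the values chosen here are purely illustrative:

  #include <mimalloc.h>

  int main(void) {
    // Keep same-heap reclaim unlimited (the new default of -1), but cap
    // cross-thread reclaim at 8 pages per size class (illustrative value).
    mi_option_set(mi_option_page_max_reclaim, -1);
    mi_option_set(mi_option_page_cross_thread_max_reclaim, 8);
    void* p = mi_malloc(64);
    mi_free(p);
    return 0;
  }

The same knob should also be reachable through mimalloc's usual environment-variable convention (something like MIMALLOC_PAGE_CROSS_THREAD_MAX_RECLAIM); that name is inferred from the option name, not taken from this diff.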

@@ -191,10 +191,11 @@ void mi_free(void* p) mi_attr_noexcept
 // Multi-threaded Free (`_mt`)
 // ------------------------------------------------------
 static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free);
-static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, size_t atmost) {
+static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, long atmost) {
+  if (atmost < 0) return true;  // unlimited
   mi_page_queue_t* const pq = mi_page_queue(heap,block_size);
   mi_assert_internal(pq!=NULL);
-  return (pq->count <= atmost);
+  return (pq->count <= (size_t)atmost);
   /*
   for(mi_page_t* p = pq->first; p!=NULL; p = p->next, atmost--) {
     if (atmost == 0) { return false; }
@@ -242,14 +243,14 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
       heap = _mi_heap_by_tag(heap, page->heap_tag);
     }
   }
   // can we reclaim into this heap?
-  const long max_reclaim = _mi_option_get_fast(mi_option_page_max_reclaim);
-  if (heap != NULL && heap->allow_page_reclaim &&
-      (max_reclaim < 0 || mi_page_queue_len_is_atmost(heap, page->block_size, max_reclaim)))  // we have less than N pages already
+  if (heap != NULL && heap->allow_page_reclaim)
   {
-    if ((heap == page->heap)       // always reclaim if we were the originating heap (todo: maybe not if in a threadpool?)
+    if ((heap == page->heap &&     // always reclaim if we were the originating heap (todo: maybe not if in a threadpool?)
+         mi_page_queue_len_is_atmost(heap, page->block_size, _mi_option_get_fast(mi_option_page_max_reclaim)))
         || // OR:
         (reclaim_on_free == 1 &&   // reclaim across heaps is allowed
+         mi_page_queue_len_is_atmost(heap, page->block_size, _mi_option_get_fast(mi_option_page_cross_thread_max_reclaim)) &&
          !mi_page_is_used_at_frac(page,8) &&      // and the page is not too full
          !heap->tld->is_in_threadpool &&          // and not part of a threadpool
          _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
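In the rewritten condition the two limits apply to distinct cases: a page whose originating heap is the current heap is reclaimed as long as the (now unlimited by default) page_max_reclaim bound holds, while a page that originated in another thread's heap is additionally gated by page_cross_thread_max_reclaim, by how full the page is, by whether the thread runs in a threadpool, and by arena suitability. A small hypothetical program that exercises that cross-thread free path (the pthread scaffolding is illustrative; the mimalloc calls are public API):

  #include <mimalloc.h>
  #include <pthread.h>

  // The worker frees a block allocated by the main thread, so the free takes
  // the multi-threaded (`_mt`) path whose reclaim decision is shown above.
  static void* worker(void* arg) {
    mi_free(arg);
    return NULL;
  }

  int main(void) {
    void* block = mi_malloc(128);
    pthread_t t;
    pthread_create(&t, NULL, worker, block);
    pthread_join(&t, NULL);
    return 0;
  }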

@@ -99,7 +99,11 @@ int mi_version(void) mi_attr_noexcept {
 #endif
 #ifndef MI_DEFAULT_PAGE_MAX_RECLAIM
-#define MI_DEFAULT_PAGE_MAX_RECLAIM  4096
+#define MI_DEFAULT_PAGE_MAX_RECLAIM  (-1)   // unlimited
+#endif
+#ifndef MI_DEFAULT_PAGE_CROSS_THREAD_MAX_RECLAIM
+#define MI_DEFAULT_PAGE_CROSS_THREAD_MAX_RECLAIM  16
 #endif

 // Static options
@@ -169,7 +173,9 @@ static mi_option_desc_t mi_options[_mi_option_last] =
       MI_OPTION_UNINIT, MI_OPTION(pagemap_commit) },   // commit the full pagemap upfront?
   { 0, MI_OPTION_UNINIT, MI_OPTION(page_commit_on_demand) },  // commit pages on-demand (2 disables this only on overcommit systems (like Linux))
   { MI_DEFAULT_PAGE_MAX_RECLAIM,
-      MI_OPTION_UNINIT, MI_OPTION(page_max_reclaim) },  // don't reclaim (small) pages if we already own N pages in that size class
+      MI_OPTION_UNINIT, MI_OPTION(page_max_reclaim) },  // don't reclaim (small) pages of the same originating heap if we already own N pages in that size class
+  { MI_DEFAULT_PAGE_CROSS_THREAD_MAX_RECLAIM,
+      MI_OPTION_UNINIT, MI_OPTION(page_cross_thread_max_reclaim) },  // don't reclaim (small) pages across threads if we already own N pages in that size class
 };

 static void mi_option_init(mi_option_desc_t* desc);
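With these table entries in place, the effective limits can be inspected at runtime with `mi_option_get` (existing public API); absent environment overrides or earlier `mi_option_set` calls, this should report the MI_DEFAULT_* values from the change above:

  #include <mimalloc.h>
  #include <stdio.h>

  int main(void) {
    // Expected: -1 (unlimited) and 16, unless overridden by environment
    // variables or a prior mi_option_set call.
    printf("page_max_reclaim              = %ld\n", mi_option_get(mi_option_page_max_reclaim));
    printf("page_cross_thread_max_reclaim = %ld\n", mi_option_get(mi_option_page_cross_thread_max_reclaim));
    return 0;
  }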