From f4f060543baea6b9bae74a2485193cc10b3cafc9 Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 13 Mar 2025 12:54:57 -0700 Subject: [PATCH] set default settings to use minimal block sizes /4 (instead of /8), and no page reclaim on free --- include/mimalloc/types.h | 6 +++--- src/free.c | 17 +++++++++-------- src/options.c | 6 +++--- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h index 2f76cfe6..6d161e82 100644 --- a/include/mimalloc/types.h +++ b/include/mimalloc/types.h @@ -341,12 +341,12 @@ typedef struct mi_page_s { #endif // The max object size are checked to not waste more than 12.5% internally over the page sizes. -#define MI_SMALL_MAX_OBJ_SIZE ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8) // < ~8 KiB +#define MI_SMALL_MAX_OBJ_SIZE ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4) // < ~16 KiB #if MI_ENABLE_LARGE_PAGES -#define MI_MEDIUM_MAX_OBJ_SIZE ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8) // < ~64 KiB +#define MI_MEDIUM_MAX_OBJ_SIZE ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4) // < ~128 KiB #define MI_LARGE_MAX_OBJ_SIZE (MI_LARGE_PAGE_SIZE/8) // <= 512KiB // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin` #else -#define MI_MEDIUM_MAX_OBJ_SIZE (MI_MEDIUM_PAGE_SIZE/8) // <= 64 KiB +#define MI_MEDIUM_MAX_OBJ_SIZE (MI_MEDIUM_PAGE_SIZE/4) // <= 128 KiB #define MI_LARGE_MAX_OBJ_SIZE MI_MEDIUM_MAX_OBJ_SIZE // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin` #endif #define MI_LARGE_MAX_OBJ_WSIZE (MI_LARGE_MAX_OBJ_SIZE/MI_SIZE_SIZE) diff --git a/src/free.c b/src/free.c index d63a430e..7ac4d01e 100644 --- a/src/free.c +++ b/src/free.c @@ -202,10 +202,11 @@ void mi_free(void* p) mi_attr_noexcept // Multi-threaded Free (`_mt`) // ------------------------------------------------------ static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free); -static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, size_t 
atmost) { +static inline bool mi_page_queue_len_is_lower_as( mi_heap_t* heap, size_t block_size, long atmost) { + if (atmost <= 0) { return false; } mi_page_queue_t* const pq = mi_page_queue(heap,block_size); mi_assert_internal(pq!=NULL); - return (pq->count <= atmost); + return (pq->count < (size_t)atmost); /* for(mi_page_t* p = pq->first; p!=NULL; p = p->next, atmost--) { if (atmost == 0) { return false; } @@ -256,12 +257,12 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* // can we reclaim into this heap? if (heap != NULL && heap->allow_page_reclaim) { const long reclaim_max = _mi_option_get_fast(mi_option_page_reclaim_max); - if ((heap == page->heap && mi_page_queue_len_is_atmost(heap, page->block_size, reclaim_max)) || // only reclaim if we were the originating heap, and we have at most N pages already - (reclaim_on_free == 1 && // OR if the reclaim across heaps is allowed - !mi_page_is_used_at_frac(page, 8) && // and the page is not too full - !heap->tld->is_in_threadpool && // and not part of a threadpool - _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable - ) + if ((heap == page->heap && mi_page_queue_len_is_lower_as(heap, page->block_size, reclaim_max)) || // only reclaim if we were the originating heap, and we have fewer than N pages already + (reclaim_on_free == 1 && // OR if the reclaim across heaps is allowed + !mi_page_is_used_at_frac(page, 8) && // and the page is not too full + !heap->tld->is_in_threadpool && // and not part of a threadpool + _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable + ) { // first remove it from the abandoned pages in the arena -- this waits for any readers to finish _mi_arenas_page_unabandon(page); diff --git a/src/options.c b/src/options.c index e8eb85ad..c3aae8b4 100644 --- a/src/options.c +++ b/src/options.c @@ -169,14 +169,14 @@ static mi_option_desc_t options[_mi_option_last] = UNINIT, 
MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000) { 0, UNINIT, MI_OPTION(guarded_sample_seed)}, { 10000, UNINIT, MI_OPTION(generic_collect) }, // collect heaps every N (=10000) generic allocation calls - { 0, UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },// reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps + { -1, UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },// reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps { 2, UNINIT, MI_OPTION(page_full_retain) }, // number of (small) pages to retain in the free page queues { 4, UNINIT, MI_OPTION(page_max_candidates) }, // max search to find a best page candidate { 0, UNINIT, MI_OPTION(max_vabits) }, // max virtual address space bits { MI_DEFAULT_PAGEMAP_COMMIT, UNINIT, MI_OPTION(pagemap_commit) }, // commit the full pagemap upfront? - { 1, UNINIT, MI_OPTION(page_commit_on_demand) }, // commit pages on-demand (2 disables this only on overcommit systems (like Linux)) + { 0, UNINIT, MI_OPTION(page_commit_on_demand) }, // commit pages on-demand (2 disables this only on overcommit systems (like Linux)) - { 16, UNINIT, MI_OPTION(page_reclaim_max) }, // don't reclaim pages if we already own N pages (in that size class) + { 16, UNINIT, MI_OPTION(page_reclaim_max) }, // don't reclaim pages if we already own N or more pages (in that size class) }; static void mi_option_init(mi_option_desc_t* desc);