mirror of https://github.com/microsoft/mimalloc.git
synced 2025-05-07 15:59:32 +03:00
set default settings to use minimal block sizes /4 (instead of /8), and no page reclaim on free
This commit is contained in:
parent a9be1b915a
commit f4f060543b
3 changed files with 15 additions and 14 deletions
@@ -341,12 +341,12 @@ typedef struct mi_page_s {
 #endif

 // The max object size are checked to not waste more than 12.5% internally over the page sizes.
-#define MI_SMALL_MAX_OBJ_SIZE   ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)   // < ~8 KiB
+#define MI_SMALL_MAX_OBJ_SIZE   ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4)   // < ~16 KiB
 #if MI_ENABLE_LARGE_PAGES
-#define MI_MEDIUM_MAX_OBJ_SIZE  ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)  // < ~64 KiB
+#define MI_MEDIUM_MAX_OBJ_SIZE  ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4)  // < ~128 KiB
 #define MI_LARGE_MAX_OBJ_SIZE   (MI_LARGE_PAGE_SIZE/8)                       // <= 512KiB  // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin`
 #else
-#define MI_MEDIUM_MAX_OBJ_SIZE  (MI_MEDIUM_PAGE_SIZE/8)                      // <= 64 KiB
+#define MI_MEDIUM_MAX_OBJ_SIZE  (MI_MEDIUM_PAGE_SIZE/4)                      // <= 128 KiB
 #define MI_LARGE_MAX_OBJ_SIZE   MI_MEDIUM_MAX_OBJ_SIZE                       // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin`
 #endif
 #define MI_LARGE_MAX_OBJ_WSIZE  (MI_LARGE_MAX_OBJ_SIZE/MI_SIZE_SIZE)
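Note: a rough worked example of what the /8 -> /4 change does to the per-page-kind object-size cutoffs. The page sizes below (64 KiB small, 512 KiB medium) are assumptions consistent with the KiB figures in the comments above, not values taken from this diff, and MI_PAGE_INFO_SIZE is ignored for simplicity.

// Sketch only: effect of the /8 -> /4 change on the size-class cutoffs,
// assuming 64 KiB small and 512 KiB medium pages and ignoring MI_PAGE_INFO_SIZE.
#include <stdio.h>

int main(void) {
  const size_t small_page  = 64  * 1024;   // assumed MI_SMALL_PAGE_SIZE
  const size_t medium_page = 512 * 1024;   // assumed MI_MEDIUM_PAGE_SIZE
  printf("small  max obj: /8 = %zu KiB, /4 = %zu KiB\n",
         (small_page / 8) / 1024, (small_page / 4) / 1024);    // ~8 KiB  -> ~16 KiB
  printf("medium max obj: /8 = %zu KiB, /4 = %zu KiB\n",
         (medium_page / 8) / 1024, (medium_page / 4) / 1024);  // ~64 KiB -> ~128 KiB
  return 0;
}

The large-page cutoff (MI_LARGE_PAGE_SIZE/8) is unchanged by this commit.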
@@ -202,10 +202,11 @@ void mi_free(void* p) mi_attr_noexcept
 // Multi-threaded Free (`_mt`)
 // ------------------------------------------------------
 static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free);
-static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, size_t atmost) {
+static inline bool mi_page_queue_len_is_lower_as( mi_heap_t* heap, size_t block_size, long atmost) {
+  if (atmost <= 0) { return false; }
   mi_page_queue_t* const pq = mi_page_queue(heap,block_size);
   mi_assert_internal(pq!=NULL);
-  return (pq->count <= atmost);
+  return (pq->count < (size_t)atmost);
   /*
   for(mi_page_t* p = pq->first; p!=NULL; p = p->next, atmost--) {
     if (atmost == 0) { return false; }
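Note: the helper is renamed from mi_page_queue_len_is_atmost to mi_page_queue_len_is_lower_as and its semantics tighten. A minimal free-standing sketch of the difference (simplified types, not mimalloc's internals):

#include <stdbool.h>
#include <stdio.h>

// Old predicate: true while the queue holds at most `atmost` pages.
static bool len_is_atmost_old(size_t count, size_t atmost) {
  return (count <= atmost);
}

// New predicate: a non-positive limit disables reclaiming entirely, and the
// comparison is strict, so owning exactly `atmost` pages already stops reclaim.
static bool len_is_lower_as_new(size_t count, long atmost) {
  if (atmost <= 0) { return false; }
  return (count < (size_t)atmost);
}

int main(void) {
  // With the default page_reclaim_max of 16 and 16 pages already owned:
  printf("old: %d  new: %d\n",
         len_is_atmost_old(16, 16), len_is_lower_as_new(16, 16));  // old: 1  new: 0
  return 0;
}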
@@ -256,7 +257,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   // can we reclaim into this heap?
   if (heap != NULL && heap->allow_page_reclaim) {
     const long reclaim_max = _mi_option_get_fast(mi_option_page_reclaim_max);
-    if ((heap == page->heap && mi_page_queue_len_is_atmost(heap, page->block_size, reclaim_max)) ||   // only reclaim if we were the originating heap, and we have at most N pages already
+    if ((heap == page->heap && mi_page_queue_len_is_lower_as(heap, page->block_size, reclaim_max)) || // only reclaim if we were the originating heap, and we have at most N pages already
         (reclaim_on_free == 1 &&               // OR if the reclaim across heaps is allowed
          !mi_page_is_used_at_frac(page, 8) &&  // and the page is not too full
          !heap->tld->is_in_threadpool &&       // and not part of a threadpool
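Note: only the first branch of the condition changes here, and the hunk cuts off mid-expression, so the cross-heap branch below is incomplete. As an illustrative restatement only, with plain parameters instead of mimalloc's internal types and the trailing conditions omitted:

#include <stdbool.h>
#include <stddef.h>

// Sketch of the visible part of the reclaim-on-free decision; not mimalloc code.
bool reclaim_condition_sketch(bool is_originating_heap,
                              size_t pages_owned_in_class, long reclaim_max,
                              long reclaim_on_free,          // option value: -1/0/1
                              bool page_mostly_free, bool in_threadpool) {
  // branch 1: originating heap, and we still own fewer than reclaim_max pages
  bool own_heap_ok = is_originating_heap &&
                     (reclaim_max > 0) &&
                     (pages_owned_in_class < (size_t)reclaim_max);
  // branch 2: cross-heap reclaim explicitly enabled, page not too full, and the
  // freeing thread not part of a threadpool (further conditions not shown above)
  bool cross_heap_ok = (reclaim_on_free == 1) && page_mostly_free && !in_threadpool;
  return (own_heap_ok || cross_heap_ok);
}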
@@ -169,14 +169,14 @@ static mi_option_desc_t options[_mi_option_last] =
     UNINIT, MI_OPTION(guarded_sample_rate)},               // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,     UNINIT, MI_OPTION(guarded_sample_seed)},
   { 10000, UNINIT, MI_OPTION(generic_collect) },           // collect heaps every N (=10000) generic allocation calls
-  { 0,     UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
+  { -1,    UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
   { 2,     UNINIT, MI_OPTION(page_full_retain) },          // number of (small) pages to retain in the free page queues
   { 4,     UNINIT, MI_OPTION(page_max_candidates) },       // max search to find a best page candidate
   { 0,     UNINIT, MI_OPTION(max_vabits) },                // max virtual address space bits
   { MI_DEFAULT_PAGEMAP_COMMIT,
            UNINIT, MI_OPTION(pagemap_commit) },            // commit the full pagemap upfront?
-  { 1,     UNINIT, MI_OPTION(page_commit_on_demand) },     // commit pages on-demand (2 disables this only on overcommit systems (like Linux))
-  { 16,    UNINIT, MI_OPTION(page_reclaim_max) },          // don't reclaim pages if we already own N pages (in that size class)
+  { 0,     UNINIT, MI_OPTION(page_commit_on_demand) },     // commit pages on-demand (2 disables this only on overcommit systems (like Linux))
+  { 16,    UNINIT, MI_OPTION(page_reclaim_max) },          // don't reclaim pages if we already own < N pages (in that size class)
 };

 static void mi_option_init(mi_option_desc_t* desc);
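Note: with page_reclaim_on_free now defaulting to -1 and page_commit_on_demand to 0, callers who want the previous reclaim behavior have to opt back in. A hedged sketch, assuming mimalloc's public option API in <mimalloc.h> and its MIMALLOC_* environment-variable overrides:

// Restoring the earlier reclaim-on-free behavior at startup; illustrative only.
#include <mimalloc.h>

int main(void) {
  // 0 = reclaim only into the originating heap (the previous default),
  // 1 = also reclaim across heaps, -1 = the new default: disabled.
  mi_option_set(mi_option_page_reclaim_on_free, 0);
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}

Equivalently, setting MIMALLOC_PAGE_RECLAIM_ON_FREE=0 (or =1 for cross-heap reclaim) in the environment overrides the built-in default without recompiling.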