Mirror of https://github.com/microsoft/mimalloc.git, synced 2025-05-06 23:39:31 +03:00.
Commit 6fce7b90a4 (parent 2efc71ef57): reduce object class sizes (/8), add a max reclaim queue size.
This commit changes 4 files, with 32 additions and 27 deletions.
|
@@ -404,6 +404,7 @@ typedef enum mi_option_e {
|
|||
mi_option_max_vabits, // max user space virtual address bits to consider (=48)
|
||||
mi_option_pagemap_commit, // commit the full pagemap (to always catch invalid pointer uses) (=0)
|
||||
mi_option_page_commit_on_demand, // commit page memory on-demand
|
||||
mi_option_page_reclaim_max, // don't reclaim pages if we already own N pages (in that size class) (=16)
|
||||
_mi_option_last,
|
||||
// legacy option names
|
||||
mi_option_large_os_pages = mi_option_allow_large_os_pages,
|
||||
|
|
|
@@ -339,9 +339,9 @@ typedef struct mi_page_s {
|
|||
#endif
|
||||
|
||||
// The max object size are checked to not waste more than 12.5% internally over the page sizes.
|
||||
#define MI_SMALL_MAX_OBJ_SIZE ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4) // < 16 KiB
|
||||
#define MI_SMALL_MAX_OBJ_SIZE ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8) // < ~8 KiB
|
||||
#if MI_ENABLE_LARGE_PAGES
|
||||
#define MI_MEDIUM_MAX_OBJ_SIZE ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4) // < 128 KiB
|
||||
#define MI_MEDIUM_MAX_OBJ_SIZE ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8) // < 64 KiB
|
||||
#define MI_LARGE_MAX_OBJ_SIZE (MI_LARGE_PAGE_SIZE/8) // <= 512KiB // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin`
|
||||
#else
|
||||
#define MI_MEDIUM_MAX_OBJ_SIZE (MI_MEDIUM_PAGE_SIZE/4) // <= 128 KiB
|
||||
|
|
51
src/free.c
51
src/free.c
|
@@ -239,33 +239,36 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
|
|||
|
||||
// 2. we can try to reclaim the page for ourselves
|
||||
// note: we only reclaim if the page originated from our heap (the heap field is preserved on abandonment)
|
||||
// to avoid claiming arbitrary object sizes and limit indefinite expansion. This helps benchmarks like `larson`
|
||||
const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
|
||||
if (reclaim_on_free >= 0 && page->block_size <= MI_SMALL_MAX_OBJ_SIZE) // only for small sized blocks
|
||||
// to avoid claiming arbitrary object sizes and limit indefinite expansion. This helps benchmarks like `larson`
|
||||
if (page->block_size <= MI_SMALL_MAX_OBJ_SIZE) // only for small sized blocks
|
||||
{
|
||||
// get our heap (with the right tag)
|
||||
// note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
|
||||
// not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
|
||||
mi_heap_t* heap = mi_prim_get_default_heap();
|
||||
if (heap != page->heap) {
|
||||
if (mi_heap_is_initialized(heap)) {
|
||||
heap = _mi_heap_by_tag(heap, page->heap_tag);
|
||||
const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
|
||||
if (reclaim_on_free >= 0) { // and reclaiming is allowed
|
||||
// get our heap (with the right tag)
|
||||
// note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
|
||||
// not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
|
||||
mi_heap_t* heap = mi_prim_get_default_heap();
|
||||
if (heap != page->heap) {
|
||||
if (mi_heap_is_initialized(heap)) {
|
||||
heap = _mi_heap_by_tag(heap, page->heap_tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
// can we reclaim?
|
||||
if (heap != NULL && heap->allow_page_reclaim) {
|
||||
if ((heap == page->heap && mi_page_queue_len_is_atmost(heap, page->block_size, 4)) || // only reclaim if we were the originating heap, and we have at most N pages already
|
||||
// can we reclaim into this heap?
|
||||
if (heap != NULL && heap->allow_page_reclaim) {
|
||||
const long reclaim_max = _mi_option_get_fast(mi_option_page_reclaim_max);
|
||||
if ((heap == page->heap && mi_page_queue_len_is_atmost(heap, page->block_size, reclaim_max)) || // only reclaim if we were the originating heap, and we have at most N pages already
|
||||
(reclaim_on_free == 1 && // OR if the reclaim across heaps is allowed
|
||||
!mi_page_is_used_at_frac(page, 8) && // and the page is not too full
|
||||
!heap->tld->is_in_threadpool && // and not part of a threadpool
|
||||
_mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable
|
||||
)
|
||||
{
|
||||
// first remove it from the abandoned pages in the arena -- this waits for any readers to finish
|
||||
_mi_arenas_page_unabandon(page);
|
||||
_mi_heap_page_reclaim(heap, page);
|
||||
mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
|
||||
return;
|
||||
!mi_page_is_used_at_frac(page, 8) && // and the page is not too full
|
||||
!heap->tld->is_in_threadpool && // and not part of a threadpool
|
||||
_mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable
|
||||
)
|
||||
{
|
||||
// first remove it from the abandoned pages in the arena -- this waits for any readers to finish
|
||||
_mi_arenas_page_unabandon(page);
|
||||
_mi_heap_page_reclaim(heap, page);
|
||||
mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -174,7 +174,8 @@ static mi_option_desc_t options[_mi_option_last] =
|
|||
{ 0, UNINIT, MI_OPTION(max_vabits) }, // max virtual address space bits
|
||||
{ MI_DEFAULT_PAGEMAP_COMMIT,
|
||||
UNINIT, MI_OPTION(pagemap_commit) }, // commit the full pagemap upfront?
|
||||
{ 0, UNINIT, MI_OPTION(page_commit_on_demand) }, // commit pages on-demand (2 disables this on overcommit systems (like Linux))
|
||||
{ 0, UNINIT, MI_OPTION(page_commit_on_demand) }, // commit pages on-demand (2 disables this only on overcommit systems (like Linux))
|
||||
{ 16, UNINIT, MI_OPTION(page_reclaim_max) }, // don't reclaim pages if we already own N pages (in that size class)
|
||||
};
|
||||
|
||||
static void mi_option_init(mi_option_desc_t* desc);
|
||||
|
|
Loading…
Add table
Reference in a new issue