change to full_page_retain

daanx 2024-12-05 11:29:25 -08:00
parent bc67be4d79
commit 0616ee151e
5 changed files with 21 additions and 16 deletions


@@ -379,7 +379,7 @@ typedef enum mi_option_e {
   mi_option_guarded_sample_rate,        // 1 out of N allocations in the min/max range will be guarded (=1000)
   mi_option_guarded_sample_seed,        // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
   mi_option_target_segments_per_thread, // experimental (=0)
-  mi_option_eager_abandon,              // eagerly abandon pages from the heap if suitable (to reduce memory footprint in multi-threaded code)
+  mi_option_full_page_retain,           // retain N full pages per size class (=4, lower it to reduce memory footprint in multi-thread applications)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
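For context, the renamed option can be tuned like any other mimalloc option, either from code or via the matching environment variable. A minimal sketch (the value 1 is only an illustrative choice, not something this commit recommends):

    #include <mimalloc.h>

    int main(void) {
      // retain fewer full pages per size class than the default
      mi_option_set(mi_option_full_page_retain, 1);
      void* p = mi_malloc(64);
      mi_free(p);
      return 0;
    }

The same setting is available without recompiling by exporting MIMALLOC_FULL_PAGE_RETAIN=1, following mimalloc's usual MIMALLOC_<OPTION_NAME> environment variable convention.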


@@ -417,7 +417,7 @@ struct mi_heap_s {
   size_t page_retired_max;   // largest retired index into the `pages` array.
   mi_heap_t* next;           // list of heaps per thread
   bool no_reclaim;           // `true` if this heap should not reclaim abandoned pages
-  bool eager_abandon;        // `true` if this heap can abandon pages to reduce memory footprint
+  bool allow_page_abandon;   // `true` if this heap can abandon pages to reduce memory footprint
   uint8_t tag;               // custom tag, can be used for separating heaps based on the object types
   #if MI_GUARDED
   size_t guarded_size_min;   // minimal size for guarded objects


@@ -206,7 +206,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool
   heap->thread_id = _mi_thread_id();
   heap->arena_id = arena_id;
   heap->no_reclaim = noreclaim;
-  heap->eager_abandon = (!noreclaim && mi_option_is_enabled(mi_option_eager_abandon));
+  heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_full_page_retain) >= 0);
   heap->tag = tag;
   if (heap == tld->heap_backing) {
     _mi_random_init(&heap->random);
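Note that `allow_page_abandon` is now derived from the sign of the option value rather than from a boolean flag: a negative setting turns page abandonment off for reclaiming heaps, while any value >= 0 keeps it on and only controls how many full pages are retained. A small sketch of that reading (the helper name is made up for illustration and assumes the semantics shown in the hunk above):

    #include <mimalloc.h>
    #include <stdbool.h>

    // Illustrative helper mirroring the updated condition in _mi_heap_init:
    // abandonment stays allowed as long as the heap may reclaim and the
    // option has not been set to a negative value.
    static bool heap_allows_page_abandon(bool noreclaim) {
      return !noreclaim && (mi_option_get(mi_option_full_page_retain) >= 0);
    }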


@@ -143,7 +143,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) },   // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
   { 10,  UNINIT, MI_OPTION(arena_purge_mult) },                     // purge delay multiplier for arena's
   { 1,   UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
-  { 0,   UNINIT, MI_OPTION(abandoned_reclaim_on_free) },            // reclaim an abandoned segment on a free
+  { 1,   UNINIT, MI_OPTION(abandoned_reclaim_on_free) },            // reclaim an abandoned segment on a free
   { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
   { 400, UNINIT, MI_OPTION(retry_on_oom) },                         // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
 #if defined(MI_VISIT_ABANDONED)
@@ -158,7 +158,7 @@ static mi_option_desc_t options[_mi_option_last] =
         UNINIT, MI_OPTION(guarded_sample_rate)},                    // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,   UNINIT, MI_OPTION(guarded_sample_seed)},
   { 0,   UNINIT, MI_OPTION(target_segments_per_thread) },           // abandon segments beyond this point, or 0 to disable.
-  { 1,   UNINIT, MI_OPTION(eager_abandon) },
+  { 2,   UNINIT, MI_OPTION(full_page_retain) },
 };
 static void mi_option_init(mi_option_desc_t* desc);


@@ -381,7 +381,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_is_in_full(page));
-  mi_assert_internal(!mi_page_heap(page)->eager_abandon);
+  mi_assert_internal(!mi_page_heap(page)->allow_page_abandon);
   if (!mi_page_is_in_full(page)) return;
   mi_heap_t* heap = mi_page_heap(page);
@@ -398,7 +398,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(!mi_page_is_in_full(page));
   mi_heap_t* heap = mi_page_heap(page);
-  if (heap->eager_abandon) {
+  if (heap->allow_page_abandon) {
     // abandon full pages
     _mi_page_abandon(page, pq);
   }
@@ -761,9 +761,10 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
 // search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
 #define MI_MAX_CANDIDATE_SEARCH (8)
+#define MI_MAX_FULL_PAGES_PER_QUEUE (4)
 // Find a page with free blocks of `page->block_size`.
-static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
+static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
 {
   // search through the pages in "next fit" order
   #if MI_STAT
@@ -772,6 +773,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
   #if MI_MAX_CANDIDATE_SEARCH > 1
   size_t candidate_count = 0; // we reset this on the first candidate to limit the search
   #endif
+  size_t full_page_count = 0;
   mi_page_t* page_candidate = NULL; // a page with free space
   mi_page_t* page = pq->first;
@@ -797,8 +799,11 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
     // if the page is completely full, move it to the `mi_pages_full`
     // queue so we don't visit long-lived pages too often.
     if (!immediate_available && !mi_page_is_expandable(page)) {
-      mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
-      mi_page_to_full(page, pq);
+      full_page_count++;
+      if (full_page_count > MI_MAX_FULL_PAGES_PER_QUEUE) {
+        mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+        mi_page_to_full(page, pq);
+      }
     }
     else {
       // the page has free space, make it a candidate
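The new counter changes the scan policy: while walking a size-class queue in next-fit order, only full pages encountered beyond the first MI_MAX_FULL_PAGES_PER_QUEUE are moved to the full-pages queue (and, with allow_page_abandon set, abandoned); the first few full pages stay in the queue, presumably so they can be reused quickly once blocks are freed back to them. A simplified standalone sketch of that policy, using made-up types rather than mimalloc's internal ones:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_FULL_PAGES_PER_QUEUE 4   // mirrors MI_MAX_FULL_PAGES_PER_QUEUE

    typedef struct page_s {
      struct page_s* next;
      bool has_free_blocks;
    } page_t;

    // Walk the queue in next-fit order. Full pages are tolerated in place up to
    // the threshold; only the ones past it would be moved out of the queue.
    static page_t* find_free_page(page_t* queue_head) {
      size_t full_page_count = 0;
      for (page_t* page = queue_head; page != NULL; page = page->next) {
        if (!page->has_free_blocks) {
          full_page_count++;
          if (full_page_count > MAX_FULL_PAGES_PER_QUEUE) {
            // here mimalloc calls mi_page_to_full() and may abandon the page
          }
        }
        else {
          return page;   // a page with free space was found
        }
      }
      return NULL;       // no usable page; the caller allocates a fresh one
    }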
@@ -1000,7 +1005,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment)
   mi_assert_internal(mi_heap_is_initialized(heap));
   // call potential deferred free routines
-  _mi_deferred_free(heap, false);
+  // _mi_deferred_free(heap, false);
   // free delayed frees from other threads (but skip contended ones)
   // _mi_heap_delayed_free_partial(heap);