change to full_page_retain

Author:  daanx
Date:    2024-12-05 11:29:25 -08:00
Parent:  bc67be4d79
Commit:  0616ee151e

5 changed files with 21 additions and 16 deletions

include/mimalloc.h

@@ -379,7 +379,7 @@ typedef enum mi_option_e {
   mi_option_guarded_sample_rate,        // 1 out of N allocations in the min/max range will be guarded (=1000)
   mi_option_guarded_sample_seed,        // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
   mi_option_target_segments_per_thread, // experimental (=0)
-  mi_option_eager_abandon,              // eagerly abandon pages from the heap if suitable (to reduce memory footprint in multi-threaded code)
+  mi_option_full_page_retain,           // retain N full pages per size class (=4, lower it to reduce memory footprint in multi-thread applications)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
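
A hedged usage sketch (not part of the commit): assuming the option lands as declared above, it can be tuned with mimalloc's existing option API before the first allocation:

#include <mimalloc.h>

int main(void) {
  // lower the retain count to shrink the footprint of multi-threaded
  // programs; the value 1 here is purely illustrative
  mi_option_set(mi_option_full_page_retain, 1);
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}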

include/mimalloc/types.h

@@ -305,7 +305,7 @@ typedef struct mi_page_s {
   #endif
   _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
   mi_heap_t* heap;                         // heap this threads belong to.
   struct mi_page_s* next;                  // next page owned by the heap with the same `block_size`
   struct mi_page_s* prev;                  // previous page owned by the heap with the same `block_size`
@@ -417,7 +417,7 @@ struct mi_heap_s {
   size_t page_retired_max;     // largest retired index into the `pages` array.
   mi_heap_t* next;             // list of heaps per thread
   bool no_reclaim;             // `true` if this heap should not reclaim abandoned pages
-  bool eager_abandon;          // `true` if this heap can abandon pages to reduce memory footprint
+  bool allow_page_abandon;     // `true` if this heap can abandon pages to reduce memory footprint
   uint8_t tag;                 // custom tag, can be used for separating heaps based on the object types
   #if MI_GUARDED
   size_t guarded_size_min;     // minimal size for guarded objects

src/heap.c

@@ -206,7 +206,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag)
   heap->thread_id = _mi_thread_id();
   heap->arena_id = arena_id;
   heap->no_reclaim = noreclaim;
-  heap->eager_abandon = (!noreclaim && mi_option_is_enabled(mi_option_eager_abandon));
+  heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_full_page_retain) >= 0);
   heap->tag = tag;
   if (heap == tld->heap_backing) {
     _mi_random_init(&heap->random);
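
The gate changed shape here: `eager_abandon` was a boolean option, while the new test checks the sign of `full_page_retain`. Assuming `mi_option_get` simply returns the configured value, a negative setting would now disable page abandonment for the heap entirely; an illustrative sketch:

// illustrative only, inferred from the diff (not a documented contract):
// a negative retain count makes allow_page_abandon false, so full pages
// stay owned by the heap, matching the old eager_abandon=0 behavior
mi_option_set(mi_option_full_page_retain, -1);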

src/options.c

@@ -143,7 +143,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) },               // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
   { 10,   UNINIT, MI_OPTION(arena_purge_mult) },                                // purge delay multiplier for arena's
   { 1,    UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
-  { 0,    UNINIT, MI_OPTION(abandoned_reclaim_on_free) },                       // reclaim an abandoned segment on a free
+  { 1,    UNINIT, MI_OPTION(abandoned_reclaim_on_free) },                       // reclaim an abandoned segment on a free
   { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
   { 400,  UNINIT, MI_OPTION(retry_on_oom) },                                    // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
   #if defined(MI_VISIT_ABANDONED)
@@ -158,7 +158,7 @@ static mi_option_desc_t options[_mi_option_last] =
           UNINIT, MI_OPTION(guarded_sample_rate) },                // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,    UNINIT, MI_OPTION(guarded_sample_seed) },
   { 0,    UNINIT, MI_OPTION(target_segments_per_thread) },         // abandon segments beyond this point, or 0 to disable.
-  { 1,    UNINIT, MI_OPTION(eager_abandon) },
+  { 2,    UNINIT, MI_OPTION(full_page_retain) },
};

static void mi_option_init(mi_option_desc_t* desc);
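
Two defaults change in this table: `abandoned_reclaim_on_free` flips from 0 to 1, and `full_page_retain` starts at 2 (the header comment in mimalloc.h above says =4; the table value is what actually applies). Since mimalloc also reads options from the environment as `MIMALLOC_` plus the upper-cased option name, the new default can be overridden without recompiling; a hedged sketch:

// change the default before mimalloc initializes its option table;
// roughly equivalent to running with MIMALLOC_FULL_PAGE_RETAIN=4 set
mi_option_set_default(mi_option_full_page_retain, 4);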

src/page.c

@@ -212,7 +212,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
   mi_assert_internal(page!=NULL);
   // collect the thread free list
   _mi_page_thread_free_collect(page);
   // and the local free list
   if (page->local_free != NULL) {
@@ -264,7 +264,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   */
   // called from `mi_free` on a reclaim, and fresh_alloc if we get an abandoned page
   void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page)
   {
     mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
     mi_assert_internal(_mi_ptr_page(page)==page);
@@ -381,7 +381,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_is_in_full(page));
-  mi_assert_internal(!mi_page_heap(page)->eager_abandon);
+  mi_assert_internal(!mi_page_heap(page)->allow_page_abandon);
   if (!mi_page_is_in_full(page)) return;
   mi_heap_t* heap = mi_page_heap(page);
@@ -398,7 +398,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(!mi_page_is_in_full(page));
   mi_heap_t* heap = mi_page_heap(page);
-  if (heap->eager_abandon) {
+  if (heap->allow_page_abandon) {
     // abandon full pages
     _mi_page_abandon(page, pq);
   }
@@ -761,9 +761,10 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
   // search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
   #define MI_MAX_CANDIDATE_SEARCH (8)
+  #define MI_MAX_FULL_PAGES_PER_QUEUE (4)
   // Find a page with free blocks of `page->block_size`.
-  static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
+  static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
   {
     // search through the pages in "next fit" order
     #if MI_STAT
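
The function also gains `mi_decl_noinline`, keeping this slow-path search out of inlined allocation fast paths. A hedged sketch of how such an attribute macro is commonly defined (mimalloc's real definition lives in its internal headers and may differ in detail):

#if defined(_MSC_VER)
  #define mi_decl_noinline   __declspec(noinline)       // MSVC spelling
#elif defined(__GNUC__) || defined(__clang__)
  #define mi_decl_noinline   __attribute__((noinline))  // GCC/Clang spelling
#else
  #define mi_decl_noinline                              // unknown compiler: no-op
#endif
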
@@ -772,6 +773,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
   #if MI_MAX_CANDIDATE_SEARCH > 1
   size_t candidate_count = 0;  // we reset this on the first candidate to limit the search
   #endif
+  size_t full_page_count = 0;
   mi_page_t* page_candidate = NULL;  // a page with free space
   mi_page_t* page = pq->first;
@@ -797,8 +799,11 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
     // if the page is completely full, move it to the `mi_pages_full`
     // queue so we don't visit long-lived pages too often.
     if (!immediate_available && !mi_page_is_expandable(page)) {
-      mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
-      mi_page_to_full(page, pq);
+      full_page_count++;
+      if (full_page_count > MI_MAX_FULL_PAGES_PER_QUEUE) {
+        mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+        mi_page_to_full(page, pq);
+      }
     }
     else {
       // the page has free space, make it a candidate
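
This hunk is the behavioral core of the rename: instead of moving every completely full page to the `mi_pages_full` queue on sight, the search now tolerates up to MI_MAX_FULL_PAGES_PER_QUEUE full pages per queue, so recently filled pages can be revisited (and their thread-freed blocks collected) before being pushed aside. A self-contained sketch of the counting pattern, using hypothetical types (`node_t` and its fields are illustrative, not mimalloc API):

#include <stddef.h>
#include <stdbool.h>

#define MAX_FULL_RETAINED 4  // mirrors MI_MAX_FULL_PAGES_PER_QUEUE

typedef struct node_s { struct node_s* next; bool full; } node_t;

// The first MAX_FULL_RETAINED full nodes are left in the queue; only
// full nodes beyond that count would be moved to a separate full list.
size_t count_evictable(const node_t* head) {
  size_t full_count = 0, evict = 0;
  for (const node_t* n = head; n != NULL; n = n->next) {
    if (n->full) {
      full_count++;
      if (full_count > MAX_FULL_RETAINED) evict++;
    }
  }
  return evict;
}
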
@@ -807,8 +812,8 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
         page_candidate = page;
         candidate_count = 0;
       }
       else if (mi_page_all_free(page_candidate)) {
         _mi_page_free(page_candidate, pq);
         page_candidate = page;
       }
       else if (page->used >= page_candidate->used) { // && !mi_page_is_mostly_used(page)) {
@@ -1000,7 +1005,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment)
   mi_assert_internal(mi_heap_is_initialized(heap));
   // call potential deferred free routines
-  _mi_deferred_free(heap, false);
+  // _mi_deferred_free(heap, false);
   // free delayed frees from other threads (but skip contended ones)
   // _mi_heap_delayed_free_partial(heap);