diff --git a/src/free.c b/src/free.c
index 5e83ad95..b1827f1e 100644
--- a/src/free.c
+++ b/src/free.c
@@ -217,43 +217,40 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
     return;
   }
 
-  const bool too_full = mi_page_is_used_at_frac(page, 8);   // more than 7/8th of the page is in use?
-
   // 2. if the page is not too full, we can try to reclaim it for ourselves
-  // note: this seems a bad idea but it speeds up some benchmarks (like `larson`) quite a bit.
-  if (!too_full &&
-      _mi_option_get_fast(mi_option_page_reclaim_on_free) != 0 &&
-      page->block_size <= MI_SMALL_MAX_OBJ_SIZE                    // only for small sized blocks
-     )
+  // note:
+  // we only reclaim if the page originated from our heap (the heap field is preserved on abandonment)
+  // to avoid claiming arbitrary object sizes and limit indefinite expansion.
+  // this helps benchmarks like `larson`
+  const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
+  if (reclaim_on_free >= 0 && page->block_size <= MI_SMALL_MAX_OBJ_SIZE)   // only for small sized blocks
   {
     // the page has still some blocks in use (but not too many)
     // reclaim in our heap if compatible, or otherwise abandon again
     // todo: optimize this check further?
     // note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
     // not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
-    mi_heap_t* const heap = mi_prim_get_default_heap();
-    if (mi_heap_is_initialized(heap))  // we did not already terminate our thread
-    {
-      mi_heap_t* const tagheap = _mi_heap_by_tag(heap, page->heap_tag);
-      if ((tagheap != NULL) &&                   // don't reclaim across heap object types
-          (tagheap->allow_page_reclaim) &&       // and we are allowed to reclaim abandoned pages
-          // (page->subproc == tagheap->tld->subproc) &&  // don't reclaim across sub-processes; todo: make this check faster (integrate with _mi_heap_by_tag ? )
-          (_mi_arena_memid_is_suitable(page->memid, tagheap->exclusive_arena))  // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
-         )
-      {
-        if (mi_page_queue(tagheap, page->block_size)->first != NULL) {  // don't reclaim for a block_size we don't use
-          // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
-          _mi_arenas_page_unabandon(page);
-          _mi_heap_page_reclaim(tagheap, page);
-          mi_heap_stat_counter_increase(tagheap, pages_reclaim_on_free, 1);
-          return;
-        }
+    mi_heap_t* heap = mi_prim_get_default_heap();
+    if (heap != page->heap) {
+      if (mi_heap_is_initialized(heap)) {
+        heap = _mi_heap_by_tag(heap, page->heap_tag);
       }
     }
+    if (heap != NULL && heap->allow_page_reclaim &&
+        (heap == page->heap || (reclaim_on_free == 1 && !mi_page_is_used_at_frac(page, 8))) &&  // only reclaim if we were the originating heap, or if reclaim_on_free == 1 and the page is not too full
+        _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)  // don't reclaim across unsuitable arenas; todo: inline arena_is_suitable (?)
+       )
+    {
+      // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
+      _mi_arenas_page_unabandon(page);
+      _mi_heap_page_reclaim(heap, page);
+      mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
+      return;
+    }
   }
 
   // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
-  if (!too_full &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
+  if (!mi_page_is_used_at_frac(page, 8) &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
       !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
       _mi_arenas_page_try_reabandon_to_mapped(page))
   {
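The free.c hunk above makes page_reclaim_on_free a tri-state option: -1 disables reclaim on free entirely, 0 (the default) reclaims an abandoned page only back into its originating heap, and 1 additionally allows cross-heap reclaim when the page is less than 7/8 full. A minimal sketch of how an application could opt into the aggressive mode through the public options API (the value 1 is illustrative, not a tuning recommendation):

    #include <mimalloc.h>

    int main(void) {
      // -1 = never reclaim on free, 0 = reclaim only into the originating heap
      // (default), 1 = also reclaim across heaps when the page is not too full.
      mi_option_set(mi_option_page_reclaim_on_free, 1);

      void* p = mi_malloc(64);  // small block (block_size <= MI_SMALL_MAX_OBJ_SIZE)
      mi_free(p);               // a free into an abandoned page may now reclaim it
      return 0;
    }

Setting the option before the first allocation also lets per-heap flags such as allow_page_reclaim, which are derived from it at heap initialization (see the heap.c hunk below), pick up the same value.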
diff --git a/src/heap.c b/src/heap.c
index 10c65ff2..5ac79996 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -175,7 +175,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   heap->memid = memid;
   heap->tld = tld;  // avoid reading the thread-local tld during initialization
   heap->exclusive_arena = _mi_arena_from_id(arena_id);
-  heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_page_reclaim_on_free));
+  heap->allow_page_reclaim = (!allow_destroy && mi_option_get(mi_option_page_reclaim_on_free) >= 0);
   heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0);
   heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
   heap->tag = heap_tag;
diff --git a/src/init.c b/src/init.c
index ced30104..d5bfe935 100644
--- a/src/init.c
+++ b/src/init.c
@@ -259,7 +259,7 @@ static void mi_heap_main_init(void) {
   //heap_main.keys[0] = _mi_heap_random_next(&heap_main);
   //heap_main.keys[1] = _mi_heap_random_next(&heap_main);
   _mi_heap_guarded_init(&heap_main);
-  heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_page_reclaim_on_free);
+  heap_main.allow_page_reclaim = (mi_option_get(mi_option_page_reclaim_on_free) >= 0);
   heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0);
   heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
 }
diff --git a/src/options.c b/src/options.c
index 9ebb0b6a..9caffbd3 100644
--- a/src/options.c
+++ b/src/options.c
@@ -168,13 +168,13 @@ static mi_option_desc_t options[_mi_option_last] =
   { MI_DEFAULT_GUARDED_SAMPLE_RATE,
              UNINIT, MI_OPTION(guarded_sample_rate)},     // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,       UNINIT, MI_OPTION(guarded_sample_seed)},
-  { 0,       UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim an abandoned segment on a free
-  { 2,       UNINIT, MI_OPTION(page_full_retain) },
-  { 4,       UNINIT, MI_OPTION(page_max_candidates) },
-  { 0,       UNINIT, MI_OPTION(max_vabits) },
+  { 0,       UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim an abandoned segment on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
+  { 2,       UNINIT, MI_OPTION(page_full_retain) },       // number of (small) pages to retain in the free page queues
+  { 4,       UNINIT, MI_OPTION(page_max_candidates) },    // max search to find a best page candidate
+  { 0,       UNINIT, MI_OPTION(max_vabits) },             // max virtual address space bits
   { MI_DEFAULT_PAGEMAP_COMMIT,
              UNINIT, MI_OPTION(pagemap_commit) },         // commit the full pagemap upfront?
-  { 2,       UNINIT, MI_OPTION(page_commit_on_demand) },
+  { 2,       UNINIT, MI_OPTION(page_commit_on_demand) },  // commit pages on-demand (2 disables this on overcommit systems (like Linux))
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
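Because the entry above uses MI_OPTION_LEGACY, the old name abandoned_reclaim_on_free (and its environment variable) remains an alias for the new page_reclaim_on_free setting. The heap.c and init.c hunks switch from mi_option_is_enabled to an explicit mi_option_get(...) >= 0 test because the boolean view treats any nonzero value, including -1 (disable), as enabled. A small sketch that reads back the tri-state (illustrative only; the printed labels are ours, not mimalloc's):

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      long v = mi_option_get(mi_option_page_reclaim_on_free);
      // mi_option_is_enabled() would report true for -1 as well, which is
      // why the heap flags above now test the raw value with >= 0 instead.
      printf("page_reclaim_on_free = %ld (%s)\n", v,
             v < 0 ? "disabled" : (v == 0 ? "originating heap only" : "across heaps"));
      return 0;
    }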
diff --git a/src/page.c b/src/page.c
index 4b0c810c..2a51bea6 100644
--- a/src/page.c
+++ b/src/page.c
@@ -278,10 +278,11 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   }
   else {
     mi_page_queue_remove(pq, page);
-    mi_tld_t* tld = page->heap->tld;
-    mi_page_set_heap(page, NULL);
-    _mi_arenas_page_abandon(page,tld);
-    _mi_arenas_collect(false, false, tld);  // allow purging
+    mi_heap_t* heap = page->heap;
+    mi_page_set_heap(page, NULL);
+    page->heap = heap;  // don't set heap to NULL so we can reclaim_on_free within the same heap
+    _mi_arenas_page_abandon(page, heap->tld);
+    _mi_arenas_collect(false, false, heap->tld);  // allow purging
   }
 }
 
@@ -717,7 +718,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
     count++;
     #endif
     candidate_limit--;
-
+
     // search up to N pages for a best candidate
 
     // is the local free list non-empty?
@@ -744,7 +745,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
         page_candidate = page;
         candidate_limit = _mi_option_get_fast(mi_option_page_max_candidates);
       }
-      else if (mi_page_all_free(page_candidate)) { 
+      else if (mi_page_all_free(page_candidate)) {
         _mi_page_free(page_candidate, pq);
         page_candidate = page;
       }
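The page.c change is what makes the new default mode possible: _mi_page_abandon now preserves page->heap across abandonment (the call to mi_page_set_heap(page, NULL) is kept, apparently for its other ownership bookkeeping, and the heap pointer is restored right after), so a later free can match the page against the freeing thread's heap. A hedged sketch of the multi-threaded-free pattern this affects (the threading scaffolding is illustrative; whether a page is actually abandoned depends on its state at the time):

    #include <mimalloc.h>
    #include <pthread.h>

    // A block allocated on the main thread is freed on another thread; if its
    // page has been abandoned by then, this free runs the reclaim logic from
    // mi_free_try_collect_mt above.
    static void* remote_free(void* arg) {
      // With the default (0) this thread does not adopt the page, because its
      // heap is not the originating page->heap; with reclaim_on_free == 1 (and
      // the page less than 7/8 full) it could reclaim the page for itself.
      mi_free(arg);
      return NULL;
    }

    int main(void) {
      pthread_t t;
      void* p = mi_malloc(32);  // small block owned by this thread's heap
      pthread_create(&t, NULL, remote_free, p);
      pthread_join(t, NULL);
      return 0;
    }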