diff --git a/include/mimalloc.h b/include/mimalloc.h index 8b453247..46335619 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -381,11 +381,11 @@ typedef enum mi_option_e { mi_option_os_tag, // tag used for OS logging (macOS only for now) (=100) mi_option_max_errors, // issue at most N error messages mi_option_max_warnings, // issue at most N warning messages - mi_option_max_segment_reclaim, // max. percentage of the abandoned segments can be reclaimed per try (=10%) + mi_option_deprecated_max_segment_reclaim, // max. percentage of the abandoned segments can be reclaimed per try (=10%) mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`) mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10) - mi_option_purge_extend_delay, + mi_option_deprecated_purge_extend_delay, mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's) mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. 
(only on windows) mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0) @@ -394,8 +394,7 @@ typedef enum mi_option_e { mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000) mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0) - mi_option_target_segments_per_thread, // experimental (=0) - mi_option_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1) + mi_option_page_reclaim_on_free, // allow to reclaim an abandoned page on a free (=1) mi_option_page_full_retain, // retain N full pages per size class (=2) mi_option_page_max_candidates, // max candidate pages to consider for allocation (=4) mi_option_max_vabits, // max user space virtual address bits to consider (=48) diff --git a/src/free.c b/src/free.c index 5d9628f0..865efafa 100644 --- a/src/free.c +++ b/src/free.c @@ -217,7 +217,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) mi_attr_noe // 2. if the page is not too full, we can try to reclaim it for ourselves // note: this seems a bad idea but it speeds up some benchmarks (like `larson`) quite a bit. - if (_mi_option_get_fast(mi_option_reclaim_on_free) != 0 && + if (_mi_option_get_fast(mi_option_page_reclaim_on_free) != 0 && !mi_page_is_used_at_frac(page,8) // && !mi_page_is_abandoned_mapped(page) ) @@ -237,7 +237,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) mi_attr_noe (_mi_arena_memid_is_suitable(page->memid, tagheap->exclusive_arena)) // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?) 
) { - if (mi_page_queue(tagheap, page->block_size)->first != NULL) { // don't reclaim for an block_size we don't use + if (mi_page_queue(tagheap, page->block_size)->first != NULL) { // don't reclaim for a block_size we don't use // first remove it from the abandoned pages in the arena -- this waits for any readers to finish _mi_arenas_page_unabandon(page); _mi_heap_page_reclaim(tagheap, page); diff --git a/src/heap.c b/src/heap.c index 82ca05cb..1ae7e99f 100644 --- a/src/heap.c +++ b/src/heap.c @@ -175,7 +175,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy, heap->memid = memid; heap->tld = tld; // avoid reading the thread-local tld during initialization heap->exclusive_arena = _mi_arena_from_id(arena_id); - heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_reclaim_on_free)); + heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_page_reclaim_on_free)); heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0); heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); heap->tag = heap_tag; diff --git a/src/init.c b/src/init.c index ac49d292..33c9794d 100644 --- a/src/init.c +++ b/src/init.c @@ -259,7 +259,7 @@ static void mi_heap_main_init(void) { //heap_main.keys[0] = _mi_heap_random_next(&heap_main); //heap_main.keys[1] = _mi_heap_random_next(&heap_main); _mi_heap_guarded_init(&heap_main); - heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_reclaim_on_free); + heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_page_reclaim_on_free); heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0); heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); } diff --git a/src/options.c b/src/options.c index 8d66b320..7b643092 100644 --- a/src/options.c +++ b/src/options.c @@ -150,11 +150,11 @@ static mi_option_desc_t options[_mi_option_last] = { 100, UNINIT, 
MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose { 32, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output { 32, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output - { 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try. + { 10, UNINIT, MI_OPTION(deprecated_max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try. { 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees! { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`) { 1, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's - { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) }, + { 1, UNINIT, MI_OPTION_LEGACY(deprecated_purge_extend_delay, decommit_extend_delay) }, { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's) { 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. #if defined(MI_VISIT_ABANDONED) @@ -168,8 +168,7 @@ static mi_option_desc_t options[_mi_option_last] = { MI_DEFAULT_GUARDED_SAMPLE_RATE, UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000) { 0, UNINIT, MI_OPTION(guarded_sample_seed)}, - { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable. 
- { 0, UNINIT, MI_OPTION_LEGACY(reclaim_on_free, abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free + { 1, UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },// reclaim an abandoned page on a free { 2, UNINIT, MI_OPTION(page_full_retain) }, { 4, UNINIT, MI_OPTION(page_max_candidates) }, { 0, UNINIT, MI_OPTION(max_vabits) }, diff --git a/src/page.c b/src/page.c index d2d6a854..af1d5072 100644 --- a/src/page.c +++ b/src/page.c @@ -680,7 +680,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m size_t count = 0; #endif long candidate_limit = 0; // we reset this on the first candidate to limit the search - long page_full_retain = heap->page_full_retain; + long page_full_retain = (pq->block_size > MI_SMALL_MAX_OBJ_SIZE ? 0 : heap->page_full_retain); // only retain small pages mi_page_t* page_candidate = NULL; // a page with free space mi_page_t* page = pq->first; diff --git a/test/main-override-dep.cpp b/test/main-override-dep.cpp index e92f6fc4..edb57f1f 100644 --- a/test/main-override-dep.cpp +++ b/test/main-override-dep.cpp @@ -12,4 +12,14 @@ std::string TestAllocInDll::GetString() std::string r = test; delete[] test; return r; +} + +#include <windows.h> + +void TestAllocInDll::TestHeapAlloc() +{ + HANDLE heap = GetProcessHeap(); + int* p = (int*)HeapAlloc(heap, 0, sizeof(int)); + *p = 42; + HeapFree(heap, 0, p); } \ No newline at end of file diff --git a/test/main-override-dep.h b/test/main-override-dep.h index 4826f25f..9d4aabfd 100644 --- a/test/main-override-dep.h +++ b/test/main-override-dep.h @@ -8,4 +8,5 @@ class TestAllocInDll { public: __declspec(dllexport) std::string GetString(); + __declspec(dllexport) void TestHeapAlloc(); }; diff --git a/test/main-override.cpp b/test/main-override.cpp index db594acc..af385992 100644 --- a/test/main-override.cpp +++ b/test/main-override.cpp @@ -37,7 +37,7 @@ static void test_thread_local(); // issue #944 static void test_mixed1(); // issue #942 static
void test_stl_allocators(); -#if x_WIN32 +#if _WIN32 #include "main-override-dep.h" static void test_dep(); // issue #981: test overriding in another DLL #else @@ -46,8 +46,8 @@ static void test_dep() { }; int main() { mi_stats_reset(); // ignore earlier allocations - various_tests(); - test_mixed1(); + //various_tests(); + //test_mixed1(); test_dep(); @@ -145,11 +145,13 @@ static bool test_stl_allocator1() { struct some_struct { int i; int j; double z; }; -#if x_WIN32 +#if _WIN32 static void test_dep() { TestAllocInDll t; std::string s = t.GetString(); + + t.TestHeapAlloc(); } #endif diff --git a/test/test-stress.c b/test/test-stress.c index f7ae6fea..303d9f42 100644 --- a/test/test-stress.c +++ b/test/test-stress.c @@ -308,7 +308,7 @@ int main(int argc, char** argv) { #endif #if defined(NDEBUG) && !defined(USE_STD_MALLOC) // mi_option_set(mi_option_purge_delay,-1); - mi_option_set(mi_option_reclaim_on_free, 0); + mi_option_set(mi_option_page_reclaim_on_free, 0); #endif #ifndef USE_STD_MALLOC mi_stats_reset();