From e61ab67185f1c89c71dec7f7e4508bc8ed9c7f82 Mon Sep 17 00:00:00 2001
From: daanx
Date: Sun, 22 Dec 2024 18:31:33 -0800
Subject: [PATCH] cleanup

---
 ide/vs2022/mimalloc.vcxproj |  2 +-
 src/arena.c                 | 46 ++++++++++++++++++++++---------------
 2 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index dc112272..87e866bb 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -190,7 +190,7 @@
       true
       Default
       ../../include
-      MI_DEBUG=3;MI_GUARDED=1;%(PreprocessorDefinitions);
+      MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
       CompileAsCpp
       false
       stdcpp20
diff --git a/src/arena.c b/src/arena.c
index 7a016165..b5c17d95 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -42,6 +42,7 @@ typedef struct mi_arena_s {
   int                 numa_node;            // associated NUMA node
   bool                is_exclusive;         // only allow allocations if specifically for this arena
   bool                is_large;             // memory area consists of large- or huge OS pages (always committed)
+  long                purge_delay;          // from the options, but allows setting per arena
   _Atomic(mi_msecs_t) purge_expire;         // expiration time when slices can be purged from `slices_purge`.

   mi_bitmap_t*        slices_free;          // is the slice free?
@@ -793,6 +794,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
   #endif

   // recommit guard page at the end?
+  // we must do this since we may later allocate large spans over this page and cannot have a guard page in between
   #if MI_SECURE >= 2
   if (!page->memid.is_pinned) {
     _mi_os_commit((uint8_t*)page + mi_memid_size(page->memid) - MI_ARENA_GUARD_PAGE_SIZE, MI_ARENA_GUARD_PAGE_SIZE, NULL);
@@ -1047,7 +1049,7 @@ void _mi_arenas_unsafe_destroy_all(void) {
   Add an arena.
----------------------------------------------------------- */

-static bool mi_arena_add(mi_subproc_t* subproc, mi_arena_t* arena, mi_arena_id_t* arena_id) {
+static bool mi_arenas_add(mi_subproc_t* subproc, mi_arena_t* arena, mi_arena_id_t* arena_id) {
   mi_assert_internal(arena != NULL);
   mi_assert_internal(arena->slice_count > 0);
   if (arena_id != NULL) { *arena_id = NULL; }
@@ -1089,7 +1091,7 @@ static size_t mi_arena_info_slices_needed(size_t slice_count, size_t* bitmap_bas
   const size_t size = base_size + bitmaps_size;

   const size_t os_page_size = _mi_os_page_size();
-  const size_t info_size    = _mi_align_up(size, os_page_size) + os_page_size; // + guard page
+  const size_t info_size    = _mi_align_up(size, os_page_size) + MI_ARENA_GUARD_PAGE_SIZE;
   const size_t info_slices  = mi_slice_count_of_size(info_size);

   if (bitmap_base != NULL) *bitmap_base = base_size;
@@ -1132,18 +1134,19 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s

   mi_arena_t* arena = (mi_arena_t*)start;

-  // commit & zero if needed
-  const size_t os_page_size = _mi_os_page_size();
+  // commit & zero if needed
   if (!memid.initially_committed) {
-    // security: always leave a guard OS page decommitted at the end (already part of info_slices)
-    _mi_os_commit(arena, mi_size_of_slices(info_slices) - os_page_size, NULL);
+    // if MI_SECURE, leave a guard OS page decommitted at the end
+    _mi_os_commit(arena, mi_size_of_slices(info_slices) - MI_ARENA_GUARD_PAGE_SIZE, NULL);
   }
   else if (!memid.is_pinned) {
-    // security: decommit a guard OS page at the end of the arena info
-    _mi_os_decommit((uint8_t*)arena + mi_size_of_slices(info_slices) - os_page_size, os_page_size);
+    #if MI_SECURE > 0
+    // if MI_SECURE, decommit a guard OS page at the end of the arena info
+    _mi_os_decommit((uint8_t*)arena + mi_size_of_slices(info_slices) - MI_ARENA_GUARD_PAGE_SIZE, MI_ARENA_GUARD_PAGE_SIZE);
+    #endif
   }
   if (!memid.initially_zero) {
-    _mi_memzero(arena, mi_size_of_slices(info_slices) - os_page_size);
+    _mi_memzero(arena, mi_size_of_slices(info_slices) - MI_ARENA_GUARD_PAGE_SIZE);
   }

   // init
@@ -1155,6 +1158,7 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
   arena->numa_node    = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
   arena->is_large     = is_large;
   arena->purge_expire = 0;
+  arena->purge_delay  = mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult);
   // mi_lock_init(&arena->abandoned_visit_lock);

   // init bitmaps
@@ -1184,7 +1188,7 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
     mi_bitmap_setN(arena->slices_dirty, 0, info_slices, NULL);
   }

-  return mi_arena_add(subproc, arena, arena_id);
+  return mi_arenas_add(subproc, arena, arena_id);
 }


@@ -1427,9 +1431,14 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
   Arena purge
----------------------------------------------------------- */

-static long mi_arena_purge_delay(void) {
+static long mi_arena_purge_delay(mi_arena_t* arena) {
   // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay
-  return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
+  if (arena==NULL) {
+    return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
+  }
+  else {
+    return arena->purge_delay;
+  }
 }

 // reset or decommit in an arena and update the commit bitmap
@@ -1459,8 +1468,8 @@ static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_c
 // Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
 // Note: assumes we (still) own the area as we may purge immediately
 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slice_count) {
-  const long delay = mi_arena_purge_delay();
-  if (delay < 0 || _mi_preloading()) return;  // is purging allowed at all?
+  const long delay = mi_arena_purge_delay(arena);
+  if (arena->memid.is_pinned || delay < 0 || _mi_preloading()) return;  // is purging allowed at all?

   mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
   if (delay == 0) {
@@ -1542,7 +1551,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
   // go through all purge info's (with max MI_BFIELD_BITS ranges at a time)
   // this also clears those ranges atomically (so any newly freed blocks will get purged next
   // time around)
-  mi_purge_visit_info_t vinfo = { now, mi_arena_purge_delay(), true /*all?*/, false /*any?*/};
+  mi_purge_visit_info_t vinfo = { now, mi_arena_purge_delay(arena), true /*all?*/, false /*any?*/};
   _mi_bitmap_forall_setc_ranges(arena->slices_purge, &mi_arena_try_purge_visitor, arena, &vinfo);

   return vinfo.any_purged;
@@ -1551,7 +1560,8 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)

 static void mi_arenas_try_purge(bool force, bool visit_all)
 {
-  if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled
+  const long delay = mi_arena_purge_delay(NULL);
+  if (_mi_preloading() || delay <= 0) return;  // nothing will be scheduled

   // check if any arena needs purging?
   mi_tld_t* tld = _mi_tld();
@@ -1568,7 +1578,7 @@ static void mi_arenas_try_purge(bool force, bool visit_all)
   mi_atomic_guard(&purge_guard)
   {
     // increase global expire: at most one purge per delay cycle
-    mi_atomic_store_release(&subproc->purge_expire, now + mi_arena_purge_delay());
+    mi_atomic_store_release(&subproc->purge_expire, now + delay);
     const size_t arena_start = tld->thread_seq % max_arena;
     size_t max_purge_count = (visit_all ? max_arena : 2);
     bool all_visited = true;
@@ -1688,7 +1698,7 @@ mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* are
   arena->is_exclusive = true;
   arena->subproc = _mi_subproc();

-  if (!mi_arena_add(arena->subproc, arena, arena_id)) {
+  if (!mi_arenas_add(arena->subproc, arena, arena_id)) {
     return false;
   }
   mi_arena_pages_reregister(arena);