From d1d01deea7c6941b4d42b65ad538c4efb5368f61 Mon Sep 17 00:00:00 2001
From: daanx
Date: Mon, 10 Mar 2025 18:11:27 -0700
Subject: [PATCH] add support for custom commit in arena loading

---
 include/mimalloc.h | 7 +-
 include/mimalloc/internal.h | 10 +--
 include/mimalloc/types.h | 4 +
 src/arena-meta.c | 4 +-
 src/arena.c | 147 +++++++++++++++++++++++++++++++-----
 src/os.c | 55 ++------------
 6 files changed, 153 insertions(+), 74 deletions(-)

diff --git a/include/mimalloc.h b/include/mimalloc.h
index 67685ac3..3be9f619 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -330,11 +330,16 @@ mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexce
 
 // experimental
+typedef bool (mi_cdecl mi_commit_fun_t)(bool commit, void* start, size_t size, bool* is_zero, void* user_arg);
+mi_decl_export bool mi_manage_memory(void* start, size_t size, bool is_committed, bool is_pinned, bool is_zero, int numa_node, bool exclusive,
+                                     mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept;
+
 mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t* accessed_size, size_t* size);
-mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* arena_id);
+mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id);
 mi_decl_export bool mi_heap_reload(mi_heap_t* heap, mi_arena_id_t arena);
 mi_decl_export void mi_heap_unload(mi_heap_t* heap);
+
 
 // Is a pointer contained in the given arena area?
 mi_decl_export bool mi_arena_contains(mi_arena_id_t arena_id, const void* p);
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 8a880b8d..7ac1c816 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -150,14 +150,14 @@ bool _mi_os_decommit(void* addr, size_t size);
 bool _mi_os_protect(void* addr, size_t size);
 bool _mi_os_unprotect(void* addr, size_t size);
 bool _mi_os_purge(void* p, size_t size);
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size, mi_commit_fun_t* commit_fun, void* commit_fun_arg);
 bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
 
 size_t _mi_os_secure_guard_page_size(void);
-bool _mi_os_secure_guard_page_set_at(void* addr, bool is_pinned);
-bool _mi_os_secure_guard_page_set_before(void* addr, bool is_pinned);
-bool _mi_os_secure_guard_page_reset_at(void* addr);
-bool _mi_os_secure_guard_page_reset_before(void* addr);
+bool _mi_os_secure_guard_page_set_at(void* addr, mi_memid_t memid);
+bool _mi_os_secure_guard_page_set_before(void* addr, mi_memid_t memid);
+bool _mi_os_secure_guard_page_reset_at(void* addr, mi_memid_t memid);
+bool _mi_os_secure_guard_page_reset_before(void* addr, mi_memid_t memid);
 
 int _mi_os_numa_node(void);
 int _mi_os_numa_node_count(void);
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index fbf4ea03..2e8d744c 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -242,6 +242,10 @@ static inline bool mi_memid_needs_no_free(mi_memid_t memid) {
   return mi_memkind_needs_no_free(memid.memkind);
 }
 
+static inline mi_arena_t* mi_memid_arena(mi_memid_t memid) {
+  return (memid.memkind == MI_MEM_ARENA ? memid.mem.arena.arena : NULL);
+}
+
 // ------------------------------------------------------
 // Mimalloc pages contain allocated blocks
 // ------------------------------------------------------
diff --git a/src/arena-meta.c b/src/arena-meta.c
index 56a7505c..3b64ab9b 100644
--- a/src/arena-meta.c
+++ b/src/arena-meta.c
@@ -82,8 +82,8 @@ static mi_meta_page_t* mi_meta_page_zalloc(void) {
 
   // guard pages
   #if MI_SECURE >= 1
-  _mi_os_secure_guard_page_set_at(base, memid.is_pinned);
-  _mi_os_secure_guard_page_set_before(base + MI_META_PAGE_SIZE, memid.is_pinned);
+  _mi_os_secure_guard_page_set_at(base, memid);
+  _mi_os_secure_guard_page_set_before(base + MI_META_PAGE_SIZE, memid);
   #endif
 
   // initialize the page and free block bitmap
diff --git a/src/arena.c b/src/arena.c
index 85d144e0..f0948055 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -42,6 +42,8 @@ typedef struct mi_arena_s {
   int numa_node; // associated NUMA node
   bool is_exclusive; // only allow allocations if specifically for this arena
   _Atomic(mi_msecs_t) purge_expire; // expiration time when slices can be purged from `slices_purge`.
+  mi_commit_fun_t* commit_fun; // custom commit/decommit memory
+  void* commit_fun_arg; // user argument for a custom commit function
 
   mi_bbitmap_t* slices_free; // is the slice free? (a binned bitmap with size classes)
   mi_bitmap_t* slices_committed; // is the slice committed? (i.e. accessible)
@@ -55,6 +57,74 @@
 } mi_arena_t;
 
 
 /* -----------------------------------------------------------
   Arena id's
----------------------------------------------------------- */
+
+/* -----------------------------------------------------------
+  Guard page allocation
+----------------------------------------------------------- */
+
+// In secure mode, return the size of a guard page, otherwise 0
+size_t _mi_os_secure_guard_page_size(void) {
+#if MI_SECURE > 0
+  return _mi_os_guard_page_size();
+#else
+  return 0;
+#endif
+}
+
+// In secure mode, try to decommit an area and output a warning if this fails.
+bool _mi_os_secure_guard_page_set_at(void* addr, mi_memid_t memid) {
+  if (addr == NULL) return true;
+#if MI_SECURE > 0
+  bool ok = false;
+  if (!memid.is_pinned) {
+    mi_arena_t* const arena = mi_memid_arena(memid);
+    if (arena != NULL && arena->commit_fun != NULL) {
+      ok = (*(arena->commit_fun))(false /* decommit */, addr, _mi_os_secure_guard_page_size(), NULL, arena->commit_fun_arg);
+    }
+    else {
+      ok = _mi_os_decommit(addr, _mi_os_secure_guard_page_size());
+    }
+  }
+  if (!ok) {
+    _mi_error_message(EINVAL, "secure level %d, but failed to commit guard page (at %p of size %zu)\n", MI_SECURE, addr, _mi_os_secure_guard_page_size());
+  }
+  return ok;
+#else
+  MI_UNUSED(memid);
+  return true;
+#endif
+}
+
+// In secure mode, try to decommit an area and output a warning if this fails.
+bool _mi_os_secure_guard_page_set_before(void* addr, mi_memid_t memid) {
+  return _mi_os_secure_guard_page_set_at((uint8_t*)addr - _mi_os_secure_guard_page_size(), memid);
+}
+
+// In secure mode, try to recommit an area
+bool _mi_os_secure_guard_page_reset_at(void* addr, mi_memid_t memid) {
+  if (addr == NULL) return true;
+  #if MI_SECURE > 0
+  if (!memid.is_pinned) {
+    mi_arena_t* const arena = mi_memid_arena(memid);
+    if (arena != NULL && arena->commit_fun != NULL) {
+      return (*(arena->commit_fun))(true, addr, _mi_os_secure_guard_page_size(), NULL, arena->commit_fun_arg);
+    }
+    else {
+      return _mi_os_commit(addr, _mi_os_secure_guard_page_size(), NULL);
+    }
+  }
+  #else
+  MI_UNUSED(memid);
+  #endif
+  return true;
+}
+
+// In secure mode, try to recommit an area
+bool _mi_os_secure_guard_page_reset_before(void* addr, mi_memid_t memid) {
+  return _mi_os_secure_guard_page_reset_at((uint8_t*)addr - _mi_os_secure_guard_page_size(), memid);
+}
+
+
@@ -107,6 +177,20 @@ size_t mi_arena_min_alignment() {
   return MI_ARENA_SLICE_ALIGN;
 }
 
+static bool mi_arena_commit(mi_arena_t* arena, void* start, size_t size, bool* is_zero, size_t already_committed) {
+  if (arena != NULL && arena->commit_fun != NULL) {
+    return (*arena->commit_fun)(true, start, size, is_zero, arena->commit_fun_arg);
+  }
+  else if (already_committed > 0) {
+    return _mi_os_commit_ex(start, size, is_zero, already_committed);
+  }
+  else {
+    return _mi_os_commit(start, size, is_zero);
+  }
+}
+
+
+
 /* -----------------------------------------------------------
   Util
----------------------------------------------------------- */
@@ -179,6 +263,7 @@ static size_t mi_page_full_size(mi_page_t* page) {
   }
 }
 
+
 /* -----------------------------------------------------------
   Arena Allocation
----------------------------------------------------------- */
@@ -215,7 +300,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
       mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count);
       // now actually commit
       bool commit_zero = false;
-      if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed_count))) {
+      if (!mi_arena_commit(arena, p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed_count))) {
        memid->initially_committed = false;
       }
       else {
@@ -627,7 +712,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
       page = (mi_page_t*)mi_arena_os_alloc_aligned(alloc_size, page_alignment, 0 /* align offset */, commit, allow_large, req_arena, &memid);
     }
   }
- 
+
   if (page == NULL) return NULL;
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(!os_align || _mi_is_aligned((uint8_t*)page + page_alignment, block_alignment));
@@ -639,7 +724,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
   mi_assert(alloc_size > _mi_os_secure_guard_page_size());
   const size_t page_noguard_size = alloc_size - _mi_os_secure_guard_page_size();
   if (memid.initially_committed) {
-    _mi_os_secure_guard_page_set_at((uint8_t*)page + page_noguard_size, memid.is_pinned);
+    _mi_os_secure_guard_page_set_at((uint8_t*)page + page_noguard_size, memid);
   }
   #endif
@@ -693,7 +778,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
       commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
       if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
       bool is_zero;
-      _mi_os_commit(page, commit_size, &is_zero);
+      mi_arena_commit( mi_memid_arena(memid), page, commit_size, &is_zero, 0);
       if (!memid.initially_zero && !is_zero) {
        _mi_memzero_aligned(page, commit_size);
       }
@@ -814,8 +899,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
     size_t bin = _mi_bin(mi_page_block_size(page));
     size_t slice_index;
     size_t slice_count;
-    mi_arena_t* arena = mi_page_arena(page, &slice_index, &slice_count);
-
+    mi_arena_t* const arena = mi_page_arena(page, &slice_index, &slice_count);
     mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
     mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
     mi_assert_internal(mi_bitmap_is_clearN(arena->pages_abandoned[bin], slice_index, 1));
@@ -830,14 +914,14 @@ void _mi_arenas_page_free(mi_page_t* page) {
   // we must do this since we may later allocate large spans over this page and cannot have a guard page in between
   #if MI_SECURE >= 2
   if (!page->memid.is_pinned) {
-    _mi_os_secure_guard_page_reset_before((uint8_t*)page + mi_page_full_size(page));
+    _mi_os_secure_guard_page_reset_before((uint8_t*)page + mi_page_full_size(page), page->memid);
   }
   #endif
 
   // unregister page
   _mi_page_map_unregister(page);
   if (page->memid.memkind == MI_MEM_ARENA) {
-    mi_arena_t* arena = page->memid.mem.arena.arena;
+    mi_arena_t* const arena = page->memid.mem.arena.arena;
     mi_bitmap_clear(arena->pages, page->memid.mem.arena.slice_index);
     if (page->slice_committed > 0) {
       // if committed on-demand, set the commit bits to account commit properly
@@ -1164,7 +1248,8 @@ static mi_bbitmap_t* mi_arena_bbitmap_init(size_t slice_count, uint8_t** base) {
 }
 
 
-static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t size, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
+static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t size, int numa_node, bool exclusive,
+                                    mi_memid_t memid, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept
 {
   mi_assert(_mi_is_aligned(start,MI_ARENA_SLICE_SIZE));
   mi_assert(start!=NULL);
@@ -1203,12 +1288,20 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
 
   // commit & zero if needed
   if (!memid.initially_committed) {
-    // leave a guard OS page decommitted at the end
-    _mi_os_commit(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size(), NULL);
+    size_t commit_size = mi_size_of_slices(info_slices);
+    // leave a guard OS page decommitted at the end?
+    if (!memid.is_pinned) { commit_size -= _mi_os_secure_guard_page_size(); }
+    if (commit_fun != NULL) {
+      (*commit_fun)(true /* commit */, arena, commit_size, NULL, commit_fun_arg);
+    }
+    else {
+      _mi_os_commit(arena, commit_size, NULL);
+    }
   }
-  else {
+  else if (!memid.is_pinned) {
     // if MI_SECURE, set a guard page at the end
-    _mi_os_secure_guard_page_set_before((uint8_t*)arena + mi_size_of_slices(info_slices), memid.is_pinned);
+    // todo: this does not respect the commit_fun as the memid is of external memory
+    _mi_os_secure_guard_page_set_before((uint8_t*)arena + mi_size_of_slices(info_slices), memid);
   }
   if (!memid.initially_zero) {
     _mi_memzero(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size());
@@ -1222,6 +1315,8 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
   arena->info_slices = info_slices;
   arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
   arena->purge_expire = 0;
+  arena->commit_fun = commit_fun;
+  arena->commit_fun_arg = commit_fun_arg;
   // mi_lock_init(&arena->abandoned_visit_lock);
 
   // init bitmaps
@@ -1262,9 +1357,21 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
   memid.initially_committed = is_committed;
   memid.initially_zero = is_zero;
   memid.is_pinned = is_pinned;
-  return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, arena_id);
+  return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, NULL, NULL, arena_id);
 }
 
+bool mi_manage_memory(void* start, size_t size, bool is_committed, bool is_pinned, bool is_zero, int numa_node, bool exclusive, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept
+{
+  mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
+  memid.mem.os.base = start;
+  memid.mem.os.size = size;
+  memid.initially_committed = is_committed;
+  memid.initially_zero = is_zero;
+  memid.is_pinned = is_pinned;
+  return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, commit_fun, commit_fun_arg, arena_id);
+}
+
+
 // Reserve a range of regular OS memory
 static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) {
   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
@@ -1272,7 +1379,7 @@ static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool com
   mi_memid_t memid;
   void* start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid);
   if (start == NULL) return ENOMEM;
-  if (!mi_manage_os_memory_ex2(subproc, start, size, -1 /* numa node */, exclusive, memid, arena_id)) {
+  if (!mi_manage_os_memory_ex2(subproc, start, size, -1 /* numa node */, exclusive, memid, NULL, NULL, arena_id)) {
     _mi_os_free_ex(start, size, commit, memid);
     _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
     return ENOMEM;
@@ -1420,7 +1527,7 @@ static size_t mi_debug_show_chunks(const char* header1, const char* header2, con
       if (bit_count > used_slice_count) {
        const size_t diff = chunk_count - 1 - i;
        bit_count += diff*MI_BCHUNK_BITS;
-       _mi_raw_message(" ...\n");
+       _mi_raw_message(" |\n");
        i = chunk_count-1;
       }
 
@@ -1542,7 +1649,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m
   }
   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
 
-  if (!mi_manage_os_memory_ex2(_mi_subproc(), p, hsize, numa_node, exclusive, memid, arena_id)) {
+  if (!mi_manage_os_memory_ex2(_mi_subproc(), p, hsize, numa_node, exclusive, memid, NULL, NULL, arena_id)) {
     _mi_os_free(p, hsize, memid);
     return ENOMEM;
   }
@@ -1616,7 +1723,7 @@ static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_c
   size_t already_committed;
   mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed); // pretend all committed.. (as we lack a clearN call that counts the already set bits..)
   const bool all_committed = (already_committed == slice_count);
-  const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */, mi_size_of_slices(already_committed));
+  const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */, mi_size_of_slices(already_committed), arena->commit_fun, arena->commit_fun_arg);
 
   if (needs_recommit) {
     // no longer committed
@@ -1906,7 +2013,7 @@ mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t*
   return true;
 }
 
-mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* arena_id) {
+mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) {
   // assume the memory area is already containing the arena
   if (arena_id != NULL) { *arena_id = _mi_arena_id_none(); }
   if (start == NULL || size == 0) return false;
@@ -1931,6 +2038,8 @@ mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* are
 
   // re-initialize
   arena->is_exclusive = true;
+  arena->commit_fun = commit_fun;
+  arena->commit_fun_arg = commit_fun_arg;
   arena->subproc = _mi_subproc();
   if (!mi_arenas_add(arena->subproc, arena, arena_id)) {
     return false;
diff --git a/src/os.c b/src/os.c
index 01ec2c46..204416a4 100644
--- a/src/os.c
+++ b/src/os.c
@@ -96,49 +96,6 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
   return NULL;
 }
 
-// In secure mode, return the size of a guard page, otherwise 0
-size_t _mi_os_secure_guard_page_size(void) {
-  #if MI_SECURE > 0
-  return _mi_os_guard_page_size();
-  #else
-  return 0;
-  #endif
-}
-
-// In secure mode, try to decommit an area and output a warning if this fails.
-bool _mi_os_secure_guard_page_set_at(void* addr, bool is_pinned) {
-  if (addr == NULL) return true;
-  #if MI_SECURE > 0
-  const bool ok = (is_pinned ? false : _mi_os_decommit(addr, _mi_os_secure_guard_page_size()));
-  if (!ok) {
-    _mi_error_message(EINVAL, "secure level %d, but failed to commit guard page (at %p of size %zu)\n", MI_SECURE, addr, _mi_os_secure_guard_page_size());
-  }
-  return ok;
-  #else
-  MI_UNUSED(is_pinned);
-  return true;
-  #endif
-}
-
-// In secure mode, try to decommit an area and output a warning if this fails.
-bool _mi_os_secure_guard_page_set_before(void* addr, bool is_pinned) {
-  return _mi_os_secure_guard_page_set_at((uint8_t*)addr - _mi_os_secure_guard_page_size(), is_pinned);
-}
-
-// In secure mode, try to recommit an area
-bool _mi_os_secure_guard_page_reset_at(void* addr) {
-  if (addr == NULL) return true;
-  #if MI_SECURE > 0
-  return _mi_os_commit(addr, _mi_os_secure_guard_page_size(), NULL);
-  #else
-  return true;
-  #endif
-}
-
-// In secure mode, try to recommit an area
-bool _mi_os_secure_guard_page_reset_before(void* addr) {
-  return _mi_os_secure_guard_page_reset_at((uint8_t*)addr - _mi_os_secure_guard_page_size());
-}
 
 /* -----------------------------------------------------------
   Free memory
@@ -507,14 +464,18 @@ bool _mi_os_reset(void* addr, size_t size) {
 
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size)
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size, mi_commit_fun_t* commit_fun, void* commit_fun_arg)
 {
   if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
   mi_os_stat_counter_increase(purge_calls, 1);
   mi_os_stat_increase(purged, size);
 
-  if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
-      !_mi_preloading())                                 // don't decommit during preloading (unsafe)
+  if (commit_fun != NULL) {
+    bool decommitted = (*commit_fun)(false, p, size, NULL, commit_fun_arg);
+    return decommitted; // needs_recommit?
+  }
+  else if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
+           !_mi_preloading())                                 // don't decommit during preloading (unsafe)
   {
     bool needs_recommit = true;
     mi_os_decommit_ex(p, size, &needs_recommit, stat_size);
@@ -531,7 +492,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size)
 
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge(void* p, size_t size) {
-  return _mi_os_purge_ex(p, size, true, size);
+  return _mi_os_purge_ex(p, size, true, size, NULL, NULL);
 }
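Usage sketch (not part of the patch): the example below illustrates how the new mi_commit_fun_t callback and mi_manage_memory entry point introduced above could be used from application code. It is a minimal sketch under assumptions that are not in the patch itself: the region is reserved with mmap(PROT_NONE) and the hypothetical callback my_commit commits/decommits via mprotect/madvise. Any real callback only has to honor the commit flag; on decommit, a true return is taken to mean the range must be re-committed before reuse (see _mi_os_purge_ex above).

#include <sys/mman.h>
#include <mimalloc.h>

// hypothetical user callback: commit with mprotect, decommit with madvise + mprotect
static bool my_commit(bool commit, void* start, size_t size, bool* is_zero, void* user_arg) {
  (void)user_arg;
  if (commit) {
    if (mprotect(start, size, PROT_READ | PROT_WRITE) != 0) return false;
    if (is_zero != NULL) { *is_zero = false; }   // be conservative: do not claim zeroed pages
    return true;
  }
  else {
    // drop the pages and make them inaccessible again; returning true signals
    // that this range needs to be re-committed before it can be used again
    madvise(start, size, MADV_DONTNEED);
    return (mprotect(start, size, PROT_NONE) == 0);
  }
}

int main(void) {
  const size_t arena_size = 64 * 1024 * 1024;        // example size (assumed)
  void* start = mmap(NULL, arena_size, PROT_NONE,    // reserve only; commit on demand
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == MAP_FAILED) return 1;

  // hand the reserved range to mimalloc with the custom commit callback:
  // not committed, not pinned, not zeroed, no NUMA preference, exclusive arena
  mi_arena_id_t arena_id;
  if (!mi_manage_memory(start, arena_size, /*is_committed*/ false, /*is_pinned*/ false,
                        /*is_zero*/ false, /*numa_node*/ -1, /*exclusive*/ true,
                        &my_commit, /*commit_fun_arg*/ NULL, &arena_id)) return 1;

  // allocate from a heap bound to that arena; commits go through my_commit
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
  void* p = mi_heap_malloc(heap, 1024);
  mi_free(p);
  mi_heap_delete(heap);
  return 0;
}

The same callback pair is what mi_arena_reload now accepts: after mi_arena_unload, the arena memory can be reloaded in another session with mi_arena_reload(base, size, &my_commit, NULL, &arena_id), so purging and guard-page handling keep going through the user-supplied commit function instead of the OS primitives.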