ensure use of subproc_main during unsafe destroy to avoid dereferencing the heap

daanx 2025-06-06 11:49:44 -07:00
parent a238d2bbbd
commit 7c7ecf096f
5 changed files with 28 additions and 24 deletions
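
In short: when a dynamic library is unloaded with `mi_option_destroy_on_exit`, the unsafe teardown runs at a point where the thread-local heap may no longer be valid, so `_mi_subproc()` (which goes through the heap pointer) cannot be used on that path. The commit threads an explicit `mi_subproc_t*` through the OS-free and page-map routines instead, with NULL meaning "look it up as before". The following standalone sketch only illustrates that pattern; the types and helper names (`subproc_t`, `os_free`, `subproc_from_heap`) are invented for the example and are not mimalloc's real API.

#include <stddef.h>
#include <stdio.h>

/* toy stand-in for mi_subproc_t: just the statistics adjusted on free */
typedef struct subproc_s {
  size_t committed;
  size_t reserved;
} subproc_t;

static subproc_t subproc_main = { 0, 0 };

/* stand-in for _mi_subproc(): in mimalloc this derives the subprocess from
   the thread-local heap pointer, which is exactly what the destroy path
   must avoid touching */
static subproc_t* subproc_from_heap(void) {
  return &subproc_main;
}

/* stand-in for mi_os_prim_free()/_mi_os_free_ex(): the new trailing
   parameter lets callers name the subprocess explicitly */
static void os_free(void* addr, size_t size, size_t commit_size, subproc_t* subproc) {
  if (subproc == NULL) { subproc = subproc_from_heap(); }  /* normal call sites */
  if (commit_size > 0) { subproc->committed -= commit_size; }
  subproc->reserved -= size;
  (void)addr;  /* a real allocator would return the pages to the OS here */
}

int main(void) {
  subproc_main.committed = 8192;
  subproc_main.reserved  = 8192;
  os_free(NULL, 4096, 4096, NULL);           /* regular path: fall back to the lookup */
  os_free(NULL, 4096, 4096, &subproc_main);  /* destroy path: pass it explicitly      */
  printf("committed=%zu reserved=%zu\n", subproc_main.committed, subproc_main.reserved);
  return 0;
}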

View file

@@ -135,7 +135,7 @@ void _mi_os_init(void); // c
void* _mi_os_alloc(size_t size, mi_memid_t* memid);
void* _mi_os_zalloc(size_t size, mi_memid_t* memid);
void _mi_os_free(void* p, size_t size, mi_memid_t memid);
-void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_subproc_t* subproc );
size_t _mi_os_page_size(void);
size_t _mi_os_guard_page_size(void);
@@ -200,7 +200,7 @@ void _mi_page_map_register(mi_page_t* page);
void _mi_page_map_unregister(mi_page_t* page);
void _mi_page_map_unregister_range(void* start, size_t size);
mi_page_t* _mi_safe_ptr_page(const void* p);
-void _mi_page_map_unsafe_destroy(void);
+void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;

View file

@@ -1074,6 +1074,7 @@ bool _mi_arenas_contain(const void* p) {
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
+mi_assert_internal(subproc != NULL);
const size_t max_arena = mi_arenas_get_count(subproc);
size_t new_max_arena = 0;
for (size_t i = 0; i < max_arena; i++) {
@@ -1082,7 +1083,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
// mi_lock_done(&arena->abandoned_visit_lock);
mi_atomic_store_ptr_release(mi_arena_t, &subproc->arenas[i], NULL);
if (mi_memkind_is_os(arena->memid.memkind)) {
-_mi_os_free(mi_arena_start(arena), mi_arena_size(arena), arena->memid);
+_mi_os_free_ex(mi_arena_start(arena), mi_arena_size(arena), true, arena->memid, subproc); // pass `subproc` to avoid accessing the heap pointer (in `_mi_subproc()`)
}
}
}
@@ -1096,7 +1097,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) {
-mi_arenas_unsafe_destroy(_mi_subproc());
+mi_arenas_unsafe_destroy(tld->subproc);
_mi_arenas_collect(true /* force purge */, true /* visit all*/, tld); // purge non-owned arenas
}
@@ -1304,7 +1305,7 @@ static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool com
void* start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid);
if (start == NULL) return ENOMEM;
if (!mi_manage_os_memory_ex2(subproc, start, size, -1 /* numa node */, exclusive, memid, NULL, NULL, arena_id)) {
-_mi_os_free_ex(start, size, commit, memid);
+_mi_os_free_ex(start, size, commit, memid, NULL);
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
return ENOMEM;
}
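
The arena hunks above can be hard to follow in isolation; below is a heavily simplified, hypothetical model of the unsafe arena teardown (none of these names or fields match the real `mi_arena_t`). Owned OS-backed arenas are unpublished and freed, and the subprocess is forwarded explicitly rather than re-derived from the heap.

#include <stddef.h>
#include <stdlib.h>

typedef struct subproc_s { size_t reserved; } subproc_t;

typedef struct arena_s {
  void*  start;        /* base of the OS allocation backing this arena */
  size_t size;
  int    os_backed;    /* nonzero if the memory came straight from the OS */
} arena_t;

static void os_free(void* p, size_t size, subproc_t* subproc) {
  subproc->reserved -= size;   /* charge the stats to the given subprocess */
  free(p);
}

/* model of mi_arenas_unsafe_destroy(subproc): only runs on library unload */
static void arenas_unsafe_destroy(arena_t* arenas[], size_t count, subproc_t* subproc) {
  for (size_t i = 0; i < count; i++) {
    arena_t* arena = arenas[i];
    if (arena == NULL) continue;
    arenas[i] = NULL;                               /* unpublish before releasing */
    if (arena->os_backed) {
      os_free(arena->start, arena->size, subproc);  /* forward subproc explicitly */
    }
  }
}

int main(void) {
  subproc_t sp = { 4096 };
  arena_t a = { malloc(4096), 4096, 1 };
  arena_t* arenas[1] = { &a };
  arenas_unsafe_destroy(arenas, 1, &sp);
  return 0;
}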

View file

@@ -779,8 +779,9 @@ void mi_cdecl _mi_process_done(void) {
mi_heap_collect(heap, true /* force */);
_mi_heap_unsafe_destroy_all(heap); // forcefully release all memory held by all heaps (of this thread only!)
_mi_arenas_unsafe_destroy_all(heap->tld);
-_mi_page_map_unsafe_destroy();
+_mi_page_map_unsafe_destroy(_mi_subproc_main());
}
+//_mi_page_map_unsafe_destroy(_mi_subproc_main());
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
mi_stats_print(NULL);
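
The ordering in `_mi_process_done` matters: this thread's heaps are released first, then the owned arenas, then the page map, and none of these steps may consult the just-destroyed thread-local heap. A tiny runnable sketch of that ordering, with invented stand-in types, might look like this:

#include <stdio.h>

/* illustrative stand-ins only; these do not match mimalloc's real types */
typedef struct subproc_s { const char* name; } subproc_t;
typedef struct tld_s     { subproc_t* subproc; } tld_t;
typedef struct heap_s    { tld_t* tld; } heap_t;

static subproc_t subproc_main = { "main" };

static void heap_unsafe_destroy_all(heap_t* heap) {
  (void)heap;
  printf("1. release all memory held by this thread's heaps\n");
}
static void arenas_unsafe_destroy_all(tld_t* tld) {
  printf("2. destroy owned arenas using subproc '%s' taken from the tld\n", tld->subproc->name);
}
static void page_map_unsafe_destroy(subproc_t* subproc) {
  printf("3. destroy the page map, naming subproc '%s' explicitly\n", subproc->name);
}

/* mirrors the destroy_on_exit branch of _mi_process_done() in spirit */
int main(void) {
  tld_t  tld  = { &subproc_main };
  heap_t heap = { &tld };
  heap_unsafe_destroy_all(&heap);
  arenas_unsafe_destroy_all(heap.tld);
  page_map_unsafe_destroy(&subproc_main);
  return 0;
}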

View file

@@ -168,22 +168,23 @@ bool _mi_os_secure_guard_page_reset_before(void* addr, mi_memid_t memid) {
Free memory
-------------------------------------------------------------- */
-static void mi_os_free_huge_os_pages(void* p, size_t size);
+static void mi_os_free_huge_os_pages(void* p, size_t size, mi_subproc_t* subproc);
-static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
+static void mi_os_prim_free(void* addr, size_t size, size_t commit_size, mi_subproc_t* subproc) {
mi_assert_internal((size % _mi_os_page_size()) == 0);
if (addr == NULL) return; // || _mi_os_is_huge_reserved(addr)
int err = _mi_prim_free(addr, size); // allow size==0 (issue #1041)
if (err != 0) {
_mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
}
+if (subproc == NULL) { subproc = _mi_subproc(); } // from `mi_arenas_unsafe_destroy` we pass subproc_main explicitly as we can no longer use the heap pointer
if (commit_size > 0) {
-mi_os_stat_decrease(committed, commit_size);
+mi_subproc_stat_decrease(subproc, committed, commit_size);
}
-mi_os_stat_decrease(reserved, size);
+mi_subproc_stat_decrease(subproc, reserved, size);
}
-void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
+void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_subproc_t* subproc /* can be NULL */) {
if (mi_memkind_is_os(memid.memkind)) {
size_t csize = memid.mem.os.size;
if (csize==0) { csize = _mi_os_good_alloc_size(size); }
@@ -204,10 +205,10 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
// free it
if (memid.memkind == MI_MEM_OS_HUGE) {
mi_assert(memid.is_pinned);
-mi_os_free_huge_os_pages(base, csize);
+mi_os_free_huge_os_pages(base, csize, subproc);
}
else {
-mi_os_prim_free(base, csize, (still_committed ? commit_size : 0));
+mi_os_prim_free(base, csize, (still_committed ? commit_size : 0), subproc);
}
}
else {
@@ -217,7 +218,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
}
void _mi_os_free(void* p, size_t size, mi_memid_t memid) {
-_mi_os_free_ex(p, size, true, memid);
+_mi_os_free_ex(p, size, true, memid, NULL);
}
@@ -292,7 +293,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
}
#endif
-if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0)); }
+if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0), NULL); }
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
const size_t over_size = size + alignment;
@@ -310,7 +311,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
// explicitly commit only the aligned part
if (commit) {
if (!_mi_os_commit(p, size, NULL)) {
-mi_os_prim_free(*base, over_size, 0);
+mi_os_prim_free(*base, over_size, 0, NULL);
return NULL;
}
}
@@ -326,8 +327,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
size_t mid_size = _mi_align_up(size, _mi_os_page_size());
size_t post_size = over_size - pre_size - mid_size;
mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
-if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); }
-if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); }
+if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0), NULL); }
+if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0), NULL); }
// we can return the aligned pointer on `mmap` systems
p = aligned_p;
*base = aligned_p; // since we freed the pre part, `*base == p`.
@@ -668,7 +669,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// no success, issue a warning and break
if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
-mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
+mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE, NULL);
}
break;
}
@@ -710,11 +711,11 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// free every huge page in a range individually (as we allocated per page)
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
-static void mi_os_free_huge_os_pages(void* p, size_t size) {
+static void mi_os_free_huge_os_pages(void* p, size_t size, mi_subproc_t* subproc) {
if (p==NULL || size==0) return;
uint8_t* base = (uint8_t*)p;
while (size >= MI_HUGE_OS_PAGE_SIZE) {
-mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
+mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE, subproc);
size -= MI_HUGE_OS_PAGE_SIZE;
base += MI_HUGE_OS_PAGE_SIZE;
}

View file

@@ -265,7 +265,8 @@ bool _mi_page_map_init(void) {
}
-void _mi_page_map_unsafe_destroy(void) {
+void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) {
+mi_assert_internal(subproc != NULL);
mi_assert_internal(_mi_page_map != NULL);
if (_mi_page_map == NULL) return;
for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0 (as we allocate that submap at the end of the page_map)
@@ -274,12 +275,12 @@ void _mi_page_map_unsafe_destroy(void) {
mi_page_t** sub = _mi_page_map_at(idx);
if (sub != NULL) {
mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_SIZE, true, false, false);
-_mi_os_free(memid.mem.os.base, memid.mem.os.size, memid);
+_mi_os_free_ex(memid.mem.os.base, memid.mem.os.size, true, memid, subproc);
mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL);
}
}
}
-_mi_os_free(_mi_page_map, mi_page_map_memid.mem.os.size, mi_page_map_memid);
+_mi_os_free_ex(_mi_page_map, mi_page_map_memid.mem.os.size, true, mi_page_map_memid, subproc);
_mi_page_map = NULL;
mi_page_map_count = 0;
mi_page_map_memid = _mi_memid_none();
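
The page-map teardown follows the same discipline: free every allocated sub-map, then the root map itself, charging all of it to the subprocess that was passed in rather than to thread-local state. A rough, self-contained model of destroying such a two-level map (sizes and names invented for the example, not mimalloc's real layout) could look like this:

#include <stddef.h>
#include <stdlib.h>

typedef struct subproc_s { size_t reserved; } subproc_t;

static void os_free(void* p, size_t size, subproc_t* subproc) {
  subproc->reserved -= size;   /* explicit subproc: no thread-local lookup */
  free(p);
}

/* model of _mi_page_map_unsafe_destroy(subproc): entry 0 is skipped because
   (as in the real code) that sub-map lives inside the root allocation */
static void page_map_unsafe_destroy(void** map[], size_t count, size_t root_size,
                                    size_t sub_size, subproc_t* subproc) {
  if (map == NULL) return;
  for (size_t idx = 1; idx < count; idx++) {
    if (map[idx] != NULL) {
      os_free(map[idx], sub_size, subproc);
      map[idx] = NULL;
    }
  }
  os_free(map, root_size, subproc);
}

int main(void) {
  subproc_t sp = { 1024 };
  size_t count = 4, sub_size = 64;
  void*** map = calloc(count, sizeof(void**));
  map[2] = malloc(sub_size);   /* pretend one sub-map is in use */
  page_map_unsafe_destroy(map, count, count * sizeof(void**), sub_size, &sp);
  return 0;
}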