From a7411ae9c5426b179b10015b6104b5105fa83313 Mon Sep 17 00:00:00 2001 From: daanx Date: Tue, 20 May 2025 21:04:17 -0700 Subject: [PATCH 1/2] use proper atomics for the page_map --- include/mimalloc/internal.h | 14 +++++++++----- src/page-map.c | 14 +++++++------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h index 9c5eb362..aab7dffe 100644 --- a/include/mimalloc/internal.h +++ b/include/mimalloc/internal.h @@ -547,16 +547,16 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) { // 2-level page map: // double indirection, but low commit and low virtual reserve. // -// the page-map is usually 4 MiB (for 48 bits virtual addresses) and points to sub maps of 64 KiB. +// the page-map is usually 4 MiB (for 48 bit virtual addresses) and points to sub maps of 64 KiB. // the page-map is committed on-demand (in 64 KiB parts) (and sub-maps are committed on-demand as well) // one sub page-map = 64 KiB => covers 2^(16-3) * 2^16 = 2^29 = 512 MiB address space -// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 4 MiB size. +// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 2^22 bytes = 4 MiB reserved size. 
#define MI_PAGE_MAP_SUB_SHIFT (13) #define MI_PAGE_MAP_SUB_COUNT (MI_ZU(1) << MI_PAGE_MAP_SUB_SHIFT) #define MI_PAGE_MAP_SHIFT (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT) #define MI_PAGE_MAP_COUNT (MI_ZU(1) << MI_PAGE_MAP_SHIFT) -extern mi_decl_hidden mi_page_t*** _mi_page_map; +extern mi_decl_hidden _Atomic(mi_page_t**)* _mi_page_map; static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) { const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE); @@ -564,16 +564,20 @@ static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) { return (u / MI_PAGE_MAP_SUB_COUNT); } +static inline mi_page_t** _mi_page_map_at(size_t idx) { + return mi_atomic_load_ptr_relaxed(mi_page_t*, &_mi_page_map[idx]); +} + static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) { size_t sub_idx; const size_t idx = _mi_page_map_index(p, &sub_idx); - return _mi_page_map[idx][sub_idx]; // NULL if p==NULL + return (_mi_page_map_at(idx))[sub_idx]; // NULL if p==NULL } static inline mi_page_t* _mi_checked_ptr_page(const void* p) { size_t sub_idx; const size_t idx = _mi_page_map_index(p, &sub_idx); - mi_page_t** const sub = _mi_page_map[idx]; + mi_page_t** const sub = _mi_page_map_at(idx); if mi_unlikely(sub == NULL) return NULL; return sub[sub_idx]; } diff --git a/src/page-map.c b/src/page-map.c index c8686924..48ac47d2 100644 --- a/src/page-map.c +++ b/src/page-map.c @@ -78,7 +78,7 @@ void _mi_page_map_unsafe_destroy(void) { _mi_page_map = NULL; mi_page_map_commit = NULL; mi_page_map_max_address = NULL; - mi_page_map_memid = _mi_memid_none(); + mi_page_map_memid = _mi_memid_none(); } @@ -173,7 +173,7 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att // A 2-level page map #define MI_PAGE_MAP_SUB_SIZE (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*)) -mi_decl_cache_align mi_page_t*** _mi_page_map; +mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map; static size_t mi_page_map_count; static void* 
mi_page_map_max_address; static mi_memid_t mi_page_map_memid; @@ -203,7 +203,7 @@ bool _mi_page_map_init(void) { const size_t reserve_size = page_map_size + os_page_size; const bool commit = page_map_size <= 64*MI_KiB || mi_option_is_enabled(mi_option_pagemap_commit) || _mi_os_has_overcommit(); - _mi_page_map = (mi_page_t***)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid); + _mi_page_map = (_Atomic(mi_page_t**)*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid); if (_mi_page_map==NULL) { _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB); return false; @@ -236,11 +236,11 @@ void _mi_page_map_unsafe_destroy(void) { for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0 // free all sub-maps if (mi_page_map_is_committed(idx, NULL)) { - mi_page_t** sub = _mi_page_map[idx]; + mi_page_t** sub = _mi_page_map_at(idx); if (sub != NULL) { mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), true, false, false); _mi_os_free(memid.mem.os.base, memid.mem.os.size, memid); - _mi_page_map[idx] = NULL; + mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL); } } } @@ -270,7 +270,7 @@ static mi_page_t** mi_page_map_ensure_committed(size_t idx) { _mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL); mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx); } - return _mi_page_map[idx]; + return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]); // _mi_page_map_at(idx); } static mi_page_t** mi_page_map_ensure_at(size_t idx) { @@ -288,7 +288,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx) { if (!memid.initially_zero) { _mi_memzero_aligned(sub, submap_size); } - if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, ((_Atomic(mi_page_t**)*)&_mi_page_map[idx]), &expect, sub)) { + if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, 
&_mi_page_map[idx], &expect, sub)) { // another thread already allocated it.. free and continue _mi_os_free(sub, submap_size, memid); sub = expect; From 4161152d17edbfb891733493310ea65278298f76 Mon Sep 17 00:00:00 2001 From: daanx Date: Wed, 21 May 2025 16:04:13 -0700 Subject: [PATCH 2/2] fix valid pointer check for low addresses (issue #1087) --- src/page-map.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/page-map.c b/src/page-map.c index 48ac47d2..6066336e 100644 --- a/src/page-map.c +++ b/src/page-map.c @@ -200,7 +200,8 @@ bool _mi_page_map_init(void) { mi_assert(mi_page_map_count <= MI_PAGE_MAP_COUNT); const size_t os_page_size = _mi_os_page_size(); const size_t page_map_size = _mi_align_up( mi_page_map_count * sizeof(mi_page_t**), os_page_size); - const size_t reserve_size = page_map_size + os_page_size; + const size_t submap_size = MI_PAGE_MAP_SUB_SIZE; + const size_t reserve_size = page_map_size + submap_size; const bool commit = page_map_size <= 64*MI_KiB || mi_option_is_enabled(mi_option_pagemap_commit) || _mi_os_has_overcommit(); _mi_page_map = (_Atomic(mi_page_t**)*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid); @@ -220,10 +221,10 @@ bool _mi_page_map_init(void) { } _mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size); // we reserved a submap part at the end already if (!mi_page_map_memid.initially_committed) { - _mi_os_commit(_mi_page_map[0], os_page_size, NULL); // only first OS page + _mi_os_commit(_mi_page_map[0], submap_size, NULL); // commit full submap (issue #1087) } - if (!mi_page_map_memid.initially_zero) { // initialize first addresses with NULL - _mi_memzero_aligned(_mi_page_map[0], os_page_size); + if (!mi_page_map_memid.initially_zero) { // initialize low addresses with NULL + _mi_memzero_aligned(_mi_page_map[0], submap_size); } mi_assert_internal(_mi_ptr_page(NULL)==NULL); @@ -233,12 +234,12 @@ void
_mi_page_map_unsafe_destroy(void) { mi_assert_internal(_mi_page_map != NULL); if (_mi_page_map == NULL) return; - for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0 + for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0 (as we allocate that submap at the end of the page_map) // free all sub-maps if (mi_page_map_is_committed(idx, NULL)) { mi_page_t** sub = _mi_page_map_at(idx); if (sub != NULL) { - mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), true, false, false); + mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_SIZE, true, false, false); _mi_os_free(memid.mem.os.base, memid.mem.os.size, memid); mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL); } @@ -279,7 +280,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx) { // sub map not yet allocated, alloc now mi_memid_t memid; mi_page_t** expect = sub; - const size_t submap_size = MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*); + const size_t submap_size = MI_PAGE_MAP_SUB_SIZE; sub = (mi_page_t**)_mi_os_alloc(submap_size, &memid); if (sub == NULL) { _mi_error_message(EFAULT, "internal error: unable to extend the page map\n");