diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 9c5eb362..aab7dffe 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -547,16 +547,16 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 // 2-level page map:
 // double indirection, but low commit and low virtual reserve.
 //
-// the page-map is usually 4 MiB (for 48 bits virtual addresses) and points to sub maps of 64 KiB.
+// the page-map is usually 4 MiB (for 48 bit virtual addresses) and points to sub maps of 64 KiB.
 // the page-map is committed on-demand (in 64 KiB parts) (and sub-maps are committed on-demand as well)
 // one sub page-map = 64 KiB => covers 2^(16-3) * 2^16 = 2^29 = 512 MiB address space
-// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 4 MiB size.
+// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 2^22 bytes = 4 MiB reserved size.
 #define MI_PAGE_MAP_SUB_SHIFT   (13)
 #define MI_PAGE_MAP_SUB_COUNT   (MI_ZU(1) << MI_PAGE_MAP_SUB_SHIFT)
 #define MI_PAGE_MAP_SHIFT       (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
 #define MI_PAGE_MAP_COUNT       (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
 
-extern mi_decl_hidden mi_page_t*** _mi_page_map;
+extern mi_decl_hidden _Atomic(mi_page_t**)* _mi_page_map;
 
 static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE);
@@ -564,16 +564,20 @@ static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   return (u / MI_PAGE_MAP_SUB_COUNT);
 }
 
+static inline mi_page_t** _mi_page_map_at(size_t idx) {
+  return mi_atomic_load_ptr_relaxed(mi_page_t*, &_mi_page_map[idx]);
+}
+
 static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  return _mi_page_map[idx][sub_idx];  // NULL if p==NULL
+  return (_mi_page_map_at(idx))[sub_idx];  // NULL if p==NULL
 }
 
 static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  mi_page_t** const sub = _mi_page_map[idx];
+  mi_page_t** const sub = _mi_page_map_at(idx);
   if mi_unlikely(sub == NULL) return NULL;
   return sub[sub_idx];
 }
diff --git a/src/page-map.c b/src/page-map.c
index c8686924..48ac47d2 100644
--- a/src/page-map.c
+++ b/src/page-map.c
@@ -78,7 +78,7 @@ void _mi_page_map_unsafe_destroy(void) {
   _mi_page_map = NULL;
   mi_page_map_commit = NULL;
   mi_page_map_max_address = NULL;
-  mi_page_map_memid = _mi_memid_none();
+  mi_page_map_memid = _mi_memid_none();
 }
@@ -173,7 +173,7 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att
 // A 2-level page map
 #define MI_PAGE_MAP_SUB_SIZE  (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
 
-mi_decl_cache_align mi_page_t*** _mi_page_map;
+mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
 static size_t mi_page_map_count;
 static void* mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
@@ -203,7 +203,7 @@ bool _mi_page_map_init(void) {
   const size_t reserve_size = page_map_size + os_page_size;
 
   const bool commit = page_map_size <= 64*MI_KiB || mi_option_is_enabled(mi_option_pagemap_commit) || _mi_os_has_overcommit();
-  _mi_page_map = (mi_page_t***)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
+  _mi_page_map = (_Atomic(mi_page_t**)*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
   if (_mi_page_map==NULL) {
     _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB);
     return false;
@@ -236,11 +236,11 @@ void _mi_page_map_unsafe_destroy(void) {
   for (size_t idx = 1; idx < mi_page_map_count; idx++) {  // skip entry 0
     // free all sub-maps
     if (mi_page_map_is_committed(idx, NULL)) {
-      mi_page_t** sub = _mi_page_map[idx];
+      mi_page_t** sub = _mi_page_map_at(idx);
       if (sub != NULL) {
         mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), true, false, false);
         _mi_os_free(memid.mem.os.base, memid.mem.os.size, memid);
-        _mi_page_map[idx] = NULL;
+        mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL);
       }
     }
   }
@@ -270,7 +270,7 @@ static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
     _mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
     mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
   }
-  return _mi_page_map[idx];
+  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]);  // _mi_page_map_at(idx);
 }
 
 static mi_page_t** mi_page_map_ensure_at(size_t idx) {
@@ -288,7 +288,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx) {
     if (!memid.initially_zero) {
       _mi_memzero_aligned(sub, submap_size);
     }
-    if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, ((_Atomic(mi_page_t**)*)&_mi_page_map[idx]), &expect, sub)) {
+    if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) {
       // another thread already allocated it.. free and continue
       _mi_os_free(sub, submap_size, memid);
       sub = expect;