use proper atomics for the page_map

daanx 2025-05-20 21:04:17 -07:00
parent 20a0b1ffde
commit a7411ae9c5
2 changed files with 16 additions and 12 deletions

include/mimalloc/internal.h

@@ -547,16 +547,16 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 // 2-level page map:
 // double indirection, but low commit and low virtual reserve.
 //
-// the page-map is usually 4 MiB (for 48 bits virtual addresses) and points to sub maps of 64 KiB.
+// the page-map is usually 4 MiB (for 48 bit virtual addresses) and points to sub maps of 64 KiB.
 // the page-map is committed on-demand (in 64 KiB parts) (and sub-maps are committed on-demand as well)
 // one sub page-map = 64 KiB => covers 2^(16-3) * 2^16 = 2^29 = 512 MiB address space
-// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 4 MiB size.
+// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 2^22 bytes = 4 MiB reserved size.
 #define MI_PAGE_MAP_SUB_SHIFT   (13)
 #define MI_PAGE_MAP_SUB_COUNT   (MI_ZU(1) << MI_PAGE_MAP_SUB_SHIFT)
 #define MI_PAGE_MAP_SHIFT       (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
 #define MI_PAGE_MAP_COUNT       (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
 
-extern mi_decl_hidden mi_page_t*** _mi_page_map;
+extern mi_decl_hidden _Atomic(mi_page_t**)* _mi_page_map;
 
 static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE);
@@ -564,16 +564,20 @@ static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   return (u / MI_PAGE_MAP_SUB_COUNT);
 }
 
+static inline mi_page_t** _mi_page_map_at(size_t idx) {
+  return mi_atomic_load_ptr_relaxed(mi_page_t*, &_mi_page_map[idx]);
+}
+
 static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  return _mi_page_map[idx][sub_idx];   // NULL if p==NULL
+  return (_mi_page_map_at(idx))[sub_idx];   // NULL if p==NULL
 }
 
 static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  mi_page_t** const sub = _mi_page_map[idx];
+  mi_page_t** const sub = _mi_page_map_at(idx);
   if mi_unlikely(sub == NULL) return NULL;
   return sub[sub_idx];
 }
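
A note on the sizing arithmetic in the updated comment: it can be verified with a small standalone program. This is a minimal sketch assuming MI_MAX_VABITS = 48 and 64 KiB arena slices (MI_ARENA_SLICE_SHIFT = 16), as the comment implies; the names and constants below are local to the sketch rather than taken from mimalloc's headers.

// Standalone check of the 2-level index arithmetic above; the constants are
// assumed values (48-bit VA, 64 KiB arena slices), not mimalloc definitions.
#include <stdint.h>
#include <stdio.h>

#define SLICE_SHIFT  16                                // 64 KiB arena slice
#define SUB_SHIFT    13                                // 2^13 entries per 64 KiB sub-map
#define MAP_SHIFT    (48 - SUB_SHIFT - SLICE_SHIFT)    // 19 bits of top-level index

int main(void) {
  const uintptr_t p = 0x7f1234567890;                  // example address
  const uintptr_t u = p >> SLICE_SHIFT;                // slice number
  const size_t sub_idx = (size_t)(u & ((1u << SUB_SHIFT) - 1));
  const size_t idx     = (size_t)(u >> SUB_SHIFT);
  // top level: 2^19 pointers * 8 bytes = 2^22 bytes = 4 MiB reserved;
  // each sub-map covers 2^13 * 64 KiB = 512 MiB of address space.
  printf("idx=%zu sub_idx=%zu (top map = %zu MiB)\n",
         idx, sub_idx, (((size_t)1 << MAP_SHIFT) * sizeof(void*)) >> 20);
  return 0;
}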

src/page-map.c

@@ -78,7 +78,7 @@ void _mi_page_map_unsafe_destroy(void) {
   _mi_page_map = NULL;
   mi_page_map_commit = NULL;
   mi_page_map_max_address = NULL;
   mi_page_map_memid = _mi_memid_none();
 }
@@ -173,7 +173,7 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att
 
 // A 2-level page map
 #define MI_PAGE_MAP_SUB_SIZE   (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
-mi_decl_cache_align mi_page_t*** _mi_page_map;
+mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
 static size_t     mi_page_map_count;
 static void*      mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
@@ -203,7 +203,7 @@ bool _mi_page_map_init(void) {
   const size_t reserve_size = page_map_size + os_page_size;
   const bool commit = page_map_size <= 64*MI_KiB ||
                       mi_option_is_enabled(mi_option_pagemap_commit) || _mi_os_has_overcommit();
-  _mi_page_map = (mi_page_t***)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
+  _mi_page_map = (_Atomic(mi_page_t**)*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
   if (_mi_page_map==NULL) {
     _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB);
     return false;
@@ -236,11 +236,11 @@ void _mi_page_map_unsafe_destroy(void) {
   for (size_t idx = 1; idx < mi_page_map_count; idx++) {  // skip entry 0
     // free all sub-maps
     if (mi_page_map_is_committed(idx, NULL)) {
-      mi_page_t** sub = _mi_page_map[idx];
+      mi_page_t** sub = _mi_page_map_at(idx);
       if (sub != NULL) {
         mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), true, false, false);
         _mi_os_free(memid.mem.os.base, memid.mem.os.size, memid);
-        _mi_page_map[idx] = NULL;
+        mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL);
       }
     }
   }
@@ -270,7 +270,7 @@ static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
     _mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
     mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
   }
-  return _mi_page_map[idx];
+  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]);  // _mi_page_map_at(idx);
 }
 
 static mi_page_t** mi_page_map_ensure_at(size_t idx) {
@@ -288,7 +288,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx) {
     if (!memid.initially_zero) {
       _mi_memzero_aligned(sub, submap_size);
     }
-    if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, ((_Atomic(mi_page_t**)*)&_mi_page_map[idx]), &expect, sub)) {
+    if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) {
       // another thread already allocated it.. free and continue
       _mi_os_free(sub, submap_size, memid);
       sub = expect;
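
A note on the memory-ordering contract this change establishes: below is a generic C11 sketch of the same publish-once pattern, using hypothetical names rather than mimalloc's atomic macros. Writers publish a zero-initialized sub-map with a strong CAS; a losing thread frees its copy and adopts the winner's; readers synchronize through the acquire load.

// Generic C11 sketch of the publication pattern in the diff above: hypothetical
// names, not mimalloc's API. A sub-map is zeroed before being published with a
// strong CAS, so any reader that observes the pointer also observes its contents.
#include <stdatomic.h>
#include <stdlib.h>

#define SUB_COUNT 8192     /* 2^13 entries per sub-map, as in the diff */

typedef struct page_s page_t;
static _Atomic(page_t**) page_map[(size_t)1 << 19];  /* 4 MiB top-level array */

static page_t** map_ensure_at(size_t idx) {
  page_t** sub = atomic_load_explicit(&page_map[idx], memory_order_acquire);
  if (sub != NULL) return sub;
  page_t** fresh = calloc(SUB_COUNT, sizeof(page_t*));  /* zeroed sub-map */
  if (fresh == NULL) return NULL;
  page_t** expect = NULL;
  if (!atomic_compare_exchange_strong_explicit(&page_map[idx], &expect, fresh,
                                               memory_order_acq_rel,
                                               memory_order_acquire)) {
    free(fresh);     /* another thread won the race; use its sub-map */
    sub = expect;
  }
  else {
    sub = fresh;
  }
  return sub;
}

Note that the commit keeps an acquire load in mi_page_map_ensure_committed while the hot lookup path (_mi_page_map_at) uses a relaxed load; presumably this relies on the address dependency through the loaded pointer, which orders the subsequent sub[sub_idx] dereference on mainstream hardware, while the slow paths retain the portable acquire/release pairing.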