use proper atomics for the page_map

daanx 2025-05-20 21:04:17 -07:00
parent 20a0b1ffde
commit a7411ae9c5
2 changed files with 16 additions and 12 deletions

@@ -78,7 +78,7 @@ void _mi_page_map_unsafe_destroy(void) {
_mi_page_map = NULL;
mi_page_map_commit = NULL;
mi_page_map_max_address = NULL;
-mi_page_map_memid = _mi_memid_none();
+mi_page_map_memid = _mi_memid_none();
}
@@ -173,7 +173,7 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att
// A 2-level page map
#define MI_PAGE_MAP_SUB_SIZE (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
-mi_decl_cache_align mi_page_t*** _mi_page_map;
+mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
static size_t mi_page_map_count;
static void* mi_page_map_max_address;
static mi_memid_t mi_page_map_memid;
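
For context, a minimal sketch of why the slot type changes from mi_page_t*** to an array of _Atomic(mi_page_t**): sub-maps are installed by one thread while other threads may be reading the same slot, so plain pointer loads and stores would be a data race. The sketch uses standard C11 stdatomic instead of mimalloc's mi_atomic wrappers, and page_t, page_map, and MAP_COUNT are stand-in names, not the real ones.

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct page_s page_t;             // stand-in for mi_page_t
    #define MAP_COUNT 1024                    // hypothetical first-level size

    // First level: an array of atomic pointers to sub-maps. Publication of a
    // sub-map can now use release/acquire ordering instead of racy plain stores.
    static _Atomic(page_t**) page_map[MAP_COUNT];

    static page_t* page_map_lookup(size_t idx, size_t sub_idx) {
      // acquire pairs with the release CAS/store that installed the sub-map,
      // so its zero-initialized contents are visible to this reader
      page_t** sub = atomic_load_explicit(&page_map[idx], memory_order_acquire);
      return (sub == NULL ? NULL : sub[sub_idx]);
    }
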
@@ -203,7 +203,7 @@ bool _mi_page_map_init(void) {
const size_t reserve_size = page_map_size + os_page_size;
const bool commit = page_map_size <= 64*MI_KiB ||
mi_option_is_enabled(mi_option_pagemap_commit) || _mi_os_has_overcommit();
-_mi_page_map = (mi_page_t***)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
+_mi_page_map = (_Atomic(mi_page_t**)*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
if (_mi_page_map==NULL) {
_mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB);
return false;
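
Apart from the new cast, the allocation strategy is unchanged: the whole first level is reserved up front and committed either eagerly (small map, mi_option_pagemap_commit, or OS overcommit) or lazily per block. A rough illustration of that reserve-then-commit idea using POSIX mmap/mprotect rather than mimalloc's _mi_os layer:

    #include <sys/mman.h>
    #include <stddef.h>

    // Reserve address space without backing it, commit pieces on demand later.
    // Only illustrates the idea behind _mi_os_alloc_aligned + _mi_os_commit.
    static void* os_reserve(size_t size) {
      void* p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return (p == MAP_FAILED ? NULL : p);
    }

    static int os_commit(void* start, size_t size) {
      return mprotect(start, size, PROT_READ | PROT_WRITE);   // 0 on success
    }
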
@@ -236,11 +236,11 @@ void _mi_page_map_unsafe_destroy(void) {
for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0
// free all sub-maps
if (mi_page_map_is_committed(idx, NULL)) {
-mi_page_t** sub = _mi_page_map[idx];
+mi_page_t** sub = _mi_page_map_at(idx);
if (sub != NULL) {
mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), true, false, false);
_mi_os_free(memid.mem.os.base, memid.mem.os.size, memid);
-_mi_page_map[idx] = NULL;
+mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL);
}
}
}
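
_mi_page_map_at is the new read accessor (its definition is in the other changed file, not shown in this hunk); presumably it wraps an atomic load of _mi_page_map[idx]. Clearing a slot likewise becomes a release store instead of a plain assignment. A hedged sketch of what such a pair could look like in plain C11; the names and memory orders are assumptions, not the actual mimalloc definitions:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct page_s page_t;                  // stand-in for mi_page_t
    extern _Atomic(page_t**) page_map[];           // first-level map (see earlier sketch)

    // Assumed shape of an accessor like _mi_page_map_at.
    static inline page_t** page_map_at(size_t idx) {
      return atomic_load_explicit(&page_map[idx], memory_order_relaxed);
    }

    // Release store: writes done before clearing (e.g. freeing bookkeeping)
    // are ordered before other threads can observe the NULL slot.
    static inline void page_map_clear_slot(size_t idx) {
      atomic_store_explicit(&page_map[idx], NULL, memory_order_release);
    }
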
@@ -270,7 +270,7 @@ static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
}
-return _mi_page_map[idx];
+return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]); // _mi_page_map_at(idx);
}
static mi_page_t** mi_page_map_ensure_at(size_t idx) {
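
Two things happen in mi_page_map_ensure_committed: committed blocks of the first level are tracked one bit per MI_PAGE_MAP_ENTRIES_PER_CBIT entries in mi_page_map_commit, set with an atomic OR after the commit, and the slot is now returned through an acquire load rather than a plain read, so a non-NULL result also makes the published sub-map contents visible. A rough sketch of the commit-bit idiom with hypothetical names and standard C11 atomics (a racing double commit is harmless since committing is idempotent):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ENTRIES_PER_CBIT 64                    // hypothetical block granularity

    static _Atomic(uint64_t) commit_mask;          // one bit per committed block

    static void ensure_block_committed(size_t idx) {
      const uint64_t bit = (uint64_t)1 << (idx / ENTRIES_PER_CBIT);
      if ((atomic_load_explicit(&commit_mask, memory_order_acquire) & bit) == 0) {
        // os_commit(block_start, block_size);      // commit the whole block once
        atomic_fetch_or_explicit(&commit_mask, bit, memory_order_acq_rel);
      }
    }
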
@@ -288,7 +288,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx) {
if (!memid.initially_zero) {
_mi_memzero_aligned(sub, submap_size);
}
-if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, ((_Atomic(mi_page_t**)*)&_mi_page_map[idx]), &expect, sub)) {
+if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) {
// another thread already allocated it.. free and continue
_mi_os_free(sub, submap_size, memid);
sub = expect;
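
With the slot declared as _Atomic, the CAS no longer needs the explicit cast. The surrounding code is the usual lock-free publish: allocate and zero a fresh sub-map, try to install it with a strong CAS, and if another thread won the race, free the local copy and adopt the published one. A plain-C11 sketch of that pattern with hypothetical helpers (calloc/free instead of the mi_memid-tracked OS allocation):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct page_s page_t;                   // stand-in for mi_page_t
    #define SUB_COUNT 4096                          // hypothetical sub-map size

    extern _Atomic(page_t**) page_map[];            // first-level map

    static page_t** ensure_submap(size_t idx) {
      page_t** sub = atomic_load_explicit(&page_map[idx], memory_order_acquire);
      if (sub != NULL) return sub;
      page_t** fresh = calloc(SUB_COUNT, sizeof(page_t*));   // zero-initialized sub-map
      if (fresh == NULL) return NULL;
      page_t** expect = NULL;
      // On success the zero-fill above is released to other threads; on failure
      // we acquire the sub-map that the winning thread installed (in 'expect').
      if (!atomic_compare_exchange_strong_explicit(&page_map[idx], &expect, fresh,
                                                   memory_order_acq_rel, memory_order_acquire)) {
        free(fresh);                                // another thread already allocated it
        sub = expect;
      }
      else {
        sub = fresh;
      }
      return sub;
    }
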