check commit success for pagemap extension so NULL can be returned instead of faulting (issue #1098)

Daan 2025-06-17 18:05:12 -07:00
parent 878627072b
commit d8321f6d66
4 changed files with 97 additions and 47 deletions
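
In short: page-map commit and submap allocation can now fail gracefully, the failure is propagated as a `bool` up through registration, and a lookup treats a missing submap as "not a heap pointer" (NULL) instead of faulting. The following is a self-contained sketch of that pattern, not the mimalloc code itself (names and sizes are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define SUB_COUNT 64

    typedef void** submap_t;
    static submap_t page_map[1024];   // top-level map; entries start out NULL

    // writer: extend the map on demand and report failure to the caller
    static bool map_register(size_t idx, size_t sub_idx, void* value) {
      if (page_map[idx] == NULL) {
        submap_t sub = (submap_t)calloc(SUB_COUNT, sizeof(void*));
        if (sub == NULL) return false;   // was: the caller could not observe this
        page_map[idx] = sub;
      }
      page_map[idx][sub_idx] = value;
      return true;
    }

    // reader: a NULL submap means "never registered", not a crash
    static void* map_lookup(size_t idx, size_t sub_idx) {
      submap_t sub = page_map[idx];
      if (sub == NULL) return NULL;
      return sub[sub_idx];
    }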

include/mimalloc/internal.h

@@ -238,7 +238,7 @@ bool _mi_meta_is_meta_page(void* p);
 
 // "page-map.c"
 bool _mi_page_map_init(void);
-void _mi_page_map_register(mi_page_t* page);
+mi_decl_nodiscard bool _mi_page_map_register(mi_page_t* page);
 void _mi_page_map_unregister(mi_page_t* page);
 void _mi_page_map_unregister_range(void* start, size_t size);
 mi_page_t* _mi_safe_ptr_page(const void* p);
@@ -604,7 +604,8 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 #define MI_PAGE_MAP_SHIFT (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
 #define MI_PAGE_MAP_COUNT (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
-extern mi_decl_hidden _Atomic(mi_page_t**)* _mi_page_map;
+typedef mi_page_t** mi_submap_t;
+extern mi_decl_hidden _Atomic(mi_submap_t)* _mi_page_map;
 
 static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE);
@@ -625,7 +626,7 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  mi_page_t** const sub = _mi_page_map_at(idx);
+  mi_submap_t const sub = _mi_page_map_at(idx);
   if mi_unlikely(sub == NULL) return NULL;
   return sub[sub_idx];
 }
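
Aside: with registration now returning `bool` and marked `mi_decl_nodiscard`, a caller that drops the result gets a compiler diagnostic. A small illustration of the intended caller-side pattern, assuming `mi_decl_nodiscard` expands to `[[nodiscard]]` or `__attribute__((warn_unused_result))` as in mimalloc's platform headers (the `demo` caller below is hypothetical):

    /* Hypothetical caller-side effect of mi_decl_nodiscard (illustration only): */
    mi_decl_nodiscard bool _mi_page_map_register(mi_page_t* page);

    static void demo(mi_page_t* p) {
      _mi_page_map_register(p);            // now triggers a "result unused" warning
      if (!_mi_page_map_register(p)) {     // intended pattern: unwind on failure,
        // e.g. free the page and fail the allocation, as arena.c does below
      }
    }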

src/arena.c

@@ -684,7 +684,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
     commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
     if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
     bool is_zero;
-    if (!mi_arena_commit( mi_memid_arena(memid), page, commit_size, &is_zero, 0)) {
+    if mi_unlikely(!mi_arena_commit( mi_memid_arena(memid), page, commit_size, &is_zero, 0)) {
       _mi_arenas_free(page, alloc_size, memid);
       return NULL;
     }
@@ -710,7 +710,10 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
   mi_page_try_claim_ownership(page);
 
   // register in the page map
-  _mi_page_map_register(page);
+  if mi_unlikely(!_mi_page_map_register(page)) {
+    _mi_arenas_free( page, alloc_size, memid );
+    return NULL;
+  }
 
   // stats
   mi_tld_stat_increase(tld, pages, 1);
@@ -1894,12 +1897,12 @@ static bool mi_arena_page_register(size_t slice_index, size_t slice_count, mi_ar
   mi_assert_internal(slice_count == 1);
   mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, slice_index);
   mi_assert_internal(mi_bitmap_is_setN(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index, 1));
-  _mi_page_map_register(page);
+  if (!_mi_page_map_register(page)) return false; // break
   mi_assert_internal(_mi_ptr_page(page)==page);
   return true;
 }
 
-static bool mi_arena_pages_reregister(mi_arena_t* arena) {
+static mi_decl_nodiscard bool mi_arena_pages_reregister(mi_arena_t* arena) {
   return _mi_bitmap_forall_set(arena->pages, &mi_arena_page_register, arena, NULL);
 }
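
Judging by the `// break` comment, `_mi_bitmap_forall_set` appears to stop visiting as soon as the callback returns `false`, which lets a single failed registration fail the whole re-registration pass. A generic sketch of that early-exit visitor contract (the `forall_set` helper below is hypothetical, not the mimalloc bitmap API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef bool (*visit_fun)(size_t idx, void* arg);

    /* Visit every set bit; a false result from the visitor aborts the
       walk and is propagated to the caller (hypothetical helper). */
    static bool forall_set(const uint8_t* bits, size_t nbits, visit_fun visit, void* arg) {
      for (size_t i = 0; i < nbits; i++) {
        if ((bits[i/8] & (1u << (i%8))) != 0) {
          if (!visit(i, arg)) return false;   // "break": stop early, report failure
        }
      }
      return true;                            // every set bit was visited
    }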
@@ -1979,7 +1982,10 @@ mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_commit_fun_t* c
   if (!mi_arenas_add(arena->subproc, arena, arena_id)) {
     return false;
   }
-  mi_arena_pages_reregister(arena);
+  if (!mi_arena_pages_reregister(arena)) {
+    // todo: clear arena entry in the subproc?
+    return false;
+  }
 
   // adjust abandoned page count
   for (size_t bin = 0; bin < MI_BIN_COUNT; bin++) {

src/init.c

@@ -711,7 +711,7 @@ void mi_process_init(void) mi_attr_noexcept {
   mi_detect_cpu_features();
   _mi_stats_init();
   _mi_os_init();
-  _mi_page_map_init();
+  _mi_page_map_init();   // this could fail.. should we abort in that case?
   mi_heap_main_init();
   mi_tld_main_init();
   // the following two can potentially allocate (on freeBSD for locks and thread keys)

src/page-map.c

@@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "bitmap.h"
 
 static void mi_page_map_cannot_commit(void) {
-  _mi_error_message(EFAULT,"unable to commit memory for the page address map\n");
+  _mi_warning_message("unable to commit the allocation page-map on-demand\n" );
 }
 
 #if MI_PAGE_MAP_FLAT
@@ -32,7 +32,7 @@ static mi_memid_t mi_page_map_memid;
 #define MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT  MI_ARENA_SLICE_SIZE
 static mi_bitmap_t* mi_page_map_commit; // one bit per committed 64 KiB entries
 
-static void mi_page_map_ensure_committed(size_t idx, size_t slice_count);
+static mi_decl_nodiscard bool mi_page_map_ensure_committed(size_t idx, size_t slice_count);
 
 bool _mi_page_map_init(void) {
   size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS);
@@ -71,7 +71,10 @@ bool _mi_page_map_init(void) {
 
   // commit the first part so NULL pointers get resolved without an access violation
   if (!commit) {
-    mi_page_map_ensure_committed(0, 1);
+    if (!mi_page_map_ensure_committed(0, 1)) {
+      mi_page_map_cannot_commit();
+      return false;
+    }
   }
   _mi_page_map[0] = 1; // so _mi_ptr_page(NULL) == NULL
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
@@ -90,7 +93,7 @@ void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) {
 }
 
-static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
+static bool mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   // we always set the commit bits so we can track what ranges are in-use.
   // we only actually commit if the map wasn't committed fully already.
@@ -103,7 +106,10 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
       bool is_zero;
       uint8_t* const start = _mi_page_map + (i * MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT);
       const size_t size = MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT;
-      if (!_mi_os_commit(start, size, &is_zero)) return;
+      if (!_mi_os_commit(start, size, &is_zero)) {
+        mi_page_map_cannot_commit();
+        return false;
+      }
       if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); }
       mi_bitmap_set(mi_page_map_commit, i);
     }
@@ -113,6 +119,7 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
   _mi_page_map[idx] = 0;
   _mi_page_map[idx+slice_count-1] = 0;
   #endif
+  return true;
 }
@@ -124,25 +131,28 @@ static size_t mi_page_map_get_idx(mi_page_t* page, uint8_t** page_start, size_t*
   return _mi_page_map_index(page);
 }
 
-void _mi_page_map_register(mi_page_t* page) {
+bool _mi_page_map_register(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
   if mi_unlikely(_mi_page_map == NULL) {
-    if (!_mi_page_map_init()) return;
+    if (!_mi_page_map_init()) return false;
   }
   mi_assert(_mi_page_map!=NULL);
   uint8_t* page_start;
   size_t slice_count;
   const size_t idx = mi_page_map_get_idx(page, &page_start, &slice_count);
-  mi_page_map_ensure_committed(idx, slice_count);
+  if (!mi_page_map_ensure_committed(idx, slice_count)) {
+    return false;
+  }
 
   // set the offsets
   for (size_t i = 0; i < slice_count; i++) {
     mi_assert_internal(i < 128);
     _mi_page_map[idx + i] = (uint8_t)(i+1);
   }
+  return true;
 }
 
 void _mi_page_map_unregister(mi_page_t* page) {
@@ -158,7 +168,10 @@ void _mi_page_map_unregister(mi_page_t* page) {
 
 void _mi_page_map_unregister_range(void* start, size_t size) {
   const size_t slice_count = _mi_divide_up(size, MI_ARENA_SLICE_SIZE);
   const uintptr_t index = _mi_page_map_index(start);
-  mi_page_map_ensure_committed(index, slice_count); // we commit the range in total; todo: scan the commit bits and clear only those ranges?
+  // todo: scan the commit bits and clear only those ranges?
+  if (!mi_page_map_ensure_committed(index, slice_count)) { // we commit the range in total
+    return;
+  }
   _mi_memzero(&_mi_page_map[index], slice_count);
 }
@@ -180,8 +193,9 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att
 
 // A 2-level page map
 #define MI_PAGE_MAP_SUB_SIZE (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
+#define MI_PAGE_MAP_ENTRIES_PER_CBIT (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
 
-mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
+mi_decl_cache_align _Atomic(mi_submap_t)* _mi_page_map;
 static size_t mi_page_map_count;
 static void* mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
@@ -189,7 +203,9 @@ static mi_memid_t mi_page_map_memid;
 
 // divide the main map in 64 (`MI_BFIELD_BITS`) parts commit those parts on demand
 static _Atomic(mi_bfield_t) mi_page_map_commit;
-#define MI_PAGE_MAP_ENTRIES_PER_CBIT (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
+
+static mi_decl_nodiscard bool mi_page_map_ensure_committed(size_t idx, mi_submap_t* submap);
+static mi_decl_nodiscard bool mi_page_map_ensure_submap_at(size_t idx, mi_submap_t* submap);
+static bool mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count);
 
 static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
   mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit);
@@ -199,16 +215,18 @@ static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
   return ((commit & (MI_ZU(1) << bit_idx)) != 0);
 }
 
-static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
+static bool mi_page_map_ensure_committed(size_t idx, mi_submap_t* submap) {
+  mi_assert_internal(submap!=NULL && *submap==NULL);
   size_t bit_idx;
   if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
     uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
-    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
-      return NULL;
+    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_submap_t), NULL)) {
+      mi_page_map_cannot_commit();
+      return false;
     }
     mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
   }
-  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]); // _mi_page_map_at(idx);
+  *submap = _mi_page_map[idx];
+  return true;
 }
 
 // initialize the page map
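
The signature change above is the key to the fix: the old function returned `mi_page_t**`, where NULL meant both "commit failed" and "committed but no submap allocated yet". Splitting the result into a `bool` plus an out-parameter keeps those two cases distinct. A condensed contrast, with illustrative declarations only (bodies omitted):

    #include <stdbool.h>
    #include <stddef.h>
    typedef void** submap_t;   // stand-in for mi_submap_t

    // old contract: NULL is ambiguous between "commit failed" and
    // "committed, but the submap is not allocated yet"
    submap_t ensure_committed_old(size_t idx);

    // new contract: the bool reports commit success; on success *submap
    // may still be NULL, meaning the submap simply does not exist yet
    bool ensure_committed_new(size_t idx, submap_t* submap);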
@@ -258,7 +276,11 @@ bool _mi_page_map_init(void) {
   if (!mi_page_map_memid.initially_zero) { // initialize low addresses with NULL
     _mi_memzero_aligned(sub0, submap_size);
   }
-  mi_page_map_ensure_committed(0);
+  mi_submap_t nullsub = NULL;
+  if (!mi_page_map_ensure_committed(0, &nullsub)) {
+    mi_page_map_cannot_commit();
+    return false;
+  }
   mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[0], sub0);
 
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
@@ -290,41 +312,62 @@ void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) {
 }
 
-static mi_page_t** mi_page_map_ensure_submap_at(size_t idx) {
-  mi_page_t** sub = mi_page_map_ensure_committed(idx);
+static bool mi_page_map_ensure_submap_at(size_t idx, mi_submap_t* submap) {
+  mi_assert_internal(submap!=NULL && *submap==NULL);
+  mi_submap_t sub = NULL;
+  if (!mi_page_map_ensure_committed(idx, &sub)) {
+    return false;
+  }
   if mi_unlikely(sub == NULL) {
     // sub map not yet allocated, alloc now
     mi_memid_t memid;
     mi_page_t** expect = sub;
     const size_t submap_size = MI_PAGE_MAP_SUB_SIZE;
-    sub = (mi_page_t**)_mi_os_zalloc(submap_size, &memid);
-    if (sub == NULL) {
-      _mi_error_message(EFAULT, "internal error: unable to extend the page map\n");
-      return NULL;
+    sub = (mi_submap_t)_mi_os_zalloc(submap_size, &memid);
+    if (sub==NULL) {
+      _mi_warning_message("internal error: unable to extend the page map\n");
+      return false;
     }
     if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) {
       // another thread already allocated it.. free and continue
       _mi_os_free(sub, submap_size, memid);
       sub = expect;
     }
     mi_assert_internal(sub!=NULL);
   }
-  return sub;
+  *submap = sub;
+  return true;
 }
 
-static void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
+static bool mi_page_map_set_range_prim(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   while (slice_count > 0) {
-    mi_page_t** sub = mi_page_map_ensure_submap_at(idx);
+    mi_submap_t sub = NULL;
+    if (!mi_page_map_ensure_submap_at(idx, &sub)) {
+      return false;
+    }
+    mi_assert_internal(sub!=NULL);
     // set the offsets for the page
-    while (sub_idx < MI_PAGE_MAP_SUB_COUNT) {
+    while (slice_count > 0 && sub_idx < MI_PAGE_MAP_SUB_COUNT) {
       sub[sub_idx] = page;
-      slice_count--; if (slice_count == 0) return;
+      slice_count--;
       sub_idx++;
     }
     idx++; // potentially wrap around to the next idx
     sub_idx = 0;
   }
+  return true;
+}
+
+static bool mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
+  if mi_unlikely(!mi_page_map_set_range_prim(page, idx, sub_idx, slice_count)) {
+    // failed to commit, call again to reset the page pointer if needed
+    if (page != NULL) {
+      mi_page_map_set_range_prim(NULL, idx, sub_idx, slice_count);
+    }
+    return false;
+  }
+  return true;
 }
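
The new `mi_page_map_set_range` wrapper also rolls back a partially applied range: if the primitive fails midway, replaying it with a NULL page clears the entries that were already set, and the replay fails at the same uncommitted submap where the first pass stopped. A standalone sketch of this set-then-undo pattern (simplified; `slot_available` stands in for the commit and allocation checks):

    #include <stdbool.h>
    #include <stddef.h>

    static void* table[256];
    static size_t committed_until = 128;    // slots below this are "committed"
    static bool slot_available(size_t i) { return (i < committed_until); }

    static bool set_range_prim(void* v, size_t idx, size_t n) {
      for (size_t i = idx; i < idx + n; i++) {
        if (!slot_available(i)) return false;  // fails at the same slot on replay
        table[i] = v;
      }
      return true;
    }

    static bool set_range(void* v, size_t idx, size_t n) {
      if (!set_range_prim(v, idx, n)) {
        if (v != NULL) { set_range_prim(NULL, idx, n); }  // undo the partial writes
        return false;
      }
      return true;
    }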
@@ -335,18 +378,18 @@ static size_t mi_page_map_get_idx(mi_page_t* page, size_t* sub_idx, size_t* slice_count) {
   return _mi_page_map_index(page, sub_idx);
 }
 
-void _mi_page_map_register(mi_page_t* page) {
+bool _mi_page_map_register(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
   if mi_unlikely(_mi_page_map == NULL) {
-    if (!_mi_page_map_init()) return;
+    if (!_mi_page_map_init()) return false;
   }
   mi_assert(_mi_page_map!=NULL);
   size_t slice_count;
   size_t sub_idx;
   const size_t idx = mi_page_map_get_idx(page, &sub_idx, &slice_count);
-  mi_page_map_set_range(page, idx, sub_idx, slice_count);
+  return mi_page_map_set_range(page, idx, sub_idx, slice_count);
 }
 
 void _mi_page_map_unregister(mi_page_t* page) {