check commit success for pagemap extension so NULL can be returned instead of faulting (issue #1098)

commit a18b692262 (parent ac4e3fd177)
Author: Daan
Date:   2025-06-17 18:05:12 -07:00

4 changed files with 92 additions and 50 deletions

--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h

@@ -193,7 +193,7 @@ bool _mi_meta_is_meta_page(void* p);
 // "page-map.c"
 bool _mi_page_map_init(void);
-void _mi_page_map_register(mi_page_t* page);
+mi_decl_nodiscard bool _mi_page_map_register(mi_page_t* page);
 void _mi_page_map_unregister(mi_page_t* page);
 void _mi_page_map_unregister_range(void* start, size_t size);
 mi_page_t* _mi_safe_ptr_page(const void* p);
@@ -557,7 +557,9 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 #define MI_PAGE_MAP_SHIFT (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
 #define MI_PAGE_MAP_COUNT (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
-extern mi_decl_hidden mi_page_t*** _mi_page_map;
+typedef mi_page_t** mi_submap_t;
+extern mi_decl_hidden mi_submap_t* _mi_page_map;
 
 static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE);
@@ -574,7 +576,7 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  mi_page_t** const sub = _mi_page_map[idx];
+  mi_submap_t const sub = _mi_page_map[idx];
   if mi_unlikely(sub == NULL) return (mi_page_t*)&_mi_page_empty;
   return sub[sub_idx];
 }
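
The change above preserves the key invariant of the checked lookup: an address whose submap was never allocated resolves to the sentinel _mi_page_empty instead of dereferencing a NULL submap, so e.g. mi_free can probe arbitrary pointers without faulting. A minimal standalone sketch of this two-level shape (the names page_t, submap_t, EMPTY_PAGE and the constants here are illustrative, not mimalloc's):

  #include <stdint.h>
  #include <stddef.h>

  typedef struct page_s { int tag; } page_t;
  typedef page_t** submap_t;             // second level: array of page pointers

  static page_t   EMPTY_PAGE;            // sentinel so lookups never fault
  static submap_t root[1u << 20];        // first level; submaps allocated lazily

  // Resolve an address to its page; an absent submap yields the sentinel.
  static page_t* checked_ptr_page(const void* p, size_t sub_count, size_t slice_size) {
    const size_t u = (size_t)((uintptr_t)p / slice_size);
    submap_t const sub = root[u / sub_count];
    if (sub == NULL) return &EMPTY_PAGE; // unmapped region: safe sentinel
    return sub[u % sub_count];
  }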

--- a/src/arena.c
+++ b/src/arena.c

@@ -688,7 +688,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
     commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
     if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
     bool is_zero;
-    if (!_mi_os_commit(page, commit_size, &is_zero)) {
+    if mi_unlikely(!_mi_os_commit(page, commit_size, &is_zero)) {
       _mi_arenas_free( page, alloc_size, memid );
       return NULL;
     }
@@ -714,7 +714,10 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
   mi_page_try_claim_ownership(page);
 
   // register in the page map
-  _mi_page_map_register(page);
+  if mi_unlikely(!_mi_page_map_register(page)) {
+    _mi_arenas_free( page, alloc_size, memid );
+    return NULL;
+  }
 
   // stats
   mi_tld_stat_increase(tld, pages, 1);
@@ -1845,12 +1848,12 @@ static bool mi_arena_page_register(size_t slice_index, size_t slice_count, mi_ar
   mi_assert_internal(slice_count == 1);
   mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, slice_index);
   mi_assert_internal(mi_bitmap_is_setN(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index, 1));
-  _mi_page_map_register(page);
+  if (!_mi_page_map_register(page)) return false; // break
   mi_assert_internal(_mi_ptr_page(page)==page);
   return true;
 }
 
-static bool mi_arena_pages_reregister(mi_arena_t* arena) {
+static mi_decl_nodiscard bool mi_arena_pages_reregister(mi_arena_t* arena) {
   return _mi_bitmap_forall_set(arena->pages, &mi_arena_page_register, arena, NULL);
 }
@@ -1935,7 +1938,10 @@ mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* are
   if (!mi_arenas_add(arena->subproc, arena, arena_id)) {
     return false;
   }
-  mi_arena_pages_reregister(arena);
+  if (!mi_arena_pages_reregister(arena)) {
+    // todo: clear arena entry in the subproc?
+    return false;
+  }
   return true;
 }
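
The reregister path relies on the visitor contract of _mi_bitmap_forall_set: a callback that returns false stops the iteration and makes the whole call return false, which mi_arena_reload now propagates to its caller. A reduced sketch of that contract (the byte-array bitmap here is a simplified stand-in for mimalloc's bitmap):

  #include <stdbool.h>
  #include <stddef.h>

  typedef bool (mi_visit_fun)(size_t idx, void* arg);

  // Visit every set bit; stop and report failure as soon as a visitor
  // returns false -- the shape used by mi_arena_page_register above.
  static bool forall_set(const unsigned char* bits, size_t nbits,
                         mi_visit_fun* visit, void* arg) {
    for (size_t i = 0; i < nbits; i++) {
      if ((bits[i/8] >> (i%8)) & 1) {
        if (!visit(i, arg)) return false; // failed registration aborts the walk
      }
    }
    return true;
  }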

--- a/src/init.c
+++ b/src/init.c

@@ -680,7 +680,7 @@ void mi_process_init(void) mi_attr_noexcept {
   mi_detect_cpu_features();
   _mi_os_init();
-  _mi_page_map_init();
+  _mi_page_map_init();  // this could fail.. should we abort in that case?
   mi_heap_main_init();
   mi_tld_main_init();
   // the following two can potentially allocate (on freeBSD for locks and thread keys)

--- a/src/page-map.c
+++ b/src/page-map.c

@@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "bitmap.h"
 
 static void mi_page_map_cannot_commit(void) {
-  _mi_error_message(ENOMEM, "unable to commit the allocation page-map on-demand\n" );
+  _mi_warning_message("unable to commit the allocation page-map on-demand\n" );
 }
 
 #if MI_PAGE_MAP_FLAT
@@ -32,7 +32,7 @@ static mi_memid_t mi_page_map_memid;
 #define MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT MI_ARENA_SLICE_SIZE
 static mi_bitmap_t* mi_page_map_commit; // one bit per committed 64 KiB entries
 
-static void mi_page_map_ensure_committed(size_t idx, size_t slice_count);
+static mi_decl_nodiscard bool mi_page_map_ensure_committed(size_t idx, size_t slice_count);
 
 bool _mi_page_map_init(void) {
   size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS);
@@ -71,14 +71,17 @@ bool _mi_page_map_init(void) {
   // commit the first part so NULL pointers get resolved without an access violation
   if (!commit) {
-    mi_page_map_ensure_committed(0, 1);
+    if (!mi_page_map_ensure_committed(0, 1)) {
+      mi_page_map_cannot_commit();
+      return false;
+    }
   }
   _mi_page_map[0] = 1; // so _mi_ptr_page(NULL) == NULL
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
   return true;
 }
 
-static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
+static bool mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   // we always set the commit bits so we can track what ranges are in-use.
   // we only actually commit if the map wasn't committed fully already.
@@ -93,7 +96,7 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
       const size_t size = MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT;
       if (!_mi_os_commit(start, size, &is_zero)) {
         mi_page_map_cannot_commit();
-        return;
+        return false;
       }
       if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); }
       mi_bitmap_set(mi_page_map_commit, i);
@@ -104,6 +107,7 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
   _mi_page_map[idx] = 0;
   _mi_page_map[idx+slice_count-1] = 0;
   #endif
+  return true;
 }
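
mi_page_map_ensure_committed is the heart of the fix: it used to return void, so callers pressed on after a failed _mi_os_commit and later wrote into uncommitted memory. The essential shape of bit-tracked, on-demand commit, as a reduced sketch (os_commit is a stub standing in for _mi_os_commit; the chunk granularity is illustrative):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stddef.h>

  static bool os_commit(void* p, size_t size) { // stub for _mi_os_commit
    (void)p; (void)size; return true;
  }

  // One commit bit per `chunk` bytes of the map; commit lazily and return
  // false on the first failure so the caller can back out cleanly.
  static bool ensure_committed(uint8_t* map, uint64_t commit_bits[],
                               size_t chunk, size_t first, size_t last) {
    for (size_t i = first; i <= last; i++) {
      if ((commit_bits[i/64] & (1ULL << (i%64))) == 0) {
        if (!os_commit(map + i*chunk, chunk)) return false;
        commit_bits[i/64] |= (1ULL << (i%64));
      }
    }
    return true;
  }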
@@ -115,25 +119,28 @@ static size_t mi_page_map_get_idx(mi_page_t* page, uint8_t** page_start, size_t*
   return _mi_page_map_index(page);
 }
 
-void _mi_page_map_register(mi_page_t* page) {
+bool _mi_page_map_register(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
   if mi_unlikely(_mi_page_map == NULL) {
-    if (!_mi_page_map_init()) return;
+    if (!_mi_page_map_init()) return false;
   }
   mi_assert(_mi_page_map!=NULL);
   uint8_t* page_start;
   size_t slice_count;
   const size_t idx = mi_page_map_get_idx(page, &page_start, &slice_count);
-  mi_page_map_ensure_committed(idx, slice_count);
+  if (!mi_page_map_ensure_committed(idx, slice_count)) {
+    return false;
+  }
 
   // set the offsets
   for (size_t i = 0; i < slice_count; i++) {
     mi_assert_internal(i < 128);
     _mi_page_map[idx + i] = (uint8_t)(i+1);
   }
+  return true;
 }
 
 void _mi_page_map_unregister(mi_page_t* page) {
@@ -149,7 +156,10 @@ void _mi_page_map_unregister(mi_page_t* page) {
 void _mi_page_map_unregister_range(void* start, size_t size) {
   const size_t slice_count = _mi_divide_up(size, MI_ARENA_SLICE_SIZE);
   const uintptr_t index = _mi_page_map_index(start);
-  mi_page_map_ensure_committed(index, slice_count); // we commit the range in total; todo: scan the commit bits and clear only those ranges?
+  // todo: scan the commit bits and clear only those ranges?
+  if (!mi_page_map_ensure_committed(index, slice_count)) { // we commit the range in total
+    return;
+  }
   _mi_memzero(&_mi_page_map[index], slice_count);
 }
@@ -172,15 +182,15 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att
 // A 2-level page map
 #define MI_PAGE_MAP_SUB_SIZE (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
 
-mi_decl_cache_align mi_page_t*** _mi_page_map;
+mi_decl_cache_align mi_submap_t* _mi_page_map;
 static void* mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
 
 static _Atomic(mi_bfield_t) mi_page_map_commit;
 
-static mi_page_t** mi_page_map_ensure_committed(size_t idx);
-static mi_page_t** mi_page_map_ensure_at(size_t idx);
-static inline void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count);
+static mi_decl_nodiscard bool mi_page_map_ensure_committed(size_t idx, mi_submap_t* submap);
+static mi_decl_nodiscard bool mi_page_map_ensure_at(size_t idx, mi_submap_t* submap);
+static bool mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count);
 
 bool _mi_page_map_init(void) {
   size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS);
@@ -243,52 +253,76 @@ static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
   return ((commit & (MI_ZU(1) << bit_idx)) != 0);
 }
 
-static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
+static bool mi_page_map_ensure_committed(size_t idx, mi_submap_t* submap) {
+  mi_assert_internal(submap!=NULL && *submap==NULL);
   size_t bit_idx;
   if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
     uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
-    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
+    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_submap_t), NULL)) {
       mi_page_map_cannot_commit();
-      return NULL;
+      return false;
     }
     mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
   }
-  return _mi_page_map[idx];
+  *submap = _mi_page_map[idx];
+  return true;
 }
-static mi_page_t** mi_page_map_ensure_at(size_t idx) {
-  mi_page_t** sub = mi_page_map_ensure_committed(idx);
-  if mi_unlikely(sub == NULL) {
-    // sub map not yet allocated, alloc now
-    mi_memid_t memid;
-    sub = (mi_page_t**)_mi_os_alloc(MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), &memid);
-    mi_page_t** expect = NULL;
-    if (!mi_atomic_cas_strong_acq_rel(((_Atomic(mi_page_t**)*)&_mi_page_map[idx]), &expect, sub)) {
-      // another thread already allocated it.. free and continue
-      _mi_os_free(sub, MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*), memid);
-      sub = expect;
-      mi_assert_internal(sub!=NULL);
-    }
-    if (sub == NULL) {
-      _mi_error_message(EFAULT, "internal error: unable to extend the page map\n");
-    }
-  }
-  return sub;
-}
+static bool mi_page_map_ensure_at(size_t idx, mi_submap_t* submap) {
+  mi_assert_internal(submap!=NULL && *submap==NULL);
+  mi_submap_t sub = NULL;
+  if (!mi_page_map_ensure_committed(idx, &sub)) {
+    return false;
+  }
+  if mi_unlikely(sub==NULL) {
+    // sub map not yet allocated, allocate it now
+    mi_memid_t memid;
+    sub = (mi_submap_t)_mi_os_alloc(MI_PAGE_MAP_SUB_SIZE, &memid);
+    if (sub==NULL) {
+      _mi_warning_message("unable to extend the page map\n");
+      return false;
+    }
+    mi_page_t** expect = NULL;
+    if (!mi_atomic_cas_strong_acq_rel(((_Atomic(mi_submap_t)*)&_mi_page_map[idx]), &expect, sub)) {
+      // another thread already allocated it.. free and continue
+      _mi_os_free(sub, MI_PAGE_MAP_SUB_SIZE, memid);
+      sub = expect;
+    }
+  }
+  mi_assert_internal(sub!=NULL);
+  *submap = sub;
+  return true;
+}
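
The lazy submap allocation above uses an optimistic allocate-then-CAS pattern: every racing thread allocates a candidate submap, but only one CAS succeeds; the losers free their copy and adopt the winner's. The race resolution in isolation (calloc/free and the C11 atomics are stand-ins for _mi_os_alloc/_mi_os_free and mimalloc's atomic wrappers):

  #include <stdatomic.h>
  #include <stdlib.h>

  typedef struct page_s page_t;
  typedef page_t** submap_t;

  // Install a submap in `slot` exactly once, even under contention.
  static submap_t ensure_submap(_Atomic(submap_t)* slot, size_t sub_count) {
    submap_t sub = atomic_load(slot);
    if (sub != NULL) return sub;                        // fast path: already installed
    sub = (submap_t)calloc(sub_count, sizeof(page_t*)); // candidate submap
    if (sub == NULL) return NULL;                       // report failure to the caller
    submap_t expect = NULL;
    if (!atomic_compare_exchange_strong(slot, &expect, sub)) {
      free(sub);     // another thread won the race: discard our candidate
      sub = expect;  // and use the one that was installed
    }
    return sub;
  }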
-static void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
+static bool mi_page_map_set_range_prim(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   while (slice_count > 0) {
-    mi_page_t** sub = mi_page_map_ensure_at(idx);
+    mi_submap_t sub = NULL;
+    if (!mi_page_map_ensure_at(idx, &sub)) {
+      return false;
+    }
+    mi_assert_internal(sub!=NULL);
     // set the offsets for the page
-    while (sub_idx < MI_PAGE_MAP_SUB_COUNT) {
+    while (slice_count > 0 && sub_idx < MI_PAGE_MAP_SUB_COUNT) {
       sub[sub_idx] = page;
-      slice_count--; if (slice_count == 0) return;
+      slice_count--;
       sub_idx++;
     }
     idx++; // potentially wrap around to the next idx
     sub_idx = 0;
   }
+  return true;
+}
+
+static bool mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
+  if mi_unlikely(!mi_page_map_set_range_prim(page,idx,sub_idx,slice_count)) {
+    // failed to commit, call again to reset the page pointer if needed
+    if (page!=NULL) {
+      mi_page_map_set_range_prim(NULL,idx,sub_idx,slice_count);
+    }
+    return false;
+  }
+  return true;
 }
 
 static size_t mi_page_map_get_idx(mi_page_t* page, size_t* sub_idx, size_t* slice_count) {
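
mi_page_map_set_range wraps the raw walk with a rollback: if the walk fails part-way (a submap could not be committed or allocated), the identical walk is repeated with NULL, clearing every entry the first pass managed to set; the retry can only fail at the same point, since the prefix it revisits is already committed. The pattern in isolation (try_fill is a stand-in for the _prim walk; a NULL slot models a commit failure):

  #include <stdbool.h>
  #include <stddef.h>

  typedef struct page_s page_t;

  // Write `value` into each slot until a failure point.
  static bool try_fill(page_t** slots[], size_t n, page_t* value) {
    for (size_t i = 0; i < n; i++) {
      if (slots[i] == NULL) return false; // models a failed commit/alloc
      *slots[i] = value;
    }
    return true;
  }

  // On failure, repeat the same walk with NULL: it deterministically
  // clears exactly the prefix the first pass set, then fails at the
  // same point, leaving no stale page pointers behind.
  static bool fill_or_rollback(page_t** slots[], size_t n, page_t* page) {
    if (!try_fill(slots, n, page)) {
      if (page != NULL) { try_fill(slots, n, NULL); }
      return false;
    }
    return true;
  }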
@@ -299,18 +333,18 @@ static size_t mi_page_map_get_idx(mi_page_t* page, size_t* sub_idx, size_t* slic
   return _mi_page_map_index(page, sub_idx);
 }
 
-void _mi_page_map_register(mi_page_t* page) {
+bool _mi_page_map_register(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
   if mi_unlikely(_mi_page_map == NULL) {
-    if (!_mi_page_map_init()) return;
+    if (!_mi_page_map_init()) return false;
   }
   mi_assert(_mi_page_map!=NULL);
   size_t slice_count;
   size_t sub_idx;
   const size_t idx = mi_page_map_get_idx(page, &sub_idx, &slice_count);
-  mi_page_map_set_range(page, idx, sub_idx, slice_count);
+  return mi_page_map_set_range(page, idx, sub_idx, slice_count);
 }
 
 void _mi_page_map_unregister(mi_page_t* page) {