diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 62387dc3..ce666162 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -238,7 +238,7 @@ bool _mi_meta_is_meta_page(void* p);
 
 // "page-map.c"
 bool        _mi_page_map_init(void);
-void        _mi_page_map_register(mi_page_t* page);
+mi_decl_nodiscard bool _mi_page_map_register(mi_page_t* page);
 void        _mi_page_map_unregister(mi_page_t* page);
 void        _mi_page_map_unregister_range(void* start, size_t size);
 mi_page_t*  _mi_safe_ptr_page(const void* p);
@@ -604,7 +604,8 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 #define MI_PAGE_MAP_SHIFT  (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
 #define MI_PAGE_MAP_COUNT  (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
 
-extern mi_decl_hidden _Atomic(mi_page_t**)* _mi_page_map;
+typedef mi_page_t** mi_submap_t;
+extern mi_decl_hidden _Atomic(mi_submap_t)* _mi_page_map;
 
 static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
   const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE);
@@ -625,7 +626,7 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
-  mi_page_t** const sub = _mi_page_map_at(idx);
+  mi_submap_t const sub = _mi_page_map_at(idx);
   if mi_unlikely(sub == NULL) return NULL;
   return sub[sub_idx];
 }
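
Note: since registration can now fail, the declaration carries `mi_decl_nodiscard` so a caller that drops the result gets a compiler diagnostic. A minimal, self-contained sketch of that pattern (the `nodiscard_attr` macro and all other names below are illustrative stand-ins, not mimalloc's actual definitions):

    #include <stdbool.h>
    #include <stdio.h>

    #if defined(__GNUC__) || defined(__clang__)
      #define nodiscard_attr __attribute__((warn_unused_result))
    #else
      #define nodiscard_attr
    #endif

    typedef struct page_s { int id; } page_t;

    // registration can fail, so the result must not be ignored
    nodiscard_attr static bool page_map_register(page_t* page) {
      return (page != NULL);   // pretend the map commit succeeded
    }

    int main(void) {
      page_t p = { 1 };
      // page_map_register(&p);        // would warn under -Wunused-result
      if (!page_map_register(&p)) {    // callers now handle failure explicitly
        fprintf(stderr, "cannot register the page\n");
        return 1;
      }
      return 0;
    }
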
diff --git a/src/arena.c b/src/arena.c
index b26f4442..c7869797 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -684,7 +684,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
   commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
   if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
   bool is_zero;
-  if (!mi_arena_commit( mi_memid_arena(memid), page, commit_size, &is_zero, 0)) {
+  if mi_unlikely(!mi_arena_commit( mi_memid_arena(memid), page, commit_size, &is_zero, 0)) {
     _mi_arenas_free(page, alloc_size, memid);
     return NULL;
   }
@@ -710,7 +710,10 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
   mi_page_try_claim_ownership(page);
 
   // register in the page map
-  _mi_page_map_register(page);
+  if mi_unlikely(!_mi_page_map_register(page)) {
+    _mi_arenas_free( page, alloc_size, memid );
+    return NULL;
+  }
 
   // stats
   mi_tld_stat_increase(tld, pages, 1);
@@ -1894,12 +1897,12 @@ static bool mi_arena_page_register(size_t slice_index, size_t slice_count, mi_ar
   mi_assert_internal(slice_count == 1);
   mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, slice_index);
   mi_assert_internal(mi_bitmap_is_setN(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index, 1));
-  _mi_page_map_register(page);
+  if (!_mi_page_map_register(page)) return false;  // break the visit
   mi_assert_internal(_mi_ptr_page(page)==page);
   return true;
 }
 
-static bool mi_arena_pages_reregister(mi_arena_t* arena) {
+static mi_decl_nodiscard bool mi_arena_pages_reregister(mi_arena_t* arena) {
   return _mi_bitmap_forall_set(arena->pages, &mi_arena_page_register, arena, NULL);
 }
 
@@ -1979,7 +1982,10 @@ mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_commit_fun_t* c
   if (!mi_arenas_add(arena->subproc, arena, arena_id)) {
     return false;
   }
-  mi_arena_pages_reregister(arena);
+  if (!mi_arena_pages_reregister(arena)) {
+    // todo: clear the arena entry in the subproc?
+    return false;
+  }
 
   // adjust abandoned page count
   for (size_t bin = 0; bin < MI_BIN_COUNT; bin++) {
diff --git a/src/init.c b/src/init.c
index 73847a4b..52a8f11c 100644
--- a/src/init.c
+++ b/src/init.c
@@ -711,7 +711,7 @@ void mi_process_init(void) mi_attr_noexcept {
   mi_detect_cpu_features();
   _mi_stats_init();
   _mi_os_init();
-  _mi_page_map_init();
+  _mi_page_map_init();  // note: this can fail; should we abort in that case?
   mi_heap_main_init();
   mi_tld_main_init();
   // the following two can potentially allocate (on freeBSD for locks and thread keys)
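
Note: the new failure path in `mi_arenas_page_alloc_fresh` follows the usual unwind idiom: every step after the slices are reserved either succeeds or frees them again and returns NULL so the caller can fall back to another source. A compilable toy version of that flow (all names are stand-ins for the arena API; the registration failure is simulated):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct page_s { int id; } page_t;

    static bool commit_memory(page_t* p)      { (void)p; return true;  }
    static bool page_map_register(page_t* p)  { (void)p; return false; }  // simulate failure
    static void arenas_free(page_t* p)        { printf("freed page %d\n", p->id); free(p); }

    static page_t* page_alloc_fresh(void) {
      page_t* page = malloc(sizeof(page_t));
      if (page == NULL) return NULL;
      page->id = 42;
      if (!commit_memory(page))     { arenas_free(page); return NULL; }
      if (!page_map_register(page)) { arenas_free(page); return NULL; }  // the new path
      return page;
    }

    int main(void) {
      page_t* p = page_alloc_fresh();
      printf("allocation %s\n", p ? "succeeded" : "failed cleanly");
      return 0;
    }
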
diff --git a/src/page-map.c b/src/page-map.c
index ce70495b..18a3a035 100644
--- a/src/page-map.c
+++ b/src/page-map.c
@@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "bitmap.h"
 
 static void mi_page_map_cannot_commit(void) {
-  _mi_error_message(EFAULT,"unable to commit memory for the page address map\n");
+  _mi_warning_message("unable to commit the allocation page-map on demand\n");
 }
 
 #if MI_PAGE_MAP_FLAT
@@ -32,7 +32,7 @@ static mi_memid_t mi_page_map_memid;
 #define MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT  MI_ARENA_SLICE_SIZE
 static mi_bitmap_t* mi_page_map_commit; // one bit per committed 64 KiB entries
 
-static void mi_page_map_ensure_committed(size_t idx, size_t slice_count);
+static mi_decl_nodiscard bool mi_page_map_ensure_committed(size_t idx, size_t slice_count);
 
 bool _mi_page_map_init(void) {
   size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS);
@@ -71,7 +71,10 @@ bool _mi_page_map_init(void) {
 
   // commit the first part so NULL pointers get resolved without an access violation
   if (!commit) {
-    mi_page_map_ensure_committed(0, 1);
+    if (!mi_page_map_ensure_committed(0, 1)) {
+      mi_page_map_cannot_commit();
+      return false;
+    }
   }
   _mi_page_map[0] = 1; // so _mi_ptr_page(NULL) == NULL
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
@@ -90,7 +93,7 @@ void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) {
 }
 
 
-static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
+static bool mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   // we always set the commit bits so we can track what ranges are in-use.
   // we only actually commit if the map wasn't committed fully already.
@@ -103,8 +106,11 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
       bool is_zero;
       uint8_t* const start = _mi_page_map + (i * MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT);
       const size_t size = MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT;
-      if (!_mi_os_commit(start, size, &is_zero)) return;
-      if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); }
+      if (!_mi_os_commit(start, size, &is_zero)) {
+        mi_page_map_cannot_commit();
+        return false;
+      }
+      if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); }
       mi_bitmap_set(mi_page_map_commit, i);
     }
   }
@@ -113,6 +119,7 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
   _mi_page_map[idx] = 0;
   _mi_page_map[idx+slice_count-1] = 0;
 #endif
+  return true;
 }
 
 
@@ -124,25 +131,28 @@ static size_t mi_page_map_get_idx(mi_page_t* page, uint8_t** page_start, size_t*
   return _mi_page_map_index(page);
 }
 
-void _mi_page_map_register(mi_page_t* page) {
+bool _mi_page_map_register(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
   if mi_unlikely(_mi_page_map == NULL) {
-    if (!_mi_page_map_init()) return;
+    if (!_mi_page_map_init()) return false;
   }
   mi_assert(_mi_page_map!=NULL);
   uint8_t* page_start;
   size_t   slice_count;
   const size_t idx = mi_page_map_get_idx(page, &page_start, &slice_count);
-  mi_page_map_ensure_committed(idx, slice_count);
+  if (!mi_page_map_ensure_committed(idx, slice_count)) {
+    return false;
+  }
   // set the offsets
   for (size_t i = 0; i < slice_count; i++) {
     mi_assert_internal(i < 128);
     _mi_page_map[idx + i] = (uint8_t)(i+1);
   }
+  return true;
 }
 
 void _mi_page_map_unregister(mi_page_t* page) {
@@ -158,7 +168,10 @@ void _mi_page_map_unregister(mi_page_t* page) {
 void _mi_page_map_unregister_range(void* start, size_t size) {
   const size_t slice_count = _mi_divide_up(size, MI_ARENA_SLICE_SIZE);
   const uintptr_t index = _mi_page_map_index(start);
-  mi_page_map_ensure_committed(index, slice_count); // we commit the range in total; todo: scan the commit bits and clear only those ranges?
+  // todo: scan the commit bits and clear only those ranges?
+  if (!mi_page_map_ensure_committed(index, slice_count)) { // we commit the range in total
+    return;
+  }
   _mi_memzero(&_mi_page_map[index], slice_count);
 }
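
Note: a toy model of the flat map's encoding as used by `_mi_page_map_register` above: one byte per 64 KiB slice, where 0 means "no page" and k means "the page starts k-1 slices back" (simplified and single-threaded; the map size here is made up):

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAP_COUNT 1024   // toy map: one byte per slice

    static uint8_t page_map[MAP_COUNT];

    // register a page covering `slice_count` slices starting at `idx`
    static void map_register(size_t idx, size_t slice_count) {
      for (size_t i = 0; i < slice_count; i++) {
        page_map[idx + i] = (uint8_t)(i + 1);   // offset+1, as in the patch
      }
    }

    // look up the first slice of the page that covers slice `idx`
    static size_t map_page_start(size_t idx) {
      const uint8_t ofs = page_map[idx];
      assert(ofs != 0);
      return idx - (ofs - 1);
    }

    int main(void) {
      map_register(10, 4);   // a page spanning slices 10..13
      printf("slice 12 -> page at slice %zu\n", map_page_start(12));   // prints 10
      return 0;
    }
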
@@ -179,9 +192,10 @@ mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_att
 #else
 
 // A 2-level page map
-#define MI_PAGE_MAP_SUB_SIZE  (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
+#define MI_PAGE_MAP_SUB_SIZE          (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*))
+#define MI_PAGE_MAP_ENTRIES_PER_CBIT  (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
 
-mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
+mi_decl_cache_align _Atomic(mi_submap_t)* _mi_page_map;
 static size_t mi_page_map_count;
 static void* mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
@@ -189,26 +203,30 @@ static mi_memid_t mi_page_map_memid;
 
 // divide the main map in 64 (`MI_BFIELD_BITS`) parts and commit those parts on demand
 static _Atomic(mi_bfield_t) mi_page_map_commit;
 
-#define MI_PAGE_MAP_ENTRIES_PER_CBIT (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
+static mi_decl_nodiscard bool mi_page_map_ensure_committed(size_t idx, mi_submap_t* submap);
+static mi_decl_nodiscard bool mi_page_map_ensure_submap_at(size_t idx, mi_submap_t* submap);
+static bool mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count);
 
 static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
   mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit);
-  const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT;
+  const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT;  mi_assert_internal(bit_idx < MI_BFIELD_BITS);
   if (pbit_idx != NULL) { *pbit_idx = bit_idx; }
   return ((commit & (MI_ZU(1) << bit_idx)) != 0);
 }
 
-static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
+static bool mi_page_map_ensure_committed(size_t idx, mi_submap_t* submap) {
+  mi_assert_internal(submap!=NULL && *submap==NULL);
   size_t bit_idx;
   if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
     uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
-    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
-      return NULL;
+    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_submap_t), NULL)) {
+      mi_page_map_cannot_commit();
+      return false;
     }
     mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
   }
-  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]); // _mi_page_map_at(idx);
+  *submap = _mi_page_map[idx];
+  return true;
 }
 
 // initialize the page map
@@ -258,7 +276,11 @@ bool _mi_page_map_init(void) {
   if (!mi_page_map_memid.initially_zero) { // initialize low addresses with NULL
     _mi_memzero_aligned(sub0, submap_size);
   }
-  mi_page_map_ensure_committed(0);
+  mi_submap_t nullsub = NULL;
+  if (!mi_page_map_ensure_committed(0, &nullsub)) {
+    mi_page_map_cannot_commit();
+    return false;
+  }
   mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[0], sub0);
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
@@ -290,41 +312,62 @@ void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) {
 }
 
 
-static mi_page_t** mi_page_map_ensure_submap_at(size_t idx) {
-  mi_page_t** sub = mi_page_map_ensure_committed(idx);
+static bool mi_page_map_ensure_submap_at(size_t idx, mi_submap_t* submap) {
+  mi_assert_internal(submap!=NULL && *submap==NULL);
+  mi_submap_t sub = NULL;
+  if (!mi_page_map_ensure_committed(idx, &sub)) {
+    return false;
+  }
   if mi_unlikely(sub == NULL) { // sub map not yet allocated, alloc now
     mi_memid_t memid;
     mi_page_t** expect = sub;
     const size_t submap_size = MI_PAGE_MAP_SUB_SIZE;
-    sub = (mi_page_t**)_mi_os_zalloc(submap_size, &memid);
-    if (sub == NULL) {
-      _mi_error_message(EFAULT, "internal error: unable to extend the page map\n");
-      return NULL;
+    sub = (mi_submap_t)_mi_os_zalloc(submap_size, &memid);
+    if (sub==NULL) {
+      _mi_warning_message("internal error: unable to extend the page map\n");
+      return false;
     }
     if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) {
       // another thread already allocated it.. free and continue
      _mi_os_free(sub, submap_size, memid);
       sub = expect;
-      mi_assert_internal(sub!=NULL);
     }
   }
-  return sub;
+  mi_assert_internal(sub!=NULL);
+  *submap = sub;
+  return true;
 }
 
-static void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
+static bool mi_page_map_set_range_prim(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   while (slice_count > 0) {
-    mi_page_t** sub = mi_page_map_ensure_submap_at(idx);
+    mi_submap_t sub = NULL;
+    if (!mi_page_map_ensure_submap_at(idx, &sub)) {
+      return false;
+    }
+    mi_assert_internal(sub!=NULL);
     // set the offsets for the page
-    while (sub_idx < MI_PAGE_MAP_SUB_COUNT) {
+    while (slice_count > 0 && sub_idx < MI_PAGE_MAP_SUB_COUNT) {
       sub[sub_idx] = page;
-      slice_count--; if (slice_count == 0) return;
+      slice_count--;
       sub_idx++;
     }
     idx++; // potentially wrap around to the next idx
     sub_idx = 0;
   }
+  return true;
+}
+
+static bool mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
+  if mi_unlikely(!mi_page_map_set_range_prim(page,idx,sub_idx,slice_count)) {
+    // failed to commit; call again to reset the page pointers that were already set
+    if (page!=NULL) {
+      mi_page_map_set_range_prim(NULL,idx,sub_idx,slice_count);
+    }
+    return false;
+  }
+  return true;
 }
 
 static size_t mi_page_map_get_idx(mi_page_t* page, size_t* sub_idx, size_t* slice_count) {
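
Note: the `mi_page_map_set_range` wrapper undoes a partial registration by re-running the same walk with a NULL page; the rollback stops at exactly the submap whose commit failed, which is also where the forward pass stopped. A compilable, single-threaded model of that idea (the allocation budget fakes a commit failure; none of these names are mimalloc's):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SUB_COUNT 8
    typedef struct page_s { int id; } page_t;
    typedef page_t** submap_t;

    static submap_t main_map[16];
    static int submap_budget = 1;   // allow one submap, then fail

    static bool ensure_submap_at(size_t idx, submap_t* submap) {
      if (main_map[idx] == NULL) {
        if (submap_budget-- <= 0) return false;   // simulated commit failure
        main_map[idx] = calloc(SUB_COUNT, sizeof(page_t*));
        if (main_map[idx] == NULL) return false;
      }
      *submap = main_map[idx];
      return true;
    }

    static bool set_range_prim(page_t* page, size_t idx, size_t sub_idx, size_t n) {
      while (n > 0) {
        submap_t sub = NULL;
        if (!ensure_submap_at(idx, &sub)) return false;
        while (n > 0 && sub_idx < SUB_COUNT) { sub[sub_idx++] = page; n--; }
        idx++; sub_idx = 0;   // continue in the next submap
      }
      return true;
    }

    static bool set_range(page_t* page, size_t idx, size_t sub_idx, size_t n) {
      if (!set_range_prim(page, idx, sub_idx, n)) {
        // roll back whatever part of the range was already written
        if (page != NULL) { set_range_prim(NULL, idx, sub_idx, n); }
        return false;
      }
      return true;
    }

    int main(void) {
      page_t p = { 1 };
      bool ok = set_range(&p, 0, 6, 4);   // spans submaps 0 and 1; submap 1 fails
      printf("registered: %s, entry[0][6] = %p\n",
             ok ? "yes" : "no", (void*)main_map[0][6]);   // NULL again after rollback
      return 0;
    }
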
@@ -335,18 +378,18 @@ static size_t mi_page_map_get_idx(mi_page_t* page, size_t* sub_idx, size_t* slic
   return _mi_page_map_index(page, sub_idx);
 }
 
-void _mi_page_map_register(mi_page_t* page) {
+bool _mi_page_map_register(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
   if mi_unlikely(_mi_page_map == NULL) {
-    if (!_mi_page_map_init()) return;
+    if (!_mi_page_map_init()) return false;
   }
   mi_assert(_mi_page_map!=NULL);
   size_t slice_count;
   size_t sub_idx;
   const size_t idx = mi_page_map_get_idx(page, &sub_idx, &slice_count);
-  mi_page_map_set_range(page, idx, sub_idx, slice_count);
+  return mi_page_map_set_range(page, idx, sub_idx, slice_count);
 }
 
 void _mi_page_map_unregister(mi_page_t* page) {
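
Note: the main map itself is committed on demand in `MI_BFIELD_BITS` chunks: `mi_page_map_ensure_committed` tests the chunk's bit, commits the chunk if needed, and sets the bit only once the commit succeeded, so a failure leaves the bit clear and a later caller simply retries. A small self-contained model of that scheme (64 toy chunks, a fake `os_commit`; not mimalloc's actual atomics wrappers):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t commit_bits;   // bit i set == chunk i is committed

    static bool os_commit(size_t chunk) {  // stand-in for _mi_os_commit
      printf("committing chunk %zu\n", chunk);
      return true;
    }

    static bool ensure_committed(size_t chunk) {
      const uint64_t mask = UINT64_C(1) << chunk;
      if ((atomic_load_explicit(&commit_bits, memory_order_relaxed) & mask) == 0) {
        if (!os_commit(chunk)) return false;   // bit stays clear; a later call retries
        atomic_fetch_or_explicit(&commit_bits, mask, memory_order_acq_rel);
      }
      return true;
    }

    int main(void) {
      ensure_committed(3);   // commits the chunk
      ensure_committed(3);   // already committed: no second commit
      return 0;
    }
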