merge from dev; check all os_commit calls

This commit is contained in:
Daan 2025-06-03 15:26:41 -07:00
commit 488304053e
5 changed files with 64 additions and 29 deletions

View file

@@ -145,13 +145,13 @@ bool _mi_os_has_virtual_reserve(void);
size_t _mi_os_virtual_address_bits(void);
bool _mi_os_reset(void* addr, size_t size);
bool _mi_os_commit(void* p, size_t size, bool* is_zero);
bool _mi_os_decommit(void* addr, size_t size);
bool _mi_os_protect(void* addr, size_t size);
mi_decl_nodiscard bool _mi_os_commit(void* p, size_t size, bool* is_zero);
mi_decl_nodiscard bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
mi_decl_nodiscard bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_purge(void* p, size_t size);
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size, mi_commit_fun_t* commit_fun, void* commit_fun_arg);
bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
size_t _mi_os_secure_guard_page_size(void);
bool _mi_os_secure_guard_page_set_at(void* addr, mi_memid_t memid);
@@ -216,7 +216,7 @@ void _mi_deferred_free(mi_heap_t* heap, bool force);
void _mi_page_free_collect(mi_page_t* page, bool force);
void _mi_page_free_collect_partly(mi_page_t* page, mi_block_t* head);
void _mi_page_init(mi_heap_t* heap, mi_page_t* page);
mi_decl_nodiscard bool _mi_page_init(mi_heap_t* heap, mi_page_t* page);
bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq);
size_t _mi_page_bin(const mi_page_t* page); // for stats

View file

@@ -711,6 +711,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
mi_assert_internal(mi_page_block_size(page) == block_size);
mi_assert_internal(mi_page_is_abandoned(page));
mi_assert_internal(mi_page_is_owned(page));
return page;
}
@@ -730,13 +731,15 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) || // always commit small pages
(commit_on_demand == 2 && _mi_os_has_overcommit()) || (commit_on_demand == 0));
page = mi_arenas_page_alloc_fresh(slice_count, block_size, 1, req_arena, heap->numa_node, commit, tld);
if (page != NULL) {
mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
_mi_page_init(heap, page);
return page;
}
if (page == NULL) return NULL;
return NULL;
mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
if (!_mi_page_init(heap, page)) {
_mi_arenas_free( page, mi_page_full_size(page), page->memid);
return NULL;
}
return page;
}
// Allocate a page containing one block (very large, or with large alignment)
@@ -755,7 +758,10 @@ static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_s
if (page == NULL) return NULL;
mi_assert(page->reserved == 1);
_mi_page_init(heap, page);
if (!_mi_page_init(heap, page)) {
_mi_arenas_free( page, mi_page_full_size(page), page->memid);
return NULL;
}
return page;
}
@@ -1204,11 +1210,16 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
size_t commit_size = mi_size_of_slices(info_slices);
// leave a guard OS page decommitted at the end?
if (!memid.is_pinned) { commit_size -= _mi_os_secure_guard_page_size(); }
bool ok = false;
if (commit_fun != NULL) {
(*commit_fun)(true /* commit */, arena, commit_size, NULL, commit_fun_arg);
ok = (*commit_fun)(true /* commit */, arena, commit_size, NULL, commit_fun_arg);
}
else {
_mi_os_commit(arena, commit_size, NULL);
ok = _mi_os_commit(arena, commit_size, NULL);
}
if (!ok) {
_mi_warning_message("unable to commit meta-data for OS memory");
return false;
}
}
else if (!memid.is_pinned) {

View file

@@ -186,7 +186,7 @@ static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
if (mi_memkind_is_os(memid.memkind)) {
size_t csize = memid.mem.os.size;
if (csize==0) { _mi_os_good_alloc_size(size); }
if (csize==0) { csize = _mi_os_good_alloc_size(size); }
size_t commit_size = (still_committed ? csize : 0);
void* base = addr;
// different base? (due to alignment)
@@ -309,7 +309,10 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
// explicitly commit only the aligned part
if (commit) {
_mi_os_commit(p, size, NULL);
if (!_mi_os_commit(p, size, NULL)) {
mi_os_prim_free(*base, over_size, 0);
return NULL;
}
}
}
else { // mmap can free inside an allocation

View file

@@ -9,6 +9,10 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc/internal.h"
#include "bitmap.h"
static void mi_page_map_cannot_commit(void) {
_mi_error_message(EFAULT,"unable to commit memory for the page address map\n");
}
#if MI_PAGE_MAP_FLAT
// The page-map contains a byte for each 64kb slice in the address space.
@@ -57,7 +61,10 @@ bool _mi_page_map_init(void) {
}
if (bitmap_size > 0) {
mi_page_map_commit = (mi_bitmap_t*)base;
_mi_os_commit(mi_page_map_commit, bitmap_size, NULL);
if (!_mi_os_commit(mi_page_map_commit, bitmap_size, NULL)) {
mi_page_map_cannot_commit();
return false;
}
mi_bitmap_init(mi_page_map_commit, commit_bits, true);
}
_mi_page_map = base + bitmap_size;
@@ -95,7 +102,7 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
bool is_zero;
uint8_t* const start = _mi_page_map + (i * MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT);
const size_t size = MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT;
_mi_os_commit(start, size, &is_zero);
if (!_mi_os_commit(start, size, &is_zero)) return;
if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); }
mi_bitmap_set(mi_page_map_commit, i);
}
@@ -221,11 +228,17 @@ bool _mi_page_map_init(void) {
// note: for the NULL range we only commit one OS page (in the map and sub)
if (!mi_page_map_memid.initially_committed) {
_mi_os_commit(&_mi_page_map[0], os_page_size, NULL); // commit first part of the map
if (!_mi_os_commit(&_mi_page_map[0], os_page_size, NULL)) { // commit first part of the map
mi_page_map_cannot_commit();
return false;
}
}
_mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size); // we reserved a submap part at the end already
if (!mi_page_map_memid.initially_committed) {
_mi_os_commit(_mi_page_map[0], submap_size, NULL); // commit full submap (issue #1087)
if (!_mi_os_commit(_mi_page_map[0], submap_size, NULL)) { // commit full submap (issue #1087)
mi_page_map_cannot_commit();
return false;
}
}
if (!mi_page_map_memid.initially_zero) { // initialize low addresses with NULL
_mi_memzero_aligned(_mi_page_map[0], submap_size);
@@ -272,7 +285,9 @@ static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
size_t bit_idx;
if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
return NULL;
}
mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
}
return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]); // _mi_page_map_at(idx);

View file

@@ -37,7 +37,7 @@ static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_sta
}
//static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
#if (MI_DEBUG>=3)
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
@@ -605,14 +605,14 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
// Note: we also experimented with "bump" allocation on the first
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
mi_assert_expensive(mi_page_is_valid_init(page));
#if (MI_SECURE<3)
mi_assert(page->free == NULL);
mi_assert(page->local_free == NULL);
if (page->free != NULL) return;
if (page->free != NULL) return true;
#endif
if (page->capacity >= page->reserved) return;
if (page->capacity >= page->reserved) return true;
size_t page_size;
//uint8_t* page_start =
@@ -645,7 +645,9 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
const size_t needed_commit = _mi_align_up( mi_page_slice_offset_of(page, needed_size), MI_PAGE_MIN_COMMIT_SIZE );
if (needed_commit > page->slice_committed) {
mi_assert_internal(((needed_commit - page->slice_committed) % _mi_os_page_size()) == 0);
_mi_os_commit(mi_page_slice_start(page) + page->slice_committed, needed_commit - page->slice_committed, NULL);
if (!_mi_os_commit(mi_page_slice_start(page) + page->slice_committed, needed_commit - page->slice_committed, NULL)) {
return false;
}
page->slice_committed = needed_commit;
}
}
@@ -663,10 +665,11 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
mi_heap_stat_increase(heap, page_committed, extend * bsize);
#endif
mi_assert_expensive(mi_page_is_valid_init(page));
return true;
}
// Initialize a fresh page (that is already partially initialized)
void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
mi_decl_nodiscard bool _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
mi_assert(page != NULL);
mi_page_set_heap(page, heap);
@@ -703,8 +706,9 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
mi_assert_expensive(mi_page_is_valid_init(page));
// initialize an initial free list
mi_page_extend_free(heap,page);
if (!mi_page_extend_free(heap,page)) return false;
mi_assert(mi_page_immediate_available(page));
return true;
}
@@ -794,9 +798,11 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
if (page != NULL) {
if (!mi_page_immediate_available(page)) {
mi_assert_internal(mi_page_is_expandable(page));
mi_page_extend_free(heap, page);
if (!mi_page_extend_free(heap, page)) {
page = NULL; // failed to extend
}
}
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(page == NULL || mi_page_immediate_available(page));
}
if (page == NULL) {