From 5c0effd421a0a6c13e3dea70df70b7784debb76f Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 11:36:07 -0700
Subject: [PATCH 1/7] fix missing csize assignment in _mi_os_free_ex

---
 src/os.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/os.c b/src/os.c
index 8d8824f6..b1f60596 100644
--- a/src/os.c
+++ b/src/os.c
@@ -165,7 +165,7 @@ static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
 void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
   if (mi_memkind_is_os(memid.memkind)) {
     size_t csize = memid.mem.os.size;
-    if (csize==0) { _mi_os_good_alloc_size(size); }
+    if (csize==0) { csize = _mi_os_good_alloc_size(size); }
     size_t commit_size = (still_committed ? csize : 0);
     void* base = addr;
     // different base? (due to alignment)
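The one-line fix above is the entire bug: `_mi_os_good_alloc_size` has no side effects, so calling it without assigning the result computed nothing and `csize` stayed 0 for the subsequent commit-size bookkeeping. The next patch guards this class of mistake with a nodiscard annotation; the standalone sketch below illustrates the idea (the `good_alloc_size` helper and its caller are hypothetical, not mimalloc's implementation):

  #include <stddef.h>

  // ask gcc/clang to warn whenever a caller ignores the result
  #if defined(__GNUC__) || defined(__clang__)
  #define NODISCARD __attribute__((warn_unused_result))
  #else
  #define NODISCARD
  #endif

  // pure size-rounding helper: discarding its result is always a bug
  NODISCARD static size_t good_alloc_size(size_t size) {
    const size_t align = 4096;                 // round up to a 4 KiB boundary
    return (size + align - 1) & ~(align - 1);
  }

  size_t effective_free_size(size_t csize, size_t size) {
    if (csize == 0) {
      // good_alloc_size(size);                // the old bug: result dropped
      csize = good_alloc_size(size);           // the fix: assign the result
    }
    return csize;
  }
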
From 9232c5c8a0111c4e104e85201a0fdaf453b54834 Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 12:20:02 -0700
Subject: [PATCH 2/7] check all _mi_os_commit calls and return NULL if failing to commit

---
 include/mimalloc/internal.h |  8 ++++----
 src/arena.c                 | 20 ++++++++++++++++----
 src/os.c                    |  5 ++++-
 src/page-map.c              | 29 ++++++++++++++++++++++++-----
 src/page.c                  | 28 ++++++++++++++++++----------
 5 files changed, 66 insertions(+), 24 deletions(-)

diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 4dade9dd..8743cbf3 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -144,13 +144,13 @@ bool _mi_os_has_virtual_reserve(void);
 size_t _mi_os_virtual_address_bits(void);
 
 bool _mi_os_reset(void* addr, size_t size);
-bool _mi_os_commit(void* p, size_t size, bool* is_zero);
+mi_decl_nodiscard bool _mi_os_commit(void* p, size_t size, bool* is_zero);
 bool _mi_os_decommit(void* addr, size_t size);
-bool _mi_os_protect(void* addr, size_t size);
+mi_decl_nodiscard bool _mi_os_protect(void* addr, size_t size);
 bool _mi_os_unprotect(void* addr, size_t size);
 bool _mi_os_purge(void* p, size_t size);
 bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size);
-bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
+mi_decl_nodiscard bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
 
 size_t _mi_os_secure_guard_page_size(void);
 bool _mi_os_secure_guard_page_set_at(void* addr, bool is_pinned);
@@ -212,7 +212,7 @@ void _mi_deferred_free(mi_heap_t* heap, bool force);
 
 void _mi_page_free_collect(mi_page_t* page, bool force);
 void _mi_page_free_collect_partly(mi_page_t* page, mi_block_t* head);
-void _mi_page_init(mi_heap_t* heap, mi_page_t* page);
+mi_decl_nodiscard bool _mi_page_init(mi_heap_t* heap, mi_page_t* page);
 
 bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq);
 size_t _mi_page_bin(const mi_page_t* page); // for stats
diff --git a/src/arena.c b/src/arena.c
index e040f220..225bff17 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -686,7 +686,10 @@ static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_si
     commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
     if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
     bool is_zero;
-    _mi_os_commit(page, commit_size, &is_zero);
+    if (!_mi_os_commit(page, commit_size, &is_zero)) {
+      _mi_arenas_free( page, alloc_size, memid );
+      return NULL;
+    }
     if (!memid.initially_zero && !is_zero) {
       _mi_memzero_aligned(page, commit_size);
     }
@@ -741,7 +744,10 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
   page = mi_arenas_page_alloc_fresh(slice_count, block_size, 1, req_arena, commit, tld);
   if (page != NULL) {
     mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
-    _mi_page_init(heap, page);
+    if (!_mi_page_init(heap, page)) {
+      _mi_arenas_free( page, mi_page_full_size(page), page->memid );
+      return NULL;
+    }
     return page;
   }
 
@@ -764,7 +770,10 @@ static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_s
   if (page == NULL) return NULL;
 
   mi_assert(page->reserved == 1);
-  _mi_page_init(heap, page);
+  if (!_mi_page_init(heap, page)) {
+    _mi_arenas_free(page, mi_page_full_size(page), page->memid);
+    return NULL;
+  }
   return page;
 }
 
@@ -1216,7 +1225,10 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
   // commit & zero if needed
   if (!memid.initially_committed) {
     // leave a guard OS page decommitted at the end
-    _mi_os_commit(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size(), NULL);
+    if (!_mi_os_commit(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size(), NULL)) {
+      _mi_warning_message("unable to commit meta data for provided OS memory");
+      return false;
+    }
   }
   else {
     // if MI_SECURE, set a guard page at the end
diff --git a/src/os.c b/src/os.c
index b1f60596..82832223 100644
--- a/src/os.c
+++ b/src/os.c
@@ -288,7 +288,10 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
 
     // explicitly commit only the aligned part
     if (commit) {
-      _mi_os_commit(p, size, NULL);
+      if (!_mi_os_commit(p, size, NULL)) {
+        mi_os_prim_free(p, over_size, 0);
+        return NULL;
+      }
     }
   }
   else { // mmap can free inside an allocation
diff --git a/src/page-map.c b/src/page-map.c
index 44d6de4a..77261738 100644
--- a/src/page-map.c
+++ b/src/page-map.c
@@ -9,6 +9,10 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc/internal.h"
 #include "bitmap.h"
 
+static void mi_page_map_cannot_commit(void) {
+  _mi_error_message(ENOMEM, "unable to commit the allocation page-map on-demand\n" );
+}
+
 #if MI_PAGE_MAP_FLAT
 
 // The page-map contains a byte for each 64kb slice in the address space.
@@ -57,7 +61,10 @@ bool _mi_page_map_init(void) {
   }
   if (bitmap_size > 0) {
     mi_page_map_commit = (mi_bitmap_t*)base;
-    _mi_os_commit(mi_page_map_commit, bitmap_size, NULL);
+    if (!_mi_os_commit(mi_page_map_commit, bitmap_size, NULL)) {
+      mi_page_map_cannot_commit();
+      return false;
+    }
     mi_bitmap_init(mi_page_map_commit, commit_bits, true);
   }
   _mi_page_map = base + bitmap_size;
@@ -84,7 +91,10 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
       bool is_zero;
       uint8_t* const start = _mi_page_map + (i * MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT);
       const size_t size = MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT;
-      _mi_os_commit(start, size, &is_zero);
+      if (!_mi_os_commit(start, size, &is_zero)) {
+        mi_page_map_cannot_commit();
+        return;
+      }
       if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); }
       mi_bitmap_set(mi_page_map_commit, i);
     }
@@ -204,11 +214,17 @@ bool _mi_page_map_init(void) {
 
   // note: for the NULL range we only commit one OS page (in the map and sub)
   if (!mi_page_map_memid.initially_committed) {
-    _mi_os_commit(&_mi_page_map[0], os_page_size, NULL); // commit first part of the map
+    if (!_mi_os_commit(&_mi_page_map[0], os_page_size, NULL)) { // commit first part of the map
+      mi_page_map_cannot_commit();
+      return false;
+    }
   }
   _mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size); // we reserved 2 sub maps at the end already
   if (!mi_page_map_memid.initially_committed) {
-    _mi_os_commit(_mi_page_map[0], os_page_size, NULL); // only first OS page
+    if (!_mi_os_commit(_mi_page_map[0], os_page_size, NULL)) { // only first OS page
+      mi_page_map_cannot_commit();
+      return false;
+    }
   }
   _mi_page_map[0][0] = (mi_page_t*)&_mi_page_empty; // caught in `mi_free`
 
@@ -231,7 +247,10 @@ static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
   size_t bit_idx;
   if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
     uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
-    _mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
+    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
+      mi_page_map_cannot_commit();
+      return NULL;
+    }
     mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
   }
   return _mi_page_map[idx];
diff --git a/src/page.c b/src/page.c
index 73affd49..c3b25872 100644
--- a/src/page.c
+++ b/src/page.c
@@ -37,7 +37,7 @@ static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_sta
 }
 
 //static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
-static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
+static mi_decl_nodiscard bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
 
 #if (MI_DEBUG>=3)
 static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
@@ -311,7 +311,9 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
     _mi_heap_page_reclaim(heap, page);
     if (!mi_page_immediate_available(page)) {
       if (mi_page_is_expandable(page)) {
-        mi_page_extend_free(heap, page);
+        if (!mi_page_extend_free(heap, page)) {
+          return NULL;
+        }
       }
       else {
         mi_assert(false); // should not happen?
@@ -605,14 +607,14 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 // Note: we also experimented with "bump" allocation on the first
 // allocations but this did not speed up any benchmark (due to an
 // extra test in malloc? or cache effects?)
-static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
+static mi_decl_nodiscard bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
   #if (MI_SECURE<3)
   mi_assert(page->free == NULL);
   mi_assert(page->local_free == NULL);
-  if (page->free != NULL) return;
+  if (page->free != NULL) return true;
   #endif
-  if (page->capacity >= page->reserved) return;
+  if (page->capacity >= page->reserved) return true;
 
   size_t page_size;
   //uint8_t* page_start =
@@ -645,7 +647,9 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
     const size_t needed_commit = _mi_align_up( mi_page_slice_offset_of(page, needed_size), MI_PAGE_MIN_COMMIT_SIZE );
     if (needed_commit > page->slice_committed) {
       mi_assert_internal(((needed_commit - page->slice_committed) % _mi_os_page_size()) == 0);
-      _mi_os_commit(mi_page_slice_start(page) + page->slice_committed, needed_commit - page->slice_committed, NULL);
+      if (!_mi_os_commit(mi_page_slice_start(page) + page->slice_committed, needed_commit - page->slice_committed, NULL)) {
+        return false;
+      }
       page->slice_committed = needed_commit;
     }
   }
@@ -663,10 +667,11 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
   mi_heap_stat_increase(heap, page_committed, extend * bsize);
   #endif
   mi_assert_expensive(mi_page_is_valid_init(page));
+  return true;
 }
 
 // Initialize a fresh page (that is already partially initialized)
-void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
+mi_decl_nodiscard bool _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
   mi_assert(page != NULL);
   mi_page_set_heap(page, heap);
 
@@ -703,8 +708,9 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
 
   // initialize an initial free list
-  mi_page_extend_free(heap,page);
+  if (!mi_page_extend_free(heap,page)) return false;
   mi_assert(mi_page_immediate_available(page));
+  return true;
 }
 
 
@@ -794,9 +800,11 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
   if (page != NULL) {
     if (!mi_page_immediate_available(page)) {
       mi_assert_internal(mi_page_is_expandable(page));
-      mi_page_extend_free(heap, page);
+      if (!mi_page_extend_free(heap, page)) {
+        page = NULL; // failed to extend
+      }
     }
-    mi_assert_internal(mi_page_immediate_available(page));
+    mi_assert_internal(page == NULL || mi_page_immediate_available(page));
   }
 
   if (page == NULL) {
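The pattern repeated throughout this patch: address space may already be reserved when a later commit fails (for example under disabled overcommit or an exhausted commit charge), so every `_mi_os_commit` call site now unwinds what was already reserved and reports failure upward instead of continuing on uncommitted memory. A POSIX-only sketch of the same reserve/commit/unwind shape, independent of mimalloc:

  #include <stddef.h>
  #include <sys/mman.h>

  // Reserve address space, then commit it; release the reservation if the
  // commit fails so the error path never leaks the mapping.
  void* reserve_and_commit(size_t size) {
    void* p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return NULL;           // reservation itself failed
    if (mprotect(p, size, PROT_READ | PROT_WRITE) != 0) {
      munmap(p, size);                          // unwind: drop the reservation
      return NULL;                              // report failure to the caller
    }
    return p;
  }
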
From 2157947ecf70125264fc69f5a181d627f4b978a0 Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 12:39:47 -0700
Subject: [PATCH 3/7] fix warning on Ubuntu 22

---
 src/page.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/page.c b/src/page.c
index c3b25872..d8c6cbca 100644
--- a/src/page.c
+++ b/src/page.c
@@ -36,8 +36,7 @@ static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_sta
   return (mi_block_t*)((uint8_t*)page_start + (i * block_size));
 }
 
-//static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
-static mi_decl_nodiscard bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
+static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
 
 #if (MI_DEBUG>=3)
 static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
@@ -842,7 +841,7 @@ static mi_page_t* mi_find_free_page(mi_heap_t* heap, mi_page_queue_t* pq) {
   if mi_likely(page != NULL && mi_page_immediate_available(page)) {
     #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
     if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
-      mi_page_extend_free(heap, page);
+      mi_page_extend_free(heap, page);
       mi_assert_internal(mi_page_immediate_available(page));
     }
     #endif
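A likely reading of the warning fixed here: with gcc, `__attribute__((warn_unused_result))` fires at every call that drops the result and, unlike clang, is not suppressed by a `(void)` cast. For an internal helper whose result is deliberately ignored in some configuration (as in the secure-mode extension above), the realistic options are to check the result anyway or to drop the attribute from the static declaration, which is what this patch does. A small illustration with hypothetical names:

  #include <stdbool.h>

  // gcc/clang: callers must use the result of this helper
  __attribute__((warn_unused_result))
  static bool extend_free_list(int* capacity) {
    *capacity += 1;                        // pretend the free list grew
    return true;
  }

  void extend_for_randomness(int* capacity) {
    // (void)extend_free_list(capacity);   // gcc would still warn here
    if (!extend_free_list(capacity)) {     // checking the result compiles clean
      return;
    }
  }
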
From 86757dfbd644fee2f13d5b8dfba2ad9f2fd7b9a2 Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 13:36:05 -0700
Subject: [PATCH 4/7] improve precision of malloc_huge statistic by always using the global os stats

---
 src/arena.c | 11 ++++-------
 src/heap.c  |  7 ++-----
 src/page.c  |  4 ++--
 3 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/src/arena.c b/src/arena.c
index 225bff17..0d86669b 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -817,20 +817,17 @@ void _mi_arenas_page_free(mi_page_t* page, mi_tld_t* stats_tld /* can be NULL */
   mi_assert_internal(page->next==NULL && page->prev==NULL);
 
   // statistics
-  const size_t block_size = mi_page_block_size(page);
   if (stats_tld != NULL) {
     mi_tld_stat_decrease(stats_tld, page_bins[_mi_page_bin(page)], 1);
     mi_tld_stat_decrease(stats_tld, pages, 1);
-    if (block_size > MI_LARGE_MAX_OBJ_SIZE) {
-      mi_tld_stat_decrease(stats_tld, malloc_huge, block_size);
-    }
   }
   else {
     mi_os_stat_decrease(page_bins[_mi_page_bin(page)], 1);
     mi_os_stat_decrease(pages, 1);
-    if (block_size > MI_LARGE_MAX_OBJ_SIZE) {
-      mi_os_stat_decrease(malloc_huge, block_size);
-    }
+  }
+  const size_t block_size = mi_page_block_size(page);
+  if (mi_page_is_huge(page)) {
+    mi_os_stat_decrease(malloc_huge, block_size);
   }
 
   // assertions
diff --git a/src/heap.c b/src/heap.c
index 85285bdb..ee373773 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -329,14 +329,11 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   //_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
 
   // stats
-  const size_t bsize = mi_page_block_size(page);
-  if (bsize > MI_LARGE_MAX_OBJ_SIZE) {
-    mi_heap_stat_decrease(heap, malloc_huge, bsize);
-  }
   #if (MI_STAT)
   _mi_page_free_collect(page, false); // update used count
-  const size_t inuse = page->used;
+  const size_t bsize = mi_page_block_size(page);
   if (bsize <= MI_LARGE_MAX_OBJ_SIZE) {
+    const size_t inuse = page->used;
     mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse);
     #if (MI_STAT>1)
     mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse);
diff --git a/src/page.c b/src/page.c
index d8c6cbca..36883adc 100644
--- a/src/page.c
+++ b/src/page.c
@@ -905,8 +905,8 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
     mi_assert_internal(mi_page_is_abandoned(page));
     mi_page_set_heap(page, NULL);
     #endif
-    mi_heap_stat_increase(heap, malloc_huge, mi_page_block_size(page));
-    mi_heap_stat_counter_increase(heap, malloc_huge_count, 1);
+    mi_os_stat_increase(malloc_huge, mi_page_block_size(page));
+    mi_os_stat_counter_increase(malloc_huge_count, 1);
   }
   return page;
 }
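The earlier imprecision came from split accounting: a huge page can be freed from a different thread than the one that allocated it, or freed with no `stats_tld` at hand, so the increase and the matching decrease could land in different per-thread versus global counters. Accounting such rare, large events in one global counter keeps the pair symmetric; a sketch with C11 atomics and hypothetical counter names:

  #include <stdatomic.h>
  #include <stddef.h>

  // One process-wide counter: the increment at allocation and the decrement
  // at free always hit the same variable, regardless of the freeing thread.
  static _Atomic long long huge_bytes = 0;

  void stat_huge_alloc(size_t block_size) {
    atomic_fetch_add_explicit(&huge_bytes, (long long)block_size, memory_order_relaxed);
  }

  void stat_huge_free(size_t block_size) {
    atomic_fetch_sub_explicit(&huge_bytes, (long long)block_size, memory_order_relaxed);
  }
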
From 4022df8085cc11ad3ddf995bbe98f03fba25f2ec Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 13:41:45 -0700
Subject: [PATCH 5/7] fix build error on Ubuntu

---
 src/page.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/page.c b/src/page.c
index 36883adc..0df0434a 100644
--- a/src/page.c
+++ b/src/page.c
@@ -606,7 +606,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 // Note: we also experimented with "bump" allocation on the first
 // allocations but this did not speed up any benchmark (due to an
 // extra test in malloc? or cache effects?)
-static mi_decl_nodiscard bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
+static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
   #if (MI_SECURE<3)
   mi_assert(page->free == NULL);
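`mi_decl_nodiscard` expands differently per compiler and language mode, and the expansions are position-sensitive: the C++17/C23 `[[nodiscard]]` form generally must precede `static`, while the gcc attribute form is more forgiving, so an annotation one toolchain accepts on a static definition can be a hard error on another. A sketch of how such a macro is commonly defined (an assumption for illustration, not the actual mimalloc definition):

  #include <stdbool.h>

  // Sketch only; the real mi_decl_nodiscard in mimalloc.h may differ.
  #if defined(__cplusplus) && (__cplusplus >= 201703L)
    #define decl_nodiscard  [[nodiscard]]
  #elif defined(__GNUC__) || defined(__clang__)
    #define decl_nodiscard  __attribute__((warn_unused_result))
  #else
    #define decl_nodiscard
  #endif

  decl_nodiscard bool page_init(void* heap, void* page);   // attribute first: portable
  // static decl_nodiscard bool helper(void);              // attribute after 'static': may be
  //                                                       // rejected when it expands to [[nodiscard]]
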
From 6454abb02b2c9ccbab262f43e6f35ae4db44d212 Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 13:43:17 -0700
Subject: [PATCH 6/7] disable azure pipeline for windows 2019 as it is deprecated

---
 azure-pipelines.yml | 29 -----------------------------
 1 file changed, 29 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index a4266ae1..acb92a28 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -180,35 +180,6 @@ jobs:
 # Other OS versions (just debug mode)
 # ----------------------------------------------------------
 
-- job:
-  displayName: Windows 2019
-  pool:
-    vmImage:
-      windows-2019
-  strategy:
-    matrix:
-      Debug:
-        BuildType: debug
-        cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
-        MSBuildConfiguration: Debug
-      Release:
-        BuildType: release
-        cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
-        MSBuildConfiguration: Release
-  steps:
-  - task: CMake@1
-    inputs:
-      workingDirectory: $(BuildType)
-      cmakeArgs: .. $(cmakeExtraArgs)
-  - task: MSBuild@1
-    inputs:
-      solution: $(BuildType)/libmimalloc.sln
-      configuration: '$(MSBuildConfiguration)'
-      msbuildArguments: -m
-  - script: ctest --verbose --timeout 240 -C $(MSBuildConfiguration)
-    workingDirectory: $(BuildType)
-    displayName: CTest
-
 - job:
   displayName: Ubuntu 24.04
   pool:

From bea8a12d3d267fa900a5491e700ea73053d575ca Mon Sep 17 00:00:00 2001
From: Daan
Date: Tue, 3 Jun 2025 13:56:12 -0700
Subject: [PATCH 7/7] fix error path on commit failure to use correct base address

---
 src/os.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/os.c b/src/os.c
index 82832223..ad847ea7 100644
--- a/src/os.c
+++ b/src/os.c
@@ -280,19 +280,19 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero);
     if (p == NULL) return NULL;
 
-    // set p to the aligned part in the full region
-    // note: on Windows VirtualFree needs the actual base pointer
-    // this is handledby having the `base` field in the memid.
-    *base = p; // remember the base
-    p = _mi_align_up_ptr(p, alignment);
-
     // explicitly commit only the aligned part
+    void* const aligned_p = _mi_align_up_ptr(p, alignment);
     if (commit) {
-      if (!_mi_os_commit(p, size, NULL)) {
+      if (!_mi_os_commit(aligned_p, size, NULL)) {
         mi_os_prim_free(p, over_size, 0);
         return NULL;
       }
     }
+
+    // note: on Windows VirtualFree needs the actual base pointer
+    // this is handled by having the `base` field in the memid.
+    *base = p;
+    p = aligned_p;
   }
   else { // mmap can free inside an allocation
     // overallocate...
@@ -300,7 +300,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     if (p == NULL) return NULL;
 
     // and selectively unmap parts around the over-allocated area.
-    void* aligned_p = _mi_align_up_ptr(p, alignment);
+    void* const aligned_p = _mi_align_up_ptr(p, alignment);
     size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
     size_t mid_size = _mi_align_up(size, _mi_os_page_size());
     size_t post_size = over_size - pre_size - mid_size;
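The last fix generalizes to any aligned allocation carved out of a larger OS region: commit and use the aligned pointer, but remember the original base of the over-allocation, because error paths and the eventual free must release that base rather than the aligned pointer. A toy illustration with `malloc`/`free` standing in for the OS primitives (names hypothetical):

  #include <stdint.h>
  #include <stdlib.h>

  // Round a pointer up to the next multiple of `alignment` (a power of two).
  static void* align_up_ptr(void* p, size_t alignment) {
    return (void*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
  }

  // Over-allocate, hand out the aligned pointer, and report the base through
  // `base`; the caller (and every error path) frees `*base`, never the result.
  void* alloc_aligned(size_t size, size_t alignment, void** base) {
    void* p = malloc(size + alignment);        // over-allocate by the alignment
    if (p == NULL) return NULL;
    *base = p;                                 // this is what free() must get
    return align_up_ptr(p, alignment);         // callers use the aligned pointer
  }

A caller would pass the returned pointer around but release the allocation with free(*base), mirroring how the patch frees `p` (the base) rather than `aligned_p` when the commit fails.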