From 7cd7224e3e389565715d8a3c3365675c71b25acd Mon Sep 17 00:00:00 2001
From: Frank Richter
Date: Sun, 19 Dec 2021 20:51:28 +0100
Subject: [PATCH] Add mi_alloc_init_t to indicate whether memory should be
 zero-filled or uninitialized. The latter means "fill with MI_DEBUG_UNINIT"
 in debug mode.

---
 include/mimalloc-internal.h |  4 ++--
 include/mimalloc-types.h    |  5 +++++
 src/alloc-aligned.c         | 38 ++++++++++++++++++-------------------
 src/alloc.c                 | 16 ++++++++--------
 4 files changed, 34 insertions(+), 29 deletions(-)

diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index 06c3e95f..70bdd040 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -138,8 +138,8 @@ mi_msecs_t _mi_clock_start(void);
 
 // "alloc.c"
 void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept;  // called from `_mi_malloc_generic`
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero);
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, mi_alloc_init_t init);
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init);
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
 void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index b349dfc3..7212f199 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -167,6 +167,11 @@ typedef int32_t mi_ssize_t;
 
 // Used as a special value to encode block sizes in 32 bits.
 #define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
 
+typedef enum mi_alloc_init_e {
+  MI_ALLOC_UNINIT = 0,    // uninitialized memory (debug mode: fill with MI_DEBUG_UNINIT)
+  MI_ALLOC_ZERO_INIT = 1  // zero-initialize memory
+} mi_alloc_init_t;
+
 // ------------------------------------------------------
 // Mimalloc pages contain allocated blocks
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 6b15e653..610d0abb 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -15,7 +15,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // ------------------------------------------------------
 
 // Fallback primitive aligned allocation -- split out for better codegen
-static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
 {
   mi_assert_internal(size <= PTRDIFF_MAX);
   mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX);
@@ -25,13 +25,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
 
   // use regular allocation if it is guaranteed to fit the alignment constraints
   if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
-    void* p = _mi_heap_malloc_zero(heap, size, zero);
+    void* p = _mi_heap_malloc_zero(heap, size, init);
     mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
     return p;
   }
 
   // otherwise over-allocate
-  void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);
+  void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, init);
   if (p == NULL) return NULL;
 
   // .. and align within the allocation
@@ -45,7 +45,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
 }
 
 // Primitive aligned allocation
-static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
 {
   // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
   mi_assert(alignment > 0);
@@ -82,12 +82,12 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
       void* p = _mi_page_malloc(heap, page, padsize); // TODO: inline _mi_page_malloc
       mi_assert_internal(p != NULL);
       mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
-      if (zero) { _mi_block_zero_init(page, p, size); }
+      if (init == MI_ALLOC_ZERO_INIT) { _mi_block_zero_init(page, p, size); }
       return p;
     }
   }
 
   // fallback
-  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
+  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, init);
 }
 
@@ -96,7 +96,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 // ------------------------------------------------------
 
 mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
+  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, MI_ALLOC_UNINIT);
 }
 
 mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
@@ -123,7 +123,7 @@ mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size
 // ------------------------------------------------------
 
 mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
+  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, MI_ALLOC_ZERO_INIT);
 }
 
 mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
@@ -169,10 +169,10 @@ mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t align
 
 // Aligned re-allocation
 // ------------------------------------------------------
 
-static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
+static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(alignment > 0);
-  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
-  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,init);
+  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,init);
   size_t size = mi_usable_size(p);
   if (newsize <= size && newsize >= (size - (size / 2)) && (((uintptr_t)p + offset) % alignment) == 0) {
@@ -181,7 +181,7 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   else {
     void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
     if (newp != NULL) {
-      if (zero && newsize > size) {
+      if (init == MI_ALLOC_ZERO_INIT && newsize > size) {
         const mi_page_t* page = _mi_ptr_page(newp);
         if (page->is_zero) {
           // already zero initialized
@@ -200,27 +200,27 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   }
 }
 
-static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
+static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(alignment > 0);
-  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,init);
   size_t offset = ((uintptr_t)p % alignment);  // use offset of previous allocation (p can be NULL)
-  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
+  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,init);
 }
 
 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
+  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,MI_ALLOC_UNINIT);
 }
 
 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
+  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,MI_ALLOC_UNINIT);
 }
 
 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
+  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, MI_ALLOC_ZERO_INIT);
 }
 
 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
+  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, MI_ALLOC_ZERO_INIT);
 }
 
 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
diff --git a/src/alloc.c b/src/alloc.c
index ca32caba..49b95f8c 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -147,16 +147,16 @@ mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
   return p;
 }
 
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, mi_alloc_init_t init) {
   void* p = mi_heap_malloc(heap,size);
-  if (zero && p != NULL) {
+  if (init == MI_ALLOC_ZERO_INIT && p != NULL) {
    _mi_block_zero_init(_mi_ptr_page(p),p,size);  // todo: can we avoid getting the page again?
   }
   return p;
 }
 
 extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  return _mi_heap_malloc_zero(heap, size, true);
+  return _mi_heap_malloc_zero(heap, size, MI_ALLOC_ZERO_INIT);
 }
 
 mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
@@ -619,15 +619,15 @@ void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
   return p;  // it fits
 }
 
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
-  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,zero);
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init) {
+  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,init);
   size_t size = _mi_usable_size(p,"mi_realloc");
   if (newsize <= size && newsize >= (size / 2)) {
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
   if (mi_likely(newp != NULL)) {
-    if (zero && newsize > size) {
+    if (init == MI_ALLOC_ZERO_INIT && newsize > size) {
       // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
       size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
       memset((uint8_t*)newp + start, 0, newsize - start);
@@ -639,7 +639,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
 }
 
 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
-  return _mi_heap_realloc_zero(heap, p, newsize, false);
+  return _mi_heap_realloc_zero(heap, p, newsize, MI_ALLOC_UNINIT);
 }
 
 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
@@ -657,7 +657,7 @@ void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcep
 }
 
 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
-  return _mi_heap_realloc_zero(heap, p, newsize, true);
+  return _mi_heap_realloc_zero(heap, p, newsize, MI_ALLOC_ZERO_INIT);
 }
 
 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
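
Usage sketch (illustrative, not part of the patch): the enum is a drop-in
replacement for the old `bool zero` flag, so observable behavior is unchanged
and call sites become self-documenting. Through the public API, mi_zalloc
reaches _mi_heap_malloc_zero with MI_ALLOC_ZERO_INIT, while uninitialized
allocation (what MI_ALLOC_UNINIT denotes internally) yields unspecified
contents in a release build and the MI_DEBUG_UNINIT fill pattern in a debug
build. A minimal caller-side example, assuming a standard mimalloc build:

  #include <mimalloc.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    // Zero-initialized: internally _mi_heap_malloc_zero(heap, 16, MI_ALLOC_ZERO_INIT).
    uint8_t* z = (uint8_t*)mi_zalloc(16);

    // Uninitialized: contents are unspecified in a release build and
    // MI_DEBUG_UNINIT-filled in a debug build, as the commit message notes.
    uint8_t* u = (uint8_t*)mi_malloc(16);

    printf("z[0] = 0x%02X (always 0)\n", z[0]);
    printf("u[0] = 0x%02X (unspecified / debug fill)\n", u[0]);

    mi_free(u);
    mi_free(z);
    return 0;
  }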