Add mi_alloc_init_t to indicate whether memory should be zero-filled or uninitialized.

The latter means "fill with MI_DEBUG_UNINIT" in debug mode.
Author: Frank Richter
Date:   2021-12-19 20:51:28 +01:00
parent 5a7685de0e
commit 7cd7224e3e

4 changed files with 34 additions and 29 deletions
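
For illustration, a minimal sketch (ours, not code from this commit) of what the two modes mean for a freshly returned block; MI_DEBUG and MI_DEBUG_UNINIT are mimalloc's existing debug-level macro and debug fill byte, and the helper name is hypothetical:

#include "mimalloc-internal.h"   // in-tree header: mi_alloc_init_t, MI_DEBUG, MI_DEBUG_UNINIT
#include <string.h>

static void sketch_fill(void* p, size_t size, mi_alloc_init_t init) {
  if (init == MI_ALLOC_ZERO_INIT) {
    memset(p, 0, size);                // caller may rely on all-zero contents
  }
  else {                               // MI_ALLOC_UNINIT
#if (MI_DEBUG > 0)
    memset(p, MI_DEBUG_UNINIT, size);  // debug build: poison the block
#endif
  }                                    // release build: contents left unspecified
}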

include/mimalloc-internal.h

@@ -138,8 +138,8 @@ mi_msecs_t _mi_clock_start(void);
 // "alloc.c"
 void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept;  // called from `_mi_malloc_generic`
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero);
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, mi_alloc_init_t init);
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init);
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
 void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);

include/mimalloc-types.h

@@ -167,6 +167,11 @@ typedef int32_t mi_ssize_t;
 // Used as a special value to encode block sizes in 32 bits.
 #define MI_HUGE_BLOCK_SIZE   ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
 
+typedef enum mi_alloc_init_e {
+  MI_ALLOC_UNINIT    = 0,  // uninitialized memory (debug mode: fill with MI_DEBUG_UNINIT)
+  MI_ALLOC_ZERO_INIT = 1   // zero-initialize memory
+} mi_alloc_init_t;
+
 // ------------------------------------------------------
 // Mimalloc pages contain allocated blocks

src/alloc-aligned.c

@@ -15,7 +15,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // ------------------------------------------------------
 // Fallback primitive aligned allocation -- split out for better codegen
-static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
 {
   mi_assert_internal(size <= PTRDIFF_MAX);
   mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX);
@@ -25,13 +25,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   // use regular allocation if it is guaranteed to fit the alignment constraints
   if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
-    void* p = _mi_heap_malloc_zero(heap, size, zero);
+    void* p = _mi_heap_malloc_zero(heap, size, init);
     mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
     return p;
   }
 
   // otherwise over-allocate
-  void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);
+  void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, init);
   if (p == NULL) return NULL;
 
   // .. and align within the allocation
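
The comment above precedes the actual pointer adjustment, which this hunk does not show. A simplified sketch (ours) of that "align within the allocation" step, assuming alignment is a power of two as the asserts above require:

#include <stddef.h>
#include <stdint.h>

static void* align_within(void* p, size_t alignment, size_t offset) {
  // distance to the next address where (p + offset) is alignment-aligned
  uintptr_t adjust = alignment - (((uintptr_t)p + offset) & (alignment - 1));
  if (adjust == alignment) adjust = 0;   // (p + offset) was already aligned
  return (uint8_t*)p + adjust;
}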
@@ -45,7 +45,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
 }
 
 // Primitive aligned allocation
-static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
 {
   // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
   mi_assert(alignment > 0);
@@ -82,12 +82,12 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
       void* p = _mi_page_malloc(heap, page, padsize); // TODO: inline _mi_page_malloc
       mi_assert_internal(p != NULL);
       mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
-      if (zero) { _mi_block_zero_init(page, p, size); }
+      if (init == MI_ALLOC_ZERO_INIT) { _mi_block_zero_init(page, p, size); }
       return p;
     }
   }
 
   // fallback
-  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
+  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, init);
 }
@@ -96,7 +96,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 // ------------------------------------------------------
 
 mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
+  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, MI_ALLOC_UNINIT);
 }
 
 mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
@@ -123,7 +123,7 @@ mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size
 // ------------------------------------------------------
 
 mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
+  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, MI_ALLOC_ZERO_INIT);
 }
 
 mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
@@ -169,10 +169,10 @@ mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t align
 // Aligned re-allocation
 // ------------------------------------------------------
 
-static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
+static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(alignment > 0);
-  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
-  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,init);
+  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,init);
   size_t size = mi_usable_size(p);
   if (newsize <= size && newsize >= (size - (size / 2))
       && (((uintptr_t)p + offset) % alignment) == 0) {
@@ -181,7 +181,7 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   else {
     void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
     if (newp != NULL) {
-      if (zero && newsize > size) {
+      if (init == MI_ALLOC_ZERO_INIT && newsize > size) {
         const mi_page_t* page = _mi_ptr_page(newp);
         if (page->is_zero) {
           // already zero initialized
@@ -200,27 +200,27 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   }
 }
 
-static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
+static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(alignment > 0);
-  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,init);
   size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL)
-  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
+  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,init);
 }
 
 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
+  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,MI_ALLOC_UNINIT);
 }
 
 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
+  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,MI_ALLOC_UNINIT);
 }
 
 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
+  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, MI_ALLOC_ZERO_INIT);
 }
 
 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
+  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, MI_ALLOC_ZERO_INIT);
 }
 
 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {

src/alloc.c

@@ -147,16 +147,16 @@ mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
   return p;
 }
 
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, mi_alloc_init_t init) {
   void* p = mi_heap_malloc(heap,size);
-  if (zero && p != NULL) {
+  if (init == MI_ALLOC_ZERO_INIT && p != NULL) {
     _mi_block_zero_init(_mi_ptr_page(p),p,size);  // todo: can we avoid getting the page again?
   }
   return p;
 }
 
 extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  return _mi_heap_malloc_zero(heap, size, true);
+  return _mi_heap_malloc_zero(heap, size, MI_ALLOC_ZERO_INIT);
 }
 
 mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
@@ -619,15 +619,15 @@ void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
   return p; // it fits
 }
 
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
-  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,zero);
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init) {
+  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,init);
   size_t size = _mi_usable_size(p,"mi_realloc");
   if (newsize <= size && newsize >= (size / 2)) {
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
   if (mi_likely(newp != NULL)) {
-    if (zero && newsize > size) {
+    if (init == MI_ALLOC_ZERO_INIT && newsize > size) {
       // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
       size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
       memset((uint8_t*)newp + start, 0, newsize - start);
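
A worked example (numbers ours) of the start/memset arithmetic above: with an old usable size of 24 bytes, a newsize of 40, and 8-byte words, start = 24 - 8 = 16, so bytes [16, 40) of the new block are zeroed before the old contents are copied in; starting one word early is what guarantees the padding just past the old data ends up zero rather than stale heap bytes.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  size_t size = 24, newsize = 40;  // example sizes, ours
  size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
  printf("zero bytes [%zu, %zu)\n", start, newsize);  // [16, 40) on 64-bit
  return 0;
}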
@@ -639,7 +639,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
 }
 
 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
-  return _mi_heap_realloc_zero(heap, p, newsize, false);
+  return _mi_heap_realloc_zero(heap, p, newsize, MI_ALLOC_UNINIT);
 }
 
 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
@@ -657,7 +657,7 @@ void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept
 }
 
 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
-  return _mi_heap_realloc_zero(heap, p, newsize, true);
+  return _mi_heap_realloc_zero(heap, p, newsize, MI_ALLOC_ZERO_INIT);
 }
 
 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
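
Finally, a minimal user-level check (ours, not part of the commit) of the contract these internals serve: mi_zalloc and the rezalloc/recalloc family must hand back zeroed memory, while mi_malloc's contents are unspecified (and poisoned with MI_DEBUG_UNINIT in debug builds):

#include <mimalloc.h>
#include <assert.h>
#include <stddef.h>

int main(void) {
  unsigned char* z = (unsigned char*)mi_zalloc(64);
  assert(z != NULL);
  for (size_t i = 0; i < 64; i++) assert(z[i] == 0);  // zero-initialized
  void* u = mi_malloc(64);  // contents unspecified
  mi_free(u);
  mi_free(z);
  return 0;
}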