Somewhat reorganize functions to ensure control over block initialization (or lack thereof)

If all "public" functions internally call a helper with the desired "initialization"
mode (zero or unitialized) we can ensure that a block is only filled once.

Otherwise, if a "public" function would call another "public" function, it could happen
that a block is first filled with the "uninit memory" marker in debug mode, after which
it's immediately overwritten with all zeroes.
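
To illustrate the intended structure, here is a minimal self-contained sketch of the pattern
(the names alloc_init_t, heap_malloc_init, my_malloc, my_zalloc, and the 0xD0 marker are
stand-ins for this example only; the actual commit uses mi_alloc_init_t and
_mi_heap_malloc_init, as shown in the diff below):

#include <stdlib.h>
#include <string.h>

typedef enum { ALLOC_UNINIT, ALLOC_ZERO_INIT } alloc_init_t;

// The single internal helper: each block is filled at most once, according to `init`.
static void* heap_malloc_init(size_t size, alloc_init_t init) {
  void* p = malloc(size);
  if (p == NULL) return NULL;
  if (init == ALLOC_ZERO_INIT) {
    memset(p, 0, size);        // caller asked for zeroed memory
  }
#ifndef NDEBUG
  else {
    memset(p, 0xD0, size);     // debug-only "uninit memory" marker
  }
#endif
  return p;
}

// "Public" entry points only pick the init mode; they never call each other,
// so a zero-initialized block is never first stamped with the debug marker.
void* my_malloc(size_t size) { return heap_malloc_init(size, ALLOC_UNINIT); }
void* my_zalloc(size_t size) { return heap_malloc_init(size, ALLOC_ZERO_INIT); }

Before this commit, the zeroing allocators went through the regular malloc path first and
zeroed the block afterwards, so in debug mode the block was marked as uninitialized and then
immediately overwritten with zeroes.
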
Author: Frank Richter
Date:   2021-12-19 21:03:38 +01:00
Parent: 7cd7224e3e
Commit: 1621b461c6

3 changed files with 42 additions and 40 deletions


@@ -138,8 +138,8 @@ mi_msecs_t _mi_clock_start(void);
 // "alloc.c"
 void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, mi_alloc_init_t init);
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init);
+void* _mi_heap_malloc_init(mi_heap_t* heap, size_t size, mi_alloc_init_t init) mi_attr_noexcept;
+void* _mi_heap_realloc_init(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init);
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
 void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);


@@ -15,7 +15,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // ------------------------------------------------------
 // Fallback primitive aligned allocation -- split out for better codegen
-static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
+static mi_decl_noinline void* mi_heap_malloc_init_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
 {
   mi_assert_internal(size <= PTRDIFF_MAX);
   mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX);
@@ -25,13 +25,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   // use regular allocation if it is guaranteed to fit the alignment constraints
   if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
-    void* p = _mi_heap_malloc_zero(heap, size, init);
+    void* p = _mi_heap_malloc_init(heap, size, init);
     mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
     return p;
   }
   // otherwise over-allocate
-  void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, init);
+  void* p = _mi_heap_malloc_init(heap, size + alignment - 1, init);
   if (p == NULL) return NULL;
   // .. and align within the allocation
@@ -45,7 +45,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
 }
 // Primitive aligned allocation
-static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
+static void* mi_heap_malloc_init_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const mi_alloc_init_t init) mi_attr_noexcept
 {
   // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
   mi_assert(alignment > 0);
@@ -87,7 +87,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
     }
   }
   // fallback
-  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, init);
+  return mi_heap_malloc_init_aligned_at_fallback(heap, size, alignment, offset, init);
 }
@@ -96,7 +96,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 // ------------------------------------------------------
 mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, MI_ALLOC_UNINIT);
+  return mi_heap_malloc_init_aligned_at(heap, size, alignment, offset, MI_ALLOC_UNINIT);
 }
 mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
@@ -123,7 +123,7 @@ mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size
 // ------------------------------------------------------
 mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, MI_ALLOC_ZERO_INIT);
+  return mi_heap_malloc_init_aligned_at(heap, size, alignment, offset, MI_ALLOC_ZERO_INIT);
 }
 mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
@@ -169,10 +169,10 @@ mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t align
 // Aligned re-allocation
 // ------------------------------------------------------
-static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, mi_alloc_init_t init) mi_attr_noexcept {
+static void* mi_heap_realloc_init_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(alignment > 0);
-  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,init);
-  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,init);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_init(heap,p,newsize,init);
+  if (p == NULL) return mi_heap_malloc_init_aligned_at(heap,newsize,alignment,offset,init);
   size_t size = mi_usable_size(p);
   if (newsize <= size && newsize >= (size - (size / 2))
       && (((uintptr_t)p + offset) % alignment) == 0) {
@@ -200,27 +200,27 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   }
 }
-static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, mi_alloc_init_t init) mi_attr_noexcept {
+static void* mi_heap_realloc_init_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(alignment > 0);
-  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,init);
+  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_init(heap,p,newsize,init);
   size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL)
-  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,init);
+  return mi_heap_realloc_init_aligned_at(heap,p,newsize,alignment,offset,init);
 }
 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,MI_ALLOC_UNINIT);
+  return mi_heap_realloc_init_aligned_at(heap,p,newsize,alignment,offset,MI_ALLOC_UNINIT);
 }
 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,MI_ALLOC_UNINIT);
+  return mi_heap_realloc_init_aligned(heap,p,newsize,alignment,MI_ALLOC_UNINIT);
 }
 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, MI_ALLOC_ZERO_INIT);
+  return mi_heap_realloc_init_aligned_at(heap, p, newsize, alignment, offset, MI_ALLOC_ZERO_INIT);
 }
 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
-  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, MI_ALLOC_ZERO_INIT);
+  return mi_heap_realloc_init_aligned(heap, p, newsize, alignment, MI_ALLOC_ZERO_INIT);
 }
 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {


@@ -70,7 +70,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
 }
 // allocate a small block
-extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+static inline mi_decl_restrict void* _mi_heap_malloc_small_init(mi_heap_t* heap, size_t size, mi_alloc_init_t init) mi_attr_noexcept {
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
   mi_assert(size <= MI_SMALL_SIZE_MAX);
@@ -88,17 +88,24 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
   #endif
+  if (init == MI_ALLOC_ZERO_INIT && p != NULL) {
+    _mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
+  }
   return p;
 }
+extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+  return _mi_heap_malloc_small_init(heap, size, MI_ALLOC_UNINIT);
+}
 extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
   return mi_heap_malloc_small(mi_get_default_heap(), size);
 }
 // The main allocation function
-extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+extern inline mi_decl_restrict void* _mi_heap_malloc_init(mi_heap_t* heap, size_t size, mi_alloc_init_t init) mi_attr_noexcept {
   if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
-    return mi_heap_malloc_small(heap, size);
+    return _mi_heap_malloc_small_init(heap, size, init);
   }
   else {
     mi_assert(heap!=NULL);
@@ -111,10 +118,17 @@ extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size
       mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
     }
     #endif
+    if (init == MI_ALLOC_ZERO_INIT && p != NULL) {
+      _mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
+    }
     return p;
   }
 }
+extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+  return _mi_heap_malloc_init(heap, size, MI_ALLOC_UNINIT);
+}
 extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
   return mi_heap_malloc(mi_get_default_heap(), size);
 }
@@ -140,23 +154,11 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
 // zero initialized small block
 mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
-  void* p = mi_malloc_small(size);
-  if (p != NULL) {
-    _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
-  }
-  return p;
-}
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, mi_alloc_init_t init) {
-  void* p = mi_heap_malloc(heap,size);
-  if (init == MI_ALLOC_ZERO_INIT && p != NULL) {
-    _mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
-  }
-  return p;
+  return _mi_heap_malloc_small_init(mi_get_default_heap(), size, MI_ALLOC_ZERO_INIT);
 }
 extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  return _mi_heap_malloc_zero(heap, size, MI_ALLOC_ZERO_INIT);
+  return _mi_heap_malloc_init(heap, size, MI_ALLOC_ZERO_INIT);
 }
 mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
@@ -619,8 +621,8 @@ void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
   return p; // it fits
 }
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init) {
-  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,init);
+void* _mi_heap_realloc_init(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_init_t init) {
+  if (p == NULL) return _mi_heap_malloc_init(heap,newsize,init);
   size_t size = _mi_usable_size(p,"mi_realloc");
   if (newsize <= size && newsize >= (size / 2)) {
     return p; // reallocation still fits and not more than 50% waste
@@ -639,7 +641,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, mi_alloc_i
 }
 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
-  return _mi_heap_realloc_zero(heap, p, newsize, MI_ALLOC_UNINIT);
+  return _mi_heap_realloc_init(heap, p, newsize, MI_ALLOC_UNINIT);
 }
 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
@@ -657,7 +659,7 @@ void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcep
 }
 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
-  return _mi_heap_realloc_zero(heap, p, newsize, MI_ALLOC_ZERO_INIT);
+  return _mi_heap_realloc_init(heap, p, newsize, MI_ALLOC_ZERO_INIT);
 }
 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {