merge from dev

Daan Leijen 2022-04-20 17:16:25 -07:00
commit f2a2eb4ad0
23 changed files with 1625 additions and 317 deletions
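
A note on the pervasive syntax change below: call sites move from the expression form if (mi_unlikely(cond)) { ... } to the statement form if mi_unlikely(cond) { ... }, which only compiles if the hint macro now supplies its own parentheses (and, under C++20, a branch attribute). The macro definitions are not part of this excerpt; a minimal sketch of definitions that would make the new call sites compile, assuming GCC/Clang builtins or C++20 attributes:

  // Sketch only, not taken from this commit. The macro carries its own parentheses so the
  // call site can write `if mi_unlikely(cond) { ... }` directly.
  #if defined(__cplusplus) && (__cplusplus >= 202002L)
    #define mi_likely(x)    (x) [[likely]]
    #define mi_unlikely(x)  (x) [[unlikely]]
  #elif defined(__GNUC__) || defined(__clang__)
    #define mi_likely(x)    (__builtin_expect(!!(x), 1))
    #define mi_unlikely(x)  (__builtin_expect(!!(x), 0))
  #else
    #define mi_likely(x)    (x)
    #define mi_unlikely(x)  (x)
  #endif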

View file

@@ -49,19 +49,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
{
// note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
mi_assert(alignment > 0);
if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
#endif
return NULL;
}
if (mi_unlikely(alignment > MI_ALIGNMENT_MAX)) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
#endif
return NULL;
}
if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
@@ -71,18 +71,17 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check
// try first if there happens to be a small block available with just the right alignment
if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
if mi_likely(padsize <= MI_SMALL_SIZE_MAX) {
mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
if (mi_likely(page->free != NULL && is_aligned))
if mi_likely(page->free != NULL && is_aligned)
{
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc, size);
#endif
void* p = _mi_page_malloc(heap, page, padsize); // TODO: inline _mi_page_malloc
void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
if (zero) { _mi_block_zero_init(page, p, size); }
return p;
}
}
@@ -95,19 +94,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
// ------------------------------------------------------
mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}
mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
#if !MI_PADDING
// without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
if (!_mi_is_power_of_two(alignment)) return NULL;
if (mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX))
if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
#else
// with padding, we can only guarantee this for fixed alignments
if (mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
&& size <= MI_SMALL_SIZE_MAX))
if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
&& size <= MI_SMALL_SIZE_MAX)
#endif
{
// fast path for common alignment and size
@@ -122,45 +121,45 @@ mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size
// Aligned Allocation
// ------------------------------------------------------
mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}
mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}
mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}
mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}
mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
}
mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
}
mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
}
mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
}
@@ -207,55 +206,55 @@ static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsi
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}
void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}
void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}
void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}
void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}
void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}
void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}
void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}
void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}

View file

@@ -254,7 +254,7 @@ static malloc_zone_t mi_malloc_zone = {
static inline malloc_zone_t* mi_get_default_zone(void)
{
static bool init;
if (mi_unlikely(!init)) {
if mi_unlikely(!init) {
init = true;
malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see <http://eatmyrandom.blogspot.com/2010/03/mallocfree-interception-on-mac-os-x.html>)
}

View file

@@ -83,7 +83,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcep
}
mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
if (mi_unlikely((size&(alignment-1)) != 0)) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
if mi_unlikely((size&(alignment-1)) != 0) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment);
#endif
@@ -109,7 +109,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att
}
void** op = (void**)p;
void* newp = mi_reallocarray(*op, count, size);
if (mi_unlikely(newp == NULL)) return errno;
if mi_unlikely(newp == NULL) { return errno; }
*op = newp;
return 0;
}
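
The mi_aligned_alloc hunk above rejects sizes that are not an integral multiple of the alignment, matching the C11 aligned_alloc contract it links to. A small illustration, not from the commit:

  #include <mimalloc.h>

  static void aligned_alloc_example(void) {
    void* ok  = mi_aligned_alloc(64, 128);  // 128 is a multiple of 64: allocation succeeds
    void* bad = mi_aligned_alloc(64, 100);  // 100 is not a multiple of 64: returns NULL
                                            // (and reports EOVERFLOW when MI_DEBUG > 0)
    mi_free(ok);
    mi_free(bad);                           // freeing NULL is a no-op
  }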

View file

@@ -25,11 +25,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* const block = page->free;
if (mi_unlikely(block == NULL)) {
return _mi_malloc_generic(heap, size);
if mi_unlikely(block == NULL) {
return _mi_malloc_generic(heap, size, zero);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
@@ -37,10 +37,17 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
// zero the block? note: we need to zero the full block size (issue #63)
if mi_unlikely(zero) {
mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
_mi_memzero_aligned(block, zsize);
}
#if (MI_DEBUG>0)
if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE!=0)
block->next = 0; // don't leak internal data
if (!zero) { block->next = 0; } // don't leak internal data
#endif
#if (MI_STAT>0)
@@ -76,9 +83,8 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
return block;
}
// allocate a small block
extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
mi_assert(heap!=NULL);
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
mi_assert(heap != NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
mi_assert(size <= MI_SMALL_SIZE_MAX);
void* p;
@@ -90,13 +96,13 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
#if (MI_PADDING_EXTRA > 0 || MI_DEBUG_TRACE > 0)
// with extra padding it is not guaranteed the size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE, so we need an extra check
if (size + MI_PADDING_SIZE > MI_SMALL_SIZE_MAX) {
p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero);
}
else
#endif
{
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
}
mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
@@ -104,24 +110,29 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
#endif
return p;
}
extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
// allocate a small block
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return mi_heap_malloc_small_zero(heap, size, false);
}
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small(mi_get_default_heap(), size);
}
// The main allocation function
extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
if (mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE)) {
return mi_heap_malloc_small(heap, size);
extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
if mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE) {
return mi_heap_malloc_small_zero(heap, size, zero);
}
else
{
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); // note: size can overflow but it is detected in malloc_generic
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero); // note: size can overflow but it is detected in malloc_generic
mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
@@ -133,51 +144,24 @@ extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size
}
}
extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, false);
}
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
return mi_heap_malloc(mi_get_default_heap(), size);
}
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
// note: we need to initialize the whole usable block size to zero, not just the requested size,
// or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
MI_UNUSED(size);
mi_assert_internal(p != NULL);
mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
mi_assert_internal(_mi_ptr_page(p)==page);
if (page->is_zero && size > sizeof(mi_block_t)) {
// already zero initialized memory
((mi_block_t*)p)->next = 0; // clear the free list pointer
mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
}
else {
// otherwise memset
memset(p, 0, mi_usable_size(p));
}
}
// zero initialized small block
mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
void* p = mi_malloc_small(size);
if (p != NULL) {
_mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
}
return p;
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true);
}
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
void* p = mi_heap_malloc(heap,size);
if (zero && p != NULL) {
_mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
}
return p;
}
extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, true);
}
mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
return mi_heap_zalloc(mi_get_default_heap(),size);
}
@@ -251,7 +235,7 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
size_t size;
size_t wrong;
if (mi_unlikely(!mi_verify_padding(page,block,&size,&wrong))) {
if mi_unlikely(!mi_verify_padding(page,block,&size,&wrong)) {
_mi_show_block_trace_with_predecessor(page, block, NULL);
_mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
}
@@ -442,7 +426,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
do {
use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
if (mi_unlikely(use_delayed)) {
if mi_unlikely(use_delayed) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
}
@@ -453,7 +437,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
}
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
if (mi_unlikely(use_delayed)) {
if mi_unlikely(use_delayed) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
mi_assert_internal(heap != NULL);
@@ -479,9 +463,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
// and push it on the free list
if (mi_likely(local)) {
if mi_likely(local) {
// owning thread can free a block directly
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
@@ -489,10 +473,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
if (mi_unlikely(mi_page_all_free(page))) {
if mi_unlikely(mi_page_all_free(page)) {
_mi_page_retire(page);
}
else if (mi_unlikely(mi_page_is_in_full(page))) {
else if mi_unlikely(mi_page_is_in_full(page)) {
_mi_page_unfull(page);
}
}
@@ -525,26 +509,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
{
MI_UNUSED(msg);
#if (MI_DEBUG>0)
if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
#endif
mi_segment_t* const segment = _mi_ptr_segment(p);
if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL)
if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL)
#if (MI_DEBUG>0)
if (mi_unlikely(!mi_is_in_heap_region(p))) {
if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
return NULL;
}
@@ -556,15 +540,15 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
void mi_free(void* p) mi_attr_noexcept
{
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
if (mi_unlikely(segment == NULL)) return;
if mi_unlikely(segment == NULL) return;
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
mi_block_t* const block = (mi_block_t*)p;
if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
if mi_unlikely(mi_check_is_double_free(page,block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
#if (MI_DEBUG!=0)
@@ -572,7 +556,7 @@ void mi_free(void* p) mi_attr_noexcept
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
_mi_page_retire(page);
}
}
@@ -618,7 +602,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
if (segment==NULL) return 0; // also returns 0 if `p == NULL`
const mi_page_t* const page = _mi_segment_page_of(segment, p);
if (mi_likely(!mi_page_has_aligned(page))) {
if mi_likely(!mi_page_has_aligned(page)) {
const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
}
@@ -628,7 +612,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
}
}
size_t mi_usable_size(const void* p) mi_attr_noexcept {
mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
return _mi_usable_size(p, "mi_usable_size");
}
@@ -640,6 +624,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
#ifdef __cplusplus
void* _mi_externs[] = {
(void*)&_mi_page_malloc,
(void*)&_mi_heap_malloc_zero,
(void*)&mi_malloc,
(void*)&mi_malloc_small,
(void*)&mi_zalloc_small,
@@ -672,24 +657,24 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
mi_free(p);
}
extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count,size,&total)) return NULL;
return mi_heap_zalloc(heap,total);
}
mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_calloc(mi_get_default_heap(),count,size);
}
// Uninitialized `calloc`
extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_malloc(heap, total);
}
mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_mallocn(mi_get_default_heap(),count,size);
}
@@ -708,25 +693,24 @@ void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
}
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL
if (mi_unlikely(newsize <= size && newsize >= (size / 2))) {
// if p == NULL then behave as malloc.
// else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
// (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
// todo: adjust potential padding to reflect the new size?
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
if (mi_likely(newp != NULL)) {
if mi_likely(newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start);
}
if (mi_likely(p != NULL)) {
const size_t copysize = (newsize > size ? size : newsize);
if (mi_likely(((uintptr_t)p % MI_INTPTR_SIZE) == 0)) {
_mi_memcpy_aligned(newp, p, copysize);
}
else {
_mi_memcpy(newp, p, copysize);
if mi_likely(p != NULL) {
if mi_likely(_mi_is_aligned(p, sizeof(uintptr_t))) {
_mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
}
mi_free(p); // only free the original pointer if successful
}
@@ -734,11 +718,11 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
return newp;
}
void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
}
void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_realloc(heap, p, total);
@@ -746,41 +730,41 @@ void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_a
// Reallocate but free `p` on errors
void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* newp = mi_heap_realloc(heap, p, newsize);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
}
void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
}
void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}
void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
}
// Reallocate but free `p` on errors
void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}
void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}
void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}
@@ -791,7 +775,7 @@ void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
// ------------------------------------------------------
// `strdup` using mi_malloc
mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t n = strlen(s);
char* t = (char*)mi_heap_malloc(heap,n+1);
@@ -799,12 +783,12 @@ mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_no
return t;
}
mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
return mi_heap_strdup(mi_get_default_heap(), s);
}
// `strndup` using mi_malloc
mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
if (s == NULL) return NULL;
const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found)
const size_t m = (end != NULL ? (size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string
@@ -816,7 +800,7 @@ mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n)
return t;
}
mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
return mi_heap_strndup(mi_get_default_heap(),s,n);
}
@@ -827,7 +811,7 @@ mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
// todo: use GetFullPathNameW to allow longer file names
char buf[PATH_MAX];
DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
@@ -873,7 +857,7 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
}
#endif
mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#endif
@@ -949,19 +933,19 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
return p;
}
mi_decl_restrict void* mi_new(size_t size) {
mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
void* p = mi_malloc(size);
if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
if mi_unlikely(p == NULL) return mi_try_new(size,false);
return p;
}
mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
if mi_unlikely(p == NULL) return mi_try_new(size, true);
return p;
}
mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -970,7 +954,7 @@ mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
return p;
}
mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -979,9 +963,9 @@ mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_
return p;
}
mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
size_t total;
if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
@@ -990,7 +974,7 @@ mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
}
}
void* mi_new_realloc(void* p, size_t newsize) {
mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
void* q;
do {
q = mi_realloc(p, newsize);
@@ -998,9 +982,9 @@ void* mi_new_realloc(void* p, size_t newsize) {
return q;
}
void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
size_t total;
if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
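
The new comment block in _mi_heap_realloc_zero pins down the realloc edge cases. In terms of the public API the intended behaviour is roughly the following (illustration, not from the commit):

  #include <mimalloc.h>

  static void realloc_semantics_example(void) {
    void* p = mi_realloc(NULL, 32);  // p == NULL: behaves like mi_malloc(32)
    void* q = mi_realloc(p, 0);      // newsize == 0: returns a zero-sized block (like mi_malloc(0)),
                                     // not NULL, so a NULL result always signals an error and the
                                     // original pointer has not been freed in that case
    mi_free(q);
  }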

View file

@@ -283,7 +283,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
MI_UNUSED_RELEASE(bitmap_fields);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
if (mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS)) {
if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
*pre_mask = mi_bitmap_mask_(count, bitidx);
*mid_mask = 0;
*post_mask = 0;

View file

@@ -189,7 +189,7 @@ mi_heap_t* mi_heap_get_backing(void) {
return bheap;
}
mi_heap_t* mi_heap_new(void) {
mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
mi_heap_t* bheap = mi_heap_get_backing();
mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
if (heap==NULL) return NULL;
@@ -410,7 +410,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
mi_segment_t* segment = _mi_ptr_segment(p);
bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(valid);
if (mi_unlikely(!valid)) return NULL;
if mi_unlikely(!valid) return NULL;
return mi_page_heap(_mi_segment_page_of(segment,p));
}
@@ -532,7 +532,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
xarea.area.reserved = page->reserved * bsize;
xarea.area.committed = page->capacity * bsize;
xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
xarea.area.used = page->used * bsize;
xarea.area.used = page->used; // number of blocks in use (#553)
xarea.area.block_size = ubsize;
xarea.area.full_block_size = bsize;
return fun(heap, &xarea, arg);
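
The change above makes xarea.area.used a count of blocks in use rather than a byte total (issue #553). A sketch of how a visitor might consume it, assuming the usual mi_heap_visit_blocks contract in which each area is first reported with block == NULL (the callback and helper names here are hypothetical):

  #include <stddef.h>
  #include <mimalloc.h>

  // Sums area->used over all areas; after this change that is a block count, not bytes.
  static bool count_used_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                                void* block, size_t block_size, void* arg) {
    (void)heap; (void)block_size;
    if (block == NULL) {              // per-area callback
      *(size_t*)arg += area->used;
    }
    return true;                      // continue visiting
  }

  static size_t heap_blocks_in_use(mi_heap_t* heap) {
    size_t total = 0;
    mi_heap_visit_blocks(heap, false /* areas only */, &count_used_blocks, &total);
    return total;
  }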

View file

@@ -119,7 +119,7 @@ mi_decl_nodiscard long mi_option_get(mi_option_t option) {
if (option < 0 || option >= _mi_option_last) return 0;
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
if (mi_unlikely(desc->init == UNINIT)) {
if mi_unlikely(desc->init == UNINIT) {
mi_option_init(desc);
}
return desc->value;

View file

@@ -133,7 +133,7 @@ size_t _mi_os_good_alloc_size(size_t size) {
else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
else align_size = 4*MI_MiB;
if (mi_unlikely(size >= (SIZE_MAX - align_size))) return size; // possible overflow?
if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
return _mi_align_up(size, align_size);
}
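
_mi_os_good_alloc_size rounds a request up to a granularity that grows with the size class, after guarding against overflow. A standalone sketch of the part visible in this hunk (the granularities for smaller sizes are not shown here, and the helper name is hypothetical):

  #include <stddef.h>
  #include <stdint.h>

  static size_t good_alloc_size_sketch(size_t size) {
    size_t align_size;
    if      (size <  8u * 1024 * 1024) align_size = 256u * 1024;        // < 8 MiB  -> 256 KiB granularity
    else if (size < 32u * 1024 * 1024) align_size = 1u * 1024 * 1024;   // < 32 MiB -> 1 MiB
    else                               align_size = 4u * 1024 * 1024;   // otherwise -> 4 MiB
    if (size >= (SIZE_MAX - align_size)) return size;                   // possible overflow: leave as-is
    return ((size + align_size - 1) / align_size) * align_size;         // what _mi_align_up computes
  }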

View file

@@ -130,7 +130,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
tfreex = mi_tf_set_delayed(tfree, delay);
old_delay = mi_tf_delayed(tfree);
if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) {
if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
// tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
}
@@ -198,7 +198,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// and the local free list
if (page->local_free != NULL) {
if (mi_likely(page->free == NULL)) {
if mi_likely(page->free == NULL) {
// usual case
page->free = page->local_free;
page->local_free = NULL;
@@ -400,7 +400,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) {
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
@@ -797,8 +797,8 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) {
if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) ) {
if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL;
}
@@ -815,15 +815,15 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
{
mi_assert_internal(heap != NULL);
// initialize if necessary
if (mi_unlikely(!mi_heap_is_initialized(heap))) {
if mi_unlikely(!mi_heap_is_initialized(heap)) {
mi_thread_init(); // calls `_mi_heap_init` in turn
heap = mi_get_default_heap();
if (mi_unlikely(!mi_heap_is_initialized(heap))) { return NULL; }
if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
}
mi_assert_internal(mi_heap_is_initialized(heap));
@@ -835,12 +835,12 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
// find (or allocate) a page of the right size
mi_page_t* page = mi_find_page(heap, size);
if (mi_unlikely(page == NULL)) { // first time out of memory, try to collect and retry the allocation once more
if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
mi_heap_collect(heap, true /* force */);
page = mi_find_page(heap, size);
}
if (mi_unlikely(page == NULL)) { // out of memory
if mi_unlikely(page == NULL) { // out of memory
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
_mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
return NULL;
@@ -849,6 +849,15 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(mi_page_block_size(page) >= size);
// and try again, this time succeeding! (i.e. this should never recurse)
return _mi_page_malloc(heap, page, size);
// and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
if mi_unlikely(zero && page->xblock_size == 0) {
// note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
void* p = _mi_page_malloc(heap, page, size, false);
mi_assert_internal(p != NULL);
_mi_memzero_aligned(p, mi_page_usable_block_size(page));
return p;
}
else {
return _mi_page_malloc(heap, page, size, zero);
}
}
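
_mi_malloc_generic now takes the zero flag itself; for huge pages (xblock_size == 0) the block is zeroed after allocation over its full usable size instead of inside _mi_page_malloc. From the caller's side the guarantee is simply the following (illustration; treating 80 MiB as above MI_LARGE_OBJ_SIZE_MAX is an assumption about the default configuration):

  #include <assert.h>
  #include <stdint.h>
  #include <mimalloc.h>

  static void huge_zalloc_example(void) {
    const size_t n = 80u * 1024 * 1024;      // assumed large enough to take the huge/generic path
    uint8_t* p = (uint8_t*)mi_zalloc(n);
    if (p != NULL) {
      assert(p[0] == 0 && p[n - 1] == 0);    // still fully zero-initialized
      mi_free(p);
    }
  }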

View file

@@ -930,8 +930,8 @@ static mi_segment_t* mi_abandoned_pop(void) {
// Check efficiently if it is empty (or if the visited list needs to be moved)
mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
segment = mi_tagged_segment_ptr(ts);
if (mi_likely(segment == NULL)) {
if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL
if mi_likely(segment == NULL) {
if mi_likely(!mi_abandoned_visited_revisit()) { // try to swap in the visited list on NULL
return NULL;
}
}