From 7bc602ebb428f23cc20fb40f6ca218497bc88d98 Mon Sep 17 00:00:00 2001 From: Daan Leijen Date: Tue, 19 Apr 2022 19:50:06 -0700 Subject: [PATCH] redefine mi_likely/mi_unlikely to work with C++ 20 [[likely]] attributes --- ide/vs2022/mimalloc.vcxproj | 4 +-- include/mimalloc-internal.h | 23 +++++++++------- src/alloc-aligned.c | 12 ++++----- src/alloc-override-osx.c | 2 +- src/alloc-posix.c | 4 +-- src/alloc.c | 54 ++++++++++++++++++------------------- src/bitmap.c | 2 +- src/heap.c | 2 +- src/options.c | 2 +- src/os.c | 2 +- src/page.c | 20 +++++++------- src/segment.c | 4 +-- 12 files changed, 67 insertions(+), 64 deletions(-) diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj index 125d4050..b0ffe335 100644 --- a/ide/vs2022/mimalloc.vcxproj +++ b/ide/vs2022/mimalloc.vcxproj @@ -119,7 +119,7 @@ MI_DEBUG=3;%(PreprocessorDefinitions); CompileAsCpp false - Default + stdcpp20 @@ -179,7 +179,7 @@ Default CompileAsCpp true - Default + stdcpp20 true diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 9a141057..ca9e9c41 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -154,8 +154,11 @@ bool _mi_page_is_valid(mi_page_t* page); // ------------------------------------------------------ #if defined(__GNUC__) || defined(__clang__) -#define mi_unlikely(x) __builtin_expect(!!(x),false) -#define mi_likely(x) __builtin_expect(!!(x),true) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) [[likely]] #else #define mi_unlikely(x) (x) #define mi_likely(x) (x) @@ -277,7 +280,7 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot *total = size; return false; } - else if (mi_unlikely(mi_mul_overflow(count, size, total))) { + else if mi_unlikely(mi_mul_overflow(count, size, total)) { #if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size); #endif @@ -351,7 +354,7 @@ extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate static inline mi_heap_t* mi_get_default_heap(void) { #if defined(MI_TLS_SLOT) mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT); - if (mi_unlikely(heap == NULL)) { + if mi_unlikely(heap == NULL) { #ifdef __GNUC__ __asm(""); // prevent conditional load of the address of _mi_heap_empty #endif @@ -453,7 +456,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) { static inline size_t mi_page_block_size(const mi_page_t* page) { const size_t bsize = page->xblock_size; mi_assert_internal(bsize > 0); - if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) { + if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) { return bsize; } else { @@ -607,11 +610,11 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) { static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) { void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]); - return (mi_unlikely(p==null) ? NULL : p); + return (p==null ? NULL : p); } static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) { - uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p); + uintptr_t x = (uintptr_t)(p==NULL ? 
null : p); return mi_rotl(x ^ keys[1], keys[0]) + keys[0]; } @@ -638,7 +641,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* mi_block_t* next = mi_block_nextx(page,block,page->keys); // check for free list corruption: is `next` at least in the same page? // TODO: check if `next` is `page->block_size` aligned? - if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) { + if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) { _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); next = NULL; } @@ -691,12 +694,12 @@ size_t _mi_os_numa_node_count_get(void); extern _Atomic(size_t) _mi_numa_node_count; static inline int _mi_os_numa_node(mi_os_tld_t* tld) { - if (mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1)) return 0; + if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; } else return _mi_os_numa_node_get(tld); } static inline size_t _mi_os_numa_node_count(void) { const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count); - if (mi_likely(count>0)) return count; + if mi_likely(count > 0) { return count; } else return _mi_os_numa_node_count_get(); } diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index 15781359..c686e936 100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -49,19 +49,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t { // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size. mi_assert(alignment > 0); - if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) { // require power-of-two (see ) + if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see ) #if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment); #endif return NULL; } - if (mi_unlikely(alignment > MI_ALIGNMENT_MAX)) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers) + if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers) #if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment); #endif return NULL; } - if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see ) + if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see ) #if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment); #endif @@ -71,10 +71,10 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check // try first if there happens to be a small block available with just the right alignment - if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) { + if mi_likely(padsize <= MI_SMALL_SIZE_MAX) { mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize); const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0; - if (mi_likely(page->free != NULL && is_aligned)) + if mi_likely(page->free != NULL && is_aligned) { #if MI_STAT>1 mi_heap_stat_increase(heap, malloc, size); @@ -102,7 +102,7 @@ 
mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, #if !MI_PADDING // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`) if (!_mi_is_power_of_two(alignment)) return NULL; - if (mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)) + if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX) #else // with padding, we can only guarantee this for fixed alignments if (mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2))) diff --git a/src/alloc-override-osx.c b/src/alloc-override-osx.c index 41d0a386..ba2313a2 100644 --- a/src/alloc-override-osx.c +++ b/src/alloc-override-osx.c @@ -254,7 +254,7 @@ static malloc_zone_t mi_malloc_zone = { static inline malloc_zone_t* mi_get_default_zone(void) { static bool init; - if (mi_unlikely(!init)) { + if mi_unlikely(!init) { init = true; malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see ) } diff --git a/src/alloc-posix.c b/src/alloc-posix.c index 176e7ec3..e1b4a286 100644 --- a/src/alloc-posix.c +++ b/src/alloc-posix.c @@ -83,7 +83,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcep } mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept { - if (mi_unlikely((size&(alignment-1)) != 0)) { // C11 requires alignment>0 && integral multiple, see + if mi_unlikely((size&(alignment-1)) != 0) { // C11 requires alignment>0 && integral multiple, see #if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment); #endif @@ -109,7 +109,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att } void** op = (void**)p; void* newp = mi_reallocarray(*op, count, size); - if (mi_unlikely(newp == NULL)) return errno; + if mi_unlikely(newp == NULL) { return errno; } *op = newp; return 0; } diff --git a/src/alloc.c b/src/alloc.c index 1148ec72..02bce8d7 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept { mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size); mi_block_t* const block = page->free; - if (mi_unlikely(block == NULL)) { + if mi_unlikely(block == NULL) { return _mi_malloc_generic(heap, size, zero); } mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); @@ -38,9 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); // zero the block? - if (mi_unlikely(zero)) { + if mi_unlikely(zero) { mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks - const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size); + const size_t zsize = (page->is_zero ? 
sizeof(block->next) : page->xblock_size); _mi_memzero_aligned(block, zsize); } @@ -108,7 +108,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si // The main allocation function mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { - if (mi_likely(size <= MI_SMALL_SIZE_MAX)) { + if mi_likely(size <= MI_SMALL_SIZE_MAX) { return mi_heap_malloc_small_zero(heap, size, zero); } else { @@ -350,7 +350,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); do { use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE); - if (mi_unlikely(use_delayed)) { + if mi_unlikely(use_delayed) { // unlikely: this only happens on the first concurrent free in a page that is in the full list tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING); } @@ -361,7 +361,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc } } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); - if (mi_unlikely(use_delayed)) { + if mi_unlikely(use_delayed) { // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`) mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page); mi_assert_internal(heap != NULL); @@ -387,9 +387,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block) { // and push it on the free list - if (mi_likely(local)) { + if mi_likely(local) { // owning thread can free a block directly - if (mi_unlikely(mi_check_is_double_free(page, block))) return; + if mi_unlikely(mi_check_is_double_free(page, block)) return; mi_check_padding(page, block); #if (MI_DEBUG!=0) memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); @@ -397,10 +397,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block mi_block_set_next(page, block, page->local_free); page->local_free = block; page->used--; - if (mi_unlikely(mi_page_all_free(page))) { + if mi_unlikely(mi_page_all_free(page)) { _mi_page_retire(page); } - else if (mi_unlikely(mi_page_is_in_full(page))) { + else if mi_unlikely(mi_page_is_in_full(page)) { _mi_page_unfull(page); } } @@ -433,26 +433,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms { MI_UNUSED(msg); #if (MI_DEBUG>0) - if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) { + if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) { _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); return NULL; } #endif mi_segment_t* const segment = _mi_ptr_segment(p); - if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL) + if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL) #if (MI_DEBUG>0) - if (mi_unlikely(!mi_is_in_heap_region(p))) { + if mi_unlikely(!mi_is_in_heap_region(p)) { _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n" "(this may still be a valid very large allocation (over 64MiB))\n", msg, p); - if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) { + if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) { _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p); } } #endif #if (MI_DEBUG>0 || MI_SECURE>=4) - if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) 
{ + if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); return NULL; } @@ -464,15 +464,15 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms void mi_free(void* p) mi_attr_noexcept { mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); - if (mi_unlikely(segment == NULL)) return; + if mi_unlikely(segment == NULL) return; mi_threadid_t tid = _mi_thread_id(); mi_page_t* const page = _mi_segment_page_of(segment, p); mi_block_t* const block = (mi_block_t*)p; - if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks + if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks // local, and not full or aligned - if (mi_unlikely(mi_check_is_double_free(page,block))) return; + if mi_unlikely(mi_check_is_double_free(page,block)) return; mi_check_padding(page, block); mi_stat_free(page, block); #if (MI_DEBUG!=0) @@ -480,7 +480,7 @@ void mi_free(void* p) mi_attr_noexcept #endif mi_block_set_next(page, block, page->local_free); page->local_free = block; - if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) + if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) _mi_page_retire(page); } } @@ -526,7 +526,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg); if (segment==NULL) return 0; // also returns 0 if `p == NULL` const mi_page_t* const page = _mi_segment_page_of(segment, p); - if (mi_likely(!mi_page_has_aligned(page))) { + if mi_likely(!mi_page_has_aligned(page)) { const mi_block_t* block = (const mi_block_t*)p; return mi_page_usable_size_of(page, block); } @@ -621,18 +621,18 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0) - if (mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0)) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) + if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) // todo: adjust potential padding to reflect the new size? return p; // reallocation still fits and not more than 50% waste } void* newp = mi_heap_malloc(heap,newsize); - if (mi_likely(newp != NULL)) { + if mi_likely(newp != NULL) { if (zero && newsize > size) { // also set last word in the previous allocation to zero to ensure any padding is zero-initialized const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); memset((uint8_t*)newp + start, 0, newsize - start); } - if (mi_likely(p != NULL)) { + if mi_likely(p != NULL) { _mi_memcpy_aligned(newp, p, (newsize > size ? 
size : newsize)); mi_free(p); // only free the original pointer if successful } @@ -857,13 +857,13 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) { mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) { void* p = mi_malloc(size); - if (mi_unlikely(p == NULL)) return mi_try_new(size,false); + if mi_unlikely(p == NULL) return mi_try_new(size,false); return p; } mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept { void* p = mi_malloc(size); - if (mi_unlikely(p == NULL)) return mi_try_new(size, true); + if mi_unlikely(p == NULL) return mi_try_new(size, true); return p; } @@ -887,7 +887,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { size_t total; - if (mi_unlikely(mi_count_size_overflow(count, size, &total))) { + if mi_unlikely(mi_count_size_overflow(count, size, &total)) { mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc return NULL; } @@ -906,7 +906,7 @@ mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) { mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { size_t total; - if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) { + if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) { mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc return NULL; } diff --git a/src/bitmap.c b/src/bitmap.c index 51926bbd..88c8620c 100644 --- a/src/bitmap.c +++ b/src/bitmap.c @@ -283,7 +283,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) { MI_UNUSED_RELEASE(bitmap_fields); const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); - if (mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS)) { + if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) { *pre_mask = mi_bitmap_mask_(count, bitidx); *mid_mask = 0; *post_mask = 0; diff --git a/src/heap.c b/src/heap.c index e14de32f..45cb14c1 100644 --- a/src/heap.c +++ b/src/heap.c @@ -410,7 +410,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) { mi_segment_t* segment = _mi_ptr_segment(p); bool valid = (_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(valid); - if (mi_unlikely(!valid)) return NULL; + if mi_unlikely(!valid) return NULL; return mi_page_heap(_mi_segment_page_of(segment,p)); } diff --git a/src/options.c b/src/options.c index 09a28dac..e5951be4 100644 --- a/src/options.c +++ b/src/options.c @@ -119,7 +119,7 @@ mi_decl_nodiscard long mi_option_get(mi_option_t option) { if (option < 0 || option >= _mi_option_last) return 0; mi_option_desc_t* desc = &options[option]; mi_assert(desc->option == option); // index should match the option - if (mi_unlikely(desc->init == UNINIT)) { + if mi_unlikely(desc->init == UNINIT) { mi_option_init(desc); } return desc->value; diff --git a/src/os.c b/src/os.c index b7096ac6..82b38007 100644 --- a/src/os.c +++ b/src/os.c @@ -133,7 +133,7 @@ size_t _mi_os_good_alloc_size(size_t size) { else if (size < 8*MI_MiB) align_size = 256*MI_KiB; else if (size < 32*MI_MiB) align_size = 1*MI_MiB; else align_size = 4*MI_MiB; - if (mi_unlikely(size >= (SIZE_MAX - align_size))) return size; // possible overflow? 
+ if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow? return _mi_align_up(size, align_size); } diff --git a/src/page.c b/src/page.c index fdcf5d01..357e05e1 100644 --- a/src/page.c +++ b/src/page.c @@ -130,7 +130,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS; tfreex = mi_tf_set_delayed(tfree, delay); old_delay = mi_tf_delayed(tfree); - if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) { + if mi_unlikely(old_delay == MI_DELAYED_FREEING) { mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail } @@ -198,7 +198,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { // and the local free list if (page->local_free != NULL) { - if (mi_likely(page->free == NULL)) { + if mi_likely(page->free == NULL) { // usual case page->free = page->local_free; page->local_free = NULL; @@ -400,7 +400,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { // how to check this efficiently though... // for now, we don't retire if it is the only page left of this size class. mi_page_queue_t* pq = mi_page_queue_of(page); - if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) { + if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) { if (pq->last==page && pq->first==page) { // the only page in the queue? mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); @@ -797,8 +797,8 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) { static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept { // huge allocation? 
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` - if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) { - if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see ) + if mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) ) { + if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see ) _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); return NULL; } @@ -820,10 +820,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce mi_assert_internal(heap != NULL); // initialize if necessary - if (mi_unlikely(!mi_heap_is_initialized(heap))) { + if mi_unlikely(!mi_heap_is_initialized(heap)) { mi_thread_init(); // calls `_mi_heap_init` in turn heap = mi_get_default_heap(); - if (mi_unlikely(!mi_heap_is_initialized(heap))) { return NULL; } + if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; } } mi_assert_internal(mi_heap_is_initialized(heap)); @@ -835,12 +835,12 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce // find (or allocate) a page of the right size mi_page_t* page = mi_find_page(heap, size); - if (mi_unlikely(page == NULL)) { // first time out of memory, try to collect and retry the allocation once more + if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more mi_heap_collect(heap, true /* force */); page = mi_find_page(heap, size); } - if (mi_unlikely(page == NULL)) { // out of memory + if mi_unlikely(page == NULL) { // out of memory const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size); return NULL; @@ -850,7 +850,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce mi_assert_internal(mi_page_block_size(page) >= size); // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc) - if (mi_unlikely(zero && page->xblock_size == 0)) { + if mi_unlikely(zero && page->xblock_size == 0) { // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case. void* p = _mi_page_malloc(heap, page, size, false); mi_assert_internal(p != NULL); diff --git a/src/segment.c b/src/segment.c index 4554eba0..f9edf04a 100644 --- a/src/segment.c +++ b/src/segment.c @@ -930,8 +930,8 @@ static mi_segment_t* mi_abandoned_pop(void) { // Check efficiently if it is empty (or if the visited list needs to be moved) mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); segment = mi_tagged_segment_ptr(ts); - if (mi_likely(segment == NULL)) { - if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL + if mi_likely(segment == NULL) { + if mi_likely(!mi_abandoned_visited_revisit()) { // try to swap in the visited list on NULL return NULL; } }
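
Note (editorial addendum, not part of the patch): the point of the macro change is that call sites now write `if mi_unlikely(cond) { ... }` without an extra pair of parentheses, so the macro itself can expand either to a GCC/clang `__builtin_expect` condition or, on a C++20 compiler such as MSVC with /std:c++20 (hence the vcxproj change to stdcpp20), to the parenthesized condition followed by a `[[likely]]`/`[[unlikely]]` attribute. Below is a minimal standalone C++ sketch that mirrors the new three-way definition from include/mimalloc-internal.h; the call site (`checked_div`) is hypothetical and only illustrates the new spelling, it is not mimalloc code.

  #include <cstdio>

  // Same three-way definition as the patch introduces in mimalloc-internal.h:
  #if defined(__GNUC__) || defined(__clang__)
    #define mi_unlikely(x)  (__builtin_expect(!!(x),false))
    #define mi_likely(x)    (__builtin_expect(!!(x),true))
  #elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
    // C++20: the attribute follows the parenthesized condition, so the call
    // site must not wrap the macro in another pair of parentheses.
    #define mi_unlikely(x)  (x) [[unlikely]]
    #define mi_likely(x)    (x) [[likely]]
  #else
    #define mi_unlikely(x)  (x)
    #define mi_likely(x)    (x)
  #endif

  // Hypothetical call site written in the new style used throughout the patch.
  static int checked_div(int a, int b) {
    if mi_unlikely(b == 0) {   // expands to `if (b == 0) [[unlikely]] {` under C++20/MSVC,
      return 0;                // or to `if (__builtin_expect(!!(b == 0),false)) {` under GCC/clang
    }
    else {
      return a / b;
    }
  }

  int main() {
    std::printf("%d\n", checked_div(10, 2));  // prints 5
    return 0;
  }

This sketch also suggests why branches that used to be single statements (e.g. in _mi_os_numa_node and _mi_os_numa_node_count) gained braces in the patch: `if mi_likely(c) { return x; }` expands to the unambiguous `if (c) [[likely]] { return x; }`, presumably preferred here so the attribute clearly applies to the whole braced branch.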