merge from dev

Daan Leijen 2022-04-19 19:55:03 -07:00
commit 413141ae29
14 changed files with 70 additions and 67 deletions
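
Every hunk below makes the same mechanical change: call sites written as if (mi_unlikely(cond)) become if mi_unlikely(cond), i.e. the branch-hint macro now supplies its own parentheses, and mi_unlikely is dropped from plain expression contexts (such as the ternary in _mi_page_malloc) where an attribute-style hint cannot appear. A minimal sketch of branch-hint macros that support this call-site spelling is shown below; the macro bodies here are an assumption for illustration, not the exact mimalloc definitions.

// Sketch only (assumed definitions, not copied from mimalloc): hint macros that
// include their own parentheses so a call site can read `if mi_unlikely(x) { ... }`.
#if defined(__GNUC__) || defined(__clang__)
  #define mi_likely(x)      (__builtin_expect(!!(x), 1))   // expands to a parenthesized condition
  #define mi_unlikely(x)    (__builtin_expect(!!(x), 0))
#elif defined(__cplusplus) && (__cplusplus >= 202002L)
  #define mi_likely(x)      (x) [[likely]]                 // C++20: attribute placed after the condition
  #define mi_unlikely(x)    (x) [[unlikely]]
#else
  #define mi_likely(x)      (x)
  #define mi_unlikely(x)    (x)
#endif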


@@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* const block = page->free;
-if (mi_unlikely(block == NULL)) {
+if mi_unlikely(block == NULL) {
return _mi_malloc_generic(heap, size, zero);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
@@ -38,9 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
// zero the block?
-if (mi_unlikely(zero)) {
+if mi_unlikely(zero) {
mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks
-const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
_mi_memzero_aligned(block, zsize);
}
@@ -108,7 +108,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si
// The main allocation function
mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
+if mi_likely(size <= MI_SMALL_SIZE_MAX) {
return mi_heap_malloc_small_zero(heap, size, zero);
}
else {
@@ -356,7 +356,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
do {
use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
-if (mi_unlikely(use_delayed)) {
+if mi_unlikely(use_delayed) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
}
@@ -367,7 +367,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
}
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
-if (mi_unlikely(use_delayed)) {
+if mi_unlikely(use_delayed) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
mi_assert_internal(heap != NULL);
@@ -393,9 +393,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
// and push it on the free list
-if (mi_likely(local)) {
+if mi_likely(local) {
// owning thread can free a block directly
-if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
@@ -403,10 +403,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
-if (mi_unlikely(mi_page_all_free(page))) {
+if mi_unlikely(mi_page_all_free(page)) {
_mi_page_retire(page);
}
-else if (mi_unlikely(mi_page_is_in_full(page))) {
+else if mi_unlikely(mi_page_is_in_full(page)) {
_mi_page_unfull(page);
}
}
@@ -439,26 +439,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
{
MI_UNUSED(msg);
#if (MI_DEBUG>0)
-if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
#endif
mi_segment_t* const segment = _mi_ptr_segment(p);
-if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL)
+if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL)
#if (MI_DEBUG>0)
-if (mi_unlikely(!mi_is_in_heap_region(p))) {
+if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
-if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
-if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
return NULL;
}
@@ -470,15 +470,15 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
void mi_free(void* p) mi_attr_noexcept
{
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
-if (mi_unlikely(segment == NULL)) return;
+if mi_unlikely(segment == NULL) return;
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
-if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
+if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
mi_block_t* block = (mi_block_t*)(p);
-if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+if mi_unlikely(mi_check_is_double_free(page,block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
#if (MI_DEBUG!=0)
@@ -486,7 +486,7 @@ void mi_free(void* p) mi_attr_noexcept
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
-if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
_mi_page_retire(page);
}
}
@@ -532,7 +532,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
if (segment==NULL) return 0; // also returns 0 if `p == NULL`
const mi_page_t* const page = _mi_segment_page_of(segment, p);
-if (mi_likely(!mi_page_has_aligned(page))) {
+if mi_likely(!mi_page_has_aligned(page)) {
const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
}
@@ -627,18 +627,18 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
// else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
// (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
-if (mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0)) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
// todo: adjust potential padding to reflect the new size?
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
-if (mi_likely(newp != NULL)) {
+if mi_likely(newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start);
}
-if (mi_likely(p != NULL)) {
+if mi_likely(p != NULL) {
_mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
mi_free(p); // only free the original pointer if successful
}
@@ -863,13 +863,13 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
void* p = mi_malloc(size);
-if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+if mi_unlikely(p == NULL) return mi_try_new(size,false);
return p;
}
mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
-if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+if mi_unlikely(p == NULL) return mi_try_new(size, true);
return p;
}
@@ -893,7 +893,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz
mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
size_t total;
-if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
@@ -912,7 +912,7 @@ mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
size_t total;
-if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
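
Unrelated to the macro spelling, the _mi_heap_realloc_zero hunk above preserves the in-place reuse check: the existing block is returned when the requested size still fits its usable size and leaves at most half of it unused (and newsize > 0, so realloc(NULL,0) does not return NULL). A standalone illustration of that predicate follows; the helper name is hypothetical and not part of mimalloc.

#include <stdbool.h>
#include <stddef.h>

// Hypothetical helper mirroring the reuse condition in _mi_heap_realloc_zero:
// keep the current block when the new size fits within its usable size and
// wastes no more than 50% of it; newsize must be positive so that
// realloc(NULL,0) falls through to a fresh (zero-sized) allocation instead of NULL.
static bool mi_realloc_fits_in_place(size_t usable_size, size_t newsize) {
  return (newsize <= usable_size) && (newsize >= usable_size / 2) && (newsize > 0);
}

// Example: with 100 usable bytes, requests of 50..100 bytes reuse the block;
// a request of 40 bytes reallocates to avoid leaving more than half unused.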