merge from dev

Daan Leijen 2023-03-29 12:31:49 -07:00
commit 42c8015cbc
7 changed files with 33 additions and 15 deletions

src/alloc.c

@@ -50,7 +50,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
     _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
   }
-  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED
+  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
   if (!page->is_zero && !zero && !mi_page_is_huge(page)) {
     memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
   }
@@ -401,7 +401,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   #endif
   }
-  #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED   // note: when tracking, cannot use mi_usable_size with multi-threading
+  #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN   // note: when tracking, cannot use mi_usable_size with multi-threading
   if (segment->kind != MI_SEGMENT_HUGE) {   // not for huge segments as we just reset the content
     memset(block, MI_DEBUG_FREED, mi_usable_size(block));
   }
@@ -455,7 +455,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
     // owning thread can free a block directly
     if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
-    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
+    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
     if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
       memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     }
@@ -551,7 +551,7 @@ void mi_free(void* p) mi_attr_noexcept
     if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
     mi_stat_free(page, block);
-    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
+    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_track_free_size(p, mi_page_usable_size_of(page, block));   // faster than mi_usable_size as we already know the page and that p is unaligned
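
Each of the hunks above extends a debug-only fill (MI_DEBUG_UNINIT on allocation, MI_DEBUG_FREED on free) with && !MI_TSAN, so the pattern writes are compiled out when building under ThreadSanitizer, where such unsynchronized writes into blocks that other threads may still observe would be reported as races. Below is a minimal sketch of how a macro like MI_TSAN is commonly derived from the compiler's sanitizer detection; the actual definition used by mimalloc (or its build files) may differ:

#ifndef MI_TSAN
#if defined(__SANITIZE_THREAD__)      // gcc defines this under -fsanitize=thread
#define MI_TSAN  1
#elif defined(__has_feature)
#if __has_feature(thread_sanitizer)   // clang exposes thread sanitizer via __has_feature
#define MI_TSAN  1
#endif
#endif
#endif
#ifndef MI_TSAN
#define MI_TSAN  0                    // default: not building with thread sanitizer
#endif

With MI_TSAN defaulting to 0, the existing !MI_TSAN conditions keep their old meaning in normal builds.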

src/os.c

@@ -403,7 +403,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   else _mi_stat_decrease(&stats->reset, csize);
   if (!reset) return true;   // nothing to do on unreset!
-  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED
+  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED // && !MI_TSAN
   if (MI_SECURE==0) {
     memset(start, 0, csize);   // pretend it is eagerly reset
   }

src/page.c

@@ -92,12 +92,12 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   }
   #endif
-  #if !MI_TSAN
+  #if !MI_TRACK_ENABLED && !MI_TSAN
   mi_block_t* tfree = mi_page_thread_free(page);
   mi_assert_internal(mi_page_list_is_valid(page, tfree));
   #endif
   //size_t tfree_count = mi_page_list_count(page, tfree);
   //mi_assert_internal(tfree_count <= page->thread_freed + 1);
   #endif
   size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
   mi_assert_internal(page->used + free_count == page->capacity);
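
The mi_page_is_valid_init hunk above now skips the walk of the page's thread-free list both when a memory tracker is active (MI_TRACK_ENABLED) and under ThreadSanitizer: blocks freed by other threads are pushed onto that list concurrently, so a debug-time traversal from this thread is unsynchronized by design. A simplified sketch of such a lock-free push (not mimalloc's exact code; the list returned by mi_page_thread_free is reduced here to a bare global for illustration):

#include <stdatomic.h>
#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;

static _Atomic(block_t*) thread_free = NULL;   // stand-in for the page's thread-free list head

// Other threads push freed blocks with a CAS loop; a reader that walks the
// list without synchronization (as the debug assertion does) races with this.
static void thread_free_push(block_t* b) {
  block_t* head = atomic_load_explicit(&thread_free, memory_order_relaxed);
  do {
    b->next = head;   // link the block in front of the current head
  } while (!atomic_compare_exchange_weak_explicit(&thread_free, &head, b,
                                                  memory_order_release, memory_order_relaxed));
}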

src/region.c

@@ -315,7 +315,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
   }
   mi_assert_internal(!_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));
-  #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
+  #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED // && !MI_TSAN
   if (*commit) { ((uint8_t*)p)[0] = 0; }
   #endif
@@ -361,7 +361,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t align_offset,
   if (p != NULL) {
     mi_assert_internal(((uintptr_t)p + align_offset) % alignment == 0);
-    #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
+    #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED // && !MI_TSAN
     if (*commit) { ((uint8_t*)p)[0] = 0; }   // ensure the memory is committed
     #endif
   }
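
To exercise these debug paths under ThreadSanitizer, a debug build with the sanitizer enabled should suffice. A rough example invocation, assuming MI_TSAN is passed on the command line from the repository root (the project's build scripts may provide their own option for this):

clang -O1 -g -fsanitize=thread -DMI_DEBUG=3 -DMI_TSAN=1 -Iinclude -c src/alloc.c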