pass all debug tests

daanx 2024-11-30 22:54:57 -08:00
parent 9ebe941ce0
commit 8f2a5864b8
6 changed files with 50 additions and 10 deletions


@@ -463,7 +463,7 @@ static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
}
static inline mi_page_t* _mi_ptr_page(const void* p) {
#if MI_DEBUG
#if 1 // MI_DEBUG
return _mi_checked_ptr_page(p);
#else
return _mi_ptr_page_ex(p,NULL);


@@ -30,7 +30,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
if (page->block_size != 0) { // not the empty heap
mi_assert_internal(mi_page_block_size(page) >= size);
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page);
}
// check the free list
mi_block_t* const block = page->free;


@@ -462,6 +462,9 @@ static mi_page_t* mi_arena_page_try_find_abandoned(size_t slice_count, size_t bl
mi_assert_internal(mi_page_block_size(page) == block_size);
mi_assert_internal(!mi_page_is_full(page));
mi_assert_internal(mi_page_is_abandoned(page));
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page);
mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
return page;
}
}
@@ -521,6 +524,8 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
page->block_size_shift = 0;
}
_mi_page_map_register(page);
mi_assert_internal(_mi_ptr_page(page)==page);
mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
mi_assert_internal(mi_page_block_size(page) == block_size);
mi_assert_internal(mi_page_is_abandoned(page));
@@ -561,6 +566,9 @@ static mi_page_t* mi_singleton_page_alloc(mi_heap_t* heap, size_t block_size, si
mi_assert(page != NULL);
mi_assert(page->reserved == 1);
mi_assert_internal(_mi_ptr_page(page)==page);
mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
return page;
}
@@ -584,6 +592,11 @@ mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t block
page = mi_singleton_page_alloc(heap, block_size, block_alignment);
}
// mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc);
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page);
mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
mi_assert_internal(block_alignment <= MI_PAGE_MAX_OVERALLOC_ALIGN || _mi_is_aligned(mi_page_start(page), block_alignment));
return page;
}
@@ -601,11 +614,14 @@ void _mi_arena_page_free(mi_page_t* page, mi_tld_t* tld) {
void _mi_arena_page_abandon(mi_page_t* page, mi_tld_t* tld) {
mi_assert_internal(mi_page_is_abandoned(page));
mi_assert_internal(page->next==NULL);
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page);
if (mi_page_all_free(page)) {
_mi_arena_page_free(page, tld);
}
else if (page->memid.memkind==MI_MEM_ARENA) {
else if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
// make available for allocations
size_t bin = _mi_bin(mi_page_block_size(page));
size_t slice_index;
@@ -622,6 +638,8 @@ void _mi_arena_page_abandon(mi_page_t* page, mi_tld_t* tld) {
bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
if (mi_page_is_singleton(page)) { mi_assert_internal(mi_page_is_abandoned(page)); }
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page);
// if (!mi_page_is_abandoned(page)) return false; // it is not abandoned (anymore)
// note: we can access the page even if it is in the meantime reclaimed by another thread since
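
The asserts added throughout these arena hunks all check the same page-map invariant: once a page is registered, looking up the page pointer itself (or its start address) through _mi_ptr_page must return that same page. Below is a minimal, self-contained sketch of that round-trip check; the flat page_map storing raw page pointers and the SLICE_SHIFT/MAP_LEN constants are hypothetical simplifications for illustration, not mimalloc's actual page-map encoding.

// Sketch of the invariant behind the new asserts: after registering a page,
// a lookup of its own address (or any interior address) must yield that page.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SLICE_SHIFT 16                      // hypothetical 64 KiB slices
#define MAP_LEN     1024

static void* page_map[MAP_LEN];             // hypothetical: slice index -> owning page

static void map_register(void* page, size_t slice_count) {
  const size_t idx = (uintptr_t)page >> SLICE_SHIFT;
  for (size_t i = 0; i < slice_count; i++) { page_map[idx + i] = page; }
}

static void* map_lookup(const void* p) {
  return page_map[(uintptr_t)p >> SLICE_SHIFT];
}

int main(void) {
  void* page = (void*)((uintptr_t)3 << SLICE_SHIFT);          // pretend page starts at slice 3
  map_register(page, 2);
  assert(map_lookup(page) == page);                            // like _mi_ptr_page(page) == page
  assert(map_lookup((const void*)((uintptr_t)page + 100)) == page);  // interior pointers resolve too
  return 0;
}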


@@ -219,20 +219,26 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
size = _mi_align_up(size, _mi_os_page_size());
const bool use_overalloc = (alignment > mi_os_mem_config.alloc_granularity && alignment <= size/8);
// try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
if (p == NULL) return NULL;
void* p = NULL;
if (!use_overalloc) {
p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
}
// aligned already?
if (((uintptr_t)p % alignment) == 0) {
if (p != NULL && ((uintptr_t)p % alignment) == 0) {
*base = p;
}
else {
// if not aligned, free it, overallocate, and unmap around it
#if !MI_TRACK_ASAN
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
if (!use_overalloc) {
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
}
#endif
mi_os_prim_free(p, size, commit, stats);
if (p != NULL) { mi_os_prim_free(p, size, commit, stats); }
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
const size_t over_size = size + alignment;
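
For very large alignments the change skips the direct attempt entirely and goes straight to over-allocation: reserve size + alignment bytes and carve an aligned block out of that range. A minimal sketch of just the alignment arithmetic is below; it uses plain malloc purely for illustration, whereas the real code works on OS mappings and can unmap the unused slack before and after the aligned block.

// Sketch of the over-allocation fallback: allocate `alignment` extra bytes
// and round the returned base address up to the requested alignment.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  const size_t size = 1u << 16, alignment = 1u << 12;   // e.g. 64 KiB at 4 KiB alignment
  if (size >= SIZE_MAX - alignment) return 1;            // overflow check, as in the diff
  const size_t over_size = size + alignment;
  void* base = malloc(over_size);
  if (base == NULL) return 1;
  void* p = (void*)(((uintptr_t)base + alignment - 1) & ~(uintptr_t)(alignment - 1));
  printf("base=%p aligned=%p (offset %zu bytes)\n", base, p,
         (size_t)((uintptr_t)p - (uintptr_t)base));
  free(base);
  return 0;
}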


@@ -36,7 +36,9 @@ static bool mi_page_map_init(void) {
}
// commit the first part so NULL pointers get resolved without an access violation
if (!mi_page_map_all_committed) {
_mi_os_commit(_mi_page_map, _mi_os_page_size(), NULL, NULL);
bool is_zero;
_mi_os_commit(_mi_page_map, _mi_os_page_size(), &is_zero, NULL);
if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(_mi_page_map, _mi_os_page_size()); }
_mi_page_map[0] = 1; // so _mi_ptr_page(NULL) == NULL
mi_assert_internal(_mi_ptr_page(NULL)==NULL);
}
@@ -51,7 +53,11 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
for (size_t i = commit_bit_idx_lo; i <= commit_bit_idx_hi; i++) { // per bit to avoid crossing over bitmap chunks
if (mi_bitmap_is_xsetN(MI_BIT_CLEAR, &mi_page_map_commit, i, 1)) {
// this may race, in which case we do multiple commits (which is ok)
_mi_os_commit(_mi_page_map + (i*mi_page_map_entries_per_commit_bit), mi_page_map_entries_per_commit_bit, NULL, NULL);
bool is_zero;
uint8_t* const start = _mi_page_map + (i*mi_page_map_entries_per_commit_bit);
const size_t size = mi_page_map_entries_per_commit_bit;
_mi_os_commit(start, size, &is_zero, NULL);
if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start,size); }
mi_bitmap_xsetN(MI_BIT_SET, &mi_page_map_commit, i, 1, NULL);
}
}
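
Both commit sites above now follow the same pattern: _mi_os_commit reports whether the freshly committed range is known to be zero, and when neither the OS nor the original reservation guarantees zeroed memory, the range is cleared by hand before page-map entries are written. A self-contained sketch of that pattern follows; os_commit is a hypothetical stand-in for the real primitive and here simply reports "not known to be zero".

// Sketch of the commit-then-zero pattern used for the page map.
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool os_commit(void* start, size_t size, bool* is_zero) {
  (void)start; (void)size;
  *is_zero = false;                 // pretend the OS gives no zeroing guarantee
  return true;
}

static bool commit_zeroed(void* start, size_t size, bool initially_zero) {
  bool is_zero = false;
  if (!os_commit(start, size, &is_zero)) return false;
  if (!is_zero && !initially_zero) {
    memset(start, 0, size);         // mirrors the _mi_memzero calls in the diff
  }
  return true;
}

int main(void) {
  static unsigned char map[4096];
  memset(map, 0xFF, sizeof(map));                      // simulate stale contents
  commit_zeroed(map, sizeof(map), /*initially_zero=*/false);
  return map[0];                                       // 0: entries start out cleared
}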
@@ -69,6 +75,8 @@ static size_t mi_page_map_get_idx(mi_page_t* page, uint8_t** page_start, size_t*
void _mi_page_map_register(mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_internal(_mi_is_aligned(page,MI_PAGE_ALIGN));
if mi_unlikely(_mi_page_map == NULL) {
if (!mi_page_map_init()) return;
}


@@ -745,7 +745,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
#if MI_STAT
count++;
#endif
#if MI_MAX_CANDIDATE_SEARCH > 1
candidate_count++;
#endif
// collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
@@ -978,6 +980,8 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(mi_page_block_size(page) >= size);
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page);
// and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
if mi_unlikely(zero && mi_page_is_huge(page)) {