revise asserts

daanx 2023-05-17 16:55:36 -07:00
parent 09f171b4f5
commit faac72b4ae
5 changed files with 25 additions and 13 deletions


@@ -27,6 +27,7 @@ Optional:
#define mi_track_align(p,alignedp,offset,size)
#define mi_track_resize(p,oldsize,newsize)
+#define mi_track_realloc(p,oldsize,newp,newsize)
#define mi_track_init()
`mi_track_align` is called right after `mi_track_malloc` for aligned pointers in a block.
@@ -109,6 +110,10 @@ defined, undefined, or not accessible at all:
#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
#endif
+#ifndef mi_track_realloc
+#define mi_track_realloc(p,oldsize,newp,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(newp,newsize,false)
+#endif
#ifndef mi_track_align
#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
#endif
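
For orientation, a tracker only needs to define the core hooks; the `#ifndef` defaults above derive the rest. A minimal sketch, assuming a hypothetical backend (the `my_tool_*` names are stand-ins, not mimalloc API):

    #include <stddef.h>
    #include <stdbool.h>

    // Hypothetical tracker backend; my_tool_* are stand-ins, not mimalloc API.
    void my_tool_init(void);
    void my_tool_alloc(void* p, size_t size, bool zero);
    void my_tool_free(void* p, size_t size);

    #define MI_TRACK_ENABLED  1
    #define MI_TRACK_TOOL     "my-tool"

    #define mi_track_malloc(p,size,zero)  my_tool_alloc(p,size,zero)
    #define mi_track_free_size(p,size)    my_tool_free(p,size)
    #define mi_track_init()               my_tool_init()
    // mi_track_resize and mi_track_realloc are left undefined on purpose:
    // the #ifndef defaults above expand them to a free-then-malloc pair,
    // which is adequate for trackers that only model live ranges.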


@@ -51,7 +51,7 @@ static void mi_padding_init(mi_page_t* page, mi_block_t* block, size_t size) {
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
-mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
+mi_assert_internal(page->xblock_size == 0 || mi_page_block_size(page) >= size);
mi_block_t* const block = page->free;
if mi_unlikely(block == NULL) {
return _mi_malloc_generic(heap, size, zero, 0);
@@ -883,6 +883,7 @@ static void* mi_heap_try_remap_zero(mi_heap_t* heap, mi_segment_t* segment, void
segment = mi_checked_ptr_segment(block, "mi_remap");
page = _mi_segment_page_of(segment, block);
mi_padding_init(page, block, newsize);
+mi_track_realloc(p, size, block, newsize);
if (zero) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
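
The hunk is cut off before the actual zeroing; a hypothetical reconstruction of the step the comment describes (`zero_remapped_tail` is illustrative only, not the real continuation):

    #include <stdint.h>
    #include <string.h>

    // Clear from the last word of the old allocation through the end of the
    // new size, so a partially used final word cannot leak nonzero padding
    // to a caller that requested zeroed memory.
    static void zero_remapped_tail(uint8_t* p, size_t size, size_t newsize) {
      const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      if (newsize > start) { memset(p + start, 0, newsize - start); }
    }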


@@ -91,7 +91,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arenas
{ 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
-{ 1024, UNINIT, MI_OPTION(remap_threshold) }, // size in KiB after which realloc starts using OS remap (0 to disable auto remap)
+{ 1024,UNINIT, MI_OPTION(remap_threshold) }, // size in KiB after which realloc starts using OS remap (0 to disable auto remap)
};
static bool mi_option_is_size_in_kib(mi_option_t option) {
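
Since the option is a size in KiB, it can be tuned through the regular options API; a usage sketch (`mi_option_remap_threshold` is an assumed enum name, derived from the `MI_OPTION(remap_threshold)` entry above):

    #include <mimalloc.h>

    int main(void) {
      // Assumed enum name from MI_OPTION(remap_threshold); values are in KiB,
      // so 4096 means "use OS remap for reallocs above 4 MiB", and 0 would
      // disable automatic remap entirely.
      mi_option_set(mi_option_remap_threshold, 4096);
      void* p = mi_malloc(8u * 1024 * 1024);
      p = mi_realloc(p, 16u * 1024 * 1024);  // past the threshold: may use OS remap
      mi_free(p);
      return 0;
    }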


@@ -206,7 +206,7 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(queue, page));
-mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+mi_assert_internal(page->xblock_size == queue->block_size || (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_heap_t* heap = mi_page_heap(page);
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
@@ -232,7 +232,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
#endif
mi_assert_internal((page->xblock_size == queue->block_size) ||
-(mi_page_queue_is_huge(queue)) || // not: && page->xblock_size > MI_LARGE_OBJ_SIZE_MAX since it could be due to page alignment
+(mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) || // not: && page->xblock_size > MI_LARGE_OBJ_SIZE_MAX since it could be due to page alignment
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)) );
mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -261,8 +261,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
(page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
(page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
-(page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
-(page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
+(mi_page_is_huge(page) && mi_page_queue_is_huge(from)) ||
+(mi_page_is_in_full(page) && mi_page_queue_is_full(to)));
mi_heap_t* heap = mi_page_heap(page);
if (page->prev != NULL) page->prev->next = page->next;
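
The pattern across these asserts is replacing the raw `page->xblock_size > MI_LARGE_OBJ_SIZE_MAX` test with `mi_page_is_huge(page)`, since (per the inline comment) an oversized `xblock_size` can also come from page alignment. A plausible shape for the helper, assumed from the `page_kind` checks elsewhere in this commit; the authoritative definition lives in mimalloc's internal headers:

    // Assumed: huge-ness is a property of the owning segment, not of the
    // stored block size, so alignment-inflated xblock_size values don't
    // misclassify a page.
    static inline bool mi_page_is_huge(const mi_page_t* page) {
      return (_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
    }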


@@ -157,6 +157,12 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
// mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
-mi_assert_internal(segment->page_kind == MI_PAGE_HUGE ||
-(mi_segment_page_size(segment) * segment->capacity == segment->segment_size));
+if (segment->page_kind == MI_PAGE_HUGE) {
+  mi_assert_internal(segment->capacity == 1);
+}
+else {
+  mi_assert_internal(mi_segment_page_size(segment) * segment->capacity == segment->segment_size);
+}
return true;
}
#endif
@@ -425,7 +431,7 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
}
if (page_size != NULL) *page_size = psize;
-mi_assert_internal(page->xblock_size==0 || _mi_ptr_page(p) == page);
+mi_assert_internal(_mi_ptr_page(p) == page);
mi_assert_internal(_mi_ptr_segment(p) == segment);
return p;
}
@@ -706,14 +712,14 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
page->segment_in_use = false;
// zero the page data, but preserve the leading segment fields plus capacity, reserved, and xblock_size (used for page size calculations)
-uint32_t block_size = page->xblock_size;
+uint32_t xblock_size = page->xblock_size;
uint16_t capacity = page->capacity;
uint16_t reserved = page->reserved;
ptrdiff_t ofs = offsetof(mi_page_t,capacity);
_mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
page->capacity = capacity;
page->reserved = reserved;
-page->xblock_size = block_size;
+page->xblock_size = xblock_size;
segment->used--;
// schedule purge
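
The clear uses a zero-then-restore `offsetof` pattern: wipe the struct from `capacity` onward, then put back the fields that must survive. A self-contained illustration with a made-up struct (`item_t` is illustrative only; the technique is what mirrors the hunk above):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct item_s {
      void*    owner;      // fields before `capacity` are never wiped
      uint16_t capacity;   // zeroing starts at this field...
      uint16_t reserved;
      uint32_t flags;      // ...and covers everything after it
    } item_t;

    static void item_clear(item_t* it) {
      const uint16_t capacity = it->capacity;  // save the fields that must survive
      const uint16_t reserved = it->reserved;
      const ptrdiff_t ofs = offsetof(item_t, capacity);
      memset((uint8_t*)it + ofs, 0, sizeof(*it) - ofs);  // zero the struct tail
      it->capacity = capacity;                 // restore the survivors
      it->reserved = reserved;
    }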
@@ -1235,7 +1241,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
// overallocate to accommodate large alignments.
size_t psize;
uint8_t* start = _mi_segment_page_start(segment, page, 0, &psize, NULL);
-page->xblock_size = MI_HUGE_BLOCK_SIZE; // ensure it goes into the huge page queue; (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+page->xblock_size = MI_HUGE_BLOCK_SIZE;
// reset the part of the page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
if (page_alignment >= MI_ALIGN_HUGE && segment->allow_decommit && page->is_committed) {
@@ -1314,7 +1320,7 @@ mi_block_t* _mi_segment_huge_page_expand(mi_segment_t* segment, mi_page_t* page,
size_t psize = 0;
_mi_segment_page_start(segment, page, 0, &psize, NULL);
mi_assert_internal(psize >= newsize);
-page->xblock_size = MI_HUGE_BLOCK_SIZE; // (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+page->xblock_size = MI_HUGE_BLOCK_SIZE;
return block;
}
@@ -1363,7 +1369,7 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
size_t psize = 0;
_mi_segment_page_start(newsegment, newpage, 0, &psize, NULL);
mi_assert_internal(psize >= newsize);
-newpage->xblock_size = MI_HUGE_BLOCK_SIZE; // (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+newpage->xblock_size = MI_HUGE_BLOCK_SIZE;
mi_assert_internal(mi_page_block_size(newpage) >= newsize);
_mi_heap_huge_page_attach(heap, newpage);
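
All three huge-page sites now store only the `MI_HUGE_BLOCK_SIZE` sentinel, dropping the stale clamped-`psize` comments. That is safe because for huge pages the effective block size is recomputed from the segment rather than read from `xblock_size`; roughly, as reconstructed from the asserts above (not the verbatim helper):

    // Sketch: for huge pages xblock_size is only a queue sentinel, and the
    // effective block size is the page's usable size from the segment.
    static inline size_t mi_page_block_size(const mi_page_t* page) {
      const size_t bsize = page->xblock_size;
      if (bsize < MI_HUGE_BLOCK_SIZE) {
        return bsize;  // normal pages: the stored block size is exact
      }
      else {
        size_t psize;
        _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
        return psize;
      }
    }

This is why `mi_assert_internal(mi_page_block_size(newpage) >= newsize)` can hold immediately after only the sentinel is stored.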