merge from dev

daan 2020-01-20 19:06:08 -08:00
commit 394a7a92ab
92 changed files with 2994 additions and 1010 deletions


@@ -34,7 +34,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
mi_page_t* page = pq->first;
while(page != NULL) {
mi_page_t* next = page->next; // save next in case the page gets removed from the queue
mi_assert_internal(page->heap == heap);
mi_assert_internal(mi_page_heap(page) == heap);
count++;
if (!fn(heap, pq, page, arg1, arg2)) return false;
page = next; // and continue
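The change above is the pattern repeated throughout this commit: direct reads of page->heap are replaced by the mi_page_heap() accessor. A minimal sketch of what such an accessor could look like, assuming the owning heap is kept in a page field named xheap (the field name and the plain read are assumptions for illustration; the real code may read it atomically):

static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
  // hypothetical: `xheap` stands in for wherever the owning heap pointer is stored
  return (mi_heap_t*)page->xheap;
}

Funneling the access through one inline function lets the representation of the heap pointer change (for example, to an atomically updated word) without touching every call site.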
@@ -50,13 +50,14 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
UNUSED(arg1);
UNUSED(arg2);
UNUSED(pq);
mi_assert_internal(page->heap == heap);
mi_assert_internal(mi_page_heap(page) == heap);
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(segment->thread_id == heap->thread_id);
mi_assert_expensive(_mi_page_is_valid(page));
return true;
}
#endif
#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
mi_assert_internal(heap!=NULL);
mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
@@ -111,20 +112,25 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
if (!mi_heap_is_initialized(heap)) return;
_mi_deferred_free(heap, collect > NORMAL);
// collect (some) abandoned pages
if (collect >= NORMAL && !heap->no_reclaim) {
if (collect == NORMAL) {
// this may free some segments (but also take ownership of abandoned pages)
_mi_segment_try_reclaim_abandoned(heap, false, &heap->tld->segments);
}
#if MI_DEBUG
else if (collect == ABANDON && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
else if (
#ifdef NDEBUG
collect == FORCE
#else
collect >= FORCE
#endif
&& _mi_is_main_thread() && mi_heap_is_backing(heap))
{
// the main thread is abandoned, try to free all abandoned segments.
// if all memory is freed by now, all segments should be freed.
_mi_segment_try_reclaim_abandoned(heap, true, &heap->tld->segments);
}
#endif
}
// if abandoning, mark all pages to no longer add to delayed_free
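As this hunk reads, reclaiming all abandoned segments on the main thread is no longer confined to debug builds: release builds (NDEBUG) run it for an explicit FORCE collection, while debug builds also run it for ABANDON (collect >= FORCE). From user code, a forced collection is triggered with the standard API:

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(64);
  mi_free(p);
  mi_collect(true);  // force: may also reclaim and free abandoned segments
  return 0;
}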
@@ -193,7 +199,7 @@ mi_heap_t* mi_heap_new(void) {
heap->tld = bheap->tld;
heap->thread_id = _mi_thread_id();
_mi_random_split(&bheap->random, &heap->random);
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->key[0] = _mi_heap_random_next(heap);
heap->key[1] = _mi_heap_random_next(heap);
heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe
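The hunk above seeds a fresh heap with a cookie (forced odd via | 1 so it is never zero) and two keys drawn from the heap's own split-off random state, and marks it no_reclaim so that destroying it stays safe. For reference, typical use of such a first-class heap through the public API:

#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 128);
  // ... use p ...
  mi_heap_destroy(heap);  // frees all blocks still owned by the heap at once
  return 0;
}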
@@ -242,28 +248,30 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
UNUSED(pq);
// ensure no more thread_delayed_free will be added
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
// stats
if (page->block_size > MI_MEDIUM_OBJ_SIZE_MAX) {
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
_mi_stat_decrease(&heap->tld->stats.large,page->block_size);
const size_t bsize = mi_page_block_size(page);
if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
_mi_stat_decrease(&heap->tld->stats.large,bsize);
}
else {
_mi_stat_decrease(&heap->tld->stats.huge, page->block_size);
_mi_stat_decrease(&heap->tld->stats.huge, bsize);
}
}
#if (MI_STAT>1)
size_t inuse = page->used - page->thread_freed;
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap,normal[_mi_bin(page->block_size)], inuse);
#if (MI_STAT>1)
_mi_page_free_collect(page, false); // update used count
const size_t inuse = page->used;
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], inuse);
}
mi_heap_stat_decrease(heap,malloc, page->block_size * inuse); // todo: off for aligned blocks...
#endif
mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
#endif
// pretend it is all free now
mi_assert_internal(page->thread_freed<=0xFFFF);
page->used = (uint16_t)page->thread_freed;
/// pretend it is all free now
mi_assert_internal(mi_page_thread_free(page) == NULL);
page->used = 0;
// and free the page
_mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);
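Two related changes show up in this destroy path: the block size is now read through the mi_page_block_size() accessor instead of page->block_size, and the in-use count under MI_STAT>1 is taken from page->used after a _mi_page_free_collect(), since the separate thread_freed counter is gone. A hypothetical shape for the accessor (the field name xblock_size is an assumption for illustration):

static inline size_t mi_page_block_size(const mi_page_t* page) {
  // sketch only; the real accessor may special-case huge pages
  return page->xblock_size;
}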
@@ -355,7 +363,7 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
mi_assert(mi_heap_is_initialized(heap));
if (!mi_heap_is_initialized(heap)) return NULL;
mi_assert_expensive(mi_heap_is_valid(heap));
mi_heap_t* old = mi_get_default_heap();
_mi_heap_set_default_direct(heap);
return old;
}
@@ -374,7 +382,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(valid);
if (mi_unlikely(!valid)) return NULL;
return _mi_segment_page_of(segment,p)->heap;
return mi_page_heap(_mi_segment_page_of(segment,p));
}
bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
@@ -390,7 +398,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
bool* found = (bool*)vfound;
mi_segment_t* segment = _mi_page_segment(page);
void* start = _mi_page_start(segment, page, NULL);
void* end = (uint8_t*)start + (page->capacity * page->block_size);
void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
*found = (p >= start && p < end);
return (!*found); // continue if not found
}
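mi_heap_page_check_owned computes the page's payload range from its capacity and block size to decide whether the pointer falls inside it. These internal helpers back the public ownership queries, which can be exercised like this:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 32);
  printf("owned: %d, contained: %d\n",
         mi_heap_check_owned(heap, p), mi_heap_contains_block(heap, p));
  mi_heap_destroy(heap);
  return 0;
}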
@@ -432,13 +440,14 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
mi_assert_internal(page->local_free == NULL);
if (page->used == 0) return true;
const size_t bsize = mi_page_block_size(page);
size_t psize;
uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
if (page->capacity == 1) {
// optimize page with one block
mi_assert_internal(page->used == 1 && page->free == NULL);
return visitor(page->heap, area, pstart, page->block_size, arg);
return visitor(mi_page_heap(page), area, pstart, bsize, arg);
}
// create a bitmap of free blocks.
@@ -451,8 +460,8 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
free_count++;
mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
size_t offset = (uint8_t*)block - pstart;
mi_assert_internal(offset % page->block_size == 0);
size_t blockidx = offset / page->block_size; // Todo: avoid division?
mi_assert_internal(offset % bsize == 0);
size_t blockidx = offset / bsize; // Todo: avoid division?
mi_assert_internal( blockidx < MI_MAX_BLOCKS);
size_t bitidx = (blockidx / sizeof(uintptr_t));
size_t bit = blockidx - (bitidx * sizeof(uintptr_t));
@@ -471,8 +480,8 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
}
else if ((m & ((uintptr_t)1 << bit)) == 0) {
used_count++;
uint8_t* block = pstart + (i * page->block_size);
if (!visitor(page->heap, area, block, page->block_size, arg)) return false;
uint8_t* block = pstart + (i * bsize);
if (!visitor(mi_page_heap(page), area, block, bsize, arg)) return false;
}
}
mi_assert_internal(page->used == used_count);
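This walk builds a temporary bitmap of the free list so that used blocks can be visited in address order; it is the machinery behind the public mi_heap_visit_blocks(). A small visitor that counts live blocks, for illustration:

#include <mimalloc.h>
#include <stdio.h>

static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) (*(size_t*)arg)++;  // block==NULL marks the per-area call
  return true;  // keep visiting
}

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 16);
  size_t count = 0;
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &count_blocks, &count);
  printf("live blocks: %zu\n", count);
  mi_free(p);
  mi_heap_destroy(heap);
  return 0;
}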
@@ -487,12 +496,13 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
UNUSED(pq);
mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
mi_heap_area_ex_t xarea;
const size_t bsize = mi_page_block_size(page);
xarea.page = page;
xarea.area.reserved = page->reserved * page->block_size;
xarea.area.committed = page->capacity * page->block_size;
xarea.area.reserved = page->reserved * bsize;
xarea.area.committed = page->capacity * bsize;
xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
xarea.area.used = page->used - page->thread_freed; // race is ok
xarea.area.block_size = page->block_size;
xarea.area.used = page->used;
xarea.area.block_size = bsize;
return fun(heap, &xarea, arg);
}
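mi_heap_visit_areas_page translates the per-page fields into the public mi_heap_area_t: reserved and committed bytes, the used block count, and the block size. When only these per-area figures are needed, mi_heap_visit_blocks() can be called with visit_all_blocks set to false; the visitor then receives one call per area with block==NULL, which makes it easy to aggregate statistics:

#include <mimalloc.h>
#include <stdio.h>

static bool sum_committed(const mi_heap_t* heap, const mi_heap_area_t* area,
                          void* block, size_t block_size, void* arg) {
  (void)heap; (void)block_size;
  if (block == NULL) *(size_t*)arg += area->committed;  // one call per area
  return true;
}

int main(void) {
  size_t committed = 0;
  mi_heap_visit_blocks(mi_heap_get_default(), false, &sum_committed, &committed);
  printf("committed in default heap: %zu bytes\n", committed);
  return 0;
}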