fix debug build

daan 2020-01-16 03:54:51 -08:00
parent 9629d73188
commit b8072aaacb
2 changed files with 22 additions and 19 deletions

src/heap.c

@@ -56,7 +56,8 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   mi_assert_expensive(_mi_page_is_valid(page));
   return true;
 }
+#endif
+#if MI_DEBUG>=3
 static bool mi_heap_is_valid(mi_heap_t* heap) {
   mi_assert_internal(heap!=NULL);
   mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
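
The hunk above gives mi_heap_is_valid its own MI_DEBUG>=3 block instead of sharing one with mi_heap_page_is_valid. Below is a minimal sketch of the gating pattern involved, assuming (as in mimalloc) that mi_assert_internal is compiled in at MI_DEBUG>=2 and mi_assert_expensive at MI_DEBUG>=3; the names are illustrative, not mimalloc source:

  #include <stdbool.h>

  #define MI_DEBUG 2                    /* a plain debug build */

  #if MI_DEBUG >= 2
  static bool page_is_valid(void) {     /* cheap check, available at level 2 */
    return true;
  }
  #endif

  #if MI_DEBUG >= 3
  static bool heap_is_valid(void) {     /* expensive check, level 3 only */
    return page_is_valid();             /* fine: level 3 implies level 2 */
  }
  #endif

  /* If a helper is defined only at MI_DEBUG>=3 but referenced by code that
     is compiled at MI_DEBUG>=2, an MI_DEBUG=2 build fails -- the kind of
     breakage a "fix debug build" commit addresses. */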
@@ -111,7 +112,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
   if (!mi_heap_is_initialized(heap)) return;
   _mi_deferred_free(heap, collect > NORMAL);

   // collect (some) abandoned pages
   if (collect >= NORMAL && !heap->no_reclaim) {
     if (collect == NORMAL) {
@@ -123,8 +124,8 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
       collect == FORCE
   #else
       collect >= FORCE
   #endif
     && _mi_is_main_thread() && mi_heap_is_backing(heap))
   {
     // the main thread is abandoned, try to free all abandoned segments.
     // if all memory is freed by now, all segments should be freed.
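
The comparisons `collect > NORMAL` and `collect >= FORCE` in these hunks depend on the collect kinds forming an ordered enum. A sketch of that idiom; the enumerators are assumed from their use here, and mimalloc's actual definition may differ:

  typedef enum mi_collect_e {
    NORMAL,    /* regular collection */
    FORCE,     /* collect aggressively */
    ABANDON    /* abandon the heap's pages (e.g. at thread termination) */
  } mi_collect_t;

  /* With this ordering, `collect > NORMAL` reads as "more than a normal
     collection" and `collect >= FORCE` covers both FORCE and ABANDON. */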
@@ -135,19 +136,19 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // if abandoning, mark all pages to no longer add to delayed_free
   if (collect == ABANDON) {
     //for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
     //  _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
     //}
     mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
   }

   // free thread delayed blocks.
   // (if abandoning, after this there are no more local references into the pages.)
   _mi_heap_delayed_free(heap);

   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != ABANDON || heap->thread_delayed_free == NULL );

   // collect segment caches
   if (collect >= FORCE) {
     _mi_segment_thread_collect(&heap->tld->segments);
@@ -177,7 +178,7 @@ void mi_collect(bool force) mi_attr_noexcept {
 ----------------------------------------------------------- */

 mi_heap_t* mi_heap_get_default(void) {
   mi_thread_init();
   return mi_get_default_heap();
 }
@@ -198,7 +199,7 @@ mi_heap_t* mi_heap_new(void) {
   heap->tld = bheap->tld;
   heap->thread_id = _mi_thread_id();
   _mi_random_split(&bheap->random, &heap->random);
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->key[0] = _mi_heap_random_next(heap);
   heap->key[1] = _mi_heap_random_next(heap);
   heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
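
Note the `| 1` when initializing heap->cookie above. A short sketch of the idiom, under the assumption that the cookie is later compared against derived values and therefore must never be zero:

  #include <stdint.h>

  /* Forcing the low bit makes the cookie nonzero (and odd), so a
     zero-initialized or wiped heap cannot pass a cookie check by accident. */
  static uintptr_t make_cookie(uintptr_t random_value) {
    return random_value | 1;
  }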
@@ -226,7 +227,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
 static void mi_heap_free(mi_heap_t* heap) {
   mi_assert_internal(mi_heap_is_initialized(heap));
   if (mi_heap_is_backing(heap)) return; // dont free the backing heap

   // reset default
   if (mi_heap_is_default(heap)) {
     _mi_heap_set_default_direct(heap->tld->heap_backing);
@@ -247,7 +248,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   UNUSED(pq);

   // ensure no more thread_delayed_free will be added
   _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

   // stats
   const size_t bsize = mi_page_block_size(page);
@@ -311,7 +312,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   if (from==NULL || from->page_count == 0) return;

   // unfull all full pages in the `from` heap
   mi_page_t* page = from->pages[MI_BIN_FULL].first;
   while (page != NULL) {
     mi_page_t* next = page->next;
     _mi_page_unfull(page);
@@ -323,7 +324,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   _mi_heap_delayed_free(from);

   // transfer all pages by appending the queues; this will set
   // a new heap field which is ok as all pages are unfull'd and thus
   // other threads won't access this field anymore (see `mi_free_block_mt`)
   for (size_t i = 0; i < MI_BIN_FULL; i++) {
     mi_page_queue_t* pq = &heap->pages[i];
@@ -334,7 +335,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   }
   mi_assert_internal(from->thread_delayed_free == NULL);
   mi_assert_internal(from->page_count == 0);

   // and reset the `from` heap
   mi_heap_reset_pages(from);
 }
@@ -362,7 +363,7 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
   mi_assert(mi_heap_is_initialized(heap));
   if (!mi_heap_is_initialized(heap)) return NULL;
   mi_assert_expensive(mi_heap_is_valid(heap));
   mi_heap_t* old = mi_get_default_heap();
   _mi_heap_set_default_direct(heap);
   return old;
 }
@@ -534,4 +535,3 @@ bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_vis
   mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
   return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
 }
-
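
The final hunk shows mi_heap_visit_blocks bundling the user's visitor and its argument into a mi_visit_blocks_args_t before delegating to the generic mi_heap_visit_areas. A minimal sketch of that adapter pattern; all names below are illustrative, not mimalloc's API:

  #include <stdbool.h>
  #include <stddef.h>

  typedef bool (block_visit_fun)(void* block, void* arg);

  typedef struct visit_args_s {       /* packs callback + state into one slot */
    bool visit_blocks;
    block_visit_fun* visitor;
    void* arg;
  } visit_args_t;

  /* the generic walker only offers a single void* of user state */
  static bool area_visitor(void* area, void* varg) {
    visit_args_t* args = (visit_args_t*)varg;
    if (!args->visit_blocks) return true;    /* caller asked for areas only */
    return args->visitor(area, args->arg);   /* forward to the user callback */
  }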

src/segment.c

@@ -135,7 +135,7 @@ static size_t mi_segment_page_size(const mi_segment_t* segment) {
 }

-#if (MI_DEBUG>=3)
+#if (MI_DEBUG>=2)
 static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tld) {
   mi_page_t* p = tld->pages_reset.first;
   while (p != NULL) {
@@ -144,7 +144,9 @@ static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tl
   }
   return false;
 }
+#endif
+#if (MI_DEBUG>=3)
 static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(segment != NULL);
   mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
@@ -169,6 +171,7 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
 #endif

 static bool mi_page_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) {
+  mi_assert_internal(page != NULL);
   if (page->next != NULL || page->prev != NULL) {
     mi_assert_internal(mi_pages_reset_contains(page, tld));
     return false;
@@ -1052,6 +1055,6 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_
   mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
   mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
   mi_reset_delayed(tld);
-  mi_assert_internal(mi_page_not_in_queue(page, tld));
+  mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
   return page;
 }
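
Both segment.c assertion changes apply the same rule: an assertion must not dereference a pointer that can legitimately be NULL on that path. A minimal sketch of the pattern, with illustrative types rather than mimalloc's:

  #include <assert.h>
  #include <stdbool.h>
  #include <stddef.h>

  typedef struct page_s { struct page_s* next; struct page_s* prev; } page_t;

  static bool page_not_in_queue(const page_t* page) {
    assert(page != NULL);                 /* precondition, as added above */
    return (page->next == NULL && page->prev == NULL);
  }

  static page_t* checked_alloc_result(page_t* page) {
    /* page may be NULL when allocation failed; the short-circuit keeps the
       assertion well-defined instead of passing NULL to the helper */
    assert(page == NULL || page_not_in_queue(page));
    return page;
  }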