diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index cfbd9782..3042e6f9 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -92,7 +92,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); //
 void _mi_heap_delayed_free(mi_heap_t* heap);
 void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
 
-void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay);
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
 void _mi_deferred_free(mi_heap_t* heap, bool force);
 
diff --git a/src/heap.c b/src/heap.c
index 963cb982..5c1f8d38 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -103,7 +103,7 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
   UNUSED(arg2);
   UNUSED(heap);
   UNUSED(pq);
-  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);
+  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
   return true; // don't break
 }
 
@@ -242,7 +242,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   UNUSED(pq);
 
   // ensure no more thread_delayed_free will be added
-  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);
+  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
 
   // stats
   if (page->block_size > MI_LARGE_OBJ_SIZE_MAX) {
diff --git a/src/page.c b/src/page.c
index 78570ab0..7491bd61 100644
--- a/src/page.c
+++ b/src/page.c
@@ -119,7 +119,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
 }
 #endif
 
-void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay) {
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
   mi_thread_free_t tfree;
   mi_thread_free_t tfreex;
   mi_delayed_t old_delay;
@@ -133,11 +133,13 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay) {
     else if (delay == old_delay) {
       break; // avoid atomic operation if already equal
     }
+    else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) {
+      break; // leave never set
+    }
   } while ((old_delay == MI_DELAYED_FREEING) ||
            !mi_atomic_cas_weak(mi_atomic_cast(uintptr_t, &page->thread_free), tfreex, tfree));
 }
 
-
 /* -----------------------------------------------------------
   Page collect the `local_free` and `thread_free` lists
 ----------------------------------------------------------- */
@@ -229,9 +231,12 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_internal(page->heap == NULL);
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   mi_assert_internal(!page->is_reset);
+  mi_assert_internal(mi_tf_delayed(page->thread_free) == MI_NEVER_DELAYED_FREE);
   _mi_page_free_collect(page,false);
   mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
   mi_page_queue_push(heap, pq, page);
+  mi_assert_internal(page->heap != NULL);
+  _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE, true); // override never (after push so heap is set)
   mi_assert_expensive(_mi_page_is_valid(page));
 }
 
@@ -308,7 +313,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_is_in_full(page));
 
-  _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE);
+  _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE, false);
   if (!mi_page_is_in_full(page)) return;
 
   mi_heap_t* heap = page->heap;
@@ -324,7 +329,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(!mi_page_immediate_available(page));
   mi_assert_internal(!mi_page_is_in_full(page));
 
-  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE);
+  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
   if (mi_page_is_in_full(page)) return;
 
   mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
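The behavioral core of this patch is that MI_NEVER_DELAYED_FREE becomes sticky: _mi_page_use_delayed_free leaves an existing "never" state untouched unless the caller passes override_never, and the only caller that does so is _mi_page_reclaim, which switches the page back to MI_NO_DELAYED_FREE only after pushing it onto a queue (so page->heap is set). Below is a minimal stand-alone sketch of just that rule, using simplified, hypothetical names (delay_state_t, set_delay) rather than the real mimalloc types, which encode this state inside the packed thread_free word.

// sketch.c -- illustrative only; simplified names, not the mimalloc API.
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum {
  NO_DELAYED_FREE,     // frees go on the page-local free lists
  USE_DELAYED_FREE,    // frees go on the owning heap's thread_delayed_free list
  DELAYED_FREEING,     // a delayed free is in flight; the state must not change yet
  NEVER_DELAYED_FREE   // page is abandoned/being destroyed; never delay
} delay_state_t;

// Mirrors the rule this patch adds to _mi_page_use_delayed_free: an existing
// NEVER_DELAYED_FREE is left in place unless override_never is true.
static void set_delay(_Atomic delay_state_t* state, delay_state_t want, bool override_never) {
  for (;;) {
    delay_state_t old = atomic_load(state);
    if (old == DELAYED_FREEING) continue;                      // the real code yields until the in-flight free completes
    if (old == want) return;                                   // already as requested
    if (!override_never && old == NEVER_DELAYED_FREE) return;  // leave "never" sticky
    if (atomic_compare_exchange_weak(state, &old, want)) return;
  }
}

static const char* name(delay_state_t s) {
  switch (s) {
    case NO_DELAYED_FREE:   return "NO_DELAYED_FREE";
    case USE_DELAYED_FREE:  return "USE_DELAYED_FREE";
    case DELAYED_FREEING:   return "DELAYED_FREEING";
    default:                return "NEVER_DELAYED_FREE";
  }
}

int main(void) {
  _Atomic delay_state_t s = NEVER_DELAYED_FREE;   // e.g. set by mi_heap_page_never_delayed_free
  set_delay(&s, USE_DELAYED_FREE, false);         // like mi_page_to_full: must not undo "never"
  printf("without override: %s\n", name(atomic_load(&s)));
  set_delay(&s, NO_DELAYED_FREE, true);           // like _mi_page_reclaim: override is allowed
  printf("with override:    %s\n", name(atomic_load(&s)));
  return 0;
}

Compiled as C11, this prints NEVER_DELAYED_FREE and then NO_DELAYED_FREE: the callers that pass false (_mi_page_unfull, mi_page_to_full, the heap visitors) can no longer clobber a page marked never-delayed, while the reclaim path deliberately can.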