From a41594060431be49db816751eda2c2bfed2fdc2e Mon Sep 17 00:00:00 2001
From: daanx
Date: Sat, 4 Jan 2025 17:44:56 -0800
Subject: [PATCH] move singleton pages to the full queue at allocation time

---
 src/page.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/page.c b/src/page.c
index e1c07a93..4388dd36 100644
--- a/src/page.c
+++ b/src/page.c
@@ -990,14 +990,21 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
+  void* p;
   if mi_unlikely(zero && mi_page_is_huge(page)) {
     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
-    void* p = _mi_page_malloc(heap, page, size);
+    p = _mi_page_malloc(heap, page, size);
     mi_assert_internal(p != NULL);
     _mi_memzero_aligned(p, mi_page_usable_block_size(page));
-    return p;
   }
   else {
-    return _mi_page_malloc_zero(heap, page, size, zero);
+    p = _mi_page_malloc_zero(heap, page, size, zero);
+    mi_assert_internal(p != NULL);
   }
+  // move singleton pages to the full queue
+  if (page->reserved == page->used) {
+    mi_assert_internal(page->reserved == 1);
+    mi_page_to_full(page, mi_page_queue_of(page));
+  }
+  return p;
 }
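
Note: the core idea of the patch is that a page whose used count has reached its
reserved count has no free blocks left, and for the singleton (huge) pages this
change targets, reserved == 1, so the very first allocation fills the page. Moving
it to the full queue right away means the next call to _mi_malloc_generic will not
find an exhausted page at the head of its size-class queue. The following is a
minimal standalone sketch of that invariant, not mimalloc's actual implementation:
page_t, retire_if_full, and in_full_queue are hypothetical stand-ins for mi_page_t,
mi_page_to_full, and the real full-queue bookkeeping.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    // Hypothetical, simplified stand-in for mimalloc's mi_page_t; the real
    // struct carries free lists, thread ids, queue links, and more.
    typedef struct page_s {
      uint16_t reserved;      // number of blocks the page can hold
      uint16_t used;          // number of blocks currently allocated
      bool     in_full_queue; // stand-in for membership in the full queue
    } page_t;

    // Models the post-allocation check the patch adds: a singleton page
    // (reserved == 1) is exhausted by its first allocation, so it is retired
    // to the full queue immediately instead of lingering in its size-class
    // queue where the next allocation would have to skip past it.
    static void retire_if_full(page_t* page) {
      if (page->reserved == page->used) {
        assert(page->reserved == 1);  // only singleton pages reach this path
        page->in_full_queue = true;   // stand-in for mi_page_to_full(...)
      }
    }

    int main(void) {
      page_t huge = { .reserved = 1, .used = 0, .in_full_queue = false };
      huge.used++;                    // the allocation that fills the page
      retire_if_full(&huge);
      printf("in full queue: %s\n", huge.in_full_queue ? "yes" : "no");
      return 0;
    }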