diff --git a/src/alloc.c b/src/alloc.c
index 91c13c31..2dd0ae9b 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -115,6 +115,21 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_thread_free_t tfreex;
   bool use_delayed;
 
+  mi_segment_t* segment = _mi_page_segment(page);
+  if (segment->kind==MI_SEGMENT_HUGE) {
+    // huge page segments are always abandoned and can be freed immediately
+    mi_assert_internal(segment->thread_id==0);
+    mi_assert_internal(segment->abandoned_next==NULL);
+    // claim it and immediately free it
+    mi_block_set_next(page, block, page->free);
+    page->free = block;
+    page->used--;
+    mi_heap_t* heap = mi_get_default_heap();
+    segment->thread_id = heap->thread_id;
+    _mi_segment_page_free(page,true,&heap->tld->segments);
+    return;
+  }
+
   do {
     tfree = page->thread_free;
     use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE ||
diff --git a/src/page-queue.c b/src/page-queue.c
index 0fb46ffa..8d4b4e51 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -268,6 +268,7 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
 static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page->heap == NULL);
   mi_assert_internal(!mi_page_queue_contains(queue, page));
+  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   mi_assert_internal(page->block_size == queue->block_size ||
                      (page->block_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
diff --git a/src/page.c b/src/page.c
index 7e7bd5aa..313bb66a 100644
--- a/src/page.c
+++ b/src/page.c
@@ -98,10 +98,12 @@ bool _mi_page_is_valid(mi_page_t* page) {
   if (page->heap!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);
     mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == page->heap->thread_id);
-    mi_page_queue_t* pq = mi_page_queue_of(page);
-    mi_assert_internal(mi_page_queue_contains(pq, page));
-    mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
-    mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
+    if (segment->kind != MI_SEGMENT_HUGE) {
+      mi_page_queue_t* pq = mi_page_queue_of(page);
+      mi_assert_internal(mi_page_queue_contains(pq, page));
+      mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+      mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
+    }
   }
   return true;
 }
@@ -207,6 +209,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
   mi_assert_internal(page->heap == NULL);
+  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   _mi_page_free_collect(page,false);
   mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
   mi_page_queue_push(heap, pq, page);
@@ -215,12 +218,13 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
 
 // allocate a fresh page from a segment
 static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
-  mi_assert_internal(mi_heap_contains_queue(heap, pq));
+  mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
   mi_page_t* page = _mi_segment_page_alloc(block_size, &heap->tld->segments, &heap->tld->os);
   if (page == NULL) return NULL;
+  mi_assert_internal(pq==NULL || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   mi_page_init(heap, page, block_size, &heap->tld->stats);
   _mi_stat_increase( &heap->tld->stats.pages, 1);
-  mi_page_queue_push(heap, pq, page);
+  if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
   mi_assert_expensive(_mi_page_is_valid(page));
   return page;
 }
@@ -701,16 +705,26 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
   General allocation
 ----------------------------------------------------------- */
 
-// Large and huge pages are allocated directly without being in a queue
+// Large and huge pages are allocated directly
 static mi_page_t* mi_large_page_alloc(mi_heap_t* heap, size_t size) {
   size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
-  mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
-  mi_page_queue_t* pq = mi_page_queue(heap,block_size);
-  mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
+  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX);
+  mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap,block_size));
   mi_page_t* page = mi_page_fresh_alloc(heap,pq,block_size);
   if (page != NULL) {
     mi_assert_internal(mi_page_immediate_available(page));
     mi_assert_internal(page->block_size == block_size);
+    if (pq == NULL) {
+      // huge pages are directly abandoned
+      mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+      mi_assert_internal(_mi_page_segment(page)->used==1);
+      mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
+      page->heap = NULL;
+    }
+    else {
+      mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+    }
     if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
       _mi_stat_increase(&heap->tld->stats.large, block_size);
       _mi_stat_counter_increase(&heap->tld->stats.large_count, 1);
@@ -719,7 +733,7 @@ static mi_page_t* mi_large_page_alloc(mi_heap_t* heap, size_t size) {
       _mi_stat_increase(&heap->tld->stats.huge, block_size);
       _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
     }
-  }
+  }
   return page;
 }
 
diff --git a/src/segment.c b/src/segment.c
index 54de294f..4a52f5a4 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -915,6 +915,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
   if (segment == NULL || page==NULL) return NULL;
  mi_assert_internal(segment->used==1);
   mi_assert_internal(page->block_size >= size);
+  segment->thread_id = 0; // huge segments are immediately abandoned
   return page;
 }
 
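Reviewer note (not part of the patch): a minimal caller-side sketch of the behavior this change enables. Because mi_segment_huge_page_alloc now marks huge segments as abandoned at allocation (segment->thread_id = 0) and _mi_free_block_mt frees them eagerly, a huge block freed from a foreign thread releases its segment immediately instead of parking on the owning heap's delayed-free list. The sketch uses only the public mimalloc API (mi_malloc/mi_free); the 512 MiB size is an assumption picked to exceed MI_LARGE_OBJ_SIZE_MAX so the allocation lands in a MI_SEGMENT_HUGE segment.

  // sketch: cross-thread free of a huge allocation (illustrative, not in this patch)
  #include <mimalloc.h>
  #include <pthread.h>
  #include <stdio.h>

  static void* free_from_other_thread(void* p) {
    mi_free(p);  // takes the new MI_SEGMENT_HUGE fast path in _mi_free_block_mt
    return NULL;
  }

  int main(void) {
    void* p = mi_malloc((size_t)512 * 1024 * 1024);  // assumed > MI_LARGE_OBJ_SIZE_MAX
    if (p == NULL) return 1;
    pthread_t t;
    pthread_create(&t, NULL, free_from_other_thread, p);
    pthread_join(t, NULL);  // the huge segment has already been freed at this point
    printf("huge block freed from a foreign thread\n");
    return 0;
  }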