From caa5e51a67dd7c1a6efe0393a4f78986d2c9e547 Mon Sep 17 00:00:00 2001 From: Daan Leijen Date: Wed, 22 Jan 2020 11:29:32 -0800 Subject: [PATCH] align size of page_t, increase slices per segment --- include/mimalloc-types.h | 32 ++++++++++++++++++-------------- src/init.c | 3 +++ src/page.c | 4 ++-- src/segment.c | 1 + 4 files changed, 24 insertions(+), 16 deletions(-) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 6685b5a7..661e2856 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -89,7 +89,7 @@ terms of the MIT license. A copy of the license can be found in the file // Main tuning parameters for segment and page sizes // Sizes for 64-bit, divide by two for 32-bit #define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64kb -#define MI_SEGMENT_SHIFT ( 8 + MI_SEGMENT_SLICE_SHIFT) // 64mb +#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32mb #define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64kb #define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512kb @@ -104,7 +104,7 @@ terms of the MIT license. 
A copy of the license can be found in the file #define MI_SMALL_PAGE_SIZE (1ULL<0`) - mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) + mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) volatile _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads volatile _Atomic(uintptr_t) xheap; - - struct mi_page_s* next; // next page owned by this thread with the same `block_size` - struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` + struct mi_page_s* next; // next page owned by this thread with the same `block_size` + struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` + + // 64-bit 9 words, 32-bit 12 words, (+2 for secure) + #if MI_INTPTR_SIZE==8 + uintptr_t padding[1]; + #endif } mi_page_t; diff --git a/src/init.c b/src/init.c index a0873615..e77185ff 100644 --- a/src/init.c +++ b/src/init.c @@ -28,6 +28,9 @@ const mi_page_t _mi_page_empty = { ATOMIC_VAR_INIT(0), // xthread_free ATOMIC_VAR_INIT(0), // xheap NULL, NULL + #if MI_INTPTR_SIZE==8 + , { 0 } // padding + #endif }; #define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty) diff --git a/src/page.c b/src/page.c index 13706100..5b2a85f7 100644 --- a/src/page.c +++ b/src/page.c @@ -74,10 +74,10 @@ static bool mi_page_is_valid_init(mi_page_t* page) { mi_assert_internal(page->used <= page->capacity); mi_assert_internal(page->capacity <= page->reserved); - const size_t bsize = mi_page_block_size(page); mi_segment_t* segment = _mi_page_segment(page); uint8_t* start = _mi_page_start(segment,page,NULL); mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL)); + //const size_t bsize = mi_page_block_size(page); //mi_assert_internal(start + page->capacity*page->block_size == page->top); mi_assert_internal(mi_page_list_is_valid(page,page->free)); @@ -86,7 +86,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) { 
#if MI_DEBUG>3 // generally too expensive to check this if (page->flags.is_zero) { for(mi_block_t* block = page->free; block != NULL; mi_block_next(page,block)) { - mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t))); + mi_assert_expensive(mi_mem_is_zero(block + 1, bsize - sizeof(mi_block_t))); } } #endif diff --git a/src/segment.c b/src/segment.c index b3a33d60..22757968 100644 --- a/src/segment.c +++ b/src/segment.c @@ -458,6 +458,7 @@ static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_st mask >>= 1; idx++; } + mi_assert_internal(segment->decommit_mask == 0); } static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {