Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-08 00:09:31 +03:00)
better block alignment

commit 64c4181ffa (parent c478ddaab4)
4 changed files with 23 additions and 12 deletions
include/mimalloc/internal.h

@@ -479,11 +479,16 @@ static inline uint8_t* mi_page_start(const mi_page_t* page) {
   return page->page_start;
 }
 
 static inline uint8_t* mi_page_area(const mi_page_t* page, size_t* size) {
   if (size) { *size = mi_page_block_size(page) * page->reserved; }
   return mi_page_start(page);
 }
 
+static inline size_t mi_page_info_size(void) {
+  return _mi_align_up(sizeof(mi_page_t), MI_MAX_ALIGN_SIZE);
+}
+
 static inline bool mi_page_contains_address(const mi_page_t* page, const void* p) {
   size_t psize;
   uint8_t* start = mi_page_area(page, &psize);
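Note: the new mi_page_info_size() helper computes, at run time, sizeof(mi_page_t) rounded up to MI_MAX_ALIGN_SIZE, so the first block can start right after the page metadata instead of at the looser compile-time MI_PAGE_INFO_SIZE bound. A minimal standalone sketch of that rounding, assuming the usual power-of-two align-up idiom and a 16-byte MI_MAX_ALIGN_SIZE; the names below are illustrative stand-ins, not mimalloc's:

#include <stddef.h>
#include <stdio.h>

// Illustrative stand-in; mimalloc's MI_MAX_ALIGN_SIZE is typically 16.
#define MAX_ALIGN_SIZE  16

// Power-of-two align-up, the rounding _mi_align_up performs for such alignments.
static size_t align_up(size_t sz, size_t alignment) {
  return (sz + alignment - 1) & ~(alignment - 1);
}

int main(void) {
  // e.g. a page struct of 113 or 121 bytes is padded to a 128-byte info area
  for (size_t sz = 113; sz <= 129; sz += 8) {
    printf("sizeof(mi_page_t) = %3zu  ->  info size = %zu\n", sz, align_up(sz, MAX_ALIGN_SIZE));
  }
  return 0;
}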
include/mimalloc/types.h

@@ -323,13 +323,14 @@ typedef struct mi_page_s {
 // ------------------------------------------------------
 
 #define MI_PAGE_ALIGN                   MI_ARENA_SLICE_ALIGN  // pages must be aligned on this for the page map.
-#define MI_PAGE_MIN_BLOCK_ALIGN         MI_SIZE_BITS          // minimal block alignment in a page (64b on 64-bit, 32b on 32-bit)
+#define MI_PAGE_MIN_START_BLOCK_ALIGN   MI_MAX_ALIGN_SIZE     // minimal block alignment for the first block in a page (16b)
+#define MI_PAGE_MAX_START_BLOCK_ALIGN2  MI_KiB                // maximal block alignment for "power of 2"-sized blocks
 #define MI_PAGE_MAX_OVERALLOC_ALIGN     MI_ARENA_SLICE_SIZE   // (64 KiB) limit for which we overallocate in arena pages, beyond this use OS allocation
 
 #if (MI_ENCODE_FREELIST || MI_PADDING) && MI_SIZE_SIZE == 8
-#define MI_PAGE_INFO_SIZE   ((MI_INTPTR_SHIFT+2)*MI_PAGE_MIN_BLOCK_ALIGN)  // >= sizeof(mi_page_t)
+#define MI_PAGE_INFO_SIZE   ((MI_INTPTR_SHIFT+2)*32)  // 160 >= sizeof(mi_page_t)
 #else
-#define MI_PAGE_INFO_SIZE   ((MI_INTPTR_SHIFT+1)*MI_PAGE_MIN_BLOCK_ALIGN)  // >= sizeof(mi_page_t)
+#define MI_PAGE_INFO_SIZE   ((MI_INTPTR_SHIFT+1)*32)  // 128/96 >= sizeof(mi_page_t)
 #endif
 
 // The max object size are checked to not waste more than 12.5% internally over the page sizes.
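Note: the arithmetic behind the new 160 and 128/96 comments, assuming MI_INTPTR_SHIFT is 3 on 64-bit builds and 2 on 32-bit builds, and that the encoded-freelist/padding case only applies when MI_SIZE_SIZE == 8. A small compile-time check with plain numbers standing in for the mimalloc macros:

#include <assert.h>

// Stand-ins for MI_INTPTR_SHIFT (3 on 64-bit, 2 on 32-bit).
#define SHIFT_64  3
#define SHIFT_32  2

// encoded free list / padding, 64-bit: (3+2)*32 = 160 bytes
static_assert((SHIFT_64 + 2) * 32 == 160, "secure 64-bit info size");
// plain builds: (3+1)*32 = 128 bytes on 64-bit, (2+1)*32 = 96 bytes on 32-bit
static_assert((SHIFT_64 + 1) * 32 == 128, "64-bit info size");
static_assert((SHIFT_32 + 1) * 32 == 96,  "32-bit info size");

int main(void) { return 0; }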
src/alloc-aligned.c

@@ -20,7 +20,9 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
   mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
   if (alignment > size) return false;
   const size_t bsize = mi_good_size(size);
-  return (bsize <= MI_PAGE_MIN_BLOCK_ALIGN && (bsize & (alignment-1)) == 0);
+  const bool ok = (bsize <= MI_PAGE_MAX_START_BLOCK_ALIGN2 && _mi_is_power_of_two(bsize));
+  if (ok) { mi_assert_internal((bsize & (alignment-1)) == 0); }  // since both power of 2 and alignment <= size
+  return ok;
 }
 
 #if MI_GUARDED
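Note: the rewritten check leans on a small arithmetic fact: if bsize is a power of two and alignment is a power of two no larger than size (and hence no larger than bsize), then bsize is a multiple of alignment, so a block placed at a bsize-aligned start is automatically alignment-aligned. A hedged standalone sketch of that reasoning; good_size here is a hypothetical next-power-of-two rounding, not mimalloc's real size classes, and 1024 stands in for MI_PAGE_MAX_START_BLOCK_ALIGN2:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool is_pow2(size_t x) { return x != 0 && (x & (x - 1)) == 0; }

// Hypothetical stand-in for mi_good_size: round up to the next power of two.
static size_t good_size(size_t size) {
  size_t b = 8;
  while (b < size) b *= 2;
  return b;
}

static bool naturally_aligned(size_t size, size_t alignment) {
  assert(is_pow2(alignment) && alignment > 0);
  if (alignment > size) return false;
  const size_t bsize = good_size(size);
  const bool ok = (bsize <= 1024 /* MI_PAGE_MAX_START_BLOCK_ALIGN2 */ && is_pow2(bsize));
  if (ok) { assert((bsize & (alignment - 1)) == 0); }  // power of two and alignment <= size <= bsize
  return ok;
}

int main(void) {
  printf("%d\n", naturally_aligned(48, 16));    // bsize = 64, a multiple of 16  -> 1
  printf("%d\n", naturally_aligned(2000, 64));  // bsize = 2048 > 1 KiB cap      -> 0
  return 0;
}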
19 src/arena.c

@@ -621,25 +621,28 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
     }
   }
   #endif
-  if (MI_PAGE_INFO_SIZE < _mi_align_up(sizeof(*page), MI_PAGE_MIN_BLOCK_ALIGN)) {
-    _mi_error_message(EFAULT, "fatal internal error: MI_PAGE_INFO_SIZE is too small.\n");
-  };
+  mi_assert(MI_PAGE_INFO_SIZE >= mi_page_info_size());
   size_t block_start;
   #if MI_GUARDED
-  // in a guarded build, we aling pages with blocks a multiple of an OS page size, to the OS page size
+  // in a guarded build, we align pages with blocks a multiple of an OS page size, to the OS page size
   // this ensures that all blocks in such pages are OS page size aligned (which is needed for the guard pages)
   const size_t os_page_size = _mi_os_page_size();
   mi_assert_internal(MI_PAGE_ALIGN >= os_page_size);
-  if (block_size % os_page_size == 0) {
-    block_start = _mi_align_up(MI_PAGE_INFO_SIZE, os_page_size);
+  if (block_size % os_page_size == 0 && block_size > os_page_size /* at least 2 or more */ ) {
+    block_start = _mi_align_up(mi_page_info_size(), os_page_size);
   }
   else
   #endif
   if (os_align) {
     block_start = MI_PAGE_ALIGN;
   }
+  else if (_mi_is_power_of_two(block_size) && block_size <= MI_PAGE_MAX_START_BLOCK_ALIGN2) {
+    // naturally align all power-of-2 blocks
+    block_start = _mi_align_up(mi_page_info_size(), block_size);
+  }
   else {
-    block_start = MI_PAGE_INFO_SIZE;
+    // otherwise start after the info
+    block_start = mi_page_info_size();
   }
   const size_t reserved = (os_align ? 1 : (mi_size_of_slices(slice_count) - block_start) / block_size);
   mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
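Note: taken together, the new branches pick block_start so that power-of-two block sizes (up to 1 KiB) start at an offset that is a multiple of their own size, over-aligned singleton pages start at the page alignment, and everything else starts right after the page info. A simplified sketch of that selection under stated assumptions: the guarded-build branch is omitted, and fixed numbers stand in for MI_PAGE_ALIGN, MI_PAGE_MAX_START_BLOCK_ALIGN2, and mi_page_info_size():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_ALIGN              (64 * 1024)  // stand-in for MI_PAGE_ALIGN
#define MAX_START_BLOCK_ALIGN2  1024         // stand-in for MI_PAGE_MAX_START_BLOCK_ALIGN2
#define PAGE_INFO_SIZE          128          // stand-in for mi_page_info_size()

static bool   is_pow2(size_t x)                 { return x != 0 && (x & (x - 1)) == 0; }
static size_t align_up(size_t sz, size_t align) { return (sz + align - 1) & ~(align - 1); }

// Simplified version of the block_start selection in mi_arena_page_alloc_fresh
// (guarded-build branch omitted).
static size_t choose_block_start(size_t block_size, bool os_align) {
  if (os_align) {
    return PAGE_ALIGN;                             // over-aligned: first block at the page alignment
  }
  else if (is_pow2(block_size) && block_size <= MAX_START_BLOCK_ALIGN2) {
    return align_up(PAGE_INFO_SIZE, block_size);   // naturally align power-of-2 blocks
  }
  else {
    return PAGE_INFO_SIZE;                         // otherwise start right after the info
  }
}

int main(void) {
  printf("%zu\n", choose_block_start(512, false));  // 512: rounded up from the 128-byte info
  printf("%zu\n", choose_block_start(48,  false));  // 128: not a power of two, start after the info
  return 0;
}

With a 128-byte info area, a 512-byte block size yields block_start = 512, so every block in the page is 512-aligned; non-power-of-two sizes keep the old behavior of starting directly after the page info.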
@@ -691,7 +694,7 @@ static mi_page_t* mi_singleton_page_alloc(mi_heap_t* heap, size_t block_size, si
   const mi_arena_id_t req_arena_id = heap->arena_id;
   mi_tld_t* const tld = heap->tld;
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
-  const size_t info_size = (os_align ? MI_PAGE_ALIGN : MI_PAGE_INFO_SIZE);
+  const size_t info_size = (os_align ? MI_PAGE_ALIGN : mi_page_info_size());
   const size_t slice_count = mi_slice_count_of_size(info_size + block_size);
 
   mi_page_t* page = mi_arena_page_alloc_fresh(slice_count, block_size, block_alignment, req_arena_id, tld);