better block alignment

daanx 2024-12-10 20:32:48 -08:00
parent c478ddaab4
commit 64c4181ffa
4 changed files with 23 additions and 12 deletions


@@ -479,11 +479,16 @@ static inline uint8_t* mi_page_start(const mi_page_t* page) {
  return page->page_start;
}
static inline uint8_t* mi_page_area(const mi_page_t* page, size_t* size) {
  if (size) { *size = mi_page_block_size(page) * page->reserved; }
  return mi_page_start(page);
}
static inline size_t mi_page_info_size(void) {
  return _mi_align_up(sizeof(mi_page_t), MI_MAX_ALIGN_SIZE);
}
static inline bool mi_page_contains_address(const mi_page_t* page, const void* p) {
  size_t psize;
  uint8_t* start = mi_page_area(page, &psize);

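The helpers above round the page meta-data size up to MI_MAX_ALIGN_SIZE and query the usable block area of a page. As a minimal standalone sketch of the same two ideas (not mimalloc source; align_up, page_t, and page_contains_address are illustrative stand-ins, with align_up playing the role of _mi_align_up):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// round `sz` up to a multiple of `alignment`; `alignment` must be a power of two
static inline size_t align_up(size_t sz, size_t alignment) {
  const size_t mask = alignment - 1;
  return (sz + mask) & ~mask;
}

// minimal stand-in for the page meta-data consulted by the helpers above
typedef struct page_s {
  uint8_t* page_start;   // address of the first block in the page
  size_t   block_size;   // size of each block in bytes
  size_t   reserved;     // number of blocks reserved in the page
} page_t;

// does the page area (block_size * reserved bytes from page_start) contain `p`?
static inline bool page_contains_address(const page_t* page, const void* p) {
  const uint8_t* start = page->page_start;
  const size_t   psize = page->block_size * page->reserved;
  const uint8_t* up    = (const uint8_t*)p;
  return (up >= start && up < start + psize);
}

For example, align_up(sizeof(page_t), 16) rounds the meta-data size up to a 16-byte boundary, analogous to mi_page_info_size() rounding sizeof(mi_page_t) up to MI_MAX_ALIGN_SIZE (16 bytes, per the comment in the next hunk).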

@@ -323,13 +323,14 @@ typedef struct mi_page_s {
// ------------------------------------------------------
#define MI_PAGE_ALIGN MI_ARENA_SLICE_ALIGN // pages must be aligned on this for the page map.
#define MI_PAGE_MIN_BLOCK_ALIGN MI_SIZE_BITS // minimal block alignment in a page (64b on 64-bit, 32b on 32-bit)
#define MI_PAGE_MIN_START_BLOCK_ALIGN MI_MAX_ALIGN_SIZE // minimal block alignment for the first block in a page (16b)
#define MI_PAGE_MAX_START_BLOCK_ALIGN2 MI_KiB // maximal block alignment for "power of 2"-sized blocks
#define MI_PAGE_MAX_OVERALLOC_ALIGN MI_ARENA_SLICE_SIZE // (64 KiB) limit for which we overallocate in arena pages, beyond this use OS allocation
#if (MI_ENCODE_FREELIST || MI_PADDING) && MI_SIZE_SIZE == 8
-#define MI_PAGE_INFO_SIZE ((MI_INTPTR_SHIFT+2)*MI_PAGE_MIN_BLOCK_ALIGN) // >= sizeof(mi_page_t)
+#define MI_PAGE_INFO_SIZE ((MI_INTPTR_SHIFT+2)*32) // 160 >= sizeof(mi_page_t)
#else
-#define MI_PAGE_INFO_SIZE ((MI_INTPTR_SHIFT+1)*MI_PAGE_MIN_BLOCK_ALIGN) // >= sizeof(mi_page_t)
+#define MI_PAGE_INFO_SIZE ((MI_INTPTR_SHIFT+1)*32) // 128/96 >= sizeof(mi_page_t)
#endif
// The max object sizes are checked to not waste more than 12.5% internally over the page sizes.
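A quick arithmetic check of the two MI_PAGE_INFO_SIZE values above (an illustrative sketch, not part of the commit; INTPTR_SHIFT is a stand-in for MI_INTPTR_SHIFT, which is 3 on 64-bit and 2 on 32-bit targets):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// stand-in for MI_INTPTR_SHIFT: log2 of the pointer size in bytes
#define INTPTR_SHIFT  (sizeof(intptr_t) == 8 ? 3 : 2)

int main(void) {
  const size_t info_encoded = (INTPTR_SHIFT + 2) * 32;  // 5*32 = 160 on 64-bit
  const size_t info_plain   = (INTPTR_SHIFT + 1) * 32;  // 4*32 = 128 on 64-bit, 3*32 = 96 on 32-bit
  assert(sizeof(void*) != 8 || (info_encoded == 160 && info_plain == 128));
  assert(sizeof(void*) != 4 || info_plain == 96);
  (void)info_encoded; (void)info_plain;
  return 0;
}

Both results are multiples of 32; the changed lines above replace the MI_PAGE_MIN_BLOCK_ALIGN multiplier (64 bytes on 64-bit, per its comment) with a literal 32, which yields the 160 and 128/96 byte sizes noted in the comments.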