revert back to flat address map

daanx 2024-12-22 12:18:53 -08:00
parent c9b2d31665
commit 93fa8d895a
4 changed files with 66 additions and 263 deletions


@@ -68,14 +68,6 @@ typedef int32_t mi_ssize_t;
 #define MI_MiB (MI_KiB*MI_KiB)
 #define MI_GiB (MI_MiB*MI_KiB)
 
-#if MI_INTPTR_SIZE > 4
-#define MI_MAX_VABITS (48)
-#define MI_PAGE_MAP_FLAT 0
-#else
-#define MI_MAX_VABITS (32)
-#define MI_PAGE_MAP_FLAT 1
-#endif
-
 /* --------------------------------------------------------------------------------
   Architecture
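
Not part of the commit, but useful context for the hunk above: with 48 virtual-address bits and 64 KiB slices (the 2^16-byte slice size is an assumption taken from the comments further down in this diff), a flat one-byte-per-slice page map needs 2^(48-16) entries, i.e. about 4 GiB of virtual space, which is why it is reserved up front and committed on demand. A minimal sketch of that arithmetic, with illustrative constant names:

// Illustration only (not part of the commit): virtual size of a flat page map.
// Assumes 48 VA bits and a 64 KiB slice size (MI_ARENA_SLICE_SHIFT == 16).
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int vabits      = 48;   // MI_MAX_VABITS on 64-bit platforms
  const int slice_shift = 16;   // assumed slice shift (64 KiB slices)
  const uint64_t entries = UINT64_C(1) << (vabits - slice_shift);  // one byte per slice
  printf("flat page-map: %llu entries = %llu GiB virtual, committed on demand\n",
         (unsigned long long)entries,
         (unsigned long long)(entries >> 30));
  return 0;
}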


@@ -169,6 +169,7 @@ bool _mi_page_map_init(void);
 void _mi_page_map_register(mi_page_t* page);
 void _mi_page_map_unregister(mi_page_t* page);
 void _mi_page_map_unregister_range(void* start, size_t size);
+mi_page_t* _mi_safe_ptr_page(const void* p);
 
 // "page.c"
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
@@ -441,29 +442,18 @@ static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t si
   Pages
 ----------------------------------------------------------- */
 
-#if MI_PAGE_MAP_FLAT
-
 // flat page-map committed on demand
 extern uint8_t* _mi_page_map;
 
-static inline uintptr_t _mi_page_map_index(const void* p) {
-  return (((uintptr_t)p) >> MI_ARENA_SLICE_SHIFT);
+static inline size_t _mi_page_map_index(const void* p) {
+  return (size_t)((uintptr_t)p >> MI_ARENA_SLICE_SHIFT);
 }
 
 static inline mi_page_t* _mi_ptr_page_ex(const void* p, bool* valid) {
-  #if 1
-  const uintptr_t idx = _mi_page_map_index(p);
+  const size_t idx = _mi_page_map_index(p);
   const size_t ofs = _mi_page_map[idx];
-  if (valid != NULL) *valid = (ofs != 0);
-  return (mi_page_t*)((idx - ofs + 1) << MI_ARENA_SLICE_SHIFT);
-  #else
-  const uintptr_t idx = _mi_page_map_index(p);
-  const uintptr_t up = idx << MI_ARENA_SLICE_SHIFT;
-  __builtin_prefetch((void*)up);
-  const size_t ofs = _mi_page_map[idx];
-  if (valid != NULL) *valid = (ofs != 0);
-  return (mi_page_t*)(up - ((ofs - 1) << MI_ARENA_SLICE_SHIFT));
-  #endif
+  if (valid != NULL) { *valid = (ofs != 0); }
+  return (mi_page_t*)((((uintptr_t)p >> MI_ARENA_SLICE_SHIFT) + 1 - ofs) << MI_ARENA_SLICE_SHIFT);
 }
 
 static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
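
Not part of the commit: the flat lookup in the hunk above stores, for every slice, a one-byte distance (plus one) back to the slice where the owning page starts, and a zero byte marks an address that belongs to no page. A minimal standalone sketch of that decoding with hypothetical map contents (the 64 KiB slice size is again an assumption carried over from the comments in this diff):

// Illustration only: recovering a page start from a flat one-byte-per-slice map.
#include <stdint.h>
#include <stdio.h>

#define SLICE_SHIFT 16   // assumed slice size of 64 KiB

// page_map[idx] == 0   : address is not inside any page
// page_map[idx] == ofs : the page starts (ofs - 1) slices below slice idx
static void* ptr_page(const uint8_t* page_map, const void* p) {
  const size_t idx = (size_t)((uintptr_t)p >> SLICE_SHIFT);
  const size_t ofs = page_map[idx];
  // ofs == 0 yields a bogus address; real callers check validity separately
  return (void*)(uintptr_t)((idx - ofs + 1) << SLICE_SHIFT);
}

int main(void) {
  // hypothetical map: one page spans slices 4..6, so its entries hold 1,2,3
  uint8_t map[8] = { 0, 0, 0, 0, 1, 2, 3, 0 };
  void* p = (void*)(uintptr_t)(((uintptr_t)6 << SLICE_SHIFT) + 123);  // pointer into slice 6
  printf("page start: %p (expected %p)\n",
         ptr_page(map, p), (void*)(uintptr_t)((uintptr_t)4 << SLICE_SHIFT));
  return 0;
}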
@@ -476,49 +466,10 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
   return _mi_ptr_page_ex(p, NULL);
 }
 
-#else
-
-// 2-level page map
-// one page-map directory = 64 KiB => covers 2^16 * 2^16 = 2^32 = 4 GiB address space
-// the page-map needs 48-16-16 = 16 bits => 2^16 map directories = 2^16 * 2^3 = 2^19 = 512 KiB size.
-// we commit the page-map directories on-demand. (2^16 * 2^16 = 2^32 ~= 4 GiB needed to cover 256 TeB)
-
-#define MI_PAGE_MAP_SUB_SHIFT (16) // 64 KiB
-#define MI_PAGE_MAP_SUB_SIZE (MI_ZU(1) << MI_PAGE_MAP_SUB_SHIFT)
-#define MI_PAGE_MAP_SHIFT (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
-#define MI_PAGE_MAP_COUNT (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
-
-extern uint8_t** _mi_page_map;
-
-static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
-  const uintptr_t u = (uintptr_t)p / MI_ARENA_SLICE_SIZE;
-  if (sub_idx != NULL) { *sub_idx = (uint32_t)u % MI_PAGE_MAP_SUB_SIZE; }
-  return (size_t)(u / MI_PAGE_MAP_COUNT);
-}
-
-static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
-  const uintptr_t u = (uintptr_t)p / MI_ARENA_SLICE_SIZE;
-  const uint8_t* const sub = _mi_page_map[u / MI_PAGE_MAP_COUNT];
-  const uint8_t ofs = sub[(uint32_t)u % MI_PAGE_MAP_SUB_SIZE];
-  return (mi_page_t*)((u - ofs + 1) * MI_ARENA_SLICE_SIZE);
-}
-
-static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
-  const uintptr_t u = (uintptr_t)p / MI_ARENA_SLICE_SIZE;
-  const uint8_t* const sub = _mi_page_map[u / MI_PAGE_MAP_COUNT];
-  //if mi_unlikely(sub == NULL) { return NULL; }
-  const uint8_t ofs = sub[(uint32_t)u % MI_PAGE_MAP_SUB_SIZE];
-  //if mi_unlikely(ofs == 0) { return NULL; }
-  return (mi_page_t*)((u - ofs + 1) * MI_ARENA_SLICE_SIZE);
-}
-
-#endif
 
 static inline mi_page_t* _mi_ptr_page(const void* p) {
   mi_assert_internal(p==NULL || mi_is_in_heap_region(p));
   #if MI_DEBUG || defined(__APPLE__)
   return _mi_checked_ptr_page(p);
   #else
   return _mi_unchecked_ptr_page(p);
   #endif
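
Not part of the commit: for contrast with the flat map being reinstated, the removed 2-level scheme above splits the slice index into a directory index and a 16-bit sub-index, so only the 64 KiB sub-maps that are actually used need to be committed. A minimal sketch of that index split, using the values from the removed code's own comments (48 VA bits, 64 KiB slices and sub-maps); it assumes a 64-bit build:

// Illustration only: index split of the removed 2-level page map.
// With 48 VA bits, 64 KiB slices and 64 KiB sub-maps, MI_PAGE_MAP_COUNT and
// MI_PAGE_MAP_SUB_SIZE are both 2^16, so the directory/sub split below matches
// the removed code above.
#include <stdint.h>
#include <stdio.h>

#define SLICE_SHIFT 16                            // assumed MI_ARENA_SLICE_SHIFT
#define SUB_SHIFT   16                            // MI_PAGE_MAP_SUB_SHIFT
#define SUB_SIZE    ((uintptr_t)1 << SUB_SHIFT)   // MI_PAGE_MAP_SUB_SIZE

int main(void) {
  const uintptr_t p   = (uintptr_t)0x00007f1234560000ull;  // hypothetical address
  const uintptr_t u   = p >> SLICE_SHIFT;                  // slice index
  const size_t    dir = (size_t)(u / SUB_SIZE);            // which 64 KiB sub-map
  const size_t    sub = (size_t)(u % SUB_SIZE);            // entry within the sub-map
  printf("slice %#llx -> directory %zu, sub-index %zu\n",
         (unsigned long long)u, dir, sub);
  // 2^16 directory entries * 8-byte pointers = 512 KiB top level, as in the removed comment
  printf("top-level size: %zu KiB\n", ((size_t)1 << 16) * sizeof(void*) / 1024);
  return 0;
}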
@@ -637,7 +588,7 @@ static inline bool mi_page_immediate_available(const mi_page_t* page) {
   return (page->free != NULL);
 }
 
 // is the page not yet used up to its reserved space?
 static inline bool mi_page_is_expandable(const mi_page_t* page) {
   mi_assert_internal(page != NULL);