merge from dev3

This commit is contained in:
daanx 2024-12-22 14:40:46 -08:00
commit 6b97830f6a
8 changed files with 327 additions and 84 deletions

View file

@ -107,6 +107,26 @@ typedef int32_t mi_ssize_t;
// Define big endian if needed
// #define MI_BIG_ENDIAN 1
// maximum virtual address bits in a user-space pointer
#if MI_DEFAULT_VIRTUAL_ADDRESS_BITS > 0
#define MI_MAX_VABITS MI_DEFAULT_VIRTUAL_ADDRESS_BITS
#elif MI_ARCH_X64
// x86-64: canonical user-space addresses use at most 47 bits
#define MI_MAX_VABITS (47)
#elif MI_INTPTR_SIZE > 4
// other 64-bit platforms: assume up to 48 bits of virtual address space
#define MI_MAX_VABITS (48)
#else
// 32-bit platforms
#define MI_MAX_VABITS (32)
#endif
// use a flat page-map (or a 2-level one)
// A flat map is only used for small address spaces (<= 2^40 bytes) since its
// size grows linearly with the addressable range; larger spaces use the
// 2-level map. Can be overridden by pre-defining MI_PAGE_MAP_FLAT.
#ifndef MI_PAGE_MAP_FLAT
#if MI_MAX_VABITS <= 40
#define MI_PAGE_MAP_FLAT 1
#else
#define MI_PAGE_MAP_FLAT 0
#endif
#endif
/* --------------------------------------------------------------------------------
Builtin's

View file

@ -169,6 +169,7 @@ bool _mi_page_map_init(void);
// Register/unregister pages (or an address range) in the global page-map.
void _mi_page_map_register(mi_page_t* page);
void _mi_page_map_unregister(mi_page_t* page);
void _mi_page_map_unregister_range(void* start, size_t size);
// Page lookup that is safe for arbitrary pointers
// (NOTE(review): semantics inferred from the name -- confirm against its definition).
mi_page_t* _mi_safe_ptr_page(const void* p);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
@ -422,6 +423,14 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
return (heap != &_mi_heap_empty);
}
// Return the page used for a small allocation of `size` bytes (including
// padding), via the heap's direct per-word-size lookup table.
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
  const size_t wsize_idx = _mi_wsize_from_size(size);
  mi_assert_internal(wsize_idx < MI_PAGES_DIRECT);
  mi_page_t* const page = heap->pages_free_direct[wsize_idx];
  return page;
}
//static inline uintptr_t _mi_ptr_cookie(const void* p) {
// extern mi_heap_t _mi_heap_main;
// mi_assert_internal(_mi_heap_main.cookie != 0);
@ -433,48 +442,78 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
Pages
----------------------------------------------------------- */
// Return the page used for a small allocation of `size` bytes (including
// padding), via the heap's direct per-word-size lookup table.
// NOTE(review): this definition also appears earlier in this view -- the
// diff rendering shows both the old and the (moved) new location.
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
const size_t idx = _mi_wsize_from_size(size);
mi_assert_internal(idx < MI_PAGES_DIRECT);
return heap->pages_free_direct[idx];
}
#if MI_PAGE_MAP_FLAT
// flat page-map committed on demand
// Each entry maps a slice index (pointer >> MI_ARENA_SLICE_SHIFT) to a
// back-offset (+1) to the start of the owning page; 0 means "not registered"
// (see `_mi_ptr_page_ex` below).
extern uint8_t* _mi_page_map;
// NOTE(review): the four lines below interleave the pre-merge (uintptr_t)
// and post-merge (size_t) versions of this function -- an artifact of the
// diff rendering; only the size_t version exists in the merged source.
static inline uintptr_t _mi_page_map_index(const void* p) {
return (((uintptr_t)p) >> MI_ARENA_SLICE_SHIFT);
static inline size_t _mi_page_map_index(const void* p) {
return (size_t)((uintptr_t)p >> MI_ARENA_SLICE_SHIFT);
}
// Look up the page owning pointer `p` in the flat page-map; if `valid` is
// non-NULL it is set to whether `p` lies in a registered slice (ofs != 0).
// NOTE(review): this span interleaves pre-merge code (the `#if 1`/`#else`
// alternatives that the merge removed) with the post-merge body (the final
// two statements) -- a diff-rendering artifact; in the merged source only
// the idx/ofs lookup followed by the last two statements remains.
static inline mi_page_t* _mi_ptr_page_ex(const void* p, bool* valid) {
#if 1
const uintptr_t idx = _mi_page_map_index(p);
const size_t idx = _mi_page_map_index(p);
const size_t ofs = _mi_page_map[idx];
if (valid != NULL) *valid = (ofs != 0);
return (mi_page_t*)((idx - ofs + 1) << MI_ARENA_SLICE_SHIFT);
#else
// pre-merge experimental variant with an explicit prefetch
const uintptr_t idx = _mi_page_map_index(p);
const uintptr_t up = idx << MI_ARENA_SLICE_SHIFT;
__builtin_prefetch((void*)up);
const size_t ofs = _mi_page_map[idx];
if (valid != NULL) *valid = (ofs != 0);
return (mi_page_t*)(up - ((ofs - 1) << MI_ARENA_SLICE_SHIFT));
#endif
// post-merge body: page start = (slice-index + 1 - ofs) slices
if (valid != NULL) { *valid = (ofs != 0); }
return (mi_page_t*)((((uintptr_t)p >> MI_ARENA_SLICE_SHIFT) + 1 - ofs) << MI_ARENA_SLICE_SHIFT);
}
// Like `_mi_ptr_page_ex` but returns NULL if `p` is not in a registered slice.
static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
bool valid;
// NOTE(review): the two calls below are the pre-/post-merge versions of the
// same line (whitespace-only change) shown together by the diff rendering.
mi_page_t* const page = _mi_ptr_page_ex(p,&valid);
mi_page_t* const page = _mi_ptr_page_ex(p, &valid);
return (valid ? page : NULL);
}
// Flat-map lookup of the page owning `p` without checking that `p` lies in
// a registered slice; the caller must guarantee validity.
static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
  mi_page_t* const page = _mi_ptr_page_ex(p, /* valid: */ NULL);
  return page;
}
#else
// 2-level page map:
// The page-map is usually 4 MiB and points to sub maps of 64 KiB.
// The page-map is committed on-demand (in 64 KiB) parts (and sub-maps are committed on-demand as well)
// One sub page-map = 64 KiB => covers 2^13 * 2^16 = 2^32 = 512 MiB address space
// The page-map needs 48-16-13 = 19 bits => 2^19 sub map pointers = 4 MiB size.
// (Choosing a MI_PAGE_MAP_SUB_SHIFT of 16 gives slightly better code but will commit the initial sub-map at 512 KiB)
// entries per sub-map (2^13)
#define MI_PAGE_MAP_SUB_SHIFT (13)
#define MI_PAGE_MAP_SUB_COUNT (MI_ZU(1) << MI_PAGE_MAP_SUB_SHIFT)
// remaining slice-index bits handled by the top-level map
#define MI_PAGE_MAP_SHIFT (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)
#define MI_PAGE_MAP_COUNT (MI_ZU(1) << MI_PAGE_MAP_SHIFT)
// top-level map: array of pointers to sub-maps (NULL when not yet committed,
// see the NULL check in `_mi_checked_ptr_page`)
extern mi_page_t*** _mi_page_map;
// Split pointer `p` into a top-level page-map index (returned) and, when
// `sub_idx` is non-NULL, the index into the corresponding sub-map.
static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) {
  const uintptr_t slice = (uintptr_t)p / MI_ARENA_SLICE_SIZE;
  if (sub_idx != NULL) {
    // equivalent to the (uint32_t) truncation in the original since
    // MI_PAGE_MAP_SUB_COUNT (2^13) divides 2^32
    *sub_idx = (size_t)(slice % MI_PAGE_MAP_SUB_COUNT);
  }
  return (size_t)(slice / MI_PAGE_MAP_SUB_COUNT);
}
// Two-level lookup of the page owning `p` without a NULL check on the
// sub-map; only valid when the sub-map for `p` is known to be committed.
static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
  size_t sub;
  const size_t top = _mi_page_map_index(p, &sub);
  mi_page_t** const submap = _mi_page_map[top];
  return submap[sub];
}
// Two-level lookup of the page owning `p`; returns NULL when the sub-map
// for `p` has not been committed (i.e. `p` is not in any registered page).
static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
  size_t sub;
  const size_t top = _mi_page_map_index(p, &sub);
  mi_page_t** const submap = _mi_page_map[top];
  return (mi_unlikely(submap == NULL) ? NULL : submap[sub]);
}
#endif
// Return the page that owns pointer `p`.
// In debug builds (or on macOS) use the checked lookup that yields NULL for
// unregistered pointers; otherwise use the faster unchecked variant.
static inline mi_page_t* _mi_ptr_page(const void* p) {
mi_assert_internal(p==NULL || mi_is_in_heap_region(p));
#if MI_DEBUG || defined(__APPLE__)
return _mi_checked_ptr_page(p);
#else
// NOTE(review): the two returns below are the pre-merge and post-merge
// versions of the same line, shown together by the diff rendering; the
// second return is unreachable as written. The merged source keeps only
// `_mi_unchecked_ptr_page`, which exists for both page-map layouts.
return _mi_ptr_page_ex(p,NULL);
return _mi_unchecked_ptr_page(p);
#endif
}
@ -591,7 +630,7 @@ static inline bool mi_page_immediate_available(const mi_page_t* page) {
return (page->free != NULL);
}
// is the page not yet used up to its reserved space?
static inline bool mi_page_is_expandable(const mi_page_t* page) {
mi_assert_internal(page != NULL);