wip: can run initial test

This commit is contained in:
daanx 2024-11-29 17:50:37 -08:00
parent e0152ab82f
commit 0f635413d6
6 changed files with 25 additions and 17 deletions

View file

@@ -447,7 +447,7 @@ static inline mi_page_t* _mi_ptr_page(const void* p) {
#if MI_DEBUG
if mi_unlikely(ofs==0) return MI_PAGE_PTR_INVALID;
#endif
return (mi_page_t*)((up + ofs - 1) << MI_ARENA_BLOCK_SHIFT);
return (mi_page_t*)((up + ofs + 1) << MI_ARENA_BLOCK_SHIFT);
}
@@ -663,7 +663,8 @@ We also pass a separate `null` value to be used as `NULL` or otherwise
------------------------------------------------------------------- */
static inline bool mi_is_in_same_page(const void* p, const void* q) {
return (_mi_ptr_page(p) == _mi_ptr_page(q));
// return (_mi_ptr_page(p) == _mi_ptr_page(q));
return ((uintptr_t)p / MI_LARGE_PAGE_SIZE) == ((uintptr_t)q / MI_LARGE_PAGE_SIZE);
}
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {

View file

@@ -415,7 +415,8 @@ void* _mi_arena_alloc_aligned(
}
// fall back to the OS
return mi_arena_os_alloc_aligned(size, alignment, align_offset, commit, allow_large, req_arena_id, memid, tld);
void* p = mi_arena_os_alloc_aligned(size, alignment, align_offset, commit, allow_large, req_arena_id, memid, tld);
return p;
}
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_tld_t* tld)
@@ -498,6 +499,7 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t block_count, size_t block_siz
else {
page->block_size_shift = 0;
}
_mi_page_map_register(page);
mi_assert_internal(mi_page_block_size(page) == block_size);
mi_assert_internal(mi_page_is_abandoned(page));
@@ -564,12 +566,13 @@ static uint8_t* mi_arena_page_allocated_area(mi_page_t* page, size_t* psize) {
const size_t diff = pstart - (uint8_t*)page;
const size_t size = _mi_align_up(page_size + diff, MI_ARENA_BLOCK_SIZE);
if (psize != NULL) { *psize = size; }
return pstart;
return (uint8_t*)page;
}
void _mi_arena_page_free(mi_page_t* page, mi_tld_t* tld) {
size_t size;
uint8_t* pstart = mi_arena_page_allocated_area(page, &size);
_mi_page_map_unregister(page);
_mi_arena_free(pstart, size, size, page->memid, &tld->stats);
}
@@ -1110,7 +1113,7 @@ static void mi_arenas_try_purge(bool force, bool visit_all, mi_stats_t* stats) {
const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
if (max_arena == 0) return;
_mi_error_message(EFAULT, "purging not yet implemented\n");
// _mi_error_message(EFAULT, "purging not yet implemented\n");
MI_UNUSED(stats);
MI_UNUSED(visit_all);
MI_UNUSED(force);

View file

@@ -144,7 +144,7 @@ static bool mi_bitmap_chunk_try_xset8(mi_bit_t set, mi_bitmap_chunk_t* chunk, si
// Set/clear a sequence of `n` bits within a chunk. Returns true if all bits transitioned from 0 to 1 (or 1 to 0)
static bool mi_bitmap_chunk_xsetN(mi_bit_t set, mi_bitmap_chunk_t* chunk, size_t cidx, size_t n, bool* palready_xset) {
mi_assert_internal(cidx + n < MI_BITMAP_CHUNK_BITS);
mi_assert_internal(cidx + n <= MI_BITMAP_CHUNK_BITS);
mi_assert_internal(n>0);
bool all_transition = true;
bool all_already_xset = true;

View file

@@ -130,7 +130,7 @@ static mi_decl_cache_align mi_subproc_t mi_subproc_default;
static mi_decl_cache_align mi_tld_t tld_main = {
0, false,
&_mi_heap_main, &_mi_heap_main,
NULL, // subproc
&mi_subproc_default, // subproc
0, // tseq
{ 0, &tld_main.stats }, // os
{ MI_STATS_NULL } // stats

View file

@@ -11,7 +11,7 @@ terms of the MIT license. A copy of the license can be found in the file
mi_decl_cache_align signed char* _mi_page_map = NULL;
static bool mi_page_map_all_committed = false;
static size_t mi_blocks_per_commit_bit = 1;
static size_t mi_size_per_commit_bit = MI_ARENA_BLOCK_SIZE;
static mi_memid_t mi_page_map_memid;
static mi_bitmap_t mi_page_map_commit;
@@ -20,13 +20,12 @@ static bool mi_page_map_init(void) {
if (vbits >= 48) vbits = 47;
// 1 byte per block = 2 GiB for 128 TiB address space (48 bit = 256 TiB address space)
// 64 KiB for 4 GiB address space (on 32-bit)
const size_t page_map_size = (MI_ZU(1) << (vbits >> MI_ARENA_BLOCK_SHIFT));
const size_t page_map_size = (MI_ZU(1) << (vbits - MI_ARENA_BLOCK_SHIFT));
const size_t min_commit_size = _mi_divide_up(page_map_size,MI_BITMAP_MAX_BITS);
mi_blocks_per_commit_bit = mi_block_count_of_size(min_commit_size);
mi_size_per_commit_bit = _mi_divide_up(page_map_size,MI_BITMAP_MAX_BITS);
mi_page_map_all_committed = _mi_os_has_overcommit(); // commit on-access on Linux systems
_mi_page_map = (int8_t*)_mi_os_alloc_aligned(page_map_size, 0, mi_page_map_all_committed, true, &mi_page_map_memid, NULL);
_mi_page_map = (int8_t*)_mi_os_alloc_aligned(page_map_size, 1, mi_page_map_all_committed, true, &mi_page_map_memid, NULL);
if (_mi_page_map==NULL) {
_mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB);
return false;
@@ -38,6 +37,7 @@ static bool mi_page_map_init(void) {
// commit the first part so NULL pointers get resolved without an access violation
if (!mi_page_map_all_committed) {
_mi_os_commit(_mi_page_map, _mi_os_page_size(), NULL, NULL);
_mi_page_map[0] = -1; // so _mi_ptr_page(NULL) == NULL
}
return true;
}
@@ -45,12 +45,12 @@ static bool mi_page_map_init(void) {
static void mi_page_map_ensure_committed(void* p, size_t idx, size_t block_count) {
// is the page map area that contains the page address committed?
if (!mi_page_map_all_committed) {
const size_t commit_bit_count = _mi_divide_up(block_count, mi_blocks_per_commit_bit);
const size_t commit_bit_idx = idx / mi_blocks_per_commit_bit;
const size_t commit_bit_count = _mi_divide_up(block_count, mi_size_per_commit_bit);
const size_t commit_bit_idx = idx / mi_size_per_commit_bit;
for (size_t i = 0; i < commit_bit_count; i++) { // per bit to avoid crossing over bitmap chunks
if (mi_bitmap_is_xsetN(MI_BIT_CLEAR, &mi_page_map_commit, commit_bit_idx + i, 1)) {
// this may race, in which case we do multiple commits (which is ok)
_mi_os_commit((uint8_t*)p + (i*mi_blocks_per_commit_bit*MI_ARENA_BLOCK_SIZE), mi_blocks_per_commit_bit* MI_ARENA_BLOCK_SIZE, NULL, NULL);
_mi_os_commit(_mi_page_map + ((commit_bit_idx + i)*mi_size_per_commit_bit), mi_size_per_commit_bit, NULL, NULL);
mi_bitmap_xsetN(MI_BIT_SET, &mi_page_map_commit, commit_bit_idx + i, 1, NULL);
}
}
@@ -75,7 +75,7 @@ void _mi_page_map_register(mi_page_t* page) {
size_t block_count;
const size_t idx = mi_page_map_get_idx(page, &page_start, &block_count);
mi_page_map_ensure_committed(page, idx, block_count);
mi_page_map_ensure_committed(page_start, idx, block_count);
// set the offsets
for (int i = 0; i < (int)block_count; i++) {
@@ -100,7 +100,7 @@ void _mi_page_map_unregister(mi_page_t* page) {
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
uintptr_t idx = ((uintptr_t)p >> MI_ARENA_BLOCK_SHIFT);
if (!mi_page_map_all_committed || mi_bitmap_is_xsetN(MI_BIT_SET, &mi_page_map_commit, idx/mi_blocks_per_commit_bit, 1)) {
if (!mi_page_map_all_committed || mi_bitmap_is_xsetN(MI_BIT_SET, &mi_page_map_commit, idx/mi_size_per_commit_bit, 1)) {
return (_mi_page_map[idx] != 0);
}
else {

View file

@@ -40,6 +40,10 @@ static int ITER = 20;
static int THREADS = 8;
static int SCALE = 10;
static int ITER = 10;
#elif 1
static int THREADS = 1;
static int SCALE = 10;
static int ITER = 10;
#else
static int THREADS = 32; // more repeatable if THREADS <= #processors
static int SCALE = 25; // scaling factor