diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index e316de94..208989e3 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -147,6 +147,7 @@
 mi_arena_t* _mi_arena_from_id(mi_arena_id_t id);
 void*       _mi_arena_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
 void*       _mi_arena_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
+void        _mi_arena_free(void* p, size_t size, mi_memid_t memid);
 bool        _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena);
 bool        _mi_arena_contains(const void* p);
 void        _mi_arenas_collect(bool force_purge);
@@ -421,11 +422,11 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
   return (heap != &_mi_heap_empty);
 }
 
-static inline uintptr_t _mi_ptr_cookie(const void* p) {
-  extern mi_heap_t _mi_heap_main;
-  mi_assert_internal(_mi_heap_main.cookie != 0);
-  return ((uintptr_t)p ^ _mi_heap_main.cookie);
-}
+//static inline uintptr_t _mi_ptr_cookie(const void* p) {
+//  extern mi_heap_t _mi_heap_main;
+//  mi_assert_internal(_mi_heap_main.cookie != 0);
+//  return ((uintptr_t)p ^ _mi_heap_main.cookie);
+//}
 
 
 /* -----------------------------------------------------------
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index 59393848..461b5393 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -396,7 +396,6 @@ struct mi_heap_s {
   mi_tld_t*        tld;               // thread-local data
   mi_arena_t*      exclusive_arena;   // if the heap should only allocate from a specific arena (or NULL)
   uintptr_t        cookie;            // random cookie to verify pointers (see `_mi_ptr_cookie`)
-  uintptr_t        keys[2];           // two random keys used to encode the `thread_delayed_free` list
   mi_random_ctx_t  random;            // random number context used for secure allocation
   size_t           page_count;        // total number of pages in the `pages` queues.
   size_t           page_retired_min;  // smallest retired index (retired pages are fully free, but still in the page queues)
@@ -522,21 +521,13 @@ void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount);
 #define mi_os_stat_increase(stat,amount)  mi_subproc_stat_increase(_mi_subproc(),stat,amount)
 #define mi_os_stat_decrease(stat,amount)  mi_subproc_stat_decrease(_mi_subproc(),stat,amount)
 
-#define mi_tld_stat_counter_increase(tld,stat,amount)  __mi_stat_counter_increase( &(tld)->stats.stat, amount)
-#define mi_tld_stat_increase(tld,stat,amount)  __mi_stat_increase( &(tld)->stats.stat, amount)
-#define mi_tld_stat_decrease(tld,stat,amount)  __mi_stat_decrease( &(tld)->stats.stat, amount)
+#define mi_heap_stat_counter_increase(heap,stat,amount)  __mi_stat_counter_increase( &(heap)->tld->stats.stat, amount)
+#define mi_heap_stat_increase(heap,stat,amount)  __mi_stat_increase( &(heap)->tld->stats.stat, amount)
+#define mi_heap_stat_decrease(heap,stat,amount)  __mi_stat_decrease( &(heap)->tld->stats.stat, amount)
 
-#define mi_debug_tld_stat_counter_increase(tld,stat,amount)  mi_debug_stat_counter_increase( (tld)->stats.stat, amount)
-#define mi_debug_tld_stat_increase(tld,stat,amount)  mi_debug_stat_increase( (tld)->stats.stat, amount)
-#define mi_debug_tld_stat_decrease(tld,stat,amount)  mi_debug_stat_decrease( (tld)->stats.stat, amount)
-
-#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_tld_stat_counter_increase((heap)->tld, stat, amount)
-#define mi_heap_stat_increase(heap,stat,amount)  mi_tld_stat_increase( (heap)->tld, stat, amount)
-#define mi_heap_stat_decrease(heap,stat,amount)  mi_tld_stat_decrease( (heap)->tld, stat, amount)
-
-#define mi_debug_heap_stat_counter_increase(heap,stat,amount)  mi_debug_tld_stat_counter_increase((heap)->tld, stat, amount)
-#define mi_debug_heap_stat_increase(heap,stat,amount)  mi_debug_tld_stat_increase( (heap)->tld, stat, amount)
-#define mi_debug_heap_stat_decrease(heap,stat,amount)  mi_debug_tld_stat_decrease( (heap)->tld, stat, amount)
+#define mi_debug_heap_stat_counter_increase(heap,stat,amount)  mi_debug_stat_counter_increase( (heap)->tld->stats.stat, amount)
+#define mi_debug_heap_stat_increase(heap,stat,amount)  mi_debug_stat_increase( (heap)->tld->stats.stat, amount)
+#define mi_debug_heap_stat_decrease(heap,stat,amount)  mi_debug_stat_decrease( (heap)->tld->stats.stat, amount)
 
 
 // ------------------------------------------------------
diff --git a/src/arena-meta.c b/src/arena-meta.c
index a5dc8e75..065a1331 100644
--- a/src/arena-meta.c
+++ b/src/arena-meta.c
@@ -148,11 +148,8 @@ mi_decl_noinline void _mi_meta_free(void* p, size_t size, mi_memid_t memid) {
     _mi_memzero_aligned(mi_meta_block_start(mpage, block_idx), block_count*MI_META_BLOCK_SIZE);
     mi_bitmap_setN(&mpage->blocks_free, block_idx, block_count,NULL);
   }
-  else if (mi_memid_is_os(memid)) {
-    _mi_os_free(p, size, memid);
-  }
   else {
-    mi_assert_internal(mi_memid_needs_no_free(memid));
+    _mi_arena_free(p,size,memid);
   }
 }
 
diff --git a/src/arena.c b/src/arena.c
index c4b02cf6..869cba49 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -762,8 +762,6 @@ mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t block
   return page;
 }
 
-static void mi_arena_free(void* p, size_t size, mi_memid_t memid);
-
 void _mi_arena_page_free(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
@@ -794,7 +792,7 @@ void _mi_arena_page_free(mi_page_t* page) {
   if (page->memid.memkind == MI_MEM_ARENA) {
     mi_bitmap_clear(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index);
   }
-  mi_arena_free(page, mi_memid_size(page->memid), page->memid);
+  _mi_arena_free(page, mi_memid_size(page->memid), page->memid);
 }
 
 /* -----------------------------------------------------------
@@ -920,7 +918,7 @@ void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap) {
 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices);
 static void mi_arenas_try_purge(bool force, bool visit_all);
 
-static void mi_arena_free(void* p, size_t size, mi_memid_t memid) {
+void _mi_arena_free(void* p, size_t size, mi_memid_t memid) {
   if (p==NULL) return;
   if (size==0) return;
 
diff --git a/src/heap.c b/src/heap.c
index d82b383f..f47aaad9 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -213,8 +213,8 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint
     _mi_random_split(&heap->tld->heap_backing->random, &heap->random);
   }
   heap->cookie = _mi_heap_random_next(heap) | 1;
-  heap->keys[0] = _mi_heap_random_next(heap);
-  heap->keys[1] = _mi_heap_random_next(heap);
+  //heap->keys[0] = _mi_heap_random_next(heap);
+  //heap->keys[1] = _mi_heap_random_next(heap);
   _mi_heap_guarded_init(heap);
 
   // push on the thread local heaps list
@@ -227,7 +227,15 @@ mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena
   mi_assert(heap_tag >= 0 && heap_tag < 256);
   // allocate and initialize a heap
   mi_memid_t memid;
-  mi_heap_t* heap = (mi_heap_t*)_mi_meta_zalloc(sizeof(mi_heap_t), &memid);
+  mi_heap_t* heap;
+  if (arena_id == _mi_arena_id_none()) {
+    heap = (mi_heap_t*)_mi_meta_zalloc(sizeof(mi_heap_t), &memid);
+  }
+  else {
+    // heaps associated with a specific arena are allocated in that arena
+    // note: takes up at least one slice which is quite wasteful...
+    heap = (mi_heap_t*)_mi_arena_alloc(_mi_subproc(), sizeof(mi_heap_t), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, &memid);
+  }
   if (heap==NULL) {
     _mi_error_message(ENOMEM, "unable to allocate heap meta-data\n");
     return NULL;
diff --git a/src/init.c b/src/init.c
index 1968ef68..2f147e55 100644
--- a/src/init.c
+++ b/src/init.c
@@ -115,7 +115,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   &tld_empty,             // tld
   NULL,                   // exclusive_arena
   0,                      // cookie
-  { 0, 0 },               // keys
+  //{ 0, 0 },             // keys
   { {0}, {0}, 0, true },  // random
   0,                      // page count
   MI_BIN_FULL, 0,         // page retired min/max
@@ -149,9 +149,9 @@ static mi_decl_cache_align mi_tld_t tld_main = {
 
 mi_decl_cache_align mi_heap_t heap_main = {
   &tld_main,              // thread local data
+  NULL,                   // exclusive arena
   0,                      // initial cookie
-  0,                      // arena id
-  { 0, 0 },               // the key of the main heap can be fixed (unlike page keys that need to be secure!)
+  //{ 0, 0 },             // the key of the main heap can be fixed (unlike page keys that need to be secure!)
   { {0x846ca68b}, {0}, 0, true },  // random
   0,                      // page count
   MI_BIN_FULL, 0,         // page retired min/max
@@ -248,8 +248,8 @@ static void mi_heap_main_init(void) {
     _mi_random_init(&heap_main.random);
     #endif
    heap_main.cookie  = _mi_heap_random_next(&heap_main);
-    heap_main.keys[0] = _mi_heap_random_next(&heap_main);
-    heap_main.keys[1] = _mi_heap_random_next(&heap_main);
+    //heap_main.keys[0] = _mi_heap_random_next(&heap_main);
+    //heap_main.keys[1] = _mi_heap_random_next(&heap_main);
     _mi_heap_guarded_init(&heap_main);
     heap_main.allow_page_abandon = (mi_option_get(mi_option_full_page_retain) >= 0);
     heap_main.full_page_retain = mi_option_get_clamp(mi_option_full_page_retain, -1, 32);
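
Note (illustrative sketch, not part of the patch): the _mi_heap_create hunk above makes a heap that is tied to a specific arena allocate its own mi_heap_t meta-data inside that arena rather than from the internal meta-data allocator. The snippet below exercises that path through mimalloc's public API only; the file name and sizes are made up for the example.

  // sketch.c -- hypothetical usage example, assuming a mimalloc build with the arena API
  #include <mimalloc.h>
  #include <stdio.h>

  int main(void) {
    mi_arena_id_t arena_id;
    // reserve a 64 MiB exclusive arena (committed, no large OS pages)
    if (mi_reserve_os_memory_ex(64 * 1024 * 1024, true, false, true, &arena_id) != 0) {
      fprintf(stderr, "arena reservation failed\n");
      return 1;
    }
    // with the change above, the heap object itself is expected to be carved
    // out of this arena (taking at least one slice)
    mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
    void* p = mi_heap_malloc(heap, 128);   // served from the exclusive arena
    printf("allocated %p from an arena-backed heap\n", p);
    mi_free(p);
    mi_heap_delete(heap);
    return 0;
  }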