From db82baf1a8f2952d83c6df91bee9cca4a463e0eb Mon Sep 17 00:00:00 2001
From: daanx
Date: Sun, 22 Dec 2024 18:09:16 -0800
Subject: [PATCH] cleanup, some renaming

---
 include/mimalloc/internal.h | 241 ++++++++++++++++++------------------
 src/arena-meta.c            |   8 +-
 src/arena.c                 |  58 ++++-----
 src/free.c                  |   8 +-
 src/heap.c                  |   4 +-
 src/init.c                  |   2 +-
 src/page.c                  |   6 +-
 7 files changed, 159 insertions(+), 168 deletions(-)

diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 9146896c..041e7653 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -57,171 +57,168 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 
 // "libc.c"
-#include <stdarg.h>
-void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
-void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
-char _mi_toupper(char c);
-int _mi_strnicmp(const char* s, const char* t, size_t n);
-void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
-void _mi_strlcat(char* dest, const char* src, size_t dest_size);
-size_t _mi_strlen(const char* s);
-size_t _mi_strnlen(const char* s, size_t max_len);
-bool _mi_getenv(const char* name, char* result, size_t result_size);
+#include <stdarg.h>
+void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
+void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+bool _mi_getenv(const char* name, char* result, size_t result_size);
 
 // "options.c"
-void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
-void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
-void _mi_warning_message(const char* fmt, ...);
-void _mi_verbose_message(const char* fmt, ...);
-void _mi_trace_message(const char* fmt, ...);
-void _mi_output_message(const char* fmt, ...);
-void _mi_options_init(void);
-long _mi_option_get_fast(mi_option_t option);
-void _mi_error_message(int err, const char* fmt, ...);
+void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
+void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
+void _mi_warning_message(const char* fmt, ...);
+void _mi_verbose_message(const char* fmt, ...);
+void _mi_trace_message(const char* fmt, ...);
+void _mi_output_message(const char* fmt, ...);
+void _mi_options_init(void);
+long _mi_option_get_fast(mi_option_t option);
+void _mi_error_message(int err, const char* fmt, ...);
 
 // random.c
-void _mi_random_init(mi_random_ctx_t* ctx);
-void _mi_random_init_weak(mi_random_ctx_t* ctx);
-void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
-void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
-uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
-uintptr_t _mi_heap_random_next(mi_heap_t* heap);
-uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
+void _mi_random_init(mi_random_ctx_t* ctx);
+void _mi_random_init_weak(mi_random_ctx_t* ctx);
+void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
+void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
+uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
+uintptr_t _mi_heap_random_next(mi_heap_t* heap);
+uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
 static inline uintptr_t _mi_random_shuffle(uintptr_t x);
 
 // init.c
 extern mi_decl_cache_align const mi_page_t _mi_page_empty;
-void _mi_process_load(void);
+void _mi_process_load(void);
 void mi_cdecl _mi_process_done(void);
-bool _mi_is_redirected(void);
-bool _mi_allocator_init(const char** message);
-void _mi_allocator_done(void);
-bool _mi_is_main_thread(void);
-size_t _mi_current_thread_count(void);
-bool _mi_preloading(void); // true while the C runtime is not initialized yet
-void _mi_thread_done(mi_heap_t* heap);
+bool _mi_is_redirected(void);
+bool _mi_allocator_init(const char** message);
+void _mi_allocator_done(void);
+bool _mi_is_main_thread(void);
+size_t _mi_current_thread_count(void);
+bool _mi_preloading(void); // true while the C runtime is not initialized yet
+void _mi_thread_done(mi_heap_t* heap);
 
-mi_tld_t* _mi_tld(void); // current tld: `_mi_tld() == _mi_heap_get_default()->tld`
+mi_tld_t* _mi_tld(void); // current tld: `_mi_tld() == _mi_heap_get_default()->tld`
 mi_subproc_t* _mi_subproc(void);
 mi_subproc_t* _mi_subproc_main(void);
+mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
 mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 size_t _mi_thread_seq_id(void) mi_attr_noexcept;
-
-mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
-mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
 void _mi_heap_guarded_init(mi_heap_t* heap);
 
 // os.c
-void _mi_os_init(void); // called from process init
-void* _mi_os_alloc(size_t size, mi_memid_t* memid);
-void* _mi_os_zalloc(size_t size, mi_memid_t* memid);
-void _mi_os_free(void* p, size_t size, mi_memid_t memid);
-void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
+void _mi_os_init(void); // called from process init
+void* _mi_os_alloc(size_t size, mi_memid_t* memid);
+void* _mi_os_zalloc(size_t size, mi_memid_t* memid);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
 
-size_t _mi_os_page_size(void);
-size_t _mi_os_good_alloc_size(size_t size);
-bool _mi_os_has_overcommit(void);
-bool _mi_os_has_virtual_reserve(void);
-size_t _mi_os_virtual_address_bits(void);
+size_t _mi_os_page_size(void);
+size_t _mi_os_good_alloc_size(size_t size);
+bool _mi_os_has_overcommit(void);
+bool _mi_os_has_virtual_reserve(void);
+size_t _mi_os_virtual_address_bits(void);
 
-bool _mi_os_reset(void* addr, size_t size);
-bool _mi_os_commit(void* p, size_t size, bool* is_zero);
-bool _mi_os_decommit(void* addr, size_t size);
-bool _mi_os_protect(void* addr, size_t size);
-bool _mi_os_unprotect(void* addr, size_t size);
-bool _mi_os_purge(void* p, size_t size);
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset);
+bool _mi_os_reset(void* addr, size_t size);
+bool _mi_os_commit(void* p, size_t size, bool* is_zero);
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_protect(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_purge(void* p, size_t size);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset);
 
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
-void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
 
-void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
-bool _mi_os_use_large_page(size_t size, size_t alignment);
-size_t _mi_os_large_page_size(void);
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
+bool _mi_os_use_large_page(size_t size, size_t alignment);
+size_t _mi_os_large_page_size(void);
 
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
 
 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-mi_arena_t* _mi_arena_from_id(mi_arena_id_t id);
+mi_arena_t* _mi_arena_from_id(mi_arena_id_t id);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena);
 
-void* _mi_arena_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
-void* _mi_arena_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
-void _mi_arena_free(void* p, size_t size, mi_memid_t memid);
-bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena);
-bool _mi_arena_contains(const void* p);
-void _mi_arenas_collect(bool force_purge);
-void _mi_arena_unsafe_destroy_all(void);
+void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
+void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
+void _mi_arenas_free(void* p, size_t size, mi_memid_t memid);
+bool _mi_arenas_contain(const void* p);
+void _mi_arenas_collect(bool force_purge);
+void _mi_arenas_unsafe_destroy_all(void);
 
-mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
-void _mi_arena_page_free(mi_page_t* page);
-void _mi_arena_page_abandon(mi_page_t* page);
-void _mi_arena_page_unabandon(mi_page_t* page);
-bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page);
+mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
+void _mi_arenas_page_free(mi_page_t* page);
+void _mi_arenas_page_abandon(mi_page_t* page);
+void _mi_arenas_page_unabandon(mi_page_t* page);
+bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page);
 
 // arena-meta.c
-void* _mi_meta_zalloc( size_t size, mi_memid_t* memid );
-void _mi_meta_free(void* p, size_t size, mi_memid_t memid);
-bool _mi_meta_is_meta_page(void* p);
+void* _mi_meta_zalloc( size_t size, mi_memid_t* memid );
+void _mi_meta_free(void* p, size_t size, mi_memid_t memid);
+bool _mi_meta_is_meta_page(void* p);
 
 // "page-map.c"
-bool _mi_page_map_init(void);
-void _mi_page_map_register(mi_page_t* page);
-void _mi_page_map_unregister(mi_page_t* page);
-void _mi_page_map_unregister_range(void* start, size_t size);
-mi_page_t* _mi_safe_ptr_page(const void* p);
+bool _mi_page_map_init(void);
+void _mi_page_map_register(mi_page_t* page);
+void _mi_page_map_unregister(mi_page_t* page);
+void _mi_page_map_unregister_range(void* start, size_t size);
+mi_page_t* _mi_safe_ptr_page(const void* p);
 
 // "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
 
-void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
-void _mi_page_unfull(mi_page_t* page);
-void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq); // free the page
-void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
-void _mi_page_force_abandon(mi_page_t* page);
-void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
+void _mi_page_unfull(mi_page_t* page);
+void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq); // free the page
+void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
 
-size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
-void _mi_deferred_free(mi_heap_t* heap, bool force);
+size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
+void _mi_deferred_free(mi_heap_t* heap, bool force);
 
-void _mi_page_free_collect(mi_page_t* page,bool force);
-void _mi_page_init(mi_heap_t* heap, mi_page_t* page);
+void _mi_page_free_collect(mi_page_t* page,bool force);
+void _mi_page_init(mi_heap_t* heap, mi_page_t* page);
 
-size_t _mi_bin_size(uint8_t bin); // for stats
-uint8_t _mi_bin(size_t size); // for stats
+size_t _mi_bin_size(uint8_t bin); // for stats
+uint8_t _mi_bin(size_t size); // for stats
 
 // "heap.c"
-mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id, mi_tld_t* tld);
-void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag, mi_tld_t* tld);
-void _mi_heap_destroy_pages(mi_heap_t* heap);
-void _mi_heap_collect_abandon(mi_heap_t* heap);
-void _mi_heap_set_default_direct(mi_heap_t* heap);
-bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
-void _mi_heap_unsafe_destroy_all(void);
-mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
-void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
-bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
-void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page);
+mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id, mi_tld_t* tld);
+void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag, mi_tld_t* tld);
+void _mi_heap_destroy_pages(mi_heap_t* heap);
+void _mi_heap_collect_abandon(mi_heap_t* heap);
+void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void _mi_heap_unsafe_destroy_all(void);
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
+void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
+bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
+void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page);
 
 // "stats.c"
-void _mi_stats_done(mi_stats_t* stats);
-void _mi_stats_merge_from(mi_stats_t* to, mi_stats_t* from);
-mi_msecs_t _mi_clock_now(void);
-mi_msecs_t _mi_clock_end(mi_msecs_t start);
-mi_msecs_t _mi_clock_start(void);
+void _mi_stats_done(mi_stats_t* stats);
+void _mi_stats_merge_from(mi_stats_t* to, mi_stats_t* from);
+mi_msecs_t _mi_clock_now(void);
+mi_msecs_t _mi_clock_end(mi_msecs_t start);
+mi_msecs_t _mi_clock_start(void);
 
 // "alloc.c"
-void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
-void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
-void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
-void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
-mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p);
-void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
+void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p);
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
 
 #if MI_DEBUG>1
-bool _mi_page_is_valid(mi_page_t* page);
+bool _mi_page_is_valid(mi_page_t* page);
 #endif
@@ -718,8 +715,8 @@ static inline bool _mi_page_unown(mi_page_t* page) {
   while mi_unlikely(mi_tf_block(tf_old) != NULL) {
     _mi_page_free_collect(page, false); // update used
     if (mi_page_all_free(page)) {       // it may become free just before unowning it
-      _mi_arena_page_unabandon(page);
-      _mi_arena_page_free(page);
+      _mi_arenas_page_unabandon(page);
+      _mi_arenas_page_free(page);
       return true;
     }
     tf_old = mi_atomic_load_relaxed(&page->xthread_free);
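The internal.h hunks above are almost entirely the `_mi_arena_*` to `_mi_arenas_*` rename for the entry points that operate across all arenas of a subprocess; the signatures themselves do not change, so call sites update mechanically. A minimal sketch of such an update, assuming a hypothetical wrapper (`alloc_arena_slice` is illustrative; only `_mi_arenas_alloc`, `_mi_subproc`, `_mi_tld`, and the `thread_seq` field come from the code above):

#include "mimalloc/internal.h"

// Hypothetical call site: allocate `size` bytes from the arenas of the
// current subprocess (this was spelled `_mi_arena_alloc` before the patch).
static void* alloc_arena_slice(size_t size, mi_memid_t* memid) {
  return _mi_arenas_alloc(_mi_subproc(), size,
                          true  /* commit */,
                          false /* allow_large */,
                          NULL  /* req_arena: none, any arena may satisfy it */,
                          _mi_tld()->thread_seq, memid);
}
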
diff --git a/src/arena-meta.c b/src/arena-meta.c
index a916706b..34be6e0e 100644
--- a/src/arena-meta.c
+++ b/src/arena-meta.c
@@ -72,9 +72,9 @@ static mi_meta_page_t* mi_meta_page_zalloc(void) {
   // allocate a fresh arena slice
   // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again..
   mi_memid_t memid;
-  uint8_t* base = (uint8_t*)_mi_arena_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0,
-                                                    true /* commit*/, (MI_SECURE==0) /* allow large? */,
-                                                    NULL /* req arena */, 0 /* thread_seq */, &memid);
+  uint8_t* base = (uint8_t*)_mi_arenas_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0,
+                                                     true /* commit*/, (MI_SECURE==0) /* allow large? */,
+                                                     NULL /* req arena */, 0 /* thread_seq */, &memid);
   if (base == NULL) return NULL;
   mi_assert_internal(_mi_is_aligned(base,MI_META_PAGE_ALIGN));
   if (!memid.initially_zero) {
@@ -165,7 +165,7 @@ mi_decl_noinline void _mi_meta_free(void* p, size_t size, mi_memid_t memid) {
     mi_bitmap_setN(&mpage->blocks_free, block_idx, block_count,NULL);
   }
   else {
-    _mi_arena_free(p,size,memid);
+    _mi_arenas_free(p,size,memid);
   }
 }
 
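The metadata allocator keeps its interface, and only its backing calls move to the plural arenas API. A usage sketch based on the declarations in internal.h above (the helper function is illustrative, not taken from the sources):

#include "mimalloc/internal.h"

// Illustrative round-trip: `_mi_meta_zalloc` returns zero-initialized memory
// together with a memid that must be handed back to `_mi_meta_free`.
static void meta_roundtrip(void) {
  mi_memid_t memid;
  void* p = _mi_meta_zalloc(64, &memid);
  if (p != NULL) {
    // ... use 64 bytes of zeroed metadata ...
    _mi_meta_free(p, 64, memid);
  }
}
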
diff --git a/src/arena.c b/src/arena.c
index b9fbef05..7a016165 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -467,7 +467,7 @@ static void* mi_arena_os_alloc_aligned(
 
 
 // Allocate large sized memory
-void* _mi_arena_alloc_aligned( mi_subproc_t* subproc,
+void* _mi_arenas_alloc_aligned( mi_subproc_t* subproc,
                                size_t size, size_t alignment, size_t align_offset,
                                bool commit, bool allow_large,
                                mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
@@ -493,9 +493,9 @@ void* _mi_arena_alloc_aligned( mi_subproc_t* subproc,
   return p;
 }
 
-void* _mi_arena_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
+void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
 {
-  return _mi_arena_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, memid);
+  return _mi_arenas_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, memid);
 }
 
 
@@ -521,7 +521,7 @@ static bool mi_arena_try_claim_abandoned(size_t slice_index, mi_arena_t* arena,
     // note: this normally never happens unless heaptags are actually used.
     // (an unown might free the page, and depending on that we can keep it in the abandoned map or not)
     // note: a minor wrinkle: the page will still be mapped but the abandoned map entry is (temporarily) clear at this point.
-    // so we cannot check in `mi_arena_free` for this invariant to hold.
+    // so we cannot check in `mi_arenas_free` for this invariant to hold.
     const bool freed = _mi_page_unown(page);
     *keep_abandoned = !freed;
     return false;
@@ -531,7 +531,7 @@ static bool mi_arena_try_claim_abandoned(size_t slice_index, mi_arena_t* arena,
   return true;
 }
 
-static mi_page_t* mi_arena_page_try_find_abandoned(mi_subproc_t* subproc, size_t slice_count, size_t block_size, mi_arena_t* req_arena, mi_heaptag_t heaptag, size_t tseq)
+static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_t slice_count, size_t block_size, mi_arena_t* req_arena, mi_heaptag_t heaptag, size_t tseq)
 {
   MI_UNUSED(slice_count);
   const size_t bin = _mi_bin(block_size);
@@ -584,7 +584,7 @@ static mi_page_t* mi_arena_page_try_find_abandoned(mi_subproc_t* subproc, size_t
 #endif
 
 // Allocate a fresh page
-static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment, mi_arena_t* req_arena, size_t tseq)
+static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment, mi_arena_t* req_arena, size_t tseq)
 {
   const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page
@@ -697,18 +697,18 @@ static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_
 }
 
 // Allocate a regular small/medium/large page.
-static mi_page_t* mi_arena_page_regular_alloc(mi_heap_t* heap, size_t slice_count, size_t block_size) {
+static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_count, size_t block_size) {
   mi_arena_t* req_arena = heap->exclusive_arena;
   mi_tld_t* const tld = heap->tld;
 
   // 1. look for an abandoned page
-  mi_page_t* page = mi_arena_page_try_find_abandoned(tld->subproc, slice_count, block_size, req_arena, heap->tag, tld->thread_seq);
+  mi_page_t* page = mi_arenas_page_try_find_abandoned(tld->subproc, slice_count, block_size, req_arena, heap->tag, tld->thread_seq);
   if (page != NULL) {
     return page;  // return as abandoned
   }
 
   // 2. find a free block, potentially allocating a new arena
-  page = mi_arena_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq);
+  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq);
   if (page != NULL) {
     mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
     _mi_page_init(heap, page);
@@ -719,7 +719,7 @@ static mi_page_t* mi_arena_page_regular_alloc(mi_heap_t* heap, size_t slice_coun
 }
 
 // Allocate a page containing one block (very large, or with large alignment)
-static mi_page_t* mi_arena_page_singleton_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
+static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
   mi_arena_t* req_arena = heap->exclusive_arena;
   mi_tld_t* const tld = heap->tld;
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
@@ -730,7 +730,7 @@ static mi_page_t* mi_arena_page_singleton_alloc(mi_heap_t* heap, size_t block_si
   const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, MI_ARENA_GUARD_PAGE_SIZE) + MI_ARENA_GUARD_PAGE_SIZE);
   #endif
 
-  mi_page_t* page = mi_arena_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq);
+  mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq);
   if (page == NULL) return NULL;
 
   mi_assert(page->reserved == 1);
@@ -740,23 +740,23 @@
-mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
+mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
   mi_page_t* page;
   if mi_unlikely(block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) {
     mi_assert_internal(_mi_is_power_of_two(block_alignment));
-    page = mi_arena_page_singleton_alloc(heap, block_size, block_alignment);
+    page = mi_arenas_page_singleton_alloc(heap, block_size, block_alignment);
   }
   else if (block_size <= MI_SMALL_MAX_OBJ_SIZE) {
-    page = mi_arena_page_regular_alloc(heap, mi_slice_count_of_size(MI_SMALL_PAGE_SIZE), block_size);
+    page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_SMALL_PAGE_SIZE), block_size);
   }
   else if (block_size <= MI_MEDIUM_MAX_OBJ_SIZE) {
-    page = mi_arena_page_regular_alloc(heap, mi_slice_count_of_size(MI_MEDIUM_PAGE_SIZE), block_size);
+    page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_MEDIUM_PAGE_SIZE), block_size);
   }
   else if (block_size <= MI_LARGE_MAX_OBJ_SIZE) {
-    page = mi_arena_page_regular_alloc(heap, mi_slice_count_of_size(MI_LARGE_PAGE_SIZE), block_size);
+    page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_LARGE_PAGE_SIZE), block_size);
   }
   else {
-    page = mi_arena_page_singleton_alloc(heap, block_size, block_alignment);
+    page = mi_arenas_page_singleton_alloc(heap, block_size, block_alignment);
   }
   // mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
@@ -767,7 +767,7 @@ mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t block
   return page;
 }
 
-void _mi_arena_page_free(mi_page_t* page) {
+void _mi_arenas_page_free(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -804,14 +804,14 @@ void _mi_arena_page_free(mi_page_t* page) {
   if (page->memid.memkind == MI_MEM_ARENA) {
     mi_bitmap_clear(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index);
   }
-  _mi_arena_free(page, mi_memid_size(page->memid), page->memid);
+  _mi_arenas_free(page, mi_memid_size(page->memid), page->memid);
 }
 
 
 /* -----------------------------------------------------------
    Arena abandon
 ----------------------------------------------------------- */
 
-void _mi_arena_page_abandon(mi_page_t* page) {
+void _mi_arenas_page_abandon(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -855,7 +855,7 @@ void _mi_arena_page_abandon(mi_page_t* page) {
   _mi_page_unown(page);
 }
 
-bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page) {
+bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -871,13 +871,13 @@ bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page) {
     mi_subproc_t* subproc = _mi_subproc();
     mi_subproc_stat_counter_increase( subproc, pages_reabandon_full, 1);
     mi_subproc_stat_adjust_decrease( subproc, pages_abandoned, 1, true /* on alloc */); // adjust as we are not abandoning fresh
-    _mi_arena_page_abandon(page);
+    _mi_arenas_page_abandon(page);
     return true;
   }
 }
 
 // called from `mi_free` if trying to unabandon an abandoned page
-void _mi_arena_page_unabandon(mi_page_t* page) {
+void _mi_arenas_page_unabandon(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -917,12 +917,6 @@ void _mi_arena_page_unabandon(mi_page_t* page) {
   }
 }
 
-void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap) {
-  MI_UNUSED(heap);
-  // TODO: implement this
-  return;
-}
-
 
 /* -----------------------------------------------------------
    Arena free
@@ -930,7 +924,7 @@ void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap) {
 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices);
 static void mi_arenas_try_purge(bool force, bool visit_all);
 
-void _mi_arena_free(void* p, size_t size, mi_memid_t memid) {
+void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
   if (p==NULL) return;
   if (size==0) return;
 
@@ -1001,7 +995,7 @@ bool mi_arena_contains(mi_arena_id_t arena_id, const void* p) {
 }
 
 // Is a pointer inside any of our arenas?
-bool _mi_arena_contains(const void* p) {
+bool _mi_arenas_contain(const void* p) {
   mi_subproc_t* subproc = _mi_subproc();
   const size_t max_arena = mi_arenas_get_count(subproc);
   for (size_t i = 0; i < max_arena; i++) {
@@ -1043,7 +1037,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
 
 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
-void _mi_arena_unsafe_destroy_all(void) {
+void _mi_arenas_unsafe_destroy_all(void) {
   mi_arenas_unsafe_destroy(_mi_subproc());
   _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
 }
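Under the new names the regular page allocation keeps its two-step shape: adopt an abandoned page of the same size class if possible, otherwise carve fresh slices from the arenas. A condensed sketch of that flow (both helpers are static to src/arena.c, so the standalone wrapper is illustrative only; cf. hunk @@ -697,18 above):

// Condensed flow of mi_arenas_page_regular_alloc after this patch.
static mi_page_t* regular_page_alloc_flow(mi_heap_t* heap, size_t slice_count, size_t block_size) {
  mi_tld_t* const tld = heap->tld;
  // 1. try to adopt an abandoned page of this size class
  mi_page_t* page = mi_arenas_page_try_find_abandoned(tld->subproc, slice_count, block_size,
                                                      heap->exclusive_arena, heap->tag, tld->thread_seq);
  if (page != NULL) return page;  // returned as abandoned; the caller reclaims it
  // 2. otherwise allocate fresh slices, potentially reserving a new arena
  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size,
                                    1 /* block_alignment */, heap->exclusive_arena, tld->thread_seq);
  if (page != NULL) { _mi_page_init(heap, page); }  // fresh pages still need per-heap init
  return page;
}
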
diff --git a/src/free.c b/src/free.c
index d08123a2..4d72cc7a 100644
--- a/src/free.c
+++ b/src/free.c
@@ -210,9 +210,9 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
 
   if (mi_page_all_free(page)) {
     // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
-    _mi_arena_page_unabandon(page);
+    _mi_arenas_page_unabandon(page);
     // we can free the page directly
-    _mi_arena_page_free(page);
+    _mi_arenas_page_free(page);
     return;
   }
 
@@ -240,7 +240,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
     {
       if (mi_page_queue(tagheap, page->block_size)->first != NULL) { // don't reclaim for an block_size we don't use
         // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
-        _mi_arena_page_unabandon(page);
+        _mi_arenas_page_unabandon(page);
         _mi_heap_page_reclaim(tagheap, page);
         mi_heap_stat_counter_increase(tagheap, pages_reclaim_on_free, 1);
         return;
@@ -252,7 +252,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
   // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
   if (!mi_page_is_used_at_frac(page,8) &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
       !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
-      _mi_arena_page_try_reabandon_to_mapped(page))
+      _mi_arenas_page_try_reabandon_to_mapped(page))
   {
     return;
   }
diff --git a/src/heap.c b/src/heap.c
index a1b06c6b..25ddf9b7 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -211,7 +211,7 @@ mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena
   else {
     // heaps associated wita a specific arena are allocated in that arena
     // note: takes up at least one slice which is quite wasteful...
-    heap = (mi_heap_t*)_mi_arena_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, &memid);
+    heap = (mi_heap_t*)_mi_arenas_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, &memid);
   }
   if (heap==NULL) {
     _mi_error_message(ENOMEM, "unable to allocate heap meta-data\n");
@@ -341,7 +341,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   page->next = NULL;
   page->prev = NULL;
   mi_page_set_heap(page, NULL);
-  _mi_arena_page_free(page);
+  _mi_arenas_page_free(page);
 
   return true; // keep going
 }
diff --git a/src/init.c b/src/init.c
index 5f3fb797..8233f8a3 100644
--- a/src/init.c
+++ b/src/init.c
@@ -713,7 +713,7 @@ void mi_cdecl _mi_process_done(void) {
   if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
     mi_collect(true /* force */);
     _mi_heap_unsafe_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
-    _mi_arena_unsafe_destroy_all();
+    _mi_arenas_unsafe_destroy_all();
   }
 
   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
diff --git a/src/page.c b/src/page.c
index 6030161a..7c8429a9 100644
--- a/src/page.c
+++ b/src/page.c
@@ -252,7 +252,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   else {
     mi_page_queue_remove(pq, page);
     mi_page_set_heap(page, NULL);
-    _mi_arena_page_abandon(page);
+    _mi_arenas_page_abandon(page);
   }
 }
 
@@ -264,7 +264,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
   mi_assert_internal(mi_heap_contains_queue(heap, pq));
   mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_MAX_OBJ_SIZE || block_size == pq->block_size);
   #endif
-  mi_page_t* page = _mi_arena_page_alloc(heap, block_size, page_alignment);
+  mi_page_t* page = _mi_arenas_page_alloc(heap, block_size, page_alignment);
   if (page == NULL) {
     // out-of-memory
     return NULL;
@@ -357,7 +357,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) {
 
   // and free it
   mi_page_set_heap(page,NULL);
-  _mi_arena_page_free(page);
+  _mi_arenas_page_free(page);
 }
 
 #define MI_MAX_RETIRE_SIZE    MI_LARGE_OBJ_SIZE_MAX   // should be less than size for MI_BIN_HUGE
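Taken together, the patch is a mechanical rename with unchanged signatures, plus three removals: the declarations of `_mi_heap_main_get` and `_mi_page_force_abandon` are dropped from internal.h, and the unimplemented `_mi_arena_reclaim_all_abandoned` stub is deleted from arena.c. For out-of-tree code that still uses the old names, a hypothetical mapping (not part of the patch; mimalloc's `_mi_*` functions are internal and carry no stability guarantee, so updating call sites is the real fix):

// Old internal name                     -> new internal name (same signature)
#define _mi_arena_alloc                         _mi_arenas_alloc
#define _mi_arena_alloc_aligned                 _mi_arenas_alloc_aligned
#define _mi_arena_free                          _mi_arenas_free
#define _mi_arena_contains                      _mi_arenas_contain
#define _mi_arena_unsafe_destroy_all            _mi_arenas_unsafe_destroy_all
#define _mi_arena_page_alloc                    _mi_arenas_page_alloc
#define _mi_arena_page_free                     _mi_arenas_page_free
#define _mi_arena_page_abandon                  _mi_arenas_page_abandon
#define _mi_arena_page_unabandon                _mi_arenas_page_unabandon
#define _mi_arena_page_try_reabandon_to_mapped  _mi_arenas_page_try_reabandon_to_mapped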