cleanup, some renaming

The cross-arena `_mi_arena_*` entry points are renamed to `_mi_arenas_*` (for example `_mi_arena_alloc` → `_mi_arenas_alloc`, `_mi_arena_free` → `_mi_arenas_free`, `_mi_arena_contains` → `_mi_arenas_contain`, and the `_mi_arena_page_*` functions → `_mi_arenas_page_*`). The declarations of `_mi_heap_main_get` and `_mi_page_force_abandon` and the unimplemented `_mi_arena_reclaim_all_abandoned` stub are removed.

daanx 2024-12-22 18:09:16 -08:00
parent 9ecadaecd5
commit db82baf1a8
7 changed files with 159 additions and 168 deletions

include/mimalloc/internal.h

@@ -104,11 +104,9 @@ void _mi_thread_done(mi_heap_t* heap);
 mi_tld_t* _mi_tld(void); // current tld: `_mi_tld() == _mi_heap_get_default()->tld`
 mi_subproc_t* _mi_subproc(void);
 mi_subproc_t* _mi_subproc_main(void);
+mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
 mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 size_t _mi_thread_seq_id(void) mi_attr_noexcept;
-mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
-mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
 void _mi_heap_guarded_init(mi_heap_t* heap);

 // os.c
@@ -144,20 +142,20 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t m

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
 mi_arena_t* _mi_arena_from_id(mi_arena_id_t id);
-void* _mi_arena_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
-void* _mi_arena_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
-void _mi_arena_free(void* p, size_t size, mi_memid_t memid);
 bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena);
-bool _mi_arena_contains(const void* p);
-void _mi_arenas_collect(bool force_purge);
-void _mi_arena_unsafe_destroy_all(void);
-mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
-void _mi_arena_page_free(mi_page_t* page);
-void _mi_arena_page_abandon(mi_page_t* page);
-void _mi_arena_page_unabandon(mi_page_t* page);
-bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page);
+void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
+void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
+void _mi_arenas_free(void* p, size_t size, mi_memid_t memid);
+bool _mi_arenas_contain(const void* p);
+void _mi_arenas_collect(bool force_purge);
+void _mi_arenas_unsafe_destroy_all(void);
+mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
+void _mi_arenas_page_free(mi_page_t* page);
+void _mi_arenas_page_abandon(mi_page_t* page);
+void _mi_arenas_page_unabandon(mi_page_t* page);
+bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page);

 // arena-meta.c
 void* _mi_meta_zalloc( size_t size, mi_memid_t* memid );
@@ -178,7 +176,6 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept;
 void _mi_page_unfull(mi_page_t* page);
 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq); // free the page
 void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
-void _mi_page_force_abandon(mi_page_t* page);

 void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
@@ -718,8 +715,8 @@ static inline bool _mi_page_unown(mi_page_t* page) {
   while mi_unlikely(mi_tf_block(tf_old) != NULL) {
     _mi_page_free_collect(page, false); // update used
     if (mi_page_all_free(page)) { // it may become free just before unowning it
-      _mi_arena_page_unabandon(page);
-      _mi_arena_page_free(page);
+      _mi_arenas_page_unabandon(page);
+      _mi_arenas_page_free(page);
       return true;
     }
     tf_old = mi_atomic_load_relaxed(&page->xthread_free);
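
To make the rename concrete: callers of the old cross-arena API only change names, since every signature above is unchanged. A minimal sketch (illustrative only, not code from this commit; `size` is an assumed variable, and the `NULL`/`0` arguments mean "no specific arena" and thread sequence 0, matching the call in src/arena-meta.c below):

  mi_memid_t memid;
  void* p = _mi_arenas_alloc(_mi_subproc(), size,            // was: _mi_arena_alloc
                             true /* commit */, true /* allow_large */,
                             NULL /* req_arena */, 0 /* tseq */, &memid);
  if (p != NULL) {
    // ... use the block ...
    _mi_arenas_free(p, size, memid);                         // was: _mi_arena_free
  }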

src/arena-meta.c

@@ -72,7 +72,7 @@ static mi_meta_page_t* mi_meta_page_zalloc(void) {
   // allocate a fresh arena slice
   // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again..
   mi_memid_t memid;
-  uint8_t* base = (uint8_t*)_mi_arena_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0,
+  uint8_t* base = (uint8_t*)_mi_arenas_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0,
                                                     true /* commit*/, (MI_SECURE==0) /* allow large? */,
                                                     NULL /* req arena */, 0 /* thread_seq */, &memid);
   if (base == NULL) return NULL;
@@ -165,7 +165,7 @@ mi_decl_noinline void _mi_meta_free(void* p, size_t size, mi_memid_t memid) {
     mi_bitmap_setN(&mpage->blocks_free, block_idx, block_count,NULL);
   }
   else {
-    _mi_arena_free(p,size,memid);
+    _mi_arenas_free(p,size,memid);
   }
 }

src/arena.c

@@ -467,7 +467,7 @@ static void* mi_arena_os_alloc_aligned(

 // Allocate large sized memory
-void* _mi_arena_alloc_aligned( mi_subproc_t* subproc,
+void* _mi_arenas_alloc_aligned( mi_subproc_t* subproc,
                                size_t size, size_t alignment, size_t align_offset,
                                bool commit, bool allow_large,
                                mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
@@ -493,9 +493,9 @@ void* _mi_arena_alloc_aligned( mi_subproc_t* subproc,
   return p;
 }

-void* _mi_arena_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
+void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
 {
-  return _mi_arena_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, memid);
+  return _mi_arenas_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, memid);
 }
@@ -521,7 +521,7 @@ static bool mi_arena_try_claim_abandoned(size_t slice_index, mi_arena_t* arena,
     // note: this normally never happens unless heaptags are actually used.
     // (an unown might free the page, and depending on that we can keep it in the abandoned map or not)
     // note: a minor wrinkle: the page will still be mapped but the abandoned map entry is (temporarily) clear at this point.
-    // so we cannot check in `mi_arena_free` for this invariant to hold.
+    // so we cannot check in `mi_arenas_free` for this invariant to hold.
     const bool freed = _mi_page_unown(page);
     *keep_abandoned = !freed;
     return false;
@@ -531,7 +531,7 @@ static bool mi_arena_try_claim_abandoned(size_t slice_index, mi_arena_t* arena,
   return true;
 }

-static mi_page_t* mi_arena_page_try_find_abandoned(mi_subproc_t* subproc, size_t slice_count, size_t block_size, mi_arena_t* req_arena, mi_heaptag_t heaptag, size_t tseq)
+static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_t slice_count, size_t block_size, mi_arena_t* req_arena, mi_heaptag_t heaptag, size_t tseq)
 {
   MI_UNUSED(slice_count);
   const size_t bin = _mi_bin(block_size);
@@ -584,7 +584,7 @@ static mi_page_t* mi_arena_page_try_find_abandoned(mi_subproc_t* subproc, size_t
 #endif

 // Allocate a fresh page
-static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment,
+static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment,
                                             mi_arena_t* req_arena, size_t tseq)
 {
   const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page
@@ -697,18 +697,18 @@ static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_
 }

 // Allocate a regular small/medium/large page.
-static mi_page_t* mi_arena_page_regular_alloc(mi_heap_t* heap, size_t slice_count, size_t block_size) {
+static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_count, size_t block_size) {
   mi_arena_t* req_arena = heap->exclusive_arena;
   mi_tld_t* const tld = heap->tld;

   // 1. look for an abandoned page
-  mi_page_t* page = mi_arena_page_try_find_abandoned(tld->subproc, slice_count, block_size, req_arena, heap->tag, tld->thread_seq);
+  mi_page_t* page = mi_arenas_page_try_find_abandoned(tld->subproc, slice_count, block_size, req_arena, heap->tag, tld->thread_seq);
   if (page != NULL) {
     return page; // return as abandoned
   }

   // 2. find a free block, potentially allocating a new arena
-  page = mi_arena_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq);
+  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq);
   if (page != NULL) {
     mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
     _mi_page_init(heap, page);
@@ -719,7 +719,7 @@ static mi_page_t* mi_arena_page_regular_alloc(mi_heap_t* heap, size_t slice_coun
 }

 // Allocate a page containing one block (very large, or with large alignment)
-static mi_page_t* mi_arena_page_singleton_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
+static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
   mi_arena_t* req_arena = heap->exclusive_arena;
   mi_tld_t* const tld = heap->tld;
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
@@ -730,7 +730,7 @@ static mi_page_t* mi_arena_page_singleton_alloc(mi_heap_t* heap, size_t block_si
   const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, MI_ARENA_GUARD_PAGE_SIZE) + MI_ARENA_GUARD_PAGE_SIZE);
 #endif

-  mi_page_t* page = mi_arena_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq);
+  mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq);
   if (page == NULL) return NULL;

   mi_assert(page->reserved == 1);
@@ -740,23 +740,23 @@ static mi_page_t* mi_arena_page_singleton_alloc(mi_heap_t* heap, size_t block_si
 }

-mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
+mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) {
   mi_page_t* page;
   if mi_unlikely(block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) {
     mi_assert_internal(_mi_is_power_of_two(block_alignment));
-    page = mi_arena_page_singleton_alloc(heap, block_size, block_alignment);
+    page = mi_arenas_page_singleton_alloc(heap, block_size, block_alignment);
   }
   else if (block_size <= MI_SMALL_MAX_OBJ_SIZE) {
-    page = mi_arena_page_regular_alloc(heap, mi_slice_count_of_size(MI_SMALL_PAGE_SIZE), block_size);
+    page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_SMALL_PAGE_SIZE), block_size);
   }
   else if (block_size <= MI_MEDIUM_MAX_OBJ_SIZE) {
-    page = mi_arena_page_regular_alloc(heap, mi_slice_count_of_size(MI_MEDIUM_PAGE_SIZE), block_size);
+    page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_MEDIUM_PAGE_SIZE), block_size);
   }
   else if (block_size <= MI_LARGE_MAX_OBJ_SIZE) {
-    page = mi_arena_page_regular_alloc(heap, mi_slice_count_of_size(MI_LARGE_PAGE_SIZE), block_size);
+    page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_LARGE_PAGE_SIZE), block_size);
   }
   else {
-    page = mi_arena_page_singleton_alloc(heap, block_size, block_alignment);
+    page = mi_arenas_page_singleton_alloc(heap, block_size, block_alignment);
   }
   // mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc);
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
@@ -767,7 +767,7 @@ mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t block
   return page;
 }

-void _mi_arena_page_free(mi_page_t* page) {
+void _mi_arenas_page_free(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -804,14 +804,14 @@ void _mi_arena_page_free(mi_page_t* page) {
   if (page->memid.memkind == MI_MEM_ARENA) {
     mi_bitmap_clear(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index);
   }
-  _mi_arena_free(page, mi_memid_size(page->memid), page->memid);
+  _mi_arenas_free(page, mi_memid_size(page->memid), page->memid);
 }

 /* -----------------------------------------------------------
   Arena abandon
 ----------------------------------------------------------- */

-void _mi_arena_page_abandon(mi_page_t* page) {
+void _mi_arenas_page_abandon(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -855,7 +855,7 @@ void _mi_arena_page_abandon(mi_page_t* page) {
   _mi_page_unown(page);
 }

-bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page) {
+bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -871,13 +871,13 @@ bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page) {
     mi_subproc_t* subproc = _mi_subproc();
     mi_subproc_stat_counter_increase( subproc, pages_reabandon_full, 1);
     mi_subproc_stat_adjust_decrease( subproc, pages_abandoned, 1, true /* on alloc */); // adjust as we are not abandoning fresh
-    _mi_arena_page_abandon(page);
+    _mi_arenas_page_abandon(page);
     return true;
   }
 }

 // called from `mi_free` if trying to unabandon an abandoned page
-void _mi_arena_page_unabandon(mi_page_t* page) {
+void _mi_arenas_page_unabandon(mi_page_t* page) {
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
@@ -917,12 +917,6 @@ void _mi_arena_page_unabandon(mi_page_t* page) {
   }
 }

-void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap) {
-  MI_UNUSED(heap);
-  // TODO: implement this
-  return;
-}

 /* -----------------------------------------------------------
   Arena free
@@ -930,7 +924,7 @@ void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap) {
 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices);
 static void mi_arenas_try_purge(bool force, bool visit_all);

-void _mi_arena_free(void* p, size_t size, mi_memid_t memid) {
+void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
   if (p==NULL) return;
   if (size==0) return;
@@ -1001,7 +995,7 @@ bool mi_arena_contains(mi_arena_id_t arena_id, const void* p) {
 }

 // Is a pointer inside any of our arenas?
-bool _mi_arena_contains(const void* p) {
+bool _mi_arenas_contain(const void* p) {
   mi_subproc_t* subproc = _mi_subproc();
   const size_t max_arena = mi_arenas_get_count(subproc);
   for (size_t i = 0; i < max_arena; i++) {
@@ -1043,7 +1037,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {

 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
-void _mi_arena_unsafe_destroy_all(void) {
+void _mi_arenas_unsafe_destroy_all(void) {
   mi_arenas_unsafe_destroy(_mi_subproc());
   _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
 }
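
Taken together, the renamed entry points trace the life of an arena page. A simplified sketch of the flow (illustrative only; the real call sites, with their guard conditions and statistics, are in the hunks above and in src/page.c and src/free.c below):

  // allocate a page for a heap (finds an abandoned page or a fresh slice)
  mi_page_t* page = _mi_arenas_page_alloc(heap, block_size, 0 /* no special alignment */);
  // the owning heap goes away while blocks are still live: publish the page
  _mi_arenas_page_abandon(page);
  // a later free wants to adopt or release it: first take it back
  _mi_arenas_page_unabandon(page);
  // once all blocks are free, release the page (ends in _mi_arenas_free)
  _mi_arenas_page_free(page);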

src/free.c

@@ -210,9 +210,9 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
   if (mi_page_all_free(page))
   {
     // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
-    _mi_arena_page_unabandon(page);
+    _mi_arenas_page_unabandon(page);
     // we can free the page directly
-    _mi_arena_page_free(page);
+    _mi_arenas_page_free(page);
     return;
   }
@@ -240,7 +240,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
     {
       if (mi_page_queue(tagheap, page->block_size)->first != NULL) { // don't reclaim for an block_size we don't use
         // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
-        _mi_arena_page_unabandon(page);
+        _mi_arenas_page_unabandon(page);
         _mi_heap_page_reclaim(tagheap, page);
         mi_heap_stat_counter_increase(tagheap, pages_reclaim_on_free, 1);
         return;
@@ -252,7 +252,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
   // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
   if (!mi_page_is_used_at_frac(page,8) &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
       !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
-      _mi_arena_page_try_reabandon_to_mapped(page))
+      _mi_arenas_page_try_reabandon_to_mapped(page))
   {
     return;
   }
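
The numbered comments above are the whole decision ladder of mi_free_try_collect_mt after the rename. Condensed (a paraphrase; the `tagheap` lookup and the usage-fraction guards shown above are elided):

  if (mi_page_all_free(page)) {                    // 1. free the page outright
    _mi_arenas_page_unabandon(page);
    _mi_arenas_page_free(page);
  }
  else if (/* a heap with the same tag uses this block size */) {
    _mi_arenas_page_unabandon(page);               // 2. reclaim into that heap
    _mi_heap_page_reclaim(tagheap, page);
  }
  else if (page->memid.memkind == MI_MEM_ARENA) {
    _mi_arenas_page_try_reabandon_to_mapped(page); // 3. make it findable again
  }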

src/heap.c

@@ -211,7 +211,7 @@ mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena
   else {
     // heaps associated wita a specific arena are allocated in that arena
     // note: takes up at least one slice which is quite wasteful...
-    heap = (mi_heap_t*)_mi_arena_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, &memid);
+    heap = (mi_heap_t*)_mi_arenas_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, &memid);
   }
   if (heap==NULL) {
     _mi_error_message(ENOMEM, "unable to allocate heap meta-data\n");
@@ -341,7 +341,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   page->next = NULL;
   page->prev = NULL;
   mi_page_set_heap(page, NULL);
-  _mi_arena_page_free(page);
+  _mi_arenas_page_free(page);

   return true; // keep going
 }

src/init.c

@@ -713,7 +713,7 @@ void mi_cdecl _mi_process_done(void) {
   if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
     mi_collect(true /* force */);
     _mi_heap_unsafe_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
-    _mi_arena_unsafe_destroy_all();
+    _mi_arenas_unsafe_destroy_all();
   }

   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {

src/page.c

@@ -252,7 +252,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   else {
     mi_page_queue_remove(pq, page);
     mi_page_set_heap(page, NULL);
-    _mi_arena_page_abandon(page);
+    _mi_arenas_page_abandon(page);
   }
 }
@@ -264,7 +264,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
   mi_assert_internal(mi_heap_contains_queue(heap, pq));
   mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_MAX_OBJ_SIZE || block_size == pq->block_size);
 #endif
-  mi_page_t* page = _mi_arena_page_alloc(heap, block_size, page_alignment);
+  mi_page_t* page = _mi_arenas_page_alloc(heap, block_size, page_alignment);
   if (page == NULL) {
     // out-of-memory
     return NULL;
@@ -357,7 +357,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) {
   // and free it
   mi_page_set_heap(page,NULL);
-  _mi_arena_page_free(page);
+  _mi_arenas_page_free(page);
 }

 #define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE