diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 535fe1fb..e43d4420 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -597,19 +597,6 @@ static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
   return page->heap;
 }
 
-static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
-  if (heap != NULL) {
-    page->heap = heap;
-    page->heap_tag = heap->tag;
-    mi_atomic_store_release(&page->xthread_id, heap->tld->thread_id);
-  }
-  else {
-    page->heap = NULL;
-    mi_atomic_store_release(&page->xthread_id,0);
-  }
-}
-
-
 // Thread free flag helpers
 static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
   return (mi_block_t*)(tf & ~1);
@@ -700,11 +687,11 @@ static inline bool mi_page_is_used_at_frac(const mi_page_t* page, uint16_t n) {
 
 static inline bool mi_page_is_abandoned(const mi_page_t* page) {
   // note: the xheap field of an abandoned heap is set to the subproc (for fast reclaim-on-free)
-  return (mi_page_xthread_id(page) <= MI_PAGE_IS_ABANDONED_MAPPED);
+  return (mi_page_thread_id(page) == 0);
 }
 
 static inline bool mi_page_is_abandoned_mapped(const mi_page_t* page) {
-  return (mi_page_xthread_id(page) == MI_PAGE_IS_ABANDONED_MAPPED);
+  return ((mi_page_xthread_id(page) & ~(MI_PAGE_IS_ABANDONED_MAPPED - 1)) == MI_PAGE_IS_ABANDONED_MAPPED);
 }
 
 static inline void mi_page_set_abandoned_mapped(mi_page_t* page) {
@@ -801,6 +788,21 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
   mi_page_flags_set(page, has_aligned, MI_PAGE_HAS_ALIGNED);
 }
 
+static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
+  mi_assert_internal(!mi_page_is_in_full(page));
+  const mi_page_flags_t flags = mi_page_flags(page);
+  const mi_threadid_t tid = (heap != NULL ? heap->tld->thread_id : 0) | flags;  // for MI_PAGE_HAS_ALIGNED
+  if (heap != NULL) {
+    page->heap = heap;
+    page->heap_tag = heap->tag;
+  }
+  else {
+    page->heap = NULL;
+  }
+  mi_atomic_store_release(&page->xthread_id, tid);
+}
+
+
 /* -------------------------------------------------------------------
   Guarded objects
 ------------------------------------------------------------------- */
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index 7e968e10..2a1702ff 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -248,7 +248,7 @@ typedef struct mi_block_s {
 // `is_abandoned_mapped` is true if the page is abandoned (thread_id==0) and it is in an arena so can be quickly found for reuse ("mapped")
 #define MI_PAGE_IN_FULL_QUEUE        MI_ZU(0x01)
 #define MI_PAGE_HAS_ALIGNED          MI_ZU(0x02)
-#define MI_PAGE_IS_ABANDONED_MAPPED  MI_ZU(0x04)
+#define MI_PAGE_IS_ABANDONED_MAPPED  MI_ZU(0x04)  // must be highest flag (see `internal.h:mi_page_is_abandoned_mapped`)
 #define MI_PAGE_FLAG_MASK            MI_ZU(0x07)
 
 typedef size_t mi_page_flags_t;
diff --git a/src/arena.c b/src/arena.c
index bcde865e..e111a417 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1833,9 +1833,15 @@ mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t*
   // find accessed size
   size_t asize;
   // scan the commit map for the highest entry
+  // scan the commit map for the highest entry
   size_t idx;
-  if (mi_bitmap_bsr(arena->slices_committed, &idx)) {
-    asize = (idx + 1)* MI_ARENA_SLICE_SIZE;
-  }
+  //if (mi_bitmap_bsr(arena->slices_committed, &idx)) {
+  //  asize = (idx + 1)* MI_ARENA_SLICE_SIZE;
+  //}
+  if (mi_bitmap_bsr(arena->pages, &idx)) {
+    mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, idx);
+    const size_t page_slice_count = page->memid.mem.arena.slice_count;
+    asize = mi_size_of_slices(idx + page_slice_count);
+  }
   else {
     asize = mi_arena_info_slices(arena) * MI_ARENA_SLICE_SIZE;