diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 01b7076b..ec106047 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -598,7 +598,7 @@ static inline bool mi_page_mostly_used(const mi_page_t* page) {
 
 static inline bool mi_page_is_abandoned(const mi_page_t* page) {
   // note: the xheap field of an abandoned heap is set to the subproc (for fast reclaim-on-free)
-  return (mi_page_thread_id(page) == 0);
+  return (mi_atomic_load_acquire(&page->xthread_id) == 0);
 }
 
 static inline bool mi_page_is_huge(const mi_page_t* page) {
diff --git a/src/arena.c b/src/arena.c
index 66f83d4f..a713a110 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -646,11 +646,12 @@ bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
   // we only call this when on free (and thus there is still an object alive in the page)
   mi_memid_t memid = page->memid;
   if (!_mi_arena_memid_is_suitable(memid, heap->arena_id)) return false;  // don't reclaim between exclusive and non-exclusive arena's
+  if (mi_atomic_load_acquire(&page->xheap) != (uintptr_t)heap->tld->subproc) return false;
 
   if mi_likely(memid.memkind == MI_MEM_ARENA) {
     size_t slice_index;
     mi_arena_t* arena = mi_page_arena(page, &slice_index, NULL);
-    if (arena->subproc != heap->tld->subproc) return false;  // only reclaim within the same subprocess
+    //if (arena->subproc != heap->tld->subproc) return false;  // only reclaim within the same subprocess
 
     // don't reclaim more from a `free` call than half the current segments
     // this is to prevent a pure free-ing thread to start owning too many segments
@@ -665,6 +666,11 @@ bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
       mi_assert_internal(!mi_page_is_abandoned(page));
       return true;
     }
+    else {
+      if (mi_page_is_abandoned(page)) {
+        mi_assert(false);
+      }
+    }
   }
   else {
     // A page in OS or external memory
@@ -1089,15 +1095,6 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
 
 
 
 
-/* -----------------------------------------------------------
-  Abandoned pages
------------------------------------------------------------ */
-
-void mi_arena_page_abandon(mi_page_t* page) {
-  mi_assert_internal(mi_page_is_abandoned(page));
-  if (mi_page_is_full(page)) {}
-}
-
 
 /* -----------------------------------------------------------
diff --git a/src/free.c b/src/free.c
index c7d92292..f0ce8c22 100644
--- a/src/free.c
+++ b/src/free.c
@@ -225,24 +225,29 @@ static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block
 
 static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
   // first see if the page was abandoned and if we can reclaim it into our thread
-  if (mi_page_is_abandoned(page) &&
-      (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 ||
-       mi_page_is_singleton(page)           // only one block, and we are free-ing it
-      ) &&
-      mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty)  // and we did not already exit this thread (without this check, a fresh heap will be initalized (issue #944))
-  {
-    // the page is abandoned, try to reclaim it into our heap
-    if (_mi_arena_try_reclaim(mi_heap_get_default(), page)) {  // TODO: avoid putting it in the full free queue
-      mi_assert_internal(_mi_thread_id() == mi_page_thread_id(page));
-      // mi_assert_internal(mi_heap_get_default()->tld->subproc == page->subproc);
-      mi_free(block);  // recursively free as now it will be a local free in our heap
-      return;
-    }
-    else {
-      mi_assert_internal(!mi_page_is_singleton(page));  // we should have succeeded on singleton pages
+  if (mi_page_is_abandoned(page)) {
+    if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 ||
+        mi_page_is_singleton(page)) {       // only one block, and we are free-ing it
+      if (mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty)  // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
+      {
+        // the page is abandoned, try to reclaim it into our heap
+        if (_mi_arena_try_reclaim(mi_heap_get_default(), page)) {  // TODO: avoid putting it in the full free queue
+          mi_assert_internal(_mi_thread_id() == mi_page_thread_id(page));
+          // mi_assert_internal(mi_heap_get_default()->tld->subproc == page->subproc);
+          mi_free(block);  // recursively free as now it will be a local free in our heap
+          return;
+        }
+        else {
+          if (mi_page_is_abandoned(page)) {
+            mi_assert(false);
+          }
+          mi_assert_internal(!mi_page_is_singleton(page));  // we should have succeeded on singleton pages
+        }
+      }
     }
   }
+
   // The padding check may access the non-thread-owned page for the key values.
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
diff --git a/src/init.c b/src/init.c
index 16130af7..2378b3c8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -396,8 +396,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
   tld->heap_backing = bheap;
   tld->heaps = NULL;
   tld->subproc = &mi_subproc_default;
-  tld->tseq = 0;
-  mi_atomic_add_acq_rel(&mi_tcount, 1);
+  tld->tseq = 0; // mi_atomic_add_acq_rel(&mi_tcount, 1);
   tld->os.stats = &tld->stats;
 }
 
diff --git a/src/os.c b/src/os.c
index 931abc7f..0aa0a681 100644
--- a/src/os.c
+++ b/src/os.c
@@ -219,11 +219,12 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
   if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
   size = _mi_align_up(size, _mi_os_page_size());
 
-  const bool use_overalloc = (alignment > mi_os_mem_config.alloc_granularity && alignment <= size/8);
+  // try a direct allocation if the alignment is below the default, or if larger than a 1/64 fraction of the size (to avoid waste).
+  const bool try_direct_alloc = (alignment <= mi_os_mem_config.alloc_granularity || alignment > size/64);
 
   // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
   void* p = NULL;
-  if (!use_overalloc) {
+  if (try_direct_alloc) {
     p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
   }
 
@@ -234,7 +235,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
   else {
     // if not aligned, free it, overallocate, and unmap around it
     #if !MI_TRACK_ASAN
-    if (!use_overalloc) {
+    if (try_direct_alloc) {
       _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
     }
     #endif
diff --git a/test/test-stress.c b/test/test-stress.c
index 76dfe877..9a89744e 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -36,7 +36,7 @@ static int ITER = 400;
 static int THREADS = 8;
 static int SCALE = 25;
 static int ITER = 20;
-#elif defined(xMI_GUARDED)  // with debug guard pages reduce parameters to stay within the azure pipeline limits
+#elif defined(MI_GUARDED)   // with debug guard pages reduce parameters to stay within the azure pipeline limits
 static int THREADS = 8;
 static int SCALE = 10;
 static int ITER = 10;
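
The core of this patch is that page abandonment is now detected with an acquire load of the page's owning thread id (zero meaning abandoned) rather than a plain read, so a free-ing thread that observes the abandonment also observes every write made to the page before it was abandoned. The sketch below is a minimal standalone illustration of that pattern, not mimalloc code: the names (page_t, page_abandon, page_try_reclaim) are hypothetical, and the compare-exchange stands in for whatever claiming step _mi_arena_try_reclaim performs internally.

// Sketch: thread id 0 marks an abandoned page; a release store publishes the
// abandonment, an acquire load detects it, and a compare-exchange claims the
// page so at most one racing free-ing thread reclaims it.
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct page_s {                 // hypothetical stand-in for mi_page_t
  _Atomic(uintptr_t) xthread_id;        // 0 == abandoned (no owning thread)
} page_t;

static bool page_is_abandoned(page_t* page) {
  // acquire: a reader that sees 0 also sees all writes made before abandonment
  return (atomic_load_explicit(&page->xthread_id, memory_order_acquire) == 0);
}

static void page_abandon(page_t* page) {
  // release: publish all prior writes to whichever thread reclaims later
  atomic_store_explicit(&page->xthread_id, 0, memory_order_release);
}

static bool page_try_reclaim(page_t* page, uintptr_t tid) {
  uintptr_t expected = 0;               // succeed only if still abandoned
  return atomic_compare_exchange_strong_explicit(
      &page->xthread_id, &expected, tid,
      memory_order_acq_rel, memory_order_acquire);
}

int main(void) {
  page_t page;
  atomic_init(&page.xthread_id, 1);     // initially owned by thread 1
  page_abandon(&page);
  if (page_is_abandoned(&page) && page_try_reclaim(&page, 2)) {
    printf("page reclaimed by thread 2\n");
  }
  return 0;
}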