diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index a5fd12b8..6685b5a7 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -89,7 +89,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // Main tuning parameters for segment and page sizes
 // Sizes for 64-bit, divide by two for 32-bit
 #define MI_SEGMENT_SLICE_SHIFT            (13 + MI_INTPTR_SHIFT)        // 64kb
-#define MI_SEGMENT_SHIFT                  (10 + MI_SEGMENT_SLICE_SHIFT) // 64mb
+#define MI_SEGMENT_SHIFT                  ( 8 + MI_SEGMENT_SLICE_SHIFT) // 16mb
 
 #define MI_SMALL_PAGE_SHIFT               (MI_SEGMENT_SLICE_SHIFT)      // 64kb
 #define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)    // 512kb
diff --git a/src/alloc.c b/src/alloc.c
index de8bd3d2..6370b19d 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -195,6 +195,7 @@ static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_pag
   mi_tld_t* tld = heap->tld;
   const size_t bsize = mi_page_block_size(page);
   if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    mi_assert_internal(false);
     _mi_stat_decrease(&tld->stats.large, bsize);
   }
   else {
diff --git a/src/arena.c b/src/arena.c
index 104a7e83..4fb1364a 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1,3 +1,4 @@
+
 /* ----------------------------------------------------------------------------
 Copyright (c) 2019, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
@@ -36,7 +37,8 @@ of 256MiB in practice.
 
 // os.c
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld);
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats);
+// void _mi_os_free(void* p, size_t size, mi_stats_t* stats);
+void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
 void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
 void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
@@ -178,8 +180,11 @@ static void* mi_cache_pop(int numa_node, size_t size, size_t alignment, bool* co
       if (*commit && !committed) {
         bool commit_zero;
         _mi_os_commit(p, MI_SEGMENT_SIZE, &commit_zero, tld->stats);
+        *commit = true;
       }
-      *commit = committed;
+      else {
+        *commit = committed;
+      }
       return p;
     }
   }
@@ -207,7 +212,7 @@ static void mi_cache_purge(mi_os_tld_t* tld) {
       // expired, try to claim it
       if (mi_atomic_cas_ptr_weak(&slot->p, MI_SLOT_IN_USE, p)) {
         // claimed! test again
-        if (!slot->is_committed && !slot->is_large && now >= slot->expire) {
+        if (slot->is_committed && !slot->is_large && now >= slot->expire) {
          _mi_os_decommit(p, MI_SEGMENT_SIZE, tld->stats);
          slot->is_committed = false;
        }
@@ -239,15 +244,20 @@ static bool mi_cache_push(void* start, size_t size, size_t memid, bool is_commit
     if (p == NULL) { // free slot
       if (mi_atomic_cas_ptr_weak(&slot->p, MI_SLOT_IN_USE, NULL)) {
         // claimed!
-        long delay = mi_option_get(mi_option_arena_reset_delay);
-        if (delay == 0 && !is_large) {
-          _mi_os_decommit(start, size, tld->stats);
-          is_committed = false;
-        }
-        slot->expire = (is_committed ? 0 : _mi_clock_now() + delay);
+        slot->expire = 0;
         slot->is_committed = is_committed;
         slot->memid = memid;
         slot->is_large = is_large;
+        if (is_committed) {
+          long delay = mi_option_get(mi_option_arena_reset_delay);
+          if (delay == 0 && !is_large) {
+            _mi_os_decommit(start, size, tld->stats);
+            slot->is_committed = false;
+          }
+          else {
+            slot->expire = _mi_clock_now() + delay;
+          }
+        }
         mi_atomic_write_ptr(&slot->p, start); // and make it available;
         return true;
       }
@@ -369,7 +379,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, bool
   if (memid == MI_MEMID_OS) {
     // was a direct OS allocation, pass through
     if (!mi_cache_push(p, size, memid, is_committed, is_large, tld)) {
-      _mi_os_free(p, size, tld->stats);
+      _mi_os_free_ex(p, size, is_committed, tld->stats);
     }
   }
   else {
diff --git a/src/options.c b/src/options.c
index 489f07b3..1130e2e3 100644
--- a/src/options.c
+++ b/src/options.c
@@ -71,7 +71,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
   { 0, UNINIT, MI_OPTION(segment_reset) },       // reset segment memory on free (needs eager commit)
   { 0, UNINIT, MI_OPTION(eager_commit_delay) },  // the first N segments per thread are not eagerly committed
-  { 0, UNINIT, MI_OPTION(allow_decommit) },      // decommit pages when not eager committed
+  { 1, UNINIT, MI_OPTION(allow_decommit) },      // decommit pages when not eager committed
   { 100, UNINIT, MI_OPTION(reset_delay) },       // reset delay in milli-seconds
   { 1000, UNINIT, MI_OPTION(arena_reset_delay) },// reset delay in milli-seconds
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },      // 0 = use available numa nodes, otherwise use at most N nodes.
diff --git a/src/page.c b/src/page.c
index 44f32a73..13706100 100644
--- a/src/page.c
+++ b/src/page.c
@@ -378,9 +378,22 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   // no more aligned blocks in here
   mi_page_set_has_aligned(page, false);
 
+  mi_heap_t* heap = mi_page_heap(page);
+  const size_t bsize = mi_page_block_size(page);
+  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
+    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+      _mi_stat_decrease(&heap->tld->stats.large, bsize);
+    }
+    else {
+      // not strictly necessary as we never get here for a huge page
+      mi_assert_internal(false);
+      _mi_stat_decrease(&heap->tld->stats.huge, bsize);
+    }
+  }
+
   // remove from the page list
   // (no need to do _mi_heap_delayed_free first as all blocks are already free)
-  mi_segments_tld_t* segments_tld = &mi_page_heap(page)->tld->segments;
+  mi_segments_tld_t* segments_tld = &heap->tld->segments;
   mi_page_queue_remove(pq, page);
 
   // and free it
@@ -769,11 +782,11 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
       mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
     }
     if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
-      _mi_stat_increase(&heap->tld->stats.large, block_size);
+      _mi_stat_increase(&heap->tld->stats.large, bsize);
       _mi_stat_counter_increase(&heap->tld->stats.large_count, 1);
     }
     else {
-      _mi_stat_increase(&heap->tld->stats.huge, block_size);
+      _mi_stat_increase(&heap->tld->stats.huge, bsize);
       _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
     }
   }
diff --git a/src/segment.c b/src/segment.c
index 5ce4d7ba..b3a33d60 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -7,6 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc.h"
 #include "mimalloc-internal.h"
 #include "mimalloc-atomic.h"
+#include "bitmap.inc.c"   // mi_bsr
 
 #include <string.h>  // memset
 #include <stdio.h>
@@ -49,23 +50,7 @@ static uint8_t* mi_slice_start(const mi_slice_t* slice) {
   Bins
 ----------------------------------------------------------- */
 
 // Use bit scan forward to quickly find the first zero bit if it is available
-#if defined(_MSC_VER)
-#include <intrin.h>
-static inline size_t mi_bsr(uintptr_t x) {
-  if (x==0) return 8*MI_INTPTR_SIZE;
-  DWORD idx;
-  #if (MI_INTPTR_SIZE==8)
-  _BitScanReverse64(&idx, x);
-  #else
-  _BitScanReverse(&idx, x);
-  #endif
-  return idx;
-}
-#elif defined(__GNUC__) || defined(__clang__)
-static inline size_t mi_bsr(uintptr_t x) {
-  return (x==0 ? 8*MI_INTPTR_SIZE : (8*MI_INTPTR_SIZE - 1) - __builtin_clzl(x));
-}
-#else
+#if !defined(MI_HAVE_BITSCAN)
 #error "define bsr for your platform"
 #endif
@@ -410,7 +395,7 @@ static void mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, s
   }
   else if (!commit && (segment->commit_mask & mask) != 0) {
     mi_assert_internal((void*)start != (void*)segment);
-    _mi_os_decommit(start, full_size,stats);
+    _mi_os_decommit(start, full_size, stats);
     segment->commit_mask &= ~mask;
   }
   // increase expiration of reusing part of the delayed decommit
@@ -902,8 +887,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
     slice = slice + slice->slice_count;
   }
 
-  // force delayed decommits instead?
-  mi_segment_delayed_decommit(segment, false, tld->stats);
+  // perform delayed decommits instead
+  mi_segment_delayed_decommit(segment, mi_option_is_enabled(mi_option_abandoned_page_reset), tld->stats);
 
   // all pages in the segment are abandoned; add it to the abandoned list
   _mi_stat_increase(&tld->stats->segments_abandoned, 1);
@@ -1018,7 +1003,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
     if (segment->used == 0) { // due to page_clear
       mi_segment_free(segment,false,tld);
     }
-    
+
     // go on
     segment = next;
   }
@@ -1185,6 +1170,5 @@ static void* mi_segment_range_of(const void* p, size_t* size) {
   mi_reset_delayed(tld);
   mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
   return page;
->>>>>>> dev
 }
 */
diff --git a/src/static.c b/src/static.c
index bcfaa119..b3c71e02 100644
--- a/src/static.c
+++ b/src/static.c
@@ -16,12 +16,8 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "stats.c"
 #include "random.c"
 #include "os.c"
-<<<<<<< HEAD
 //#include "memory.c"
-=======
 #include "arena.c"
-#include "memory.c"
->>>>>>> dev
 #include "segment.c"
 #include "page.c"
 #include "heap.c"
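
For reference, a minimal standalone sketch of the delayed-decommit policy that the revised mi_cache_push follows: committed, non-large memory is decommitted immediately when the arena_reset_delay option is 0, and otherwise scheduled via slot->expire so that mi_cache_purge can decommit it later. The types and helpers below (cache_slot_t, cache_slot_init, clock_now_ms, decommit_now) are simplified stand-ins for illustration only, not mimalloc's internal API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct cache_slot_s {
  int64_t expire;        // 0 = no delayed decommit pending
  bool    is_committed;  // memory is currently committed
  bool    is_large;      // large/huge OS pages are never decommitted
} cache_slot_t;

static int64_t clock_now_ms(void) { return 0; }               // stand-in for a millisecond clock
static void    decommit_now(void)  { puts("decommit now"); }  // stand-in for an OS decommit call

// Mirrors the new control flow: only committed, non-large memory is ever
// decommitted, either immediately (delay == 0) or later, once `expire` passes.
static void cache_slot_init(cache_slot_t* slot, bool is_committed, bool is_large, long delay) {
  slot->expire       = 0;
  slot->is_committed = is_committed;
  slot->is_large     = is_large;
  if (is_committed) {
    if (delay == 0 && !is_large) {
      decommit_now();
      slot->is_committed = false;
    }
    else {
      slot->expire = clock_now_ms() + delay;  // purged later by the cache purge pass
    }
  }
}

int main(void) {
  cache_slot_t slot;
  cache_slot_init(&slot, true, false, 0);     // decommits immediately
  cache_slot_init(&slot, true, false, 1000);  // schedules decommit after the 1000ms delay
  printf("expire=%lld committed=%d\n", (long long)slot.expire, (int)slot.is_committed);
  return 0;
}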