diff --git a/include/mimalloc.h b/include/mimalloc.h index d00e5364..51984d8c 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -154,8 +154,8 @@ mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; -mi_decl_export void mi_thread_stats(int64_t *allocated, int64_t *committed, - int64_t *reserved) mi_attr_noexcept; +mi_decl_export void mi_thread_stats(int64_t *allocated, + int64_t *committed) mi_attr_noexcept; mi_decl_export void mi_process_init(void) mi_attr_noexcept; mi_decl_export void mi_thread_init(void) mi_attr_noexcept; diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h index f35f2af0..c6051524 100644 --- a/include/mimalloc/types.h +++ b/include/mimalloc/types.h @@ -45,8 +45,9 @@ terms of the MIT license. A copy of the license can be found in the file // #define MI_TRACK_ASAN 1 // #define MI_TRACK_ETW 1 -// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). -#define MI_STAT 1 +// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed +// statistics (but costs some performance). +#define MI_STAT 1 // Define MI_SECURE to enable security mitigations // #define MI_SECURE 1 // guard page around metadata @@ -512,6 +512,8 @@ struct mi_heap_s { size_t page_retired_max; // largest retired index into the `pages` array. 
mi_heap_t* next; // list of heaps per thread bool no_reclaim; // `true` if this heap should not reclaim abandoned pages + uint64_t allocated; + uint64_t committed; }; diff --git a/src/alloc.c b/src/alloc.c index 84b01f1e..fa66a6b3 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -35,6 +35,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); // pop from the free list page->used++; + heap->allocated += mi_page_usable_block_size(page); page->free = mi_block_next(page, block); mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); #if MI_DEBUG>3 @@ -349,17 +350,17 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { #if (MI_STAT < 2) MI_UNUSED(block); #endif - mi_heap_t* const heap = mi_heap_get_default(); + mi_heap_t *const heap = mi_heap_get_default(); const size_t bsize = mi_page_usable_block_size(page); - #if (MI_STAT>1) +#if (MI_STAT > 1) const size_t usize = mi_page_usable_size_of(page, block); mi_heap_stat_decrease(heap, malloc, usize); - #endif +#endif if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { mi_heap_stat_decrease(heap, normal, bsize); - #if (MI_STAT > 1) +#if (MI_STAT > 1) mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); - #endif +#endif } else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { mi_heap_stat_decrease(heap, large, bsize); @@ -484,6 +485,8 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block mi_block_set_next(page, block, page->local_free); page->local_free = block; page->used--; + mi_heap_t *heap = mi_heap_get_default(); + heap->allocated -= mi_page_usable_block_size(page); if mi_unlikely(mi_page_all_free(page)) { _mi_page_retire(page); } @@ -579,6 +582,7 @@ void mi_free(void* p) mi_attr_noexcept mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster then mi_usable_size as we already know the page and that p is unaligned mi_block_set_next(page, block, 
page->local_free); page->local_free = block; + mi_heap_get_default()->allocated -= mi_page_usable_block_size(page); if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) _mi_page_retire(page); } diff --git a/src/heap.c b/src/heap.c index 58520ddf..bcedd362 100644 --- a/src/heap.c +++ b/src/heap.c @@ -314,6 +314,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ } mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks... #endif + heap->allocated -= page->used * mi_page_usable_block_size(page); /// pretend it is all free now mi_assert_internal(mi_page_thread_free(page) == NULL); diff --git a/src/init.c b/src/init.c index 15d18371..3cbd77f2 100644 --- a/src/init.c +++ b/src/init.c @@ -109,20 +109,22 @@ const mi_page_t _mi_page_empty = { // -------------------------------------------------------- mi_decl_cache_align const mi_heap_t _mi_heap_empty = { - NULL, - MI_SMALL_PAGES_EMPTY, - MI_PAGE_QUEUES_EMPTY, - MI_ATOMIC_VAR_INIT(NULL), - 0, // tid - 0, // cookie - 0, // arena id - { 0, 0 }, // keys - { {0}, {0}, 0, true }, // random - 0, // page count - MI_BIN_FULL, 0, // page retired min/max - NULL, // next - false -}; + NULL, + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY, + MI_ATOMIC_VAR_INIT(NULL), + 0, // tid + 0, // cookie + 0, // arena id + {0, 0}, // keys + {{0}, {0}, 0, true}, // random + 0, // page count + MI_BIN_FULL, + 0, // page retired min/max + NULL, // next + false, + 0, + 0}; #define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats))) #define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os))) @@ -166,21 +168,23 @@ static mi_tld_t tld_main = { { MI_STATS_NULL } // stats }; -mi_heap_t _mi_heap_main = { - &tld_main, - MI_SMALL_PAGES_EMPTY, - MI_PAGE_QUEUES_EMPTY, - MI_ATOMIC_VAR_INIT(NULL), - 0, // thread id - 0, // initial cookie - 0, // arena id - { 0, 0 }, // the 
key of the main heap can be fixed (unlike page keys that need to be secure!) - { {0x846ca68b}, {0}, 0, true }, // random - 0, // page count - MI_BIN_FULL, 0, // page retired min/max - NULL, // next heap - false // can reclaim -}; +mi_heap_t _mi_heap_main = {&tld_main, + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY, + MI_ATOMIC_VAR_INIT(NULL), + 0, // thread id + 0, // initial cookie + 0, // arena id + {0, 0}, // the key of the main heap can be fixed + // (unlike page keys that need to be secure!) + {{0x846ca68b}, {0}, 0, true}, // random + 0, // page count + MI_BIN_FULL, + 0, // page retired min/max + NULL, // next heap + false, // can reclaim + 0, + 0}; bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. diff --git a/src/page.c b/src/page.c index 8ac0a715..f205b252 100644 --- a/src/page.c +++ b/src/page.c @@ -213,6 +213,10 @@ static void _mi_page_thread_free_collect(mi_page_t* page) // update counts now page->used -= count; + mi_heap_t *heap = mi_page_heap(page); + if (heap) { + heap->allocated -= (count * mi_page_usable_block_size(page)); + } } void _mi_page_free_collect(mi_page_t* page, bool force) { @@ -648,6 +652,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) } // enable the new free list page->capacity += (uint16_t)extend; + heap->committed += (extend * bsize); mi_stat_increase(tld->stats.page_committed, extend * bsize); mi_assert_expensive(mi_page_is_valid_init(page)); } diff --git a/src/segment.c b/src/segment.c index 28685f21..c09ca885 100644 --- a/src/segment.c +++ b/src/segment.c @@ -895,6 +895,8 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi segment->kind = (required == 0 ? 
MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE); // _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1)); + mi_heap_t *heap = mi_heap_get_default(); + heap->committed += mi_segment_info_size(segment); _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment)); // set up guard pages @@ -963,6 +965,10 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t mi_assert_internal(page_count == 2); // first page is allocated by the segment itself // stats + if (segment->thread_id != 0) { + mi_heap_t *heap = mi_heap_get_default(); + heap->committed -= mi_segment_info_size(segment); + } _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); // return it to the OS @@ -984,6 +990,10 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld mi_assert_internal(segment->used > 0); size_t inuse = page->capacity * mi_page_block_size(page); + if (segment->thread_id != 0) { + mi_heap_t *heap = mi_heap_get_default(); + heap->committed -= inuse; + } _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); @@ -1557,6 +1567,7 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block mi_block_set_next(page, block, page->free); page->free = block; page->used--; + heap->allocated -= mi_page_usable_block_size(page); page->is_zero = false; mi_assert(page->used == 0); mi_tld_t* tld = heap->tld; diff --git a/src/stats.c b/src/stats.c index 78b1cb7e..29ae359d 100644 --- a/src/stats.c +++ b/src/stats.c @@ -413,17 +413,10 @@ void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { _mi_stats_print(mi_stats_get_default(), out, arg); } -void mi_thread_stats(int64_t *allocated, int64_t *committed, - int64_t *reserved) mi_attr_noexcept { - mi_stats_t *stat = mi_stats_get_default(); - if (stat != NULL) { - *reserved = stat->reserved.current; - *committed = stat->page_committed.current; - *allocated = 1; - *allocated += 
stat->normal.current; - *allocated += stat->large.current; - *allocated += stat->huge.current; - } +void mi_thread_stats(int64_t *allocated, int64_t *committed) mi_attr_noexcept { + mi_heap_t *heap = mi_heap_get_default(); + *committed = (int64_t)heap->committed; + *allocated = (int64_t)heap->allocated; } // ----------------------------------------------------------------