From e5f37eb806c9904d4283ae82502a92fc686fc802 Mon Sep 17 00:00:00 2001
From: Jim Huang
Date: Sun, 21 Jul 2019 15:12:13 +0800
Subject: [PATCH] Enforce mi_stat wrapper for performance and consistency

The statistics wrapper macros mi_stat_{increase,decrease,counter_increase}
are defined as no-ops when MI_STAT equals 0 (no runtime statistics).
However, even when MI_STAT is 0, the internal functions such as
_mi_stat_increase were still being invoked directly in several places.
This patch enforces the use of the mi_stat wrapper macros and compiles
the _mi_stat_* implementations only when MI_STAT is enabled.
---
 include/mimalloc-types.h |  2 +-
 src/init.c               |  4 ++--
 src/os.c                 | 34 ++++++++++++++++++++++------------
 src/page.c               |  6 +++---
 src/segment.c            | 23 ++++++++++++-----------
 src/stats.c              |  3 ++-
 6 files changed, 42 insertions(+), 30 deletions(-)

diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index d591ff86..e236c09b 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -341,7 +341,7 @@ void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
 void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
 void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
 
-#if (MI_STAT)
+#if MI_STAT>0
 #define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
 #define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
 #define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
diff --git a/src/init.c b/src/init.c
index f55b7318..73b24234 100644
--- a/src/init.c
+++ b/src/init.c
@@ -334,7 +334,7 @@ void mi_thread_init(void) mi_attr_noexcept
   // don't further initialize for the main thread
   if (_mi_is_main_thread()) return;
 
-  _mi_stat_increase(&mi_get_default_heap()->tld->stats.threads, 1);
+  mi_stat_increase(mi_get_default_heap()->tld->stats.threads, 1);
 
   // set hooks so our mi_thread_done() will be called
   #if defined(_WIN32) && defined(MI_SHARED_LIB)
@@ -354,7 +354,7 @@ void mi_thread_done(void) mi_attr_noexcept {
   // stats
   mi_heap_t* heap = mi_get_default_heap();
   if (!_mi_is_main_thread() && mi_heap_is_initialized(heap)) {
-    _mi_stat_decrease(&heap->tld->stats.threads, 1);
+    mi_stat_decrease(heap->tld->stats.threads, 1);
   }
 
   // abandon the thread local heap
diff --git a/src/os.c b/src/os.c
index 9349019d..7b93d506 100644
--- a/src/os.c
+++ b/src/os.c
@@ -170,6 +170,9 @@ void _mi_os_init() {
 
 static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
 {
+#if MI_STAT==0
+  UNUSED(stats);
+#endif
   if (addr == NULL || size == 0) return true;
   bool err = false;
 #if defined(_WIN32)
@@ -179,8 +182,8 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
 #else
   err = (munmap(addr, size) == -1);
 #endif
-  _mi_stat_decrease(&stats->committed, size); // TODO: what if never committed?
-  _mi_stat_decrease(&stats->reserved, size);
+  mi_stat_decrease(stats->committed, size); // TODO: what if never committed?
+  mi_stat_decrease(stats->reserved, size);
   if (err) {
 #pragma warning(suppress:4996)
     _mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size);
@@ -303,6 +306,9 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
 
 // Primitive allocation from the OS.
 // Note: the `alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
 static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_stats_t* stats) {
+#if MI_STAT==0
+  UNUSED(stats);
+#endif
   mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
   if (size == 0) return NULL;
@@ -317,10 +323,10 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_
     int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
     p = mi_unix_mmap(size, try_alignment, protect_flags);
   #endif
-  _mi_stat_increase(&stats->mmap_calls, 1);
+  mi_stat_increase(stats->mmap_calls, 1);
   if (p != NULL) {
-    _mi_stat_increase(&stats->reserved, size);
-    if (commit) _mi_stat_increase(&stats->committed, size);
+    mi_stat_increase(stats->reserved, size);
+    if (commit) mi_stat_increase(stats->committed, size);
   }
   return p;
 }
@@ -453,17 +459,20 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
 // Usuelly commit is aligned liberal, while decommit is aligned conservative.
 // (but not for the reset version where we want commit to be conservative as well)
 static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, mi_stats_t* stats) {
+#if MI_STAT==0
+  UNUSED(stats);
+#endif
   // page align in the range, commit liberally, decommit conservative
   size_t csize;
   void* start = mi_os_page_align_areax(conservative, addr, size, &csize);
   if (csize == 0) return true;
   int err = 0;
   if (commit) {
-    _mi_stat_increase(&stats->committed, csize);
-    _mi_stat_increase(&stats->commit_calls, 1);
+    mi_stat_increase(stats->committed, csize);
+    mi_stat_increase(stats->commit_calls, 1);
   }
   else {
-    _mi_stat_decrease(&stats->committed, csize);
+    mi_stat_decrease(stats->committed, csize);
   }
 
   #if defined(_WIN32)
@@ -505,12 +514,15 @@ bool _mi_os_commit_unreset(void* addr, size_t size, mi_stats_t* stats) {
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
 static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
+#if MI_STAT==0
+  UNUSED(stats);
+#endif
   // page align conservatively within the range
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
   if (csize == 0) return true;
-  if (reset) _mi_stat_increase(&stats->reset, csize);
-  else _mi_stat_decrease(&stats->reset, csize);
+  if (reset) mi_stat_increase(stats->reset, csize);
+  else mi_stat_decrease(stats->reset, csize);
   if (!reset) return true; // nothing to do on unreset!

 #if MI_DEBUG>1
@@ -607,8 +619,6 @@ bool _mi_os_unprotect(void* addr, size_t size) {
   return mi_os_protectx(addr, size, false);
 }
 
-
-
 bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
   // page align conservatively within the range
   mi_assert_internal(oldsize > newsize && p != NULL);
diff --git a/src/page.c b/src/page.c
index 60b3fc09..02113bfc 100644
--- a/src/page.c
+++ b/src/page.c
@@ -472,7 +472,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
   }
   // enable the new free list
   page->capacity += (uint16_t)extend;
-  _mi_stat_increase(&stats->page_committed, extend * page->block_size);
+  mi_stat_increase(stats->page_committed, extend * page->block_size);
 }
 
 /* -----------------------------------------------------------
@@ -501,7 +501,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
 
   size_t page_size;
   _mi_page_start(_mi_page_segment(page), page, &page_size);
-  _mi_stat_increase(&stats->pages_extended, 1);
+  mi_stat_increase(stats->pages_extended, 1);
 
   // calculate the extend count
   size_t extend = page->reserved - page->capacity;
@@ -607,7 +607,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     page = next;
   } // for each page
 
-  _mi_stat_counter_increase(&heap->tld->stats.searches,count);
+  mi_stat_counter_increase(heap->tld->stats.searches,count);
 
   if (page == NULL) {
     page = rpage;
diff --git a/src/segment.c b/src/segment.c
index 7f7bedd7..26fcbd66 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -216,8 +216,8 @@ reuse and avoid setting/clearing guard pages in secure mode.
 ------------------------------------------------------------------------------- */
 
 static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
-  if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
-  else _mi_stat_decrease(&tld->stats->segments,1);
+  if (segment_size>=0) mi_stat_increase(tld->stats->segments,1);
+  else mi_stat_decrease(tld->stats->segments,1);
   tld->count += (segment_size >= 0 ? 1 : -1);
   if (tld->count > tld->peak_count) tld->peak_count = tld->count;
   tld->current_size += segment_size;
@@ -391,12 +391,11 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
     segment->pages[i].is_reset = false;
     segment->pages[i].is_committed = commit;
   }
-  _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
+  mi_stat_increase(tld->stats->page_committed, segment->segment_info_size);
   //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
   return segment;
 }
 
-
 static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
   UNUSED(force);
   //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment);
@@ -407,7 +406,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment));
   mi_assert(segment->next == NULL);
   mi_assert(segment->prev == NULL);
-  _mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);
+  mi_stat_decrease(tld->stats->page_committed, segment->segment_info_size);
   segment->thread_id = 0;
 
   // update reset memory statistics
@@ -477,9 +476,11 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
   mi_assert_internal(page->segment_in_use);
   mi_assert_internal(mi_page_all_free(page));
   mi_assert_internal(page->is_committed);
+#if MI_STAT>0
   size_t inuse = page->capacity * page->block_size;
-  _mi_stat_decrease(&stats->page_committed, inuse);
-  _mi_stat_decrease(&stats->pages, 1);
+#endif
+  mi_stat_decrease(stats->page_committed, inuse);
+  mi_stat_decrease(stats->pages, 1);
 
   // reset the page memory to reduce memory pressure?
   if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
@@ -553,7 +554,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
     segment->abandoned_next = (mi_segment_t*)abandoned;
   } while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, segment->abandoned_next));
   mi_atomic_increment(&abandoned_count);
-  _mi_stat_increase(&tld->stats->segments_abandoned,1);
+  mi_stat_increase(tld->stats->segments_abandoned,1);
   mi_segments_track_size(-((long)segment->segment_size), tld);
 }
 
@@ -562,7 +563,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
   mi_segment_t* segment = _mi_page_segment(page);
   mi_assert_expensive(mi_segment_is_valid(segment));
   segment->abandoned++;
-  _mi_stat_increase(&tld->stats->pages_abandoned, 1);
+  mi_stat_increase(tld->stats->pages_abandoned, 1);
   mi_assert_internal(segment->abandoned <= segment->used);
   if (segment->used == segment->abandoned) {
     // all pages are abandoned, abandon the entire segment
@@ -597,7 +598,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
     mi_segments_track_size((long)segment->segment_size,tld);
     mi_assert_internal(segment->next == NULL && segment->prev == NULL);
     mi_assert_expensive(mi_segment_is_valid(segment));
-    _mi_stat_decrease(&tld->stats->segments_abandoned,1);
+    mi_stat_decrease(tld->stats->segments_abandoned,1);
 
     // add its abandoned pages to the current thread
     mi_assert(segment->abandoned == segment->used);
@@ -606,7 +607,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
       if (page->segment_in_use) {
         segment->abandoned--;
         mi_assert(page->next == NULL);
-        _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+        mi_stat_decrease(tld->stats->pages_abandoned, 1);
         if (mi_page_all_free(page)) {
           // if everything free by now, free the page
           mi_segment_page_clear(segment,page,tld->stats);
diff --git a/src/stats.c b/src/stats.c
index 2b15bf9e..b024c858 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -28,6 +28,7 @@ void _mi_stats_done(mi_stats_t* stats) {
   Statistics operations
 ----------------------------------------------------------- */
 
+#if MI_STAT>0
 static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
   if (amount == 0) return;
   bool in_main = ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
@@ -62,7 +63,6 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
   mi_atomic_add( &stat->total, (int64_t)amount );
 }
 
-
 void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
   mi_stat_update(stat, (int64_t)amount);
 }
@@ -70,6 +70,7 @@ void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
 void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
   mi_stat_update(stat, -((int64_t)amount));
 }
+#endif /* MI_STAT>0 */
 
 // must be thread safe as it is called from stats_merge
 static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) {
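-- 
Note (illustrative, not part of the patch): below is a minimal, self-contained
sketch of the wrapper-macro pattern this change enforces. The MY_STAT and
my_stat_* names are hypothetical stand-ins rather than mimalloc's API, and the
#else expansion to ((void)0) is an assumption; it only demonstrates how a
statistics call compiles away entirely when statistics are disabled, which is
the behavior the commit message relies on.

  #include <stddef.h>
  #include <stdio.h>

  #define MY_STAT 0   /* set to 1 to compile statistics in, 0 to compile them out */

  typedef struct { long total; } my_stat_count_t;

  #if MY_STAT>0
  /* real implementation, only compiled when statistics are enabled */
  static void _my_stat_increase(my_stat_count_t* stat, size_t amount) {
    stat->total += (long)amount;
  }
  #define my_stat_increase(stat,amount)  _my_stat_increase(&(stat), amount)
  #else
  /* no-op: the call site expands to nothing, so no function call is emitted */
  #define my_stat_increase(stat,amount)  ((void)0)
  #endif

  int main(void) {
    my_stat_count_t mmap_calls = { 0 };
    my_stat_increase(mmap_calls, 1);   /* with MY_STAT==0 this line is a no-op */
    printf("mmap_calls.total = %ld\n", mmap_calls.total);
    return 0;
  }

Because only the enabled branch of the macro adds the address-of operator,
call sites such as mi_stat_increase(stats->reserved, size) in the diff read
the same whichever way the flag is set.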