Enforce mi_stat wrapper for performance and consistency

The statistics wrapper macros mi_stat_{increase,decrease,counter_increase}
are defined as no-ops when MI_STAT equals 0 (no runtime statistics).
However, even when MI_STAT is set to 0, the internal functions such as
_mi_stat_increase were still being invoked directly. This patch enforces
the use of the mi_stat wrapper macros (guarded by MI_STAT) so that the
statistics calls compile away entirely when MI_STAT is 0.
This commit is contained in:
Jim Huang 2019-07-21 15:12:13 +08:00
parent e2b4127a16
commit e5f37eb806
6 changed files with 42 additions and 30 deletions

View file

@ -341,7 +341,7 @@ void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
#if (MI_STAT) #if MI_STAT>0
#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) #define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) #define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) #define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)

View file

@ -334,7 +334,7 @@ void mi_thread_init(void) mi_attr_noexcept
// don't further initialize for the main thread // don't further initialize for the main thread
if (_mi_is_main_thread()) return; if (_mi_is_main_thread()) return;
_mi_stat_increase(&mi_get_default_heap()->tld->stats.threads, 1); mi_stat_increase(mi_get_default_heap()->tld->stats.threads, 1);
// set hooks so our mi_thread_done() will be called // set hooks so our mi_thread_done() will be called
#if defined(_WIN32) && defined(MI_SHARED_LIB) #if defined(_WIN32) && defined(MI_SHARED_LIB)
@ -354,7 +354,7 @@ void mi_thread_done(void) mi_attr_noexcept {
// stats // stats
mi_heap_t* heap = mi_get_default_heap(); mi_heap_t* heap = mi_get_default_heap();
if (!_mi_is_main_thread() && mi_heap_is_initialized(heap)) { if (!_mi_is_main_thread() && mi_heap_is_initialized(heap)) {
_mi_stat_decrease(&heap->tld->stats.threads, 1); mi_stat_decrease(heap->tld->stats.threads, 1);
} }
// abandon the thread local heap // abandon the thread local heap

View file

@ -170,6 +170,9 @@ void _mi_os_init() {
static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats) static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
{ {
#if MI_STAT==0
UNUSED(stats);
#endif
if (addr == NULL || size == 0) return true; if (addr == NULL || size == 0) return true;
bool err = false; bool err = false;
#if defined(_WIN32) #if defined(_WIN32)
@ -179,8 +182,8 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
#else #else
err = (munmap(addr, size) == -1); err = (munmap(addr, size) == -1);
#endif #endif
_mi_stat_decrease(&stats->committed, size); // TODO: what if never committed? mi_stat_decrease(stats->committed, size); // TODO: what if never committed?
_mi_stat_decrease(&stats->reserved, size); mi_stat_decrease(stats->reserved, size);
if (err) { if (err) {
#pragma warning(suppress:4996) #pragma warning(suppress:4996)
_mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size); _mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size);
@ -303,6 +306,9 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
// Primitive allocation from the OS. // Primitive allocation from the OS.
// Note: the `alignment` is just a hint and the returned pointer is not guaranteed to be aligned. // Note: the `alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_stats_t* stats) { static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_stats_t* stats) {
#if MI_STAT==0
UNUSED(stats);
#endif
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
if (size == 0) return NULL; if (size == 0) return NULL;
@ -317,10 +323,10 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
p = mi_unix_mmap(size, try_alignment, protect_flags); p = mi_unix_mmap(size, try_alignment, protect_flags);
#endif #endif
_mi_stat_increase(&stats->mmap_calls, 1); mi_stat_increase(stats->mmap_calls, 1);
if (p != NULL) { if (p != NULL) {
_mi_stat_increase(&stats->reserved, size); mi_stat_increase(stats->reserved, size);
if (commit) _mi_stat_increase(&stats->committed, size); if (commit) mi_stat_increase(stats->committed, size);
} }
return p; return p;
} }
@ -453,17 +459,20 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
// Usuelly commit is aligned liberal, while decommit is aligned conservative. // Usuelly commit is aligned liberal, while decommit is aligned conservative.
// (but not for the reset version where we want commit to be conservative as well) // (but not for the reset version where we want commit to be conservative as well)
static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, mi_stats_t* stats) { static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, mi_stats_t* stats) {
#if MI_STAT==0
UNUSED(stats);
#endif
// page align in the range, commit liberally, decommit conservative // page align in the range, commit liberally, decommit conservative
size_t csize; size_t csize;
void* start = mi_os_page_align_areax(conservative, addr, size, &csize); void* start = mi_os_page_align_areax(conservative, addr, size, &csize);
if (csize == 0) return true; if (csize == 0) return true;
int err = 0; int err = 0;
if (commit) { if (commit) {
_mi_stat_increase(&stats->committed, csize); mi_stat_increase(stats->committed, csize);
_mi_stat_increase(&stats->commit_calls, 1); mi_stat_increase(stats->commit_calls, 1);
} }
else { else {
_mi_stat_decrease(&stats->committed, csize); mi_stat_decrease(stats->committed, csize);
} }
#if defined(_WIN32) #if defined(_WIN32)
@ -505,12 +514,15 @@ bool _mi_os_commit_unreset(void* addr, size_t size, mi_stats_t* stats) {
// pages and reduce swapping while keeping the memory committed. // pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset. // We page align to a conservative area inside the range to reset.
static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) { static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
#if MI_STAT==0
UNUSED(stats);
#endif
// page align conservatively within the range // page align conservatively within the range
size_t csize; size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize); void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return true; if (csize == 0) return true;
if (reset) _mi_stat_increase(&stats->reset, csize); if (reset) mi_stat_increase(stats->reset, csize);
else _mi_stat_decrease(&stats->reset, csize); else mi_stat_decrease(stats->reset, csize);
if (!reset) return true; // nothing to do on unreset! if (!reset) return true; // nothing to do on unreset!
#if MI_DEBUG>1 #if MI_DEBUG>1
@ -607,8 +619,6 @@ bool _mi_os_unprotect(void* addr, size_t size) {
return mi_os_protectx(addr, size, false); return mi_os_protectx(addr, size, false);
} }
bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) { bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
// page align conservatively within the range // page align conservatively within the range
mi_assert_internal(oldsize > newsize && p != NULL); mi_assert_internal(oldsize > newsize && p != NULL);

View file

@ -472,7 +472,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
} }
// enable the new free list // enable the new free list
page->capacity += (uint16_t)extend; page->capacity += (uint16_t)extend;
_mi_stat_increase(&stats->page_committed, extend * page->block_size); mi_stat_increase(stats->page_committed, extend * page->block_size);
} }
/* ----------------------------------------------------------- /* -----------------------------------------------------------
@ -501,7 +501,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
size_t page_size; size_t page_size;
_mi_page_start(_mi_page_segment(page), page, &page_size); _mi_page_start(_mi_page_segment(page), page, &page_size);
_mi_stat_increase(&stats->pages_extended, 1); mi_stat_increase(stats->pages_extended, 1);
// calculate the extend count // calculate the extend count
size_t extend = page->reserved - page->capacity; size_t extend = page->reserved - page->capacity;
@ -607,7 +607,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
page = next; page = next;
} // for each page } // for each page
_mi_stat_counter_increase(&heap->tld->stats.searches,count); mi_stat_counter_increase(heap->tld->stats.searches,count);
if (page == NULL) { if (page == NULL) {
page = rpage; page = rpage;

View file

@ -216,8 +216,8 @@ reuse and avoid setting/clearing guard pages in secure mode.
------------------------------------------------------------------------------- */ ------------------------------------------------------------------------------- */
static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) { static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1); if (segment_size>=0) mi_stat_increase(tld->stats->segments,1);
else _mi_stat_decrease(&tld->stats->segments,1); else mi_stat_decrease(tld->stats->segments,1);
tld->count += (segment_size >= 0 ? 1 : -1); tld->count += (segment_size >= 0 ? 1 : -1);
if (tld->count > tld->peak_count) tld->peak_count = tld->count; if (tld->count > tld->peak_count) tld->peak_count = tld->count;
tld->current_size += segment_size; tld->current_size += segment_size;
@ -391,12 +391,11 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
segment->pages[i].is_reset = false; segment->pages[i].is_reset = false;
segment->pages[i].is_committed = commit; segment->pages[i].is_committed = commit;
} }
_mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size); mi_stat_increase(tld->stats->page_committed, segment->segment_info_size);
//fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment); //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
return segment; return segment;
} }
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
UNUSED(force); UNUSED(force);
//fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment); //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment);
@ -407,7 +406,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment)); mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment));
mi_assert(segment->next == NULL); mi_assert(segment->next == NULL);
mi_assert(segment->prev == NULL); mi_assert(segment->prev == NULL);
_mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size); mi_stat_decrease(tld->stats->page_committed, segment->segment_info_size);
segment->thread_id = 0; segment->thread_id = 0;
// update reset memory statistics // update reset memory statistics
@ -477,9 +476,11 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
mi_assert_internal(page->segment_in_use); mi_assert_internal(page->segment_in_use);
mi_assert_internal(mi_page_all_free(page)); mi_assert_internal(mi_page_all_free(page));
mi_assert_internal(page->is_committed); mi_assert_internal(page->is_committed);
#if MI_STAT>0
size_t inuse = page->capacity * page->block_size; size_t inuse = page->capacity * page->block_size;
_mi_stat_decrease(&stats->page_committed, inuse); #endif
_mi_stat_decrease(&stats->pages, 1); mi_stat_decrease(stats->page_committed, inuse);
mi_stat_decrease(stats->pages, 1);
// reset the page memory to reduce memory pressure? // reset the page memory to reduce memory pressure?
if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) { if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
@ -553,7 +554,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
segment->abandoned_next = (mi_segment_t*)abandoned; segment->abandoned_next = (mi_segment_t*)abandoned;
} while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, segment->abandoned_next)); } while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, segment->abandoned_next));
mi_atomic_increment(&abandoned_count); mi_atomic_increment(&abandoned_count);
_mi_stat_increase(&tld->stats->segments_abandoned,1); mi_stat_increase(tld->stats->segments_abandoned,1);
mi_segments_track_size(-((long)segment->segment_size), tld); mi_segments_track_size(-((long)segment->segment_size), tld);
} }
@ -562,7 +563,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
mi_segment_t* segment = _mi_page_segment(page); mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(mi_segment_is_valid(segment)); mi_assert_expensive(mi_segment_is_valid(segment));
segment->abandoned++; segment->abandoned++;
_mi_stat_increase(&tld->stats->pages_abandoned, 1); mi_stat_increase(tld->stats->pages_abandoned, 1);
mi_assert_internal(segment->abandoned <= segment->used); mi_assert_internal(segment->abandoned <= segment->used);
if (segment->used == segment->abandoned) { if (segment->used == segment->abandoned) {
// all pages are abandoned, abandon the entire segment // all pages are abandoned, abandon the entire segment
@ -597,7 +598,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
mi_segments_track_size((long)segment->segment_size,tld); mi_segments_track_size((long)segment->segment_size,tld);
mi_assert_internal(segment->next == NULL && segment->prev == NULL); mi_assert_internal(segment->next == NULL && segment->prev == NULL);
mi_assert_expensive(mi_segment_is_valid(segment)); mi_assert_expensive(mi_segment_is_valid(segment));
_mi_stat_decrease(&tld->stats->segments_abandoned,1); mi_stat_decrease(tld->stats->segments_abandoned,1);
// add its abandoned pages to the current thread // add its abandoned pages to the current thread
mi_assert(segment->abandoned == segment->used); mi_assert(segment->abandoned == segment->used);
@ -606,7 +607,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
if (page->segment_in_use) { if (page->segment_in_use) {
segment->abandoned--; segment->abandoned--;
mi_assert(page->next == NULL); mi_assert(page->next == NULL);
_mi_stat_decrease(&tld->stats->pages_abandoned, 1); mi_stat_decrease(tld->stats->pages_abandoned, 1);
if (mi_page_all_free(page)) { if (mi_page_all_free(page)) {
// if everything free by now, free the page // if everything free by now, free the page
mi_segment_page_clear(segment,page,tld->stats); mi_segment_page_clear(segment,page,tld->stats);

View file

@ -28,6 +28,7 @@ void _mi_stats_done(mi_stats_t* stats) {
Statistics operations Statistics operations
----------------------------------------------------------- */ ----------------------------------------------------------- */
#if MI_STAT>0
static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
if (amount == 0) return; if (amount == 0) return;
bool in_main = ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main bool in_main = ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
@ -62,7 +63,6 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
mi_atomic_add( &stat->total, (int64_t)amount ); mi_atomic_add( &stat->total, (int64_t)amount );
} }
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) { void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
mi_stat_update(stat, (int64_t)amount); mi_stat_update(stat, (int64_t)amount);
} }
@ -70,6 +70,7 @@ void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
mi_stat_update(stat, -((int64_t)amount)); mi_stat_update(stat, -((int64_t)amount));
} }
#endif /* MI_STAT>0 */
// must be thread safe as it is called from stats_merge // must be thread safe as it is called from stats_merge
static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) { static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) {