diff --git a/CMakeLists.txt b/CMakeLists.txt
index e18e0ea3..3c47671d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -663,6 +663,7 @@ endif()
 install(FILES include/mimalloc.h DESTINATION ${mi_install_incdir})
 install(FILES include/mimalloc-override.h DESTINATION ${mi_install_incdir})
 install(FILES include/mimalloc-new-delete.h DESTINATION ${mi_install_incdir})
+install(FILES include/mimalloc-stats.h DESTINATION ${mi_install_incdir})
 install(FILES cmake/mimalloc-config.cmake DESTINATION ${mi_install_cmakedir})
 install(FILES cmake/mimalloc-config-version.cmake DESTINATION ${mi_install_cmakedir})
diff --git a/ide/vs2022/mimalloc-lib.vcxproj b/ide/vs2022/mimalloc-lib.vcxproj
index 035adf8d..b4bf013e 100644
--- a/ide/vs2022/mimalloc-lib.vcxproj
+++ b/ide/vs2022/mimalloc-lib.vcxproj
@@ -485,6 +485,7 @@
+    <ClInclude Include="..\..\include\mimalloc-stats.h" />
diff --git a/ide/vs2022/mimalloc-lib.vcxproj.filters b/ide/vs2022/mimalloc-lib.vcxproj.filters
index 83a177fe..6825f113 100644
--- a/ide/vs2022/mimalloc-lib.vcxproj.filters
+++ b/ide/vs2022/mimalloc-lib.vcxproj.filters
@@ -93,6 +93,9 @@
       <Filter>Headers</Filter>
+    <ClInclude Include="..\..\include\mimalloc-stats.h">
+      <Filter>Headers</Filter>
+    </ClInclude>
diff --git a/include/mimalloc-stats.h b/include/mimalloc-stats.h
new file mode 100644
index 00000000..7c1ed770
--- /dev/null
+++ b/include/mimalloc-stats.h
@@ -0,0 +1,102 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_STATS_H
+#define MIMALLOC_STATS_H
+
+#include <mimalloc.h>
+#include <stdint.h>
+
+#define MI_STAT_VERSION  1   // increased on every backward-incompatible change
+
+// count allocations over time
+typedef struct mi_stat_count_s {
+  int64_t total;     // total allocated
+  int64_t peak;      // peak allocation
+  int64_t current;   // current allocation
+} mi_stat_count_t;
+
+// counters only increase
+typedef struct mi_stat_counter_s {
+  int64_t total;     // total count
+} mi_stat_counter_t;
+
+#define MI_STAT_FIELDS() \
+  MI_STAT_COUNT(pages)                   /* count of mimalloc pages */ \
+  MI_STAT_COUNT(reserved)                /* reserved memory bytes */ \
+  MI_STAT_COUNT(committed)               /* committed bytes */ \
+  MI_STAT_COUNT(reset)                   /* reset bytes */ \
+  MI_STAT_COUNT(purged)                  /* purged bytes */ \
+  MI_STAT_COUNT(page_committed)          /* committed memory inside pages */ \
+  MI_STAT_COUNT(pages_abandoned)         /* abandoned pages count */ \
+  MI_STAT_COUNT(threads)                 /* number of threads */ \
+  MI_STAT_COUNT(malloc_normal)           /* allocated bytes <= MI_LARGE_OBJ_SIZE_MAX */ \
+  MI_STAT_COUNT(malloc_huge)             /* allocated bytes in huge pages */ \
+  MI_STAT_COUNT(malloc_requested)        /* malloc requested bytes */ \
+  \
+  MI_STAT_COUNTER(mmap_calls) \
+  MI_STAT_COUNTER(commit_calls) \
+  MI_STAT_COUNTER(reset_calls) \
+  MI_STAT_COUNTER(purge_calls) \
+  MI_STAT_COUNTER(arena_count)           /* number of memory arenas */ \
+  MI_STAT_COUNTER(malloc_normal_count)   /* number of blocks <= MI_LARGE_OBJ_SIZE_MAX */ \
+  MI_STAT_COUNTER(malloc_huge_count)     /* number of huge blocks */ \
+  MI_STAT_COUNTER(malloc_guarded_count)  /* number of allocations with guard pages */ \
+  \
+  /* internal statistics */ \
+  MI_STAT_COUNTER(arena_rollback_count) \
+  MI_STAT_COUNTER(pages_extended)        /* number of page extensions */ \
+  MI_STAT_COUNTER(pages_retire)          /* number of pages that are retired */ \
+  MI_STAT_COUNTER(page_searches)         /* searches for a fresh page */ \
+  /* only on v1 and v2 */ \
+  MI_STAT_COUNT(segments) \
+  MI_STAT_COUNT(segments_abandoned) \
+  MI_STAT_COUNT(segments_cache) \
+  MI_STAT_COUNT(_segments_reserved) \
+  /* only on v3 */ \
+  MI_STAT_COUNTER(pages_reclaim_on_alloc) \
+  MI_STAT_COUNTER(pages_reclaim_on_free) \
+  MI_STAT_COUNTER(pages_reabandon_full) \
+  MI_STAT_COUNTER(pages_unabandon_busy_wait) \
+
+
+// Define the statistics structure
+#define MI_BIN_HUGE  (73U)   // see types.h
+#define MI_STAT_COUNT(stat)    mi_stat_count_t stat;
+#define MI_STAT_COUNTER(stat)  mi_stat_counter_t stat;
+
+typedef struct mi_stats_s
+{
+  int version;
+
+  MI_STAT_FIELDS()
+
+  // future extension
+  mi_stat_count_t   _stat_reserved[4];
+  mi_stat_counter_t _stat_counter_reserved[4];
+
+  // size segregated statistics
+  mi_stat_count_t   malloc_bins[MI_BIN_HUGE+1];   // allocation per size bin
+  mi_stat_count_t   page_bins[MI_BIN_HUGE+1];     // pages allocated per size bin
+} mi_stats_t;
+
+#undef MI_STAT_COUNT
+#undef MI_STAT_COUNTER
+
+// Exported definitions
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+mi_decl_export void  mi_stats_get( size_t stats_size, mi_stats_t* stats ) mi_attr_noexcept;
+mi_decl_export char* mi_stats_get_json( size_t buf_size, char* buf ) mi_attr_noexcept;   // use mi_free to free the result if the input buf == NULL
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MIMALLOC_STATS_H
\ No newline at end of file
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 6fce36cc..d6c7d91c 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -315,7 +315,7 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
 #define MI_INIT64(x)  MI_INIT32(x),MI_INIT32(x)
 #define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
 #define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
-
+#define MI_INIT74(x)  MI_INIT64(x),MI_INIT8(x),x(),x()
 #include <string.h>
 // initialize a local variable to zero; use memset as compilers optimize constant sized memset's
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index cb846fb7..90d66191 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -19,6 +19,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // --------------------------------------------------------------------------
+#include <mimalloc-stats.h>
 #include <stddef.h>   // ptrdiff_t
 #include <stdint.h>   // uintptr_t, uint16_t, etc
 #include <errno.h>    // error codes
@@ -447,9 +448,114 @@ struct mi_heap_s {
 
 // ------------------------------------------------------
-// Statistics
+// Sub processes do not reclaim or visit segments
+// from other sub processes. These are essentially the
+// static variables of a process.
 // ------------------------------------------------------
 
+#define MI_MAX_ARENAS (160)   // Limited for now (and takes up .bss).. but arenas scale up exponentially (see `mi_arena_reserve`)
+                              // 160 arenas is enough for ~2 TiB memory
+
+typedef struct mi_subproc_s {
+  _Atomic(size_t)       arena_count;            // current count of arenas
+  _Atomic(mi_arena_t*)  arenas[MI_MAX_ARENAS];  // arenas of this sub-process
+  mi_lock_t             arena_reserve_lock;     // lock to ensure arenas get reserved one at a time
+  _Atomic(int64_t)      purge_expire;           // expiration is set if any arenas can be purged
+
+  _Atomic(size_t)       abandoned_count[MI_BIN_COUNT];  // total count of abandoned pages for this sub-process
+  mi_page_t*            os_abandoned_pages;       // list of pages that OS allocated and not in an arena (only used if `mi_option_visit_abandoned` is on)
+  mi_lock_t             os_abandoned_pages_lock;  // lock for the os abandoned pages list (this lock protects list operations)
+
+  mi_memid_t            memid;                  // provenance of this memory block (meta or OS)
+  mi_stats_t            stats;                  // sub-process statistics (tld stats are merged in on thread termination)
+} mi_subproc_t;
+
+
+
+// ------------------------------------------------------
+// Thread Local data
+// ------------------------------------------------------
+
+// Milliseconds as an `int64_t` to avoid overflows
+typedef int64_t  mi_msecs_t;
+
+// Thread local data
+struct mi_tld_s {
+  mi_threadid_t         thread_id;          // thread id of this thread
+  size_t                thread_seq;         // thread sequence id (linear count of created threads)
+  mi_subproc_t*         subproc;            // sub-process this thread belongs to.
+  mi_heap_t*            heap_backing;       // backing heap of this thread (cannot be deleted)
+  mi_heap_t*            heaps;              // list of heaps in this thread (so we can abandon all when the thread terminates)
+  unsigned long long    heartbeat;          // monotonic heartbeat count
+  bool                  recurse;            // true if deferred was called; used to prevent infinite recursion.
+  bool                  is_in_threadpool;   // true if this thread is part of a threadpool (and can run arbitrary tasks)
+  mi_stats_t            stats;              // statistics
+  mi_memid_t            memid;              // provenance of the tld memory itself (meta or OS)
+};
+
+
+/* -----------------------------------------------------------
+  Error codes passed to `_mi_fatal_error`
+  All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
+  For portability define undefined error codes using common Unix codes:
+  <https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
+----------------------------------------------------------- */
+
+#ifndef EAGAIN         // double free
+#define EAGAIN (11)
+#endif
+#ifndef ENOMEM         // out of memory
+#define ENOMEM (12)
+#endif
+#ifndef EFAULT         // corrupted free-list or meta-data
+#define EFAULT (14)
+#endif
+#ifndef EINVAL         // trying to free an invalid pointer
+#define EINVAL (22)
+#endif
+#ifndef EOVERFLOW      // count*size overflow
+#define EOVERFLOW (75)
+#endif
+
+
+// ------------------------------------------------------
+// Debug
+// ------------------------------------------------------
+
+#if !defined(MI_DEBUG_UNINIT)
+#define MI_DEBUG_UNINIT     (0xD0)
+#endif
+#if !defined(MI_DEBUG_FREED)
+#define MI_DEBUG_FREED      (0xDF)
+#endif
+#if !defined(MI_DEBUG_PADDING)
+#define MI_DEBUG_PADDING    (0xDE)
+#endif
+
+#if (MI_DEBUG)
+// use our own assertion to print without memory allocation
+void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
+#define mi_assert(expr)     ((expr) ? 
(void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) +#else +#define mi_assert(x) +#endif + +#if (MI_DEBUG>1) +#define mi_assert_internal mi_assert +#else +#define mi_assert_internal(x) +#endif + +#if (MI_DEBUG>2) +#define mi_assert_expensive mi_assert +#else +#define mi_assert_expensive(x) +#endif + + +// ------------------------------------------------------ +// Statistics +// ------------------------------------------------------ #ifndef MI_STAT #if (MI_DEBUG>0) #define MI_STAT 2 @@ -458,50 +564,6 @@ struct mi_heap_s { #endif #endif -typedef struct mi_stat_count_s { - int64_t total; - int64_t peak; - int64_t current; -} mi_stat_count_t; - -typedef struct mi_stat_counter_s { - int64_t total; -} mi_stat_counter_t; - -typedef struct mi_stats_s { - mi_stat_count_t pages; - mi_stat_count_t reserved; - mi_stat_count_t committed; - mi_stat_count_t reset; - mi_stat_count_t purged; - mi_stat_count_t page_committed; - mi_stat_count_t pages_abandoned; - mi_stat_count_t threads; - mi_stat_count_t normal; - mi_stat_count_t huge; - mi_stat_count_t giant; - mi_stat_count_t malloc; - mi_stat_counter_t pages_extended; - mi_stat_counter_t pages_reclaim_on_alloc; - mi_stat_counter_t pages_reclaim_on_free; - mi_stat_counter_t pages_reabandon_full; - mi_stat_counter_t pages_unabandon_busy_wait; - mi_stat_counter_t mmap_calls; - mi_stat_counter_t commit_calls; - mi_stat_counter_t reset_calls; - mi_stat_counter_t purge_calls; - mi_stat_counter_t arena_purges; - mi_stat_counter_t page_no_retire; - mi_stat_counter_t searches; - mi_stat_counter_t normal_count; - mi_stat_counter_t huge_count; - mi_stat_counter_t arena_count; - mi_stat_counter_t guarded_alloc_count; -#if MI_STAT>1 - mi_stat_count_t normal_bins[MI_BIN_COUNT]; -#endif -} mi_stats_t; - // add to stat keeping track of the peak void __mi_stat_increase(mi_stat_count_t* stat, size_t amount); @@ -559,91 +621,4 @@ void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount); #define mi_debug_heap_stat_increase(heap,stat,amount) mi_debug_stat_increase( (heap)->tld->stats.stat, amount) #define mi_debug_heap_stat_decrease(heap,stat,amount) mi_debug_stat_decrease( (heap)->tld->stats.stat, amount) - -// ------------------------------------------------------ -// Sub processes use separate arena's and no heaps/pages/blocks -// are shared between sub processes. -// The subprocess structure contains essentially all static variables (except per subprocess :-)) -// -// Each thread should belong to one sub-process only -// ------------------------------------------------------ - -#define MI_MAX_ARENAS (160) // Limited for now (and takes up .bss).. 
but arena's scale up exponentially (see `mi_arena_reserve`) - // 160 arenas is enough for ~2 TiB memory - -typedef struct mi_subproc_s { - _Atomic(size_t) arena_count; // current count of arena's - _Atomic(mi_arena_t*) arenas[MI_MAX_ARENAS]; // arena's of this sub-process - mi_lock_t arena_reserve_lock; // lock to ensure arena's get reserved one at a time - _Atomic(int64_t) purge_expire; // expiration is set if any arenas can be purged - - _Atomic(size_t) abandoned_count[MI_BIN_COUNT]; // total count of abandoned pages for this sub-process - mi_page_t* os_abandoned_pages; // list of pages that OS allocated and not in an arena (only used if `mi_option_visit_abandoned` is on) - mi_lock_t os_abandoned_pages_lock; // lock for the os abandoned pages list (this lock protects list operations) - - mi_memid_t memid; // provenance of this memory block (meta or OS) - mi_stats_t stats; // sub-process statistics (tld stats are merged in on thread termination) -} mi_subproc_t; - - -// ------------------------------------------------------ -// Thread Local data -// ------------------------------------------------------ - -// Milliseconds as in `int64_t` to avoid overflows -typedef int64_t mi_msecs_t; - -// Thread local data -struct mi_tld_s { - mi_threadid_t thread_id; // thread id of this thread - size_t thread_seq; // thread sequence id (linear count of created threads) - mi_subproc_t* subproc; // sub-process this thread belongs to. - mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) - mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) - unsigned long long heartbeat; // monotonic heartbeat count - bool recurse; // true if deferred was called; used to prevent infinite recursion. - bool is_in_threadpool; // true if this thread is part of a threadpool (and can run arbitrary tasks) - mi_stats_t stats; // statistics - mi_memid_t memid; // provenance of the tld memory itself (meta or OS) -}; - - -/* ----------------------------------------------------------- - Error codes passed to `_mi_fatal_error` - All are recoverable but EFAULT is a serious error and aborts by default in secure mode. - For portability define undefined error codes using common Unix codes: - ------------------------------------------------------------ */ - -#ifndef EAGAIN // double free -#define EAGAIN (11) -#endif -#ifndef ENOMEM // out of memory -#define ENOMEM (12) -#endif -#ifndef EFAULT // corrupted free-list or meta-data -#define EFAULT (14) -#endif -#ifndef EINVAL // trying to free an invalid pointer -#define EINVAL (22) -#endif -#ifndef EOVERFLOW // count*size overflow -#define EOVERFLOW (75) -#endif - -// ------------------------------------------------------ -// Debug -// ------------------------------------------------------ - -#ifndef MI_DEBUG_UNINIT -#define MI_DEBUG_UNINIT (0xD0) -#endif -#ifndef MI_DEBUG_FREED -#define MI_DEBUG_FREED (0xDF) -#endif -#ifndef MI_DEBUG_PADDING -#define MI_DEBUG_PADDING (0xDE) -#endif - - #endif // MI_TYPES_H diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index e5976565..937cbdb8 100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -193,7 +193,9 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0; if mi_likely(is_aligned) { - mi_debug_heap_stat_increase(heap, malloc, size); + #if MI_STAT>1 + mi_heap_stat_increase(heap, malloc_requested, size); + #endif void* p = (zero ? 
_mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen mi_assert_internal(p != NULL); mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); diff --git a/src/alloc.c b/src/alloc.c index 9cd44338..60571040 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -87,11 +87,11 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_ #if (MI_STAT>0) const size_t bsize = mi_page_usable_block_size(page); if (bsize <= MI_LARGE_MAX_OBJ_SIZE) { - mi_heap_stat_increase(heap, normal, bsize); - mi_heap_stat_counter_increase(heap, normal_count, 1); + mi_heap_stat_increase(heap, malloc_normal, bsize); + mi_heap_stat_counter_increase(heap, malloc_normal_count, 1); #if (MI_STAT>1) const size_t bin = _mi_bin(bsize); - mi_heap_stat_increase(heap, normal_bins[bin], 1); + mi_heap_stat_increase(heap, malloc_bins[bin], 1); #endif } #endif @@ -153,7 +153,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, #if MI_STAT>1 if (p != NULL) { if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } - mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p)); } #endif #if MI_DEBUG>3 @@ -195,7 +195,7 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z #if MI_STAT>1 if (p != NULL) { if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } - mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p)); } #endif #if MI_DEBUG>3 @@ -272,7 +272,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) // if p == NULL then behave as malloc. // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) - const size_t size = (p==NULL ? 0 : _mi_usable_size(p,"mi_realloc")); + const size_t size = (p==NULL ? 0 : _mi_usable_size(p,"mi_realloc")); if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) mi_assert_internal(p!=NULL); // todo: do not track as the usable size is still the same in the free; adjust potential padding? 
@@ -646,7 +646,7 @@ static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) { // give up to place it right in front of the guard page if the offset is too large for unalignment offset = MI_PAGE_MAX_OVERALLOC_ALIGN; } - void* p = (uint8_t*)block + offset; + void* p = (uint8_t*)block + offset; mi_track_align(block, p, offset, obj_size); mi_track_mem_defined(block, sizeof(mi_block_t)); return p; @@ -668,7 +668,7 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo void* const p = mi_block_ptr_set_guarded(block, obj_size); // stats - mi_track_malloc(p, size, zero); + mi_track_malloc(p, size, zero); if (p != NULL) { if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } #if MI_STAT>1 diff --git a/src/free.c b/src/free.c index 12bb8e26..d63a430e 100644 --- a/src/free.c +++ b/src/free.c @@ -174,7 +174,7 @@ void mi_free(void* p) mi_attr_noexcept if mi_unlikely(page==NULL) return; #endif mi_assert_internal(page!=NULL); - + const mi_threadid_t xtid = (_mi_prim_thread_id() ^ mi_page_xthread_id(page)); if mi_likely(xtid == 0) { // `tid == mi_page_thread_id(page) && mi_page_flags(page) == 0` // thread-local, aligned, and not a full page @@ -202,7 +202,7 @@ void mi_free(void* p) mi_attr_noexcept // Multi-threaded Free (`_mt`) // ------------------------------------------------------ static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free); -static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, size_t atmost) { +static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, size_t atmost) { mi_page_queue_t* const pq = mi_page_queue(heap,block_size); mi_assert_internal(pq!=NULL); return (pq->count <= atmost); @@ -239,7 +239,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* // 2. we can try to reclaim the page for ourselves // note: we only reclaim if the page originated from our heap (the heap field is preserved on abandonment) - // to avoid claiming arbitrary object sizes and limit indefinite expansion. This helps benchmarks like `larson` + // to avoid claiming arbitrary object sizes and limit indefinite expansion. 
This helps benchmarks like `larson` if (page->block_size <= MI_SMALL_MAX_OBJ_SIZE) // only for small sized blocks { const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free); @@ -260,7 +260,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* (reclaim_on_free == 1 && // OR if the reclaim across heaps is allowed !mi_page_is_used_at_frac(page, 8) && // and the page is not too full !heap->tld->is_in_threadpool && // and not part of a threadpool - _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable + _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) // and the memory is suitable ) { // first remove it from the abandoned pages in the arena -- this waits for any readers to finish @@ -540,17 +540,17 @@ void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { const size_t bsize = mi_page_usable_block_size(page); #if (MI_STAT>1) const size_t usize = mi_page_usable_size_of(page, block); - mi_heap_stat_decrease(heap, malloc, usize); + mi_heap_stat_decrease(heap, malloc_requested, usize); #endif if (bsize <= MI_LARGE_MAX_OBJ_SIZE) { - mi_heap_stat_decrease(heap, normal, bsize); + mi_heap_stat_decrease(heap, malloc_normal, bsize); #if (MI_STAT > 1) - mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); + mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1); #endif } else { const size_t bpsize = mi_page_block_size(page); // match stat in page.c:mi_huge_page_alloc - mi_heap_stat_decrease(heap, huge, bpsize); + mi_heap_stat_decrease(heap, malloc_huge, bpsize); } } #else diff --git a/src/heap.c b/src/heap.c index ac67698a..54c94179 100644 --- a/src/heap.c +++ b/src/heap.c @@ -128,6 +128,11 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) // collect arenas (this is program wide so don't force purges on abandonment of threads) //mi_atomic_storei64_release(&heap->tld->subproc->purge_expire, 1); _mi_arenas_collect(collect == MI_FORCE /* force purge? */, collect >= MI_FORCE /* visit all? */, heap->tld); + + // merge statistics + if (collect <= MI_FORCE) { + mi_stats_merge(); + } } void _mi_heap_collect_abandon(mi_heap_t* heap) { @@ -329,18 +334,18 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ // stats const size_t bsize = mi_page_block_size(page); if (bsize > MI_LARGE_MAX_OBJ_SIZE) { - mi_heap_stat_decrease(heap, huge, bsize); + mi_heap_stat_decrease(heap, malloc_huge, bsize); } #if (MI_STAT) _mi_page_free_collect(page, false); // update used count const size_t inuse = page->used; if (bsize <= MI_LARGE_MAX_OBJ_SIZE) { - mi_heap_stat_decrease(heap, normal, bsize * inuse); + mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse); #if (MI_STAT>1) - mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse); + mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse); #endif } - mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks... + mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks... 
#endif /// pretend it is all free now diff --git a/src/init.c b/src/init.c index 2c7bf109..b9f74713 100644 --- a/src/init.c +++ b/src/init.c @@ -68,24 +68,22 @@ const mi_page_t _mi_page_empty = { #define MI_STAT_COUNT_NULL() {0,0,0} // Empty statistics -#if MI_STAT>1 -#define MI_STAT_COUNT_END_NULL() , { MI_INIT64(MI_STAT_COUNT_NULL), MI_INIT8(MI_STAT_COUNT_NULL), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL() } -#else -#define MI_STAT_COUNT_END_NULL() -#endif - #define MI_STATS_NULL \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ { 0 }, { 0 }, { 0 }, { 0 }, \ { 0 }, { 0 }, { 0 }, { 0 }, \ + \ { 0 }, { 0 }, { 0 }, { 0 }, \ - { 0 }, { 0 }, { 0 }, { 0 } \ - MI_STAT_COUNT_END_NULL() + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + \ + { MI_INIT4(MI_STAT_COUNT_NULL) }, \ + { { 0 }, { 0 }, { 0 }, { 0 } }, \ + \ + { MI_INIT74(MI_STAT_COUNT_NULL) }, \ + { MI_INIT74(MI_STAT_COUNT_NULL) } // -------------------------------------------------------- // Statically allocate an empty heap as the initial @@ -112,7 +110,7 @@ static mi_decl_cache_align mi_tld_t tld_empty = { 0, // heartbeat false, // recurse false, // is_in_threadpool - { MI_STATS_NULL }, // stats + { MI_STAT_VERSION, MI_STATS_NULL }, // stats MI_MEMID_STATIC // memid }; @@ -149,7 +147,7 @@ static mi_decl_cache_align mi_tld_t tld_main = { 0, // heartbeat false, // recurse false, // is_in_threadpool - { MI_STATS_NULL }, // stats + { MI_STAT_VERSION, MI_STATS_NULL }, // stats MI_MEMID_STATIC // memid }; @@ -186,7 +184,7 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. -mi_stats_t _mi_stats_main = { MI_STATS_NULL }; +mi_stats_t _mi_stats_main = { MI_STAT_VERSION, MI_STATS_NULL }; #if MI_GUARDED mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) { diff --git a/src/os.c b/src/os.c index 8d8824f6..dcfe5ccf 100644 --- a/src/os.c +++ b/src/os.c @@ -9,9 +9,6 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc/atomic.h" #include "mimalloc/prim.h" -// always use main stats for OS calls -#define os_stats (&_mi_stats_main) - /* ----------------------------------------------------------- Initialization. 
----------------------------------------------------------- */ diff --git a/src/page-queue.c b/src/page-queue.c index 6e8b0853..4c30c970 100644 --- a/src/page-queue.c +++ b/src/page-queue.c @@ -148,10 +148,10 @@ bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq) { for (mi_page_t* page = pq->first; page != NULL; page = page->next) { mi_assert_internal(page->prev == prev); if (mi_page_is_in_full(page)) { - mi_assert_internal(_mi_wsize_from_size(pq->block_size) == MI_LARGE_MAX_OBJ_WSIZE + 2); + mi_assert_internal(_mi_wsize_from_size(pq->block_size) == MI_LARGE_MAX_OBJ_WSIZE + 2); } else if (mi_page_is_huge(page)) { - mi_assert_internal(_mi_wsize_from_size(pq->block_size) == MI_LARGE_MAX_OBJ_WSIZE + 1); + mi_assert_internal(_mi_wsize_from_size(pq->block_size) == MI_LARGE_MAX_OBJ_WSIZE + 1); } else { mi_assert_internal(mi_page_block_size(page) == pq->block_size); @@ -168,10 +168,15 @@ bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq) { } +static size_t mi_page_bin(const mi_page_t* page) { + const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); + mi_assert_internal(bin <= MI_BIN_FULL); + return bin; +} + static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { mi_assert_internal(heap!=NULL); - size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); - mi_assert_internal(bin <= MI_BIN_FULL); + const size_t bin = mi_page_bin(page); mi_page_queue_t* pq = &heap->pages[bin]; mi_assert_internal((mi_page_block_size(page) == pq->block_size) || (mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) || @@ -411,9 +416,9 @@ static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { mi_assert_internal(mi_heap_contains_queue(heap,pq)); mi_assert_internal(pq->block_size == append->block_size); - + if (append->first==NULL) return 0; - + // set append pages to new heap and count size_t count = 0; for (mi_page_t* page = append->first; page != NULL; page = page->next) { diff --git a/src/page.c b/src/page.c index f25d0d9b..284cbf35 100644 --- a/src/page.c +++ b/src/page.c @@ -217,7 +217,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { // Collect elements in the thread-free list starting at `head`. This is an optimized // version of `_mi_page_free_collect` to be used from `free.c:_mi_free_collect_mt` that avoids atomic access to `xthread_free`. -// +// // `head` must be in the `xthread_free` list. It will not collect `head` itself // so the `used` count is not fully updated in general. However, if the `head` is // the last remaining element, it will be collected and the used count will become `0` (so `mi_page_all_free` becomes true). 
@@ -324,6 +324,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size } mi_heap_stat_increase(heap, pages, 1); mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size); + mi_heap_stat_increase(heap, page_bins[mi_page_bin(page)], 1); mi_assert_expensive(_mi_page_is_valid(page)); return page; } @@ -394,6 +395,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) { // and free it mi_heap_t* heap = page->heap; + mi_heap_stat_decrease(heap, page_bins[mi_page_bin(page)], 1); mi_page_set_heap(page,NULL); _mi_arenas_page_free(page); _mi_arenas_collect(false, false, heap->tld); // allow purging @@ -427,7 +429,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue? if (pq->last==page && pq->first==page) { // the only page in the queue? mi_heap_t* heap = mi_page_heap(page); - mi_debug_heap_stat_counter_increase(heap, page_no_retire, 1); + mi_debug_heap_stat_counter_increase(heap, pages_retire, 1); page->retire_expire = (bsize <= MI_SMALL_MAX_OBJ_SIZE ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); mi_assert_internal(pq >= heap->pages); const size_t index = pq - heap->pages; @@ -784,7 +786,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m page = next; } // for each page - mi_debug_heap_stat_counter_increase(heap, searches, count); + mi_heap_stat_counter_increase(heap, page_searches, count); // set the page to the best candidate if (page_candidate != NULL) { @@ -897,8 +899,8 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a mi_assert_internal(mi_page_is_abandoned(page)); mi_page_set_heap(page, NULL); #endif - mi_heap_stat_increase(heap, huge, mi_page_block_size(page)); - mi_heap_stat_counter_increase(heap, huge_count, 1); + mi_heap_stat_increase(heap, malloc_huge, mi_page_block_size(page)); + mi_heap_stat_counter_increase(heap, malloc_huge_count, 1); } return page; } diff --git a/src/stats.c b/src/stats.c index c6a4eecf..52cff406 100644 --- a/src/stats.c +++ b/src/stats.c @@ -92,13 +92,13 @@ void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) { // must be thread safe as it is called from stats_merge -static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src) { +static void mi_stat_count_add(mi_stat_count_t* stat, const mi_stat_count_t* src) { if (stat==src) return; if (src->total!=0) { mi_atomic_addi64_relaxed(&stat->total, src->total); } if (src->current!=0) { mi_atomic_addi64_relaxed(&stat->current, src->current); } // peak scores do really not work across threads ... 
we use conservative max if (src->peak > stat->peak) { - mi_atomic_maxi64_relaxed(&stat->peak, src->peak); // or: mi_atomic_addi64_relaxed( &stat->peak, src->peak); + mi_atomic_maxi64_relaxed(&stat->peak, src->peak); // or: mi_atomic_addi64_relaxed( &stat->peak, src->peak); } } @@ -107,50 +107,29 @@ static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t if (src->total!=0) { mi_atomic_addi64_relaxed(&stat->total, src->total); } } +#define MI_STAT_COUNT(stat) mi_stat_count_add(&stats->stat, &src->stat); +#define MI_STAT_COUNTER(stat) mi_stat_counter_add(&stats->stat, &src->stat); + // must be thread safe as it is called from stats_merge static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) { if (stats==src) return; - mi_stat_add(&stats->pages, &src->pages); - mi_stat_add(&stats->reserved, &src->reserved); - mi_stat_add(&stats->committed, &src->committed); - mi_stat_add(&stats->reset, &src->reset); - mi_stat_add(&stats->purged, &src->purged); - mi_stat_add(&stats->page_committed, &src->page_committed); - mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned); - mi_stat_add(&stats->threads, &src->threads); + // copy all fields + MI_STAT_FIELDS() - mi_stat_add(&stats->malloc, &src->malloc); - mi_stat_add(&stats->normal, &src->normal); - mi_stat_add(&stats->huge, &src->huge); - mi_stat_add(&stats->giant, &src->giant); - - mi_stat_counter_add(&stats->pages_extended, &src->pages_extended); - mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls); - mi_stat_counter_add(&stats->commit_calls, &src->commit_calls); - mi_stat_counter_add(&stats->reset_calls, &src->reset_calls); - mi_stat_counter_add(&stats->purge_calls, &src->purge_calls); - - mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire); - mi_stat_counter_add(&stats->searches, &src->searches); - mi_stat_counter_add(&stats->normal_count, &src->normal_count); - mi_stat_counter_add(&stats->huge_count, &src->huge_count); - mi_stat_counter_add(&stats->guarded_alloc_count, &src->guarded_alloc_count); - - mi_stat_counter_add(&stats->pages_extended, &src->pages_extended); - mi_stat_counter_add(&stats->pages_reclaim_on_alloc, &src->pages_reclaim_on_alloc); - mi_stat_counter_add(&stats->pages_reclaim_on_free, &src->pages_reclaim_on_free); - mi_stat_counter_add(&stats->pages_reabandon_full, &src->pages_reabandon_full); - mi_stat_counter_add(&stats->pages_unabandon_busy_wait, &src->pages_unabandon_busy_wait); -#if MI_STAT>1 + #if MI_STAT>1 for (size_t i = 0; i <= MI_BIN_HUGE; i++) { - // if (src->normal_bins[i].total != 0 && src->normal_bins[i].current != 0) { - mi_stat_add(&stats->normal_bins[i], &src->normal_bins[i]); - //} + mi_stat_count_add(&stats->malloc_bins[i], &src->malloc_bins[i]); + } + #endif + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_stat_count_add(&stats->page_bins[i], &src->page_bins[i]); } -#endif } +#undef MI_STAT_COUNT +#undef MI_STAT_COUNTER + /* ----------------------------------------------------------- Display statistics ----------------------------------------------------------- */ @@ -333,45 +312,47 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) // and print using that mi_print_header(out,arg); #if MI_STAT>1 - mi_stats_print_bins(stats->normal_bins, MI_BIN_HUGE, "normal",out,arg); + mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "normal",out,arg); #endif #if MI_STAT - mi_stat_print(&stats->normal, "normal", (stats->normal_count.total == 0 ? 
1 : -1), out, arg); - mi_stat_print(&stats->huge, "huge", (stats->huge_count.total == 0 ? 1 : -1), out, arg); + mi_stat_print(&stats->malloc_normal, "normal", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg); + mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg); mi_stat_count_t total = { 0,0,0 }; - mi_stat_add(&total, &stats->normal); - mi_stat_add(&total, &stats->huge); - mi_stat_print(&total, "total", 1, out, arg); + mi_stat_count_add(&total, &stats->malloc_normal); + mi_stat_count_add(&total, &stats->malloc_huge); + mi_stat_print_ex(&total, "total", 1, out, arg, ""); #endif #if MI_STAT>1 - mi_stat_print(&stats->malloc, "malloc req", 1, out, arg); + mi_stat_print_ex(&stats->malloc_requested, "malloc req", 1, out, arg, ""); _mi_fprintf(out, arg, "\n"); #endif mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, ""); mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, ""); mi_stat_peak_print(&stats->reset, "reset", 1, out, arg ); mi_stat_peak_print(&stats->purged, "purged", 1, out, arg ); - //mi_stat_print(&stats->segments, "segments", -1, out, arg); - //mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); - //mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); mi_stat_print_ex(&stats->page_committed, "touched", 1, out, arg, ""); - mi_stat_print_ex(&stats->pages, "pages", -1, out, arg, ""); + // mi_stat_print(&stats->segments, "segments", -1, out, arg); + // mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); + // mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); + mi_stat_print(&stats->pages, "pages", -1, out, arg); mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg); mi_stat_counter_print(&stats->pages_reclaim_on_alloc, "-reclaima", out, arg); mi_stat_counter_print(&stats->pages_reclaim_on_free, "-reclaimf", out, arg); mi_stat_counter_print(&stats->pages_reabandon_full, "-reabandon", out, arg); mi_stat_counter_print(&stats->pages_unabandon_busy_wait, "-waits", out, arg); mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg); - mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg); + mi_stat_counter_print(&stats->pages_retire, "-retire", out, arg); mi_stat_counter_print(&stats->arena_count, "arenas", out, arg); - mi_stat_counter_print(&stats->arena_purges, "-purges", out, arg); - mi_stat_counter_print(&stats->mmap_calls, "mmap calls", out, arg); - mi_stat_counter_print(&stats->commit_calls, " -commit", out, arg); - mi_stat_counter_print(&stats->reset_calls, "-reset", out, arg); - mi_stat_counter_print(&stats->purge_calls, "-purge", out, arg); - mi_stat_counter_print(&stats->guarded_alloc_count, "guarded", out, arg); + // mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg); + // mi_stat_counter_print(&stats->arena_purges, "-purges", out, arg); + mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg); + mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg); + mi_stat_counter_print(&stats->commit_calls, "commits", out, arg); + mi_stat_counter_print(&stats->reset_calls, "resets", out, arg); + mi_stat_counter_print(&stats->purge_calls, "purges", out, arg); + mi_stat_counter_print(&stats->malloc_guarded_count, "guarded", out, arg); mi_stat_print(&stats->threads, "threads", -1, out, arg); - mi_stat_counter_print_avg(&stats->searches, "searches", out, arg); + mi_stat_counter_print_avg(&stats->page_searches, "searches", out, arg); _mi_fprintf(out, arg, "%10s: 
%5zu\n", "numa nodes", _mi_os_numa_node_count()); size_t elapsed; @@ -492,3 +473,159 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit; if (page_faults!=NULL) *page_faults = pinfo.page_faults; } + + +// -------------------------------------------------------- +// Return statistics +// -------------------------------------------------------- + +void mi_stats_get(size_t stats_size, mi_stats_t* stats) mi_attr_noexcept { + if (stats == NULL || stats_size == 0) return; + _mi_memzero(stats, stats_size); + const size_t size = (stats_size > sizeof(mi_stats_t) ? sizeof(mi_stats_t) : stats_size); + _mi_memcpy(stats, &_mi_subproc()->stats, size); + stats->version = MI_STAT_VERSION; +} + + +// -------------------------------------------------------- +// Statics in json format +// -------------------------------------------------------- + +typedef struct mi_heap_buf_s { + char* buf; + size_t size; + size_t used; + bool can_realloc; +} mi_heap_buf_t; + +static bool mi_heap_buf_expand(mi_heap_buf_t* hbuf) { + if (hbuf==NULL) return false; + if (hbuf->buf != NULL && hbuf->size>0) { + hbuf->buf[hbuf->size-1] = 0; + } + if (hbuf->size > SIZE_MAX/2 || !hbuf->can_realloc) return false; + const size_t newsize = (hbuf->size == 0 ? 2*MI_KiB : 2*hbuf->size); + char* const newbuf = (char*)mi_rezalloc(hbuf->buf, newsize); + if (newbuf == NULL) return false; + hbuf->buf = newbuf; + hbuf->size = newsize; + return true; +} + +static void mi_heap_buf_print(mi_heap_buf_t* hbuf, const char* msg) { + if (msg==NULL || hbuf==NULL) return; + if (hbuf->used + 1 >= hbuf->size && !hbuf->can_realloc) return; + for (const char* src = msg; *src != 0; src++) { + char c = *src; + if (hbuf->used + 1 >= hbuf->size) { + if (!mi_heap_buf_expand(hbuf)) return; + } + mi_assert_internal(hbuf->used < hbuf->size); + hbuf->buf[hbuf->used++] = c; + } + mi_assert_internal(hbuf->used < hbuf->size); + hbuf->buf[hbuf->used] = 0; +} + +static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, size_t bin, bool add_comma) { + const size_t binsize = _mi_bin_size(bin); + const size_t pagesize = (binsize <= MI_SMALL_MAX_OBJ_SIZE ? MI_SMALL_PAGE_SIZE : + (binsize <= MI_MEDIUM_MAX_OBJ_SIZE ? MI_MEDIUM_PAGE_SIZE : + (binsize <= MI_LARGE_MAX_OBJ_SIZE ? MI_LARGE_PAGE_SIZE : 0))); + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? "," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_count(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, bool add_comma) { + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld }%s\n", prefix, stat->total, stat->peak, stat->current, (add_comma ? 
"," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_count_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_count_t* stat) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": ", name); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); + mi_heap_buf_print_count(hbuf, "", stat, true); +} + +static void mi_heap_buf_print_value(mi_heap_buf_t* hbuf, const char* name, int64_t val) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": %lld,\n", name, val); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_size(mi_heap_buf_t* hbuf, const char* name, size_t val, bool add_comma) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": %zu%s\n", name, val, (add_comma ? "," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_counter_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_counter_t* stat) { + mi_heap_buf_print_value(hbuf, name, stat->total); +} + +#define MI_STAT_COUNT(stat) mi_heap_buf_print_count_value(&hbuf, #stat, &stats->stat); +#define MI_STAT_COUNTER(stat) mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat); + +char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept { + mi_heap_buf_t hbuf = { NULL, 0, 0, true }; + if (output_size > 0 && output_buf != NULL) { + _mi_memzero(output_buf, output_size); + hbuf.buf = output_buf; + hbuf.size = output_size; + hbuf.can_realloc = false; + } + else { + if (!mi_heap_buf_expand(&hbuf)) return NULL; + } + mi_heap_buf_print(&hbuf, "{\n"); + mi_heap_buf_print_value(&hbuf, "version", MI_STAT_VERSION); + mi_heap_buf_print_value(&hbuf, "mimalloc_version", MI_MALLOC_VERSION); + + // process info + mi_heap_buf_print(&hbuf, " \"process\": {\n"); + size_t elapsed; + size_t user_time; + size_t sys_time; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; + mi_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); + mi_heap_buf_print_size(&hbuf, "elapsed_msecs", elapsed, true); + mi_heap_buf_print_size(&hbuf, "user_msecs", user_time, true); + mi_heap_buf_print_size(&hbuf, "system_msecs", sys_time, true); + mi_heap_buf_print_size(&hbuf, "page_faults", page_faults, true); + mi_heap_buf_print_size(&hbuf, "rss_current", current_rss, true); + mi_heap_buf_print_size(&hbuf, "rss_peak", peak_rss, true); + mi_heap_buf_print_size(&hbuf, "commit_current", current_commit, true); + mi_heap_buf_print_size(&hbuf, "commit_peak", peak_commit, false); + mi_heap_buf_print(&hbuf, " },\n"); + + // statistics + mi_stats_t* stats = &_mi_subproc()->stats; + MI_STAT_FIELDS() + + // size bins + mi_heap_buf_print(&hbuf, " \"malloc_bins\": [\n"); + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_heap_buf_print_count_bin(&hbuf, " ", &stats->malloc_bins[i], i, i!=MI_BIN_HUGE); + } + mi_heap_buf_print(&hbuf, " ],\n"); + mi_heap_buf_print(&hbuf, " \"page_bins\": [\n"); + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE); + } + mi_heap_buf_print(&hbuf, " ]\n"); + mi_heap_buf_print(&hbuf, "}\n"); + return hbuf.buf; +} diff --git a/test/test-stress.c b/test/test-stress.c index 338fe32a..cd63b05e 100644 --- a/test/test-stress.c +++ b/test/test-stress.c @@ -79,6 +79,7 @@ static bool main_participates = false; // main thread participates as a #define custom_free(p) free(p) #else #include +#include #define custom_calloc(n,s) mi_calloc(n,s) #define custom_realloc(p,s) 
mi_realloc(p,s) #define custom_free(p) mi_free(p) @@ -134,7 +135,7 @@ static void* alloc_items(size_t items, random_t r) { else if (chance(10, r) && allow_large_objects) items *= 1000; // 0.1% huge else items *= 100; // 1% large objects; } - if (items == 40) items++; // pthreads uses that size for stack increases + if (items>=32 && items<=40) items*=2; // pthreads uses 320b allocations (this shows that more clearly in the stats) if (use_one_size > 0) items = (use_one_size / sizeof(uintptr_t)); if (items==0) items = 1; uintptr_t* p = (uintptr_t*)custom_calloc(items,sizeof(uintptr_t)); @@ -367,8 +368,13 @@ int main(int argc, char** argv) { #ifndef NDEBUG mi_debug_show_arenas(); mi_collect(true); + char* json = mi_stats_get_json(0, NULL); + if (json != NULL) { + fputs(json,stderr); + mi_free(json); + } #endif - mi_stats_print(NULL); + mi_stats_print(NULL); #endif //bench_end_program(); return 0;
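
The new `mi_stats_get` entry point copies at most `stats_size` bytes of the sub-process statistics into a caller-provided buffer (zero-filling the rest) and stamps it with `MI_STAT_VERSION`, so tooling can detect a backward-incompatible layout before interpreting any field. A minimal sketch of a consumer, assuming only what this patch exports (the `print_page_bins` helper name is illustrative, not part of the patch):

    #include <mimalloc.h>
    #include <mimalloc-stats.h>
    #include <stdio.h>

    static void print_page_bins(void) {
      mi_stats_t stats;
      mi_stats_get(sizeof(stats), &stats);           // copies at most sizeof(stats) bytes
      if (stats.version != MI_STAT_VERSION) return;  // layout changed in a backward-incompatible way
      for (size_t i = 0; i <= MI_BIN_HUGE; i++) {    // MI_BIN_HUGE is defined in mimalloc-stats.h
        if (stats.page_bins[i].current != 0) {
          printf("bin %3zu: %lld pages (peak %lld)\n", i,
                 (long long)stats.page_bins[i].current,
                 (long long)stats.page_bins[i].peak);
        }
      }
    }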
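
`mi_stats_get_json` likewise supports two modes: called as `mi_stats_get_json(0, NULL)` it allocates (and doubles as needed) the result buffer itself, and the caller must release it with `mi_free`, exactly as the test-stress.c change above does; called with a caller-provided buffer it never reallocates, so the report is zero-terminated but silently truncated when it does not fit. A sketch of both modes under the same assumptions:

    #include <mimalloc.h>
    #include <mimalloc-stats.h>
    #include <stdio.h>

    int main(void) {
      // mode 1: mimalloc allocates the buffer; free it with mi_free
      char* json = mi_stats_get_json(0, NULL);
      if (json != NULL) {
        fputs(json, stdout);
        mi_free(json);
      }
      // mode 2: fixed caller buffer; output is truncated if the report does not fit
      char buf[8*1024];
      if (mi_stats_get_json(sizeof(buf), buf) != NULL) {
        fputs(buf, stdout);
      }
      return 0;
    }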