merge from dev statistics update

Daan 2025-03-21 19:43:26 -07:00
commit ea2c7c6e56
7 changed files with 54 additions and 31 deletions

View file

@@ -683,22 +683,25 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
// add to stat keeping track of the peak
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount);
// counters can just be increased
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
#if (MI_STAT)
#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
#define mi_stat_adjust_decrease(stat,amount) _mi_stat_adjust_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount) ((void)0)
#define mi_stat_decrease(stat,amount) ((void)0)
#define mi_stat_adjust_decrease(stat,amount) ((void)0)
#define mi_stat_counter_increase(stat,amount) ((void)0)
#endif
#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_adjust_decrease(heap,stat,amount) mi_stat_adjust_decrease( (heap)->tld->stats.stat, amount)
#endif
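
For context, the new mi_heap_stat_adjust_decrease macro follows the same MI_STAT on/off pattern as the existing stat macros. A minimal sketch of what a call site expands to under each configuration, obtained by applying the definitions above by hand (the malloc_requested field name is taken from the hunks below):

// with MI_STAT enabled:
mi_heap_stat_adjust_decrease(heap, malloc_requested, padsize);
//   expands to: _mi_stat_adjust_decrease(&(heap)->tld->stats.malloc_requested, padsize);

// with MI_STAT disabled:
mi_heap_stat_adjust_decrease(heap, malloc_requested, padsize);
//   expands to: ((void)0)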

View file

@@ -191,10 +191,11 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
if mi_likely(is_aligned)
{
void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
#if MI_STAT>1
mi_heap_stat_adjust_decrease(heap, malloc_requested, padsize);
mi_heap_stat_increase(heap, malloc_requested, size);
#endif
void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
mi_track_malloc(p,size,zero);

View file

@@ -30,6 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
mi_assert_internal(size >= MI_PADDING_SIZE);
mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
// check the free list
@@ -88,6 +89,7 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
#if (MI_STAT>1)
const size_t bin = _mi_bin(bsize);
mi_heap_stat_increase(heap, malloc_bins[bin], 1);
mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE);
#endif
}
#endif
@@ -146,12 +148,6 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
}
#endif
#if MI_DEBUG>3
if (p != NULL && zero) {
mi_assert_expensive(mi_mem_is_zero(p, size));
@@ -188,12 +184,6 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
}
#endif
#if MI_DEBUG>3
if (p != NULL && zero) {
mi_assert_expensive(mi_mem_is_zero(p, size));
@@ -666,7 +656,8 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size);
mi_heap_stat_increase(heap, malloc_requested, size);
#endif
_mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1);
}
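
If the guarded-allocation hunk is read together with the stats.c changes further down, the requested-bytes statistic is first corrected downward by req_size (the amount already counted deeper in the allocation path) and then re-increased by the caller's original size. A hedged sketch of the net effect on the counter, using the adjust/increase semantics shown in the stats.c hunk (field names simplified):

// hedged sketch of the net bookkeeping in the guarded path under MI_STAT>1:
//   malloc_requested.current += (int64_t)size - (int64_t)req_size;
//   malloc_requested.total   += (int64_t)size - (int64_t)req_size;
//   malloc_requested.peak    is only raised by the increase step; the
//   adjust step deliberately leaves the peak untouched.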

View file

@@ -522,15 +522,13 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
MI_UNUSED(block);
#endif
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_usable_block_size(page);
#if (MI_STAT>1)
const size_t usize = mi_page_usable_size_of(page, block);
mi_heap_stat_decrease(heap, malloc_requested, usize);
#endif
// #if (MI_STAT>1)
// const size_t usize = mi_page_usable_size_of(page, block);
// mi_heap_stat_decrease(heap, malloc_requested, usize);
// #endif
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, malloc_normal, bsize);
#if (MI_STAT > 1)

View file

@@ -344,17 +344,17 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
mi_heap_stat_decrease(heap, malloc_huge, bsize);
}
}
#if (MI_STAT)
#if (MI_STAT>0)
_mi_page_free_collect(page, false); // update used count
const size_t inuse = page->used;
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse);
#if (MI_STAT>1)
#if (MI_STAT>1)
mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse);
#endif
#endif
}
mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks...
#endif
// mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks...
#endif
// pretend it is all free now
mi_assert_internal(mi_page_thread_free(page) == NULL);

View file

@@ -62,6 +62,25 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
}
static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
if (amount == 0) return;
if mi_unlikely(mi_is_in_main(stat))
{
// adjust atomically
mi_atomic_addi64_relaxed(&stat->current, amount);
mi_atomic_addi64_relaxed(&stat->total,amount);
}
else {
// adjust local
stat->current += amount;
stat->total += amount;
}
}
void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
mi_stat_adjust(stat, -((int64_t)amount));
}
// must be thread safe as it is called from stats_merge
static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) {
@@ -199,6 +218,13 @@ static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int
_mi_fprintf(out, arg, "\n");
}
static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) {
_mi_fprintf(out, arg, "%10s:", msg);
_mi_fprintf(out, arg, "%12s", " "); // no peak
mi_print_amount(stat->total, unit, out, arg);
_mi_fprintf(out, arg, "\n");
}
static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) {
_mi_fprintf(out, arg, "%10s:", msg);
mi_print_amount(stat->total, -1, out, arg);
@@ -215,7 +241,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char*
static void mi_print_header(mi_output_fun* out, void* arg ) {
_mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "unit ", "total# ");
_mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# ");
}
#if MI_STAT>1
@@ -284,10 +310,10 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
// and print using that
mi_print_header(out,arg);
#if MI_STAT>1
mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "normal",out,arg);
mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg);
#endif
#if MI_STAT
mi_stat_print(&stats->malloc_normal, "normal", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
// mi_stat_print(&stats->malloc_large, "large", (stats->malloc_large_count.total == 0 ? 1 : -1), out, arg);
mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg);
mi_stat_count_t total = { 0,0,0 };
@@ -297,7 +323,7 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
mi_stat_print_ex(&total, "total", 1, out, arg, "");
#endif
#if MI_STAT>1
mi_stat_print_ex(&stats->malloc_requested, "malloc req", 1, out, arg, "");
mi_stat_total_print(&stats->malloc_requested, "malloc req", 1, out, arg);
_mi_fprintf(out, arg, "\n");
#endif
mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, "");
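
One way to read the new mi_stat_adjust helper is that corrections must not disturb the recorded peak, unlike a regular increase. A small self-contained sketch of that difference (simplified fields, not the actual mimalloc implementation, which additionally has the atomic main-stats path shown above):

#include <stdint.h>

typedef struct sketch_stat_s {
  int64_t total;
  int64_t peak;
  int64_t current;
} sketch_stat_t;

// regular increase: moves current/total and may raise the peak
static void sketch_stat_increase(sketch_stat_t* s, int64_t amount) {
  s->current += amount;
  if (s->current > s->peak) { s->peak = s->current; }
  s->total += amount;
}

// adjustment: corrects current/total but never touches the peak
static void sketch_stat_adjust(sketch_stat_t* s, int64_t amount) {
  s->current += amount;
  s->total   += amount;
}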

View file

@@ -203,7 +203,11 @@ int main(void) {
CHECK_BODY("malloc-aligned9") { // test large alignments
bool ok = true;
void* p[8];
size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };
size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1,
#if SIZE_MAX > UINT32_MAX
2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX,
#endif
0 };
for (int i = 0; i < 28 && ok; i++) {
int align = (1 << i);
for (int j = 0; j < 8 && ok; j++) {
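
The SIZE_MAX guard in the test presumably exists because the two largest entries are not meaningful on 32-bit targets. As a standalone illustration of the same pattern (not the mimalloc test itself, and with made-up sizes), the guarded initializers simply drop out on 32-bit builds and the remaining array slots default to zero:

#include <stdint.h>
#include <stdio.h>

// illustrative only: include the large sizes only when size_t is wider than 32 bits
static const size_t example_sizes[8] = {
  8, 512, 1024 * 1024,
#if SIZE_MAX > UINT32_MAX
  (size_t)1 << 33, (size_t)1 << 36,   // hypothetical large sizes
#endif
  0                                   // terminator; trailing slots are zero-initialized
};

int main(void) {
  for (int i = 0; i < 8 && example_sizes[i] != 0; i++) {
    printf("size[%d] = %zu\n", i, example_sizes[i]);
  }
  return 0;
}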