Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-03 14:09:31 +03:00)
add access to statistics for runtime performance counters
This commit is contained in:
parent
385c8be259
commit
72b1b76e48
15 changed files with 544 additions and 333 deletions
117  include/mimalloc-stats.h  (new file)
@@ -0,0 +1,117 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_STATS_H
#define MIMALLOC_STATS_H

#include <mimalloc.h>
#include <stdint.h>

#define MI_STAT_VERSION 1   // increased on every backward incompatible change

// count allocation over time
typedef struct mi_stat_count_s {
  int64_t total;    // total allocated
  int64_t peak;     // peak allocation
  int64_t current;  // current allocation
} mi_stat_count_t;

// counters only increase
typedef struct mi_stat_counter_s {
  int64_t total;    // total count
} mi_stat_counter_t;

#define MI_STAT_FIELDS() \
  MI_STAT_COUNT(pages)               /* count of mimalloc pages */ \
  MI_STAT_COUNT(reserved)            /* reserved memory bytes */ \
  MI_STAT_COUNT(committed)           /* committed bytes */ \
  MI_STAT_COUNT(reset)               /* reset bytes */ \
  MI_STAT_COUNT(purged)              /* purged bytes */ \
  MI_STAT_COUNT(page_committed)      /* committed memory inside pages */ \
  MI_STAT_COUNT(pages_abandoned)     /* abandoned pages count */ \
  MI_STAT_COUNT(threads)             /* number of threads */ \
  MI_STAT_COUNT(malloc_normal)       /* allocated bytes <= MI_LARGE_OBJ_SIZE_MAX */ \
  MI_STAT_COUNT(malloc_huge)         /* allocated bytes in huge pages */ \
  MI_STAT_COUNT(malloc_requested)    /* malloc requested bytes */ \
  \
  MI_STAT_COUNTER(mmap_calls) \
  MI_STAT_COUNTER(commit_calls) \
  MI_STAT_COUNTER(reset_calls) \
  MI_STAT_COUNTER(purge_calls) \
  MI_STAT_COUNTER(arena_count)           /* number of memory arena's */ \
  MI_STAT_COUNTER(malloc_normal_count)   /* number of blocks <= MI_LARGE_OBJ_SIZE_MAX */ \
  MI_STAT_COUNTER(malloc_huge_count)     /* number of huge blocks */ \
  MI_STAT_COUNTER(malloc_guarded_count)  /* number of allocations with guard pages */ \
  \
  /* internal statistics */ \
  MI_STAT_COUNTER(arena_rollback_count) \
  MI_STAT_COUNTER(arena_purges) \
  MI_STAT_COUNTER(pages_extended)        /* number of page extensions */ \
  MI_STAT_COUNTER(pages_retire)          /* number of pages that are retired */ \
  MI_STAT_COUNTER(page_searches)         /* searches for a fresh page */ \
  /* only on v1 and v2 */ \
  MI_STAT_COUNT(segments) \
  MI_STAT_COUNT(segments_abandoned) \
  MI_STAT_COUNT(segments_cache) \
  MI_STAT_COUNT(_segments_reserved) \
  /* only on v3 */ \
  MI_STAT_COUNTER(pages_reclaim_on_alloc) \
  MI_STAT_COUNTER(pages_reclaim_on_free) \
  MI_STAT_COUNTER(pages_reabandon_full) \
  MI_STAT_COUNTER(pages_unabandon_busy_wait) \


// Size bins for chunks
typedef enum mi_chunkbin_e {
  MI_CBIN_NONE,    // no bin assigned yet (the chunk is completely free)
  MI_CBIN_SMALL,   // slice_count == 1
  MI_CBIN_OTHER,   // slice_count: any other from the other bins, and 1 <= slice_count <= MI_BCHUNK_BITS
  MI_CBIN_MEDIUM,  // slice_count == 8
  MI_CBIN_LARGE,   // slice_count == MI_SIZE_BITS (only used if MI_ENABLE_LARGE_PAGES is 1)
  MI_CBIN_COUNT
} mi_chunkbin_t;


// Define the statistics structure
#define MI_BIN_HUGE  (73U)   // see types.h
#define MI_STAT_COUNT(stat)    mi_stat_count_t stat;
#define MI_STAT_COUNTER(stat)  mi_stat_counter_t stat;

typedef struct mi_stats_s
{
  int version;

  MI_STAT_FIELDS()

  // future extension
  mi_stat_count_t   _stat_reserved[4];
  mi_stat_counter_t _stat_counter_reserved[4];

  // size segregated statistics
  mi_stat_count_t   malloc_bins[MI_BIN_HUGE+1];  // allocation per size bin
  mi_stat_count_t   page_bins[MI_BIN_HUGE+1];    // pages allocated per size bin
  mi_stat_count_t   chunk_bins[MI_CBIN_COUNT];   // chunks per page sizes
} mi_stats_t;

#undef MI_STAT_COUNT
#undef MI_STAT_COUNTER


// Exported definitions
#ifdef __cplusplus
extern "C" {
#endif

mi_decl_export void  mi_stats_get( size_t stats_size, mi_stats_t* stats ) mi_attr_noexcept;
mi_decl_export char* mi_stats_get_json( size_t buf_size, char* buf ) mi_attr_noexcept;  // use mi_free to free the result if the input buf == NULL
mi_decl_export size_t mi_stats_get_bin_size(size_t bin) mi_attr_noexcept;

#ifdef __cplusplus
}
#endif

#endif // MIMALLOC_STATS_H
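For context, a minimal sketch of how an application could consume the new exported API (the field names follow the header above; the choice of fields printed and the lack of error handling are illustrative only):

#include <mimalloc.h>
#include <mimalloc-stats.h>
#include <stdio.h>

int main(void) {
  // take a snapshot of the process-wide statistics
  mi_stats_t stats;
  mi_stats_get(sizeof(stats), &stats);
  printf("stats version: %d\n", stats.version);
  printf("committed    : current %lld, peak %lld, total %lld\n",
         (long long)stats.committed.current,
         (long long)stats.committed.peak,
         (long long)stats.committed.total);
  printf("mmap calls   : %lld\n", (long long)stats.mmap_calls.total);

  // or request everything as JSON; per the header comment, passing buf==NULL
  // makes the library allocate the result, which must be released with mi_free
  char* json = mi_stats_get_json(0, NULL);
  if (json != NULL) {
    printf("%s\n", json);
    mi_free(json);
  }
  return 0;
}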
@@ -134,6 +134,12 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
  return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
}
static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) {
  const int64_t add = mi_atomic_load_relaxed((_Atomic(int64_t)*)padd);
  if (add != 0) {
    mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
  }
}
static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
  int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
  while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };

@@ -265,6 +271,14 @@ static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int6
  return current;
#endif
}

static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) {
  const int64_t add = *padd;
  if (add != 0) {
    mi_atomic_addi64_relaxed((volatile _Atomic(int64_t)*)p, add);
  }
}

static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
  int64_t current;
  do {
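The point of the new mi_atomic_void_addi64_relaxed helper is to skip the atomic read-modify-write entirely when the value to add is zero, which is the common case when merging mostly-zero per-thread statistics into the shared totals. A standalone C11 sketch of the same two patterns (illustrative only, not mimalloc's macro-based code):

#include <stdatomic.h>
#include <stdint.h>

static void add64_relaxed_if_nonzero(_Atomic(int64_t)* dst, const _Atomic(int64_t)* src) {
  const int64_t add = atomic_load_explicit(src, memory_order_relaxed);
  if (add != 0) {  // avoid the RMW (and the cache-line write) for zero-valued fields
    atomic_fetch_add_explicit(dst, add, memory_order_relaxed);
  }
}

static void max64_relaxed(_Atomic(int64_t)* dst, int64_t x) {
  int64_t current = atomic_load_explicit(dst, memory_order_relaxed);
  while (current < x &&
         !atomic_compare_exchange_weak_explicit(dst, &current, x,
                                                memory_order_release, memory_order_relaxed)) {
    /* retry: 'current' now holds the freshly observed value */
  }
}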
@@ -298,6 +298,47 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
#endif


/* -----------------------------------------------------------
  Statistics (in `stats.c`)
----------------------------------------------------------- */

// add to stat keeping track of the peak
void __mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void __mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void __mi_stat_increase_mt(mi_stat_count_t* stat, size_t amount);
void __mi_stat_decrease_mt(mi_stat_count_t* stat, size_t amount);

// adjust stat in special cases to compensate for double counting (and does not adjust peak values and can decrease the total)
void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount);
void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount);
void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount);
void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount);

// counters can just be increased
void __mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount);

#define mi_subproc_stat_counter_increase(subproc,stat,amount)  __mi_stat_counter_increase_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_increase(subproc,stat,amount)          __mi_stat_increase_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_decrease(subproc,stat,amount)          __mi_stat_decrease_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_adjust_increase(subproc,stat,amnt)     __mi_stat_adjust_increase_mt( &(subproc)->stats.stat, amnt)
#define mi_subproc_stat_adjust_decrease(subproc,stat,amnt)     __mi_stat_adjust_decrease_mt( &(subproc)->stats.stat, amnt)

#define mi_tld_stat_counter_increase(tld,stat,amount)          __mi_stat_counter_increase( &(tld)->stats.stat, amount)
#define mi_tld_stat_increase(tld,stat,amount)                  __mi_stat_increase( &(tld)->stats.stat, amount)
#define mi_tld_stat_decrease(tld,stat,amount)                  __mi_stat_decrease( &(tld)->stats.stat, amount)
#define mi_tld_stat_adjust_increase(tld,stat,amnt)             __mi_stat_adjust_increase( &(tld)->stats.stat, amnt)
#define mi_tld_stat_adjust_decrease(tld,stat,amnt)             __mi_stat_adjust_decrease( &(tld)->stats.stat, amnt)

#define mi_os_stat_counter_increase(stat,amount)   mi_subproc_stat_counter_increase(_mi_subproc(),stat,amount)
#define mi_os_stat_increase(stat,amount)           mi_subproc_stat_increase(_mi_subproc(),stat,amount)
#define mi_os_stat_decrease(stat,amount)           mi_subproc_stat_decrease(_mi_subproc(),stat,amount)

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_tld_stat_counter_increase(heap->tld, stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)          mi_tld_stat_increase( heap->tld, stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)          mi_tld_stat_decrease( heap->tld, stat, amount)


/* -----------------------------------------------------------
  Inlined definitions
----------------------------------------------------------- */

@@ -316,6 +357,8 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
#define MI_INIT128(x)  MI_INIT64(x),MI_INIT64(x)
#define MI_INIT256(x)  MI_INIT128(x),MI_INIT128(x)

#define MI_INIT74(x)   MI_INIT64(x),MI_INIT8(x),x(),x()
#define MI_INIT5(x)    MI_INIT4(x),x()

#include <string.h>
// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
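To make the macro layering above concrete, here is how one heap-level update and one OS-level update expand (an illustrative trace following the definitions in this hunk, not part of the diff):

// mi_heap_stat_increase(heap, malloc_normal, bsize);
//   -> mi_tld_stat_increase(heap->tld, malloc_normal, bsize)
//   -> __mi_stat_increase(&heap->tld->stats.malloc_normal, bsize);      // thread-local, non-atomic
//
// mi_os_stat_increase(committed, size);
//   -> mi_subproc_stat_increase(_mi_subproc(), committed, size)
//   -> __mi_stat_increase_mt(&_mi_subproc()->stats.committed, size);    // shared across threads, atomic ("_mt")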
@@ -18,7 +18,7 @@ terms of the MIT license. A copy of the license can be found in the file
// using plain "page" for mimalloc pages (`mi_page_t`).
// --------------------------------------------------------------------------


#include <mimalloc-stats.h>
#include <stddef.h>   // ptrdiff_t
#include <stdint.h>   // uintptr_t, uint16_t, etc
#include <errno.h>    // error codes

@@ -74,6 +74,15 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#endif

// Statistics (0=only essential, 1=normal, 2=more fine-grained (expensive) tracking)
#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
// Padding should be disabled when using guard pages
// #define MI_GUARDED 1
@@ -446,125 +455,6 @@ struct mi_heap_s {
};


// ------------------------------------------------------
// Statistics
// ------------------------------------------------------

#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

typedef struct mi_stat_count_s {
  int64_t allocated;
  int64_t freed;
  int64_t peak;
  int64_t current;
} mi_stat_count_t;

typedef struct mi_stat_counter_s {
  int64_t total;
  int64_t count;
} mi_stat_counter_t;

typedef struct mi_stats_s {
  mi_stat_count_t pages;
  mi_stat_count_t reserved;
  mi_stat_count_t committed;
  mi_stat_count_t reset;
  mi_stat_count_t purged;
  mi_stat_count_t page_committed;
  mi_stat_count_t pages_abandoned;
  mi_stat_count_t threads;
  mi_stat_count_t normal;
  mi_stat_count_t huge;
  mi_stat_count_t giant;
  mi_stat_count_t malloc;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t pages_reclaim_on_alloc;
  mi_stat_counter_t pages_reclaim_on_free;
  mi_stat_counter_t pages_reabandon_full;
  mi_stat_counter_t pages_unabandon_busy_wait;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t reset_calls;
  mi_stat_counter_t purge_calls;
  mi_stat_counter_t arena_purges;
  mi_stat_counter_t page_no_retire;
  mi_stat_counter_t searches;
  mi_stat_counter_t normal_count;
  mi_stat_counter_t huge_count;
  mi_stat_counter_t arena_count;
  mi_stat_counter_t guarded_alloc_count;
#if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_COUNT];
#endif
} mi_stats_t;


// add to stat keeping track of the peak
void __mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void __mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void __mi_stat_increase_mt(mi_stat_count_t* stat, size_t amount);
void __mi_stat_decrease_mt(mi_stat_count_t* stat, size_t amount);
// adjust stat in special cases to compensate for double counting
void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount, bool on_alloc);
void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount, bool on_free);
void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount, bool on_alloc);
void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount, bool on_free);
// counters can just be increased
void __mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount);

#if (MI_STAT)
#define mi_debug_stat_increase(stat,amount)             __mi_stat_increase( &(stat), amount)
#define mi_debug_stat_decrease(stat,amount)             __mi_stat_decrease( &(stat), amount)
#define mi_debug_stat_counter_increase(stat,amount)     __mi_stat_counter_increase( &(stat), amount)
#define mi_debug_stat_increase_mt(stat,amount)          __mi_stat_increase_mt( &(stat), amount)
#define mi_debug_stat_decrease_mt(stat,amount)          __mi_stat_decrease_mt( &(stat), amount)
#define mi_debug_stat_counter_increase_mt(stat,amount)  __mi_stat_counter_increase_mt( &(stat), amount)
#define mi_debug_stat_adjust_increase_mt(stat,amnt,b)   __mi_stat_adjust_increase_mt( &(stat), amnt, b)
#define mi_debug_stat_adjust_decrease_mt(stat,amnt,b)   __mi_stat_adjust_decrease_mt( &(stat), amnt, b)
#else
#define mi_debug_stat_increase(stat,amount)             ((void)0)
#define mi_debug_stat_decrease(stat,amount)             ((void)0)
#define mi_debug_stat_counter_increase(stat,amount)     ((void)0)
#define mi_debug_stat_increase_mt(stat,amount)          ((void)0)
#define mi_debug_stat_decrease_mt(stat,amount)          ((void)0)
#define mi_debug_stat_counter_increase_mt(stat,amount)  ((void)0)
#define mi_debug_stat_adjust_increase(stat,amnt,b)      ((void)0)
#define mi_debug_stat_adjust_decrease(stat,amnt,b)      ((void)0)
#endif

#define mi_subproc_stat_counter_increase(subproc,stat,amount)  __mi_stat_counter_increase_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_increase(subproc,stat,amount)          __mi_stat_increase_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_decrease(subproc,stat,amount)          __mi_stat_decrease_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_adjust_increase(subproc,stat,amnt,b)   __mi_stat_adjust_increase_mt( &(subproc)->stats.stat, amnt, b)
#define mi_subproc_stat_adjust_decrease(subproc,stat,amnt,b)   __mi_stat_adjust_decrease_mt( &(subproc)->stats.stat, amnt, b)

#define mi_tld_stat_counter_increase(tld,stat,amount)   __mi_stat_counter_increase( &(tld)->stats.stat, amount)
#define mi_tld_stat_increase(tld,stat,amount)           __mi_stat_increase( &(tld)->stats.stat, amount)
#define mi_tld_stat_decrease(tld,stat,amount)           __mi_stat_decrease( &(tld)->stats.stat, amount)
#define mi_tld_stat_adjust_increase(tld,stat,amnt,b)    __mi_stat_adjust_increase( &(tld)->stats.stat, amnt, b)
#define mi_tld_stat_adjust_decrease(tld,stat,amnt,b)    __mi_stat_adjust_decrease( &(tld)->stats.stat, amnt, b)


#define mi_os_stat_counter_increase(stat,amount)   mi_subproc_stat_counter_increase(_mi_subproc(),stat,amount)
#define mi_os_stat_increase(stat,amount)           mi_subproc_stat_increase(_mi_subproc(),stat,amount)
#define mi_os_stat_decrease(stat,amount)           mi_subproc_stat_decrease(_mi_subproc(),stat,amount)

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_tld_stat_counter_increase(heap->tld, stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)          mi_tld_stat_increase( heap->tld, stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)          mi_tld_stat_decrease( heap->tld, stat, amount)

#define mi_debug_heap_stat_counter_increase(heap,stat,amount)  mi_debug_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_debug_heap_stat_increase(heap,stat,amount)          mi_debug_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_debug_heap_stat_decrease(heap,stat,amount)          mi_debug_stat_decrease( (heap)->tld->stats.stat, amount)


// ------------------------------------------------------
// Sub processes use separate arena's and no heaps/pages/blocks
// are shared between sub processes.
@@ -193,7 +193,9 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
  const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
  if mi_likely(is_aligned)
  {
    mi_debug_heap_stat_increase(heap, malloc, size);
    #if MI_STAT>1
    mi_heap_stat_increase(heap, malloc_requested, size);
    #endif
    void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
    mi_assert_internal(p != NULL);
    mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
21  src/alloc.c

@@ -87,11 +87,12 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
  #if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_LARGE_MAX_OBJ_SIZE) {
    mi_heap_stat_increase(heap, normal, bsize);
    mi_heap_stat_counter_increase(heap, normal_count, 1);
    mi_heap_stat_increase(heap, malloc_normal, bsize);
    mi_heap_stat_counter_increase(heap, malloc_normal_count, 1);
    #if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, normal_bins[bin], 1);
    mi_heap_stat_increase(heap, malloc_bins[bin], 1);
    mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE);
    #endif
  }
  #endif

@@ -150,12 +151,6 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
  void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);

  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));

@@ -191,13 +186,7 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
  mi_assert(heap->tld->thread_id == 0 || heap->tld->thread_id == _mi_thread_id()); // heaps are thread local
  void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
  mi_track_malloc(p,size,zero);

  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif

  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));
18  src/arena.c

@@ -316,19 +316,19 @@ static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_
  // is actually allocated for the first time it will be counted.
  const bool adjust = (overcommit && arena_commit);
  if (adjust) {
    mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve, true /* on alloc */);
    mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve);
  }
  // and try to reserve the arena
  int err = mi_reserve_os_memory_ex2(subproc, arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
  if (err != 0) {
    if (adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve, true); } // roll back
    if (adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve); } // roll back
    // failed, try a smaller size?
    const size_t small_arena_reserve = (MI_SIZE_BITS == 32 ? 128*MI_MiB : 1*MI_GiB);
    if (adjust) { mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve, true); }
    if (adjust) { mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve); }
    if (arena_reserve > small_arena_reserve) {
      // try again
      err = mi_reserve_os_memory_ex(small_arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
      if (err != 0 && adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve, true); } // roll back
      if (err != 0 && adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve); } // roll back
    }
  }
  return (err==0);

@@ -918,7 +918,7 @@ bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page) {
  else {
    mi_tld_t* tld = _mi_thread_tld();
    mi_tld_stat_counter_increase( tld, pages_reabandon_full, 1);
    mi_tld_stat_adjust_decrease( tld, pages_abandoned, 1, true /* on alloc */); // adjust as we are not abandoning fresh
    mi_tld_stat_adjust_decrease( tld, pages_abandoned, 1 ); // adjust as we are not abandoning fresh
    _mi_arenas_page_abandon(page,tld);
    return true;
  }

@@ -1398,10 +1398,10 @@ static size_t mi_debug_show_chunks(const char* header1, const char* header2, con
  char chunk_kind = ' ';
  if (chunk_bins != NULL) {
    switch (mi_atomic_load_relaxed(&chunk_bins[i])) {
      case MI_BBIN_SMALL:  chunk_kind = 'S'; break;
      case MI_BBIN_MEDIUM: chunk_kind = 'M'; break;
      case MI_BBIN_LARGE:  chunk_kind = 'L'; break;
      case MI_BBIN_OTHER:  chunk_kind = 'X'; break;
      case MI_CBIN_SMALL:  chunk_kind = 'S'; break;
      case MI_CBIN_MEDIUM: chunk_kind = 'M'; break;
      case MI_CBIN_LARGE:  chunk_kind = 'L'; break;
      case MI_CBIN_OTHER:  chunk_kind = 'X'; break;
      // case MI_BBIN_NONE: chunk_kind = 'N'; break;
    }
  }
21  src/bitmap.c

@@ -1411,8 +1411,15 @@ void mi_bbitmap_unsafe_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) {
-------------------------------------------------------------------------------- */

// Assign a specific size bin to a chunk
static void mi_bbitmap_set_chunk_bin(mi_bbitmap_t* bbitmap, size_t chunk_idx, mi_bbin_t bin) {
static void mi_bbitmap_set_chunk_bin(mi_bbitmap_t* bbitmap, size_t chunk_idx, mi_chunkbin_t bin) {
  mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap));
  if (bin!=MI_CBIN_NONE) {
    mi_os_stat_increase(chunk_bins[bin],1);
  }
  else {
    const mi_chunkbin_t oldbin = (mi_chunkbin_t)mi_atomic_load_relaxed(&bbitmap->chunk_bins[chunk_idx]);
    if (oldbin!=MI_CBIN_NONE) { mi_os_stat_decrease(chunk_bins[oldbin],1); }
  }
  mi_atomic_store_release(&bbitmap->chunk_bins[chunk_idx], (uint8_t)bin);
}

@@ -1430,7 +1437,7 @@ static void mi_bbitmap_chunkmap_set(mi_bbitmap_t* bbitmap, size_t chunk_idx, boo
  if (check_all_set) {
    if (mi_bchunk_all_are_set_relaxed(&bbitmap->chunks[chunk_idx])) {
      // all slices are free in this chunk: return back to the NONE bin
      mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_BBIN_NONE);
      mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_CBIN_NONE);
    }
  }
  mi_bchunk_set(&bbitmap->chunkmap, chunk_idx, NULL);

@@ -1541,9 +1548,9 @@ static inline bool mi_bbitmap_try_find_and_clear_generic(mi_bbitmap_t* bbitmap,
  mi_assert_internal(MI_BFIELD_BITS >= MI_BCHUNK_FIELDS);
  const mi_bfield_t cmap_mask = mi_bfield_mask(cmap_max_count,0);
  const size_t cmap_cycle = cmap_acc+1;
  const mi_bbin_t bbin = mi_bbin_of(n);
  const mi_chunkbin_t bbin = mi_chunkbin_of(n);
  // visit bins from smallest to largest (to reduce fragmentation on the larger blocks)
  for(mi_bbin_t bin = MI_BBIN_SMALL; bin <= bbin; bin = mi_bbin_inc(bin)) // no need to traverse for MI_BBIN_NONE as anyone can allocate in MI_BBIN_SMALL
  for(mi_chunkbin_t bin = MI_CBIN_SMALL; bin <= bbin; bin = mi_chunkbin_inc(bin)) // no need to traverse for MI_BBIN_NONE as anyone can allocate in MI_BBIN_SMALL
  // (int bin = bbin; bin >= MI_BBIN_SMALL; bin--) // visit bins from largest size bin up to the NONE bin
  {
    size_t cmap_idx = 0;

@@ -1566,14 +1573,14 @@ static inline bool mi_bbitmap_try_find_and_clear_generic(mi_bbitmap_t* bbitmap,
    const size_t chunk_idx = cmap_idx*MI_BFIELD_BITS + eidx;
    mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap));
    // only in the current size class!
    const mi_bbin_t chunk_bin = (mi_bbin_t)mi_atomic_load_relaxed(&bbitmap->chunk_bins[chunk_idx]);
    if ((mi_bbin_t)bin == chunk_bin || (bin == bbin && chunk_bin == MI_BBIN_NONE)) // only allow NONE at the final run
    const mi_chunkbin_t chunk_bin = (mi_chunkbin_t)mi_atomic_load_relaxed(&bbitmap->chunk_bins[chunk_idx]);
    if ((mi_chunkbin_t)bin == chunk_bin || (bin == bbin && chunk_bin == MI_CBIN_NONE)) // only allow NONE at the final run
    // ((mi_bbin_t)bin == chunk_bin || (bin <= MI_BBIN_SMALL && chunk_bin <= MI_BBIN_SMALL)) { largest to smallest
    {
      mi_bchunk_t* chunk = &bbitmap->chunks[chunk_idx];
      size_t cidx;
      if ((*on_find)(chunk, n, &cidx)) {
        if (cidx==0 && chunk_bin == MI_BBIN_NONE) { // only the first determines the size bin
        if (cidx==0 && chunk_bin == MI_CBIN_NONE) { // only the first determines the size bin
          // this chunk is now reserved for the `bbin` size class
          mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, bbin);
        }
32  src/bitmap.h

@@ -212,28 +212,26 @@ bool _mi_bitmap_forall_setc_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* vis
  much fragmentation since we keep chunks for larger blocks separate.
---------------------------------------------------------------------------- */

// Size bins; larger bins are allowed to go into smaller bins.
// SMALL can only be in small (and NONE), so they cannot fragment the larger bins.
typedef enum mi_bbin_e {
  MI_BBIN_NONE,    // no bin assigned yet (the chunk is completely free)
  MI_BBIN_SMALL,   // slice_count == 1
  MI_BBIN_OTHER,   // slice_count: any other from the other bins, and 1 <= slice_count <= MI_BCHUNK_BITS
  MI_BBIN_MEDIUM,  // slice_count == 8
  MI_BBIN_LARGE,   // slice_count == MI_BFIELD_BITS -- only used if MI_ENABLE_LARGE_PAGES is 1
  MI_BBIN_COUNT
} mi_bbin_t;

static inline mi_bbin_t mi_bbin_inc(mi_bbin_t bbin) {
  return (mi_bbin_t)((int)bbin + 1);
// mi_chunkbin_t is defined in mimalloc-stats.h

static inline mi_chunkbin_t mi_chunkbin_inc(mi_chunkbin_t bbin) {
  mi_assert_internal(bbin < MI_CBIN_COUNT);
  return (mi_chunkbin_t)((int)bbin + 1);
}

static inline mi_bbin_t mi_bbin_of(size_t slice_count) {
  if (slice_count==1) return MI_BBIN_SMALL;
  if (slice_count==8) return MI_BBIN_MEDIUM;
static inline mi_chunkbin_t mi_chunkbin_dec(mi_chunkbin_t bbin) {
  mi_assert_internal(bbin > MI_CBIN_NONE);
  return (mi_chunkbin_t)((int)bbin - 1);
}

static inline mi_chunkbin_t mi_chunkbin_of(size_t slice_count) {
  if (slice_count==1) return MI_CBIN_SMALL;
  if (slice_count==8) return MI_CBIN_MEDIUM;
  #if MI_ENABLE_LARGE_PAGES
  if (slice_count==MI_BFIELD_BITS) return MI_BBIN_LARGE;
  if (slice_count==MI_BFIELD_BITS) return MI_CBIN_LARGE;
  #endif
  return MI_BBIN_OTHER;
  return MI_CBIN_OTHER;
}

// An atomic "binned" bitmap for the free slices where we keep chunks reserved for particular size classes
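A quick way to read the renamed helpers: mi_chunkbin_of maps a slice count to its bin, and mi_chunkbin_inc walks the enum in the order declared in mimalloc-stats.h (NONE, SMALL, OTHER, MEDIUM, LARGE). A small illustrative check, assuming the definitions above are in scope and MI_ENABLE_LARGE_PAGES is off:

#include <assert.h>

static void mi_chunkbin_example(void) {
  assert(mi_chunkbin_of(1) == MI_CBIN_SMALL);
  assert(mi_chunkbin_of(8) == MI_CBIN_MEDIUM);
  assert(mi_chunkbin_of(3) == MI_CBIN_OTHER);               // any other slice count
  assert(mi_chunkbin_inc(MI_CBIN_SMALL) == MI_CBIN_OTHER);  // OTHER sits between SMALL and MEDIUM in the enum
}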
16  src/free.c

@@ -530,24 +530,22 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
  #endif
  // #if (MI_STAT>1)
  // const size_t usize = mi_page_usable_size_of(page, block);
  // mi_heap_stat_decrease(heap, malloc_requested, usize);
  // #endif
  if (bsize <= MI_LARGE_MAX_OBJ_SIZE) {
    mi_heap_stat_decrease(heap, normal, bsize);
    mi_heap_stat_decrease(heap, malloc_normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
    mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else {
    const size_t bpsize = mi_page_block_size(page); // match stat in page.c:mi_huge_page_alloc
    mi_heap_stat_decrease(heap, huge, bpsize);
    mi_heap_stat_decrease(heap, malloc_huge, bpsize);
  }
}
#else
@@ -329,18 +329,18 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_LARGE_MAX_OBJ_SIZE) {
    mi_heap_stat_decrease(heap, huge, bsize);
    mi_heap_stat_decrease(heap, malloc_huge, bsize);
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false); // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_MAX_OBJ_SIZE) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
  // mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks...
  #endif

  /// pretend it is all free now
41  src/init.c

@@ -65,27 +65,26 @@ const mi_page_t _mi_page_empty = {
  QNULL(MI_LARGE_MAX_OBJ_WSIZE + 1 /* 655360, Huge queue */), \
  QNULL(MI_LARGE_MAX_OBJ_WSIZE + 2) /* Full queue */ }

#define MI_STAT_COUNT_NULL()  {0,0,0,0}
#define MI_STAT_COUNT_NULL()  {0,0,0}

// Empty statistics
#if MI_STAT>1
#define MI_STAT_COUNT_END_NULL()  , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) }
#else
#define MI_STAT_COUNT_END_NULL()
#endif

#define MI_STATS_NULL \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
  MI_STAT_COUNT_END_NULL()
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  { 0 }, { 0 }, { 0 }, { 0 }, \
  { 0 }, { 0 }, { 0 }, { 0 }, \
  \
  { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, \
  MI_INIT4(MI_STAT_COUNT_NULL), \
  { 0 }, { 0 }, { 0 }, { 0 }, \
  \
  { MI_INIT4(MI_STAT_COUNT_NULL) }, \
  { { 0 }, { 0 }, { 0 }, { 0 } }, \
  \
  { MI_INIT74(MI_STAT_COUNT_NULL) }, \
  { MI_INIT74(MI_STAT_COUNT_NULL) }, \
  { MI_INIT5(MI_STAT_COUNT_NULL) }

// --------------------------------------------------------
// Statically allocate an empty heap as the initial
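The MI_INIT74/MI_INIT5 helpers added in the internal header exist so this initializer's arity matches the arrays in mi_stats_t: 64+8+1+1 = 74 = MI_BIN_HUGE+1 entries for malloc_bins and page_bins, and 5 = MI_CBIN_COUNT entries for chunk_bins. A hypothetical compile-time check (not part of the commit) could assert exactly that:

#include <mimalloc-stats.h>
// hypothetical sanity checks, not in the diff:
_Static_assert(MI_BIN_HUGE + 1 == 74, "MI_INIT74 must match the malloc_bins/page_bins length");
_Static_assert(MI_CBIN_COUNT == 5,    "MI_INIT5 must match the chunk_bins length");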
@@ -112,7 +111,7 @@ static mi_decl_cache_align mi_tld_t tld_empty = {
  0,                    // heartbeat
  false,                // recurse
  false,                // is_in_threadpool
  { MI_STATS_NULL },    // stats
  { MI_STAT_VERSION, MI_STATS_NULL },  // stats
  MI_MEMID_STATIC       // memid
};

@@ -149,7 +148,7 @@ static mi_decl_cache_align mi_tld_t tld_main = {
  0,                    // heartbeat
  false,                // recurse
  false,                // is_in_threadpool
  { MI_STATS_NULL },    // stats
  { MI_STAT_VERSION, MI_STATS_NULL },  // stats
  MI_MEMID_STATIC       // memid
};

@@ -186,7 +185,7 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;

bool _mi_process_is_initialized = false;  // set to `true` in `mi_process_init`.

mi_stats_t _mi_stats_main = { MI_STATS_NULL };
mi_stats_t _mi_stats_main = { MI_STAT_VERSION, MI_STATS_NULL };

#if MI_GUARDED
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
22  src/page.c

@@ -427,7 +427,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
  if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
    if (pq->last==page && pq->first==page) { // the only page in the queue?
      mi_heap_t* heap = mi_page_heap(page);
      mi_debug_heap_stat_counter_increase(heap, page_no_retire, 1);
      #if MI_STAT>0
      mi_heap_stat_counter_increase(heap, pages_retire, 1);
      #endif
      page->retire_expire = (bsize <= MI_SMALL_MAX_OBJ_SIZE ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
      mi_assert_internal(pq >= heap->pages);
      const size_t index = pq - heap->pages;

@@ -616,7 +618,9 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
  size_t page_size;
  //uint8_t* page_start =
  mi_page_area(page, &page_size);
  mi_debug_heap_stat_counter_increase(heap, pages_extended, 1);
  #if MI_STAT>0
  mi_heap_stat_counter_increase(heap, pages_extended, 1);
  #endif

  // calculate the extend count
  const size_t bsize = mi_page_block_size(page);

@@ -656,7 +660,9 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
  }
  // enable the new free list
  page->capacity += (uint16_t)extend;
  mi_debug_heap_stat_increase(heap, page_committed, extend * bsize);
  #if MI_STAT>0
  mi_heap_stat_increase(heap, page_committed, extend * bsize);
  #endif
  mi_assert_expensive(mi_page_is_valid_init(page));
}

@@ -711,9 +717,7 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
  // search through the pages in "next fit" order
  #if MI_STAT
  size_t count = 0;
  #endif
  long candidate_limit = 0;    // we reset this on the first candidate to limit the search
  long page_full_retain = (pq->block_size > MI_SMALL_MAX_OBJ_SIZE ? 0 : heap->page_full_retain);  // only retain small pages
  mi_page_t* page_candidate = NULL;  // a page with free space

@@ -722,9 +726,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
  while (page != NULL)
  {
    mi_page_t* next = page->next;  // remember next (as this page can move to another queue)
    #if MI_STAT
    count++;
    #endif
    candidate_limit--;

    // search up to N pages for a best candidate

@@ -784,7 +786,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
    page = next;
  } // for each page

  mi_debug_heap_stat_counter_increase(heap, searches, count);
  mi_heap_stat_counter_increase(heap, page_searches, count);

  // set the page to the best candidate
  if (page_candidate != NULL) {

@@ -897,8 +899,8 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
    mi_assert_internal(mi_page_is_abandoned(page));
    mi_page_set_heap(page, NULL);
    #endif
    mi_heap_stat_increase(heap, huge, mi_page_block_size(page));
    mi_heap_stat_counter_increase(heap, huge_count, 1);
    mi_heap_stat_increase(heap, malloc_huge, mi_page_block_size(page));
    mi_heap_stat_counter_increase(heap, malloc_huge_count, 1);
  }
  return page;
}
384  src/stats.c

@@ -25,10 +25,7 @@ static void mi_stat_update_mt(mi_stat_count_t* stat, int64_t amount) {
  int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount);
  mi_atomic_maxi64_relaxed(&stat->peak, current + amount);
  if (amount > 0) {
    mi_atomic_addi64_relaxed(&stat->allocated, amount);
  }
  else {
    mi_atomic_addi64_relaxed(&stat->freed, -amount);
    mi_atomic_addi64_relaxed(&stat->total, amount);
  }
}

@@ -36,44 +33,16 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
  if (amount == 0) return;
  // add thread local
  stat->current += amount;
  if (stat->current > stat->peak) stat->peak = stat->current;
  if (amount > 0) {
    stat->allocated += amount;
  }
  else {
    stat->freed += -amount;
  }
  if (stat->current > stat->peak) { stat->peak = stat->current; }
  if (amount > 0) { stat->total += amount; }
}

// Adjust stats to compensate; for example before committing a range,
// first adjust downwards with parts that were already committed so
// we avoid double counting.
static void mi_stat_adjust_mt(mi_stat_count_t* stat, int64_t amount, bool on_alloc) {
  if (amount == 0) return;
  // adjust atomically
  mi_atomic_addi64_relaxed(&stat->current, amount);
  mi_atomic_addi64_relaxed((on_alloc ? &stat->allocated : &stat->freed), amount);
}

static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount, bool on_alloc) {
  if (amount == 0) return;
  stat->current += amount;
  if (on_alloc) {
    stat->allocated += amount;
  }
  else {
    stat->freed += amount;
  }
}

void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount) {
  mi_atomic_addi64_relaxed(&stat->count, 1);
  mi_atomic_addi64_relaxed(&stat->total, (int64_t)amount);
}

void __mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
  stat->count++;
  stat->total += amount;
}

@@ -91,82 +60,76 @@ void __mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
  mi_stat_update(stat, -((int64_t)amount));
}

void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount, bool on_alloc) {
  mi_stat_adjust_mt(stat, (int64_t)amount, on_alloc);
}
void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount, bool on_alloc) {
  mi_stat_adjust(stat, (int64_t)amount, on_alloc);

// Adjust stats to compensate; for example before committing a range,
// first adjust downwards with parts that were already committed so
// we avoid double counting.
static void mi_stat_adjust_mt(mi_stat_count_t* stat, int64_t amount) {
  if (amount == 0) return;
  // adjust atomically
  mi_atomic_addi64_relaxed(&stat->current, amount);
  mi_atomic_addi64_relaxed(&stat->total, amount);
}

void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount, bool on_alloc) {
  mi_stat_adjust_mt(stat, -((int64_t)amount), on_alloc);
static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
  if (amount == 0) return;
  stat->current += amount;
  stat->total += amount;
}
void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount, bool on_alloc) {
  mi_stat_adjust(stat, -((int64_t)amount), on_alloc);

void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount) {
  mi_stat_adjust_mt(stat, (int64_t)amount);
}
void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount) {
  mi_stat_adjust(stat, (int64_t)amount);
}
void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount) {
  mi_stat_adjust_mt(stat, -((int64_t)amount));
}
void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
  mi_stat_adjust(stat, -((int64_t)amount));
}


// must be thread safe as it is called from stats_merge
static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) {
static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) {
  if (stat==src) return;
  if (src->allocated==0 && src->freed==0) return;
  mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit);
  mi_atomic_addi64_relaxed( &stat->current, src->current * unit);
  mi_atomic_addi64_relaxed( &stat->freed, src->freed * unit);
  // peak scores do not work across threads..
  mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit);
  mi_atomic_void_addi64_relaxed(&stat->total, &src->total);
  mi_atomic_void_addi64_relaxed(&stat->current, &src->current);
  // peak scores do really not work across threads .. we just add them
  mi_atomic_void_addi64_relaxed( &stat->peak, &src->peak);
  // or, take the max?
  // mi_atomic_maxi64_relaxed(&stat->peak, src->peak);
}

static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t* src, int64_t unit) {
static void mi_stat_counter_add_mt(mi_stat_counter_t* stat, const mi_stat_counter_t* src) {
  if (stat==src) return;
  mi_atomic_addi64_relaxed( &stat->total, src->total * unit);
  mi_atomic_addi64_relaxed( &stat->count, src->count * unit);
  mi_atomic_void_addi64_relaxed(&stat->total, &src->total);
}

#define MI_STAT_COUNT(stat)    mi_stat_count_add_mt(&stats->stat, &src->stat);
#define MI_STAT_COUNTER(stat)  mi_stat_counter_add_mt(&stats->stat, &src->stat);

// must be thread safe as it is called from stats_merge
static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
  if (stats==src) return;
  mi_stat_add(&stats->pages, &src->pages,1);
  mi_stat_add(&stats->reserved, &src->reserved, 1);
  mi_stat_add(&stats->committed, &src->committed, 1);
  mi_stat_add(&stats->reset, &src->reset, 1);
  mi_stat_add(&stats->purged, &src->purged, 1);
  mi_stat_add(&stats->page_committed, &src->page_committed, 1);

  mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1);
  mi_stat_add(&stats->threads, &src->threads, 1);
  // copy all fields
  MI_STAT_FIELDS()

  mi_stat_add(&stats->malloc, &src->malloc, 1);
  mi_stat_add(&stats->normal, &src->normal, 1);
  mi_stat_add(&stats->huge, &src->huge, 1);
  mi_stat_add(&stats->giant, &src->giant, 1);

  mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1);
  mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1);
  mi_stat_counter_add(&stats->commit_calls, &src->commit_calls, 1);
  mi_stat_counter_add(&stats->reset_calls, &src->reset_calls, 1);
  mi_stat_counter_add(&stats->purge_calls, &src->purge_calls, 1);

  mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1);
  mi_stat_counter_add(&stats->searches, &src->searches, 1);
  mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1);
  mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1);
  mi_stat_counter_add(&stats->guarded_alloc_count, &src->guarded_alloc_count, 1);

  mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1);
  mi_stat_counter_add(&stats->pages_reclaim_on_alloc, &src->pages_reclaim_on_alloc, 1);
  mi_stat_counter_add(&stats->pages_reclaim_on_free, &src->pages_reclaim_on_free, 1);
  mi_stat_counter_add(&stats->pages_reabandon_full, &src->pages_reabandon_full, 1);
  mi_stat_counter_add(&stats->pages_unabandon_busy_wait, &src->pages_unabandon_busy_wait, 1);
  #if MI_STAT>1
  #if MI_STAT>1
  for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
    if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) {
      mi_stat_add(&stats->normal_bins[i], &src->normal_bins[i], 1);
    }
    mi_stat_count_add_mt(&stats->malloc_bins[i], &src->malloc_bins[i]);
  }
  #endif
  for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
    mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]);
  }
  #endif
}

#undef MI_STAT_COUNT
#undef MI_STAT_COUNTER

/* -----------------------------------------------------------
  Display statistics
----------------------------------------------------------- */
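Note how mi_stats_add now re-defines the two X-macro hooks just before expanding MI_STAT_FIELDS(), so the merge loop over individual fields disappears. Conceptually the expansion becomes one add call per field declared in mimalloc-stats.h (an illustrative trace, not part of the diff):

// MI_STAT_FIELDS()
//   => mi_stat_count_add_mt(&stats->pages, &src->pages);
//      mi_stat_count_add_mt(&stats->reserved, &src->reserved);
//      ...
//      mi_stat_counter_add_mt(&stats->mmap_calls, &src->mmap_calls);
//      ...   // exactly one call per MI_STAT_COUNT/MI_STAT_COUNTER entry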
@@ -217,26 +180,26 @@ static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64
  if (unit != 0) {
    if (unit > 0) {
      mi_print_amount(stat->peak, unit, out, arg);
      mi_print_amount(stat->allocated, unit, out, arg);
      mi_print_amount(stat->freed, unit, out, arg);
      mi_print_amount(stat->total, unit, out, arg);
      // mi_print_amount(stat->freed, unit, out, arg);
      mi_print_amount(stat->current, unit, out, arg);
      mi_print_amount(unit, 1, out, arg);
      mi_print_count(stat->allocated, unit, out, arg);
      mi_print_count(stat->total, unit, out, arg);
    }
    else {
      mi_print_amount(stat->peak, -1, out, arg);
      mi_print_amount(stat->allocated, -1, out, arg);
      mi_print_amount(stat->freed, -1, out, arg);
      mi_print_amount(stat->total, -1, out, arg);
      // mi_print_amount(stat->freed, -1, out, arg);
      mi_print_amount(stat->current, -1, out, arg);
      if (unit == -1) {
        _mi_fprintf(out, arg, "%24s", "");
      }
      else {
        mi_print_amount(-unit, 1, out, arg);
        mi_print_count((stat->allocated / -unit), 0, out, arg);
        mi_print_count((stat->total / -unit), 0, out, arg);
      }
    }
    if (stat->allocated > stat->freed) {
    if (stat->current != 0) {
      _mi_fprintf(out, arg, "  ");
      _mi_fprintf(out, arg, (notok == NULL ? "not all freed" : notok));
      _mi_fprintf(out, arg, "\n");

@@ -247,7 +210,7 @@ static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64
  }
  else {
    mi_print_amount(stat->peak, 1, out, arg);
    mi_print_amount(stat->allocated, 1, out, arg);
    mi_print_amount(stat->total, 1, out, arg);
    _mi_fprintf(out, arg, "%11s", " ");  // no freed
    mi_print_amount(stat->current, 1, out, arg);
    _mi_fprintf(out, arg, "\n");

@@ -272,7 +235,7 @@ static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg


static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) {
  const int64_t avg_tens = (stat->count == 0 ? 0 : (stat->total*10 / stat->count));
  const int64_t avg_tens = (stat->total == 0 ? 0 : (stat->total*10 / stat->total));
  const long avg_whole = (long)(avg_tens/10);
  const long avg_frac1 = (long)(avg_tens%10);
  _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1);

@@ -280,7 +243,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char*


static void mi_print_header(mi_output_fun* out, void* arg ) {
  _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count ");
  _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# ");
}

#if MI_STAT>1

@@ -288,7 +251,7 @@ static void mi_stats_print_bins(const mi_stat_count_t* bins, size_t max, const c
  bool found = false;
  char buf[64];
  for (size_t i = 0; i <= max; i++) {
    if (bins[i].allocated > 0) {
    if (bins[i].total > 0) {
      found = true;
      int64_t unit = _mi_bin_size((uint8_t)i);
      _mi_snprintf(buf, 64, "%s %3lu", fmt, (long)i);
@@ -349,45 +312,47 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
  // and print using that
  mi_print_header(out,arg);
  #if MI_STAT>1
  mi_stats_print_bins(stats->normal_bins, MI_BIN_HUGE, "normal",out,arg);
  mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg);
  #endif
  #if MI_STAT
  mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg);
  mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg);
  mi_stat_count_t total = { 0,0,0,0 };
  mi_stat_add(&total, &stats->normal, 1);
  mi_stat_add(&total, &stats->huge, 1);
  mi_stat_print(&total, "total", 1, out, arg);
  mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
  mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg);
  mi_stat_count_t total = { 0,0,0 };
  mi_stat_count_add_mt(&total, &stats->malloc_normal);
  mi_stat_count_add_mt(&total, &stats->malloc_huge);
  mi_stat_print_ex(&total, "total", 1, out, arg, "");
  #endif
  #if MI_STAT>1
  mi_stat_print(&stats->malloc, "malloc req", 1, out, arg);
  mi_stat_peak_print(&stats->malloc_requested, "malloc req", 1, out, arg);
  _mi_fprintf(out, arg, "\n");
  #endif
  mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, "");
  mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, "");
  mi_stat_peak_print(&stats->reset, "reset", 1, out, arg );
  mi_stat_peak_print(&stats->purged, "purged", 1, out, arg );
  //mi_stat_print(&stats->segments, "segments", -1, out, arg);
  //mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg);
  //mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg);
  mi_stat_print_ex(&stats->page_committed, "touched", 1, out, arg, "");
  mi_stat_print_ex(&stats->pages, "pages", -1, out, arg, "");
  // mi_stat_print(&stats->segments, "segments", -1, out, arg);
  // mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg);
  // mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg);
  mi_stat_print(&stats->pages, "pages", -1, out, arg);
  mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg);
  mi_stat_counter_print(&stats->pages_reclaim_on_alloc, "-reclaima", out, arg);
  mi_stat_counter_print(&stats->pages_reclaim_on_free, "-reclaimf", out, arg);
  mi_stat_counter_print(&stats->pages_reabandon_full, "-reabandon", out, arg);
  mi_stat_counter_print(&stats->pages_unabandon_busy_wait, "-waits", out, arg);
  mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg);
  mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg);
  mi_stat_counter_print(&stats->pages_retire, "-retire", out, arg);
  mi_stat_counter_print(&stats->arena_count, "arenas", out, arg);
  mi_stat_counter_print(&stats->arena_purges, "-purges", out, arg);
  mi_stat_counter_print(&stats->mmap_calls, "mmap calls", out, arg);
  mi_stat_counter_print(&stats->commit_calls, "  -commit", out, arg);
  mi_stat_counter_print(&stats->reset_calls, "-reset", out, arg);
  mi_stat_counter_print(&stats->purge_calls, "-purge", out, arg);
  mi_stat_counter_print(&stats->guarded_alloc_count, "guarded", out, arg);
  // mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg);
  // mi_stat_counter_print(&stats->arena_purges, "-purges", out, arg);
  mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg);
  mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg);
  mi_stat_counter_print(&stats->commit_calls, "commits", out, arg);
  mi_stat_counter_print(&stats->reset_calls, "resets", out, arg);
  mi_stat_counter_print(&stats->purge_calls, "purges", out, arg);
  mi_stat_counter_print(&stats->malloc_guarded_count, "guarded", out, arg);
  mi_stat_print(&stats->threads, "threads", -1, out, arg);
  mi_stat_counter_print_avg(&stats->searches, "searches", out, arg);
  mi_stat_counter_print_avg(&stats->page_searches, "searches", out, arg);
  _mi_fprintf(out, arg, "%10s: %5zu\n", "numa nodes", _mi_os_numa_node_count());

  size_t elapsed;
@ -508,3 +473,184 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
|
|||
if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit;
|
||||
if (page_faults!=NULL) *page_faults = pinfo.page_faults;
|
||||
}
|
||||
|
||||
|
||||
// --------------------------------------------------------
|
||||
// Return statistics
|
||||
// --------------------------------------------------------
|
||||
|
||||
size_t mi_stats_get_bin_size(size_t bin) mi_attr_noexcept {
|
||||
if (bin > MI_BIN_HUGE) return 0;
|
||||
return _mi_bin_size(bin);
|
||||
}
|
||||
|
||||
void mi_stats_get(size_t stats_size, mi_stats_t* stats) mi_attr_noexcept {
|
||||
if (stats == NULL || stats_size == 0) return;
|
||||
_mi_memzero(stats, stats_size);
|
||||
const size_t size = (stats_size > sizeof(mi_stats_t) ? sizeof(mi_stats_t) : stats_size);
|
||||
_mi_memcpy(stats, &_mi_subproc()->stats, size);
|
||||
stats->version = MI_STAT_VERSION;
|
||||
}

// --------------------------------------------------------
// Statistics in JSON format
// --------------------------------------------------------

typedef struct mi_heap_buf_s {
  char*   buf;
  size_t  size;
  size_t  used;
  bool    can_realloc;
} mi_heap_buf_t;

static bool mi_heap_buf_expand(mi_heap_buf_t* hbuf) {
  if (hbuf==NULL) return false;
  if (hbuf->buf != NULL && hbuf->size>0) {
    hbuf->buf[hbuf->size-1] = 0;
  }
  if (hbuf->size > SIZE_MAX/2 || !hbuf->can_realloc) return false;
  const size_t newsize = (hbuf->size == 0 ? 2*MI_KiB : 2*hbuf->size);
  char* const newbuf = (char*)mi_rezalloc(hbuf->buf, newsize);
  if (newbuf == NULL) return false;
  hbuf->buf = newbuf;
  hbuf->size = newsize;
  return true;
}

static void mi_heap_buf_print(mi_heap_buf_t* hbuf, const char* msg) {
  if (msg==NULL || hbuf==NULL) return;
  if (hbuf->used + 1 >= hbuf->size && !hbuf->can_realloc) return;
  for (const char* src = msg; *src != 0; src++) {
    char c = *src;
    if (hbuf->used + 1 >= hbuf->size) {
      if (!mi_heap_buf_expand(hbuf)) return;
    }
    mi_assert_internal(hbuf->used < hbuf->size);
    hbuf->buf[hbuf->used++] = c;
  }
  mi_assert_internal(hbuf->used < hbuf->size);
  hbuf->buf[hbuf->used] = 0;
}
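
// Note: mi_heap_buf_print appends `msg` and always keeps the buffer zero-terminated.
// When `can_realloc` is set, the buffer grows geometrically via mi_rezalloc (starting
// at 2 KiB, then doubling); with a fixed user-supplied buffer, output is silently
// truncated once the buffer is full.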

static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, size_t bin, bool add_comma) {
  const size_t binsize = mi_stats_get_bin_size(bin);
  const size_t pagesize = (binsize <= MI_SMALL_MAX_OBJ_SIZE ? MI_SMALL_PAGE_SIZE :
                          (binsize <= MI_MEDIUM_MAX_OBJ_SIZE ? MI_MEDIUM_PAGE_SIZE :
                          (binsize <= MI_LARGE_MAX_OBJ_SIZE ? MI_LARGE_PAGE_SIZE : 0)));
  char buf[128];
  _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? "," : ""));
  buf[127] = 0;
  mi_heap_buf_print(hbuf, buf);
}
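
// For illustration only (the numbers are hypothetical): for a 64-byte bin this emits
// one JSON object per line such as
//   { "total": 120, "peak": 8, "current": 3, "block_size": 64, "page_size": 65536 }
// with a trailing comma for every element of the array except the last.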

static void mi_heap_buf_print_count_cbin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, mi_chunkbin_t bin, bool add_comma) {
  const char* cbin = " ";
  switch(bin) {
    case MI_CBIN_SMALL:  cbin = "S"; break;
    case MI_CBIN_MEDIUM: cbin = "M"; break;
    case MI_CBIN_LARGE:  cbin = "L"; break;
    case MI_CBIN_OTHER:  cbin = "X"; break;
    default:             cbin = " "; break;
  }
  char buf[128];
  _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"bin\": \"%s\" }%s\n", prefix, stat->total, stat->peak, stat->current, cbin, (add_comma ? "," : ""));
  buf[127] = 0;
  mi_heap_buf_print(hbuf, buf);
}

static void mi_heap_buf_print_count(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, bool add_comma) {
  char buf[128];
  _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld }%s\n", prefix, stat->total, stat->peak, stat->current, (add_comma ? "," : ""));
  buf[127] = 0;
  mi_heap_buf_print(hbuf, buf);
}

static void mi_heap_buf_print_count_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_count_t* stat) {
  char buf[128];
  _mi_snprintf(buf, 128, " \"%s\": ", name);
  buf[127] = 0;
  mi_heap_buf_print(hbuf, buf);
  mi_heap_buf_print_count(hbuf, "", stat, true);
}

static void mi_heap_buf_print_value(mi_heap_buf_t* hbuf, const char* name, int64_t val) {
  char buf[128];
  _mi_snprintf(buf, 128, " \"%s\": %lld,\n", name, val);
  buf[127] = 0;
  mi_heap_buf_print(hbuf, buf);
}

static void mi_heap_buf_print_size(mi_heap_buf_t* hbuf, const char* name, size_t val, bool add_comma) {
  char buf[128];
  _mi_snprintf(buf, 128, " \"%s\": %zu%s\n", name, val, (add_comma ? "," : ""));
  buf[127] = 0;
  mi_heap_buf_print(hbuf, buf);
}

static void mi_heap_buf_print_counter_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_counter_t* stat) {
  mi_heap_buf_print_value(hbuf, name, stat->total);
}

#define MI_STAT_COUNT(stat)    mi_heap_buf_print_count_value(&hbuf, #stat, &stats->stat);
#define MI_STAT_COUNTER(stat)  mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat);
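
// These two macros turn the MI_STAT_FIELDS() X-macro list from <mimalloc-stats.h> into
// one JSON member per statistic when it is expanded inside mi_stats_get_json below.
// Sketch of the preprocessor output for two fields from that list:
//
//   MI_STAT_COUNT(committed)     =>  mi_heap_buf_print_count_value(&hbuf, "committed", &stats->committed);
//   MI_STAT_COUNTER(mmap_calls)  =>  mi_heap_buf_print_counter_value(&hbuf, "mmap_calls", &stats->mmap_calls);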

char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept {
  mi_heap_buf_t hbuf = { NULL, 0, 0, true };
  if (output_size > 0 && output_buf != NULL) {
    _mi_memzero(output_buf, output_size);
    hbuf.buf = output_buf;
    hbuf.size = output_size;
    hbuf.can_realloc = false;
  }
  else {
    if (!mi_heap_buf_expand(&hbuf)) return NULL;
  }
  mi_heap_buf_print(&hbuf, "{\n");
  mi_heap_buf_print_value(&hbuf, "version", MI_STAT_VERSION);
  mi_heap_buf_print_value(&hbuf, "mimalloc_version", MI_MALLOC_VERSION);

  // process info
  mi_heap_buf_print(&hbuf, " \"process\": {\n");
  size_t elapsed;
  size_t user_time;
  size_t sys_time;
  size_t current_rss;
  size_t peak_rss;
  size_t current_commit;
  size_t peak_commit;
  size_t page_faults;
  mi_process_info(&elapsed, &user_time, &sys_time, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
  mi_heap_buf_print_size(&hbuf, "elapsed_msecs", elapsed, true);
  mi_heap_buf_print_size(&hbuf, "user_msecs", user_time, true);
  mi_heap_buf_print_size(&hbuf, "system_msecs", sys_time, true);
  mi_heap_buf_print_size(&hbuf, "page_faults", page_faults, true);
  mi_heap_buf_print_size(&hbuf, "rss_current", current_rss, true);
  mi_heap_buf_print_size(&hbuf, "rss_peak", peak_rss, true);
  mi_heap_buf_print_size(&hbuf, "commit_current", current_commit, true);
  mi_heap_buf_print_size(&hbuf, "commit_peak", peak_commit, false);
  mi_heap_buf_print(&hbuf, " },\n");

  // statistics
  mi_stats_t* stats = &_mi_subproc()->stats;
  MI_STAT_FIELDS()

  // size bins
  mi_heap_buf_print(&hbuf, " \"malloc_bins\": [\n");
  for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
    mi_heap_buf_print_count_bin(&hbuf, " ", &stats->malloc_bins[i], i, i!=MI_BIN_HUGE);
  }
  mi_heap_buf_print(&hbuf, " ],\n");
  mi_heap_buf_print(&hbuf, " \"page_bins\": [\n");
  for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
    mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE);
  }
  mi_heap_buf_print(&hbuf, " ],\n");
  mi_heap_buf_print(&hbuf, " \"chunk_bins\": [\n");
  for (size_t i = 0; i < MI_CBIN_COUNT; i++) {
    mi_heap_buf_print_count_cbin(&hbuf, " ", &stats->chunk_bins[i], (mi_chunkbin_t)i, i!=MI_CBIN_COUNT-1);
  }
  mi_heap_buf_print(&hbuf, " ]\n");
  mi_heap_buf_print(&hbuf, "}\n");
  return hbuf.buf;
}
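
// Illustrative usage sketch (the helper name and buffer size are hypothetical; assumes
// <stdio.h>): besides mi_stats_get_json(0, NULL) (which allocates and must be released
// with mi_free, as in the test below), a caller can pass its own buffer; then nothing
// is allocated and the output is truncated to fit.
static void example_json_report(void) {
  char buf[4096];
  if (mi_stats_get_json(sizeof(buf), buf) != NULL) {
    fputs(buf, stderr);
  }
}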

@@ -79,6 +79,7 @@ static bool main_participates = false;     // main thread participates as a
#define custom_free(p)      free(p)
#else
#include <mimalloc.h>
#include <mimalloc-stats.h>
#define custom_calloc(n,s)  mi_calloc(n,s)
#define custom_realloc(p,s) mi_realloc(p,s)
#define custom_free(p)      mi_free(p)

@@ -365,6 +366,11 @@ int main(int argc, char** argv) {
#ifndef NDEBUG
  mi_debug_show_arenas();
  mi_collect(true);
  char* json = mi_stats_get_json(0, NULL);
  if (json != NULL) {
    fputs(json, stderr);
    mi_free(json);
  }
#endif
  mi_stats_print(NULL);
#endif