Mirror of https://github.com/microsoft/mimalloc.git

merge from dev

commit 1b0de9b4cf
48 changed files with 384 additions and 934 deletions
src/alloc-override-osx.c

@@ -41,8 +41,11 @@ extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_im
 ------------------------------------------------------ */

 static size_t zone_size(malloc_zone_t* zone, const void* p) {
-  UNUSED(zone); UNUSED(p);
-  return 0; // as we cannot guarantee that `p` comes from us, just return 0
+  UNUSED(zone);
+  if (!mi_is_in_heap_region(p))
+    return 0; // not our pointer, bail out
+  return mi_usable_size(p);
 }

 static void* zone_malloc(malloc_zone_t* zone, size_t size) {
src/alloc-override.c

@@ -163,18 +163,30 @@ extern "C" {
 // Posix & Unix functions definitions
 // ------------------------------------------------------

-void*  reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize);
-size_t malloc_size(void* p)        MI_FORWARD1(mi_usable_size,p);
-size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p);
 void   cfree(void* p)              MI_FORWARD0(mi_free, p);
+void*  reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize);
+size_t malloc_size(const void* p)  MI_FORWARD1(mi_usable_size,p);
+#if !defined(__ANDROID__)
+size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p);
+#else
+size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p);
+#endif

 // no forwarding here due to aliasing/name mangling issues
-void*  valloc(size_t size)                                     { return mi_valloc(size); }
-void*  pvalloc(size_t size)                                    { return mi_pvalloc(size); }
-void*  reallocarray(void* p, size_t count, size_t size)        { return mi_reallocarray(p, count, size); }
-void*  memalign(size_t alignment, size_t size)                 { return mi_memalign(alignment, size); }
-void*  aligned_alloc(size_t alignment, size_t size)            { return mi_aligned_alloc(alignment, size); }
-int    posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); }
+void*  valloc(size_t size)                                     { return mi_valloc(size); }
+void*  pvalloc(size_t size)                                    { return mi_pvalloc(size); }
+void*  reallocarray(void* p, size_t count, size_t size)        { return mi_reallocarray(p, count, size); }
+void*  memalign(size_t alignment, size_t size)                 { return mi_memalign(alignment, size); }
+int    posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); }
+void*  _aligned_malloc(size_t alignment, size_t size)          { return mi_aligned_alloc(alignment, size); }
+
+// on some glibc `aligned_alloc` is declared `static inline` so we cannot override it (e.g. Conda). This happens
+// when _GLIBCXX_HAVE_ALIGNED_ALLOC is not defined. However, in those cases it will use `memalign`, `posix_memalign`,
+// or `_aligned_malloc` and we can avoid overriding it ourselves.
+#if _GLIBCXX_HAVE_ALIGNED_ALLOC
+void*  aligned_alloc(size_t alignment, size_t size)            { return mi_aligned_alloc(alignment, size); }
+#endif

 #if defined(__GLIBC__) && defined(__linux__)
 // forward __libc interface (needed for glibc-based Linux distributions)
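Note: the new `_GLIBCXX_HAVE_ALIGNED_ALLOC` guard works because when a libc header declares `aligned_alloc` as a static inline, that inline still bottoms out in an overridable function. A sketch of the assumed header shape (illustrative, not the actual glibc source):

  // What a static-inline libc declaration conceptually looks like:
  static inline void* aligned_alloc(size_t alignment, size_t size) {
    // cannot be interposed itself, but memalign resolves to mi_memalign
    // through the overrides above, so alignment is still honored
    return memalign(alignment, size);
  }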
@@ -184,10 +196,10 @@ int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_me
 void __libc_free(void* p)  MI_FORWARD0(mi_free,p);
 void __libc_cfree(void* p) MI_FORWARD0(mi_free,p);

 void* __libc_valloc(size_t size) { return mi_valloc(size); }
 void* __libc_pvalloc(size_t size) { return mi_pvalloc(size); }
 void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment,size); }
 int __posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p,alignment,size); }
 #endif

 #ifdef __cplusplus
24  src/alloc.c

@@ -44,7 +44,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
     mi_heap_stat_increase(heap, normal[bin], 1);
   }
 #endif
-#if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
+#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
   mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
   ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
   mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
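Note: the guard change from `defined(MI_PADDING)` to `(MI_PADDING > 0)` matters once MI_PADDING is always defined as 0 or 1. A small self-contained illustration:

  #define MI_PADDING 0          // padding explicitly disabled
  #if defined(MI_PADDING)       // true: the macro exists even when it is 0
    // the old guard would still compile the padding checks here
  #endif
  #if (MI_PADDING > 0)          // false, as intended
    // the new guard compiles the padding checks only when enabled
  #endif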
@@ -203,7 +203,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
 // Check for heap block overflow by setting up padding at the end of the block
 // ---------------------------------------------------------------------------

-#if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
 static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
   *bsize = mi_page_usable_block_size(page);
   const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);

@@ -506,15 +506,16 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
   if (p==NULL) return 0;
   const mi_segment_t* const segment = _mi_ptr_segment(p);
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
-  const mi_block_t* const block = (const mi_block_t*)p;
-  const size_t size = mi_page_usable_size_of(page, block);
+  const mi_block_t* block = (const mi_block_t*)p;
   if (mi_unlikely(mi_page_has_aligned(page))) {
-    ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
+    block = _mi_page_ptr_unalign(segment, page, p);
+    size_t size = mi_page_usable_size_of(page, block);
+    ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)block;
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
     return (size - adjust);
   }
   else {
-    return size;
+    return mi_page_usable_size_of(page, block);
   }
 }
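Note: the rewritten mi_usable_size measures the usable bytes from `p` itself rather than from the start of the underlying block, which is what callers of an aligned allocation expect. A sketch of the invariant (hypothetical sizes):

  #include <mimalloc.h>

  void example(void) {
    void* p = mi_malloc_aligned(100, 64); // p may sit inside a larger block
    size_t s = mi_usable_size(p);         // bytes available starting at p
    // s >= 100: writing s bytes at p stays within the block, because the
    // code above subtracts the alignment adjustment from the block size
    mi_free(p);
  }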
@@ -677,12 +678,13 @@ mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
 // `strndup` using mi_malloc
 mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
   if (s == NULL) return NULL;
-  size_t m = strlen(s);
-  if (n > m) n = m;
-  char* t = (char*)mi_heap_malloc(heap, n+1);
+  const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found)
+  const size_t m = (end != NULL ? (size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string
+  mi_assert_internal(m <= n);
+  char* t = (char*)mi_heap_malloc(heap, m+1);
   if (t == NULL) return NULL;
-  memcpy(t, s, n);
-  t[n] = 0;
+  memcpy(t, s, m);
+  t[m] = 0;
   return t;
 }
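Note: the old code called strlen(s), which reads past the first `n` characters and is undefined behavior for a buffer that is not NUL-terminated; the memchr version never looks beyond `n` bytes. A sketch of the case the fix handles:

  #include <mimalloc.h>

  void example(void) {
    char buf[3] = { 'a', 'b', 'c' };  // deliberately not NUL-terminated
    char* t = mi_strndup(buf, 3);     // memchr(buf, 0, 3) finds no NUL,
                                      // so exactly 3 bytes are copied
    // t is "abc" with t[3] == 0; the strlen-based code could read past buf[2]
    mi_free(t);
  }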
10  src/init.c

@@ -35,9 +35,9 @@ const mi_page_t _mi_page_empty = {

 #define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)

-#if defined(MI_PADDING) && (MI_INTPTR_SIZE >= 8)
+#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
 #define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
-#elif defined(MI_PADDING)
+#elif (MI_PADDING>0)
 #define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
 #else
 #define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }

@@ -312,6 +312,12 @@ static void _mi_thread_done(mi_heap_t* default_heap);
   // use thread local storage keys to detect thread ending
   #include <windows.h>
   #include <fibersapi.h>
+  #if (_WIN32_WINNT < 0x600) // before Windows Vista
+  WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
+  WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
+  WINBASEAPI BOOL  WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
+  WINBASEAPI BOOL  WINAPI FlsFree(_In_ DWORD dwFlsIndex);
+  #endif
   static DWORD mi_fls_key = (DWORD)(-1);
   static void NTAPI mi_fls_done(PVOID value) {
     if (value!=NULL) _mi_thread_done((mi_heap_t*)value);
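Note: the FLS declarations are backfilled for pre-Vista SDKs; fiber-local-storage callbacks are used because they run on thread exit, giving the allocator a hook to reclaim the thread's heap. A standalone sketch of the pattern (illustrative; `my_thread_done` is hypothetical):

  #include <windows.h>
  #include <fibersapi.h>

  static void NTAPI my_thread_done(PVOID value) {
    // runs when a thread exits, with the value stored for that thread;
    // the callback is only invoked when the stored value is non-NULL
    (void)value;
  }

  static DWORD key;

  void example_init(void) {
    key = FlsAlloc(&my_thread_done); // register the exit callback
    FlsSetValue(key, (PVOID)1);      // arm it for the current thread
  }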
src/options.c

@@ -51,7 +51,11 @@ typedef struct mi_option_desc_s {
 static mi_option_desc_t options[_mi_option_last] =
 {
   // stable options
-  { MI_DEBUG, UNINIT, MI_OPTION(show_errors) },
+#if MI_DEBUG || defined(MI_SHOW_ERRORS)
+  { 1, UNINIT, MI_OPTION(show_errors) },
+#else
+  { 0, UNINIT, MI_OPTION(show_errors) },
+#endif
   { 0, UNINIT, MI_OPTION(show_stats) },
   { 0, UNINIT, MI_OPTION(verbose) },
@@ -262,13 +266,17 @@ static void mi_recurse_exit(void) {
 }

 void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
-  if (!mi_recurse_enter()) return;
-  if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) {
+  if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr?
+    if (!mi_recurse_enter()) return;
     out = mi_out_get_default(&arg);
+    if (prefix != NULL) out(prefix, arg);
+    out(message, arg);
+    mi_recurse_exit();
   }
+  else {
+    if (prefix != NULL) out(prefix, arg);
+    out(message, arg);
+  }
-  if (prefix != NULL) out(prefix,arg);
-  out(message,arg);
-  mi_recurse_exit();
 }

 // Define our own limited `fprintf` that avoids memory allocation.
@@ -350,6 +358,11 @@ static void mi_error_default(int err) {
     abort();
   }
 #endif
+#if defined(MI_XMALLOC)
+  if (err==ENOMEM || err==EOVERFLOW) { // abort on memory allocation fails in xmalloc mode
+    abort();
+  }
+#endif
 }

 void mi_register_error(mi_error_fun* fun, void* arg) {
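Note: with MI_XMALLOC defined, allocation failure now aborts instead of returning NULL. Without it, a process can observe the same error codes through the registration hook declared above. A sketch (mi_register_error is the public mimalloc API; the handler itself is hypothetical):

  #include <mimalloc.h>
  #include <errno.h>
  #include <stdio.h>

  static void my_error_handler(int err, void* arg) {
    (void)arg;
    if (err == ENOMEM)         fprintf(stderr, "mimalloc: out of memory\n");
    else if (err == EOVERFLOW) fprintf(stderr, "mimalloc: size overflow\n");
  }

  void example_init(void) {
    mi_register_error(&my_error_handler, NULL); // called on each allocator error
  }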
22  src/os.c

@@ -211,10 +211,12 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
     void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
     if (p != NULL) return p;
     DWORD err = GetLastError();
-    if (err != ERROR_INVALID_ADDRESS) { // if linked with multiple instances, we may have tried to allocate at an already allocated area
+    if (err != ERROR_INVALID_ADDRESS &&   // If linked with multiple instances, we may have tried to allocate at an already allocated area (#210)
+        err != ERROR_INVALID_PARAMETER) { // Windows7 instability (#230)
       return NULL;
     }
+    // fall through
   }
   #endif
   #if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
   // on modern Windows try use VirtualAlloc2 for aligned allocation
@@ -227,6 +229,7 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
     return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
   }
   #endif
+  // last resort
   return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
 }

@@ -614,7 +617,7 @@ static void mi_mprotect_hint(int err) {
 }

 // Commit/Decommit memory.
-// Usuelly commit is aligned liberal, while decommit is aligned conservative.
+// Usually commit is aligned liberal, while decommit is aligned conservative.
 // (but not for the reset version where we want commit to be conservative as well)
 static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) {
   // page align in the range, commit liberally, decommit conservative

@@ -822,7 +825,7 @@ and possibly associated with a specific NUMA node. (use `numa_node>=0`)
 -----------------------------------------------------------------------------*/
 #define MI_HUGE_OS_PAGE_SIZE  (GiB)

-#if defined(WIN32) && (MI_INTPTR_SIZE >= 8)
+#if defined(_WIN32) && (MI_INTPTR_SIZE >= 8)
 static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
 {
   mi_assert_internal(size%GiB == 0);

@@ -865,6 +868,8 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
       params[0].ULong = (unsigned)numa_node;
       return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, params, 1);
     }
+    #else
+    UNUSED(numa_node);
     #endif
   // otherwise use regular virtual alloc on older windows
   return VirtualAlloc(addr, size, flags, PAGE_READWRITE);

@@ -904,6 +909,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
 }
 #else
 static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
+  UNUSED(addr); UNUSED(size); UNUSED(numa_node);
   return NULL;
 }
 #endif

@@ -939,6 +945,7 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
 }
 #else
 static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
+  UNUSED(pages);
   if (total_size != NULL) *total_size = 0;
   return NULL;
 }

@@ -1011,7 +1018,12 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) {
 /* ----------------------------------------------------------------------------
 Support NUMA aware allocation
 -----------------------------------------------------------------------------*/
-#ifdef WIN32
+#ifdef _WIN32
+#if (_WIN32_WINNT < 0x601) // before Win7
+typedef struct _PROCESSOR_NUMBER { WORD Group; BYTE Number; BYTE Reserved; } PROCESSOR_NUMBER, *PPROCESSOR_NUMBER;
+WINBASEAPI VOID WINAPI GetCurrentProcessorNumberEx(_Out_ PPROCESSOR_NUMBER ProcNumber);
+WINBASEAPI BOOL WINAPI GetNumaProcessorNodeEx(_In_ PPROCESSOR_NUMBER Processor, _Out_ PUSHORT NodeNumber);
+#endif
 static size_t mi_os_numa_nodex() {
   PROCESSOR_NUMBER pnum;
   USHORT numa_node = 0;
include/mimalloc-internal.h

@@ -49,7 +49,7 @@ bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);

 // arena.c
-void  _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats);
+void  _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
 void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);

src/memory.c

@@ -187,7 +187,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   const uintptr_t idx = mi_atomic_increment(&regions_count);
   if (idx >= MI_REGION_MAX) {
     mi_atomic_decrement(&regions_count);
-    _mi_arena_free(start, MI_REGION_SIZE, arena_memid, tld->stats);
+    _mi_arena_free(start, MI_REGION_SIZE, arena_memid, region_commit, tld->stats);
     _mi_warning_message("maximum regions used: %zu GiB (perhaps recompile with a larger setting for MI_HEAP_REGION_MAX_SIZE)", _mi_divide_up(MI_HEAP_REGION_MAX_SIZE, GiB));
     return false;
   }

@@ -391,7 +391,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
   mem_region_t* region;
   if (mi_memid_is_arena(id,&region,&bit_idx,&arena_memid)) {
     // was a direct arena allocation, pass through
-    _mi_arena_free(p, size, arena_memid, tld->stats);
+    _mi_arena_free(p, size, arena_memid, full_commit, tld->stats);
   }
   else {
     // allocated in a region

@@ -454,12 +454,13 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
       // on success, free the whole region
       uint8_t* start = mi_atomic_read_ptr(uint8_t,&regions[i].start);
       size_t arena_memid = mi_atomic_read_relaxed(&regions[i].arena_memid);
+      uintptr_t commit = mi_atomic_read_relaxed(&regions[i].commit);
       memset(&regions[i], 0, sizeof(mem_region_t));
       // and release the whole region
       mi_atomic_write(&region->info, 0);
       if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
         _mi_abandoned_await_readers(); // ensure no pending reads
-        _mi_arena_free(start, MI_REGION_SIZE, arena_memid, tld->stats);
+        _mi_arena_free(start, MI_REGION_SIZE, arena_memid, (~commit == 0), tld->stats);
       }
     }
   }
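Note: `(~commit == 0)` tests that every bit of the per-block commit mask is set, i.e. the whole region is committed, which is exactly what the new `all_committed` argument of _mi_arena_free needs. A self-contained illustration:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  int main(void) {
    uintptr_t commit = ~(uintptr_t)0;      // one bit per block, all committed
    bool all_committed = (~commit == 0);   // true: no zero bit anywhere
    assert(all_committed);
    commit &= ~(uintptr_t)1;               // decommit block 0
    assert(!(~commit == 0));               // at least one block is uncommitted
    return 0;
  }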
src/segment.c

@@ -202,7 +202,6 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
   uint8_t* p = (uint8_t*)segment + (idx*MI_SEGMENT_SLICE_SIZE);
   /*
   if (idx == 0) {
-
     // the first page starts after the segment info (and possible guard page)
     p += segment->segment_info_size;
     psize -= segment->segment_info_size;

@@ -1300,7 +1299,7 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
   mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0);

   // claim it and free
-  mi_heap_t* heap = mi_get_default_heap();
+  mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
   // paranoia: if this it the last reference, the cas should always succeed
   if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
     mi_block_set_next(page, block, page->free);

@@ -1308,16 +1307,16 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
     page->used--;
     page->is_zero = false;
     mi_assert(page->used == 0);
-    mi_segments_tld_t* tld = &heap->tld->segments;
+    mi_tld_t* tld = heap->tld;
     const size_t bsize = mi_page_usable_block_size(page);
     if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
-      _mi_stat_decrease(&tld->stats->large, bsize);
+      _mi_stat_decrease(&tld->stats.large, bsize);
     }
     else {
-      _mi_stat_decrease(&tld->stats->huge, bsize);
+      _mi_stat_decrease(&tld->stats.huge, bsize);
     }
     // mi_segments_track_size((long)segment->segment_size, tld);
-    _mi_segment_page_free(page, true, tld);
+    _mi_segment_page_free(page, true, &tld->segments);
   }
 }
src/static.c

@@ -24,5 +24,8 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "alloc.c"
 #include "alloc-aligned.c"
 #include "alloc-posix.c"
+#if MI_OSX_ZONE
+#include "alloc-override-osx.c"
+#endif
 #include "init.c"
 #include "options.c"
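Note: with the zone override now guarded, the single-object build opts in explicitly on macOS. An assumed build invocation (the MI_OSX_ZONE flag name comes from the diff; the command line itself is illustrative):

  cc -c -O2 -DMI_OSX_ZONE=1 src/static.c -o mimalloc.o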
46  src/stats.c

@@ -237,9 +237,51 @@ static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bin
 #endif


+//------------------------------------------------------------
+// Use an output wrapper for line-buffered output
+// (which is nice when using loggers etc.)
+//------------------------------------------------------------
+typedef struct buffered_s {
+  mi_output_fun* out;   // original output function
+  void*          arg;   // and state
+  char*          buf;   // local buffer of at least size `count+1`
+  size_t         used;  // currently used chars `used <= count`
+  size_t         count; // total chars available for output
+} buffered_t;
+
+static void mi_buffered_flush(buffered_t* buf) {
+  buf->buf[buf->used] = 0;
+  _mi_fputs(buf->out, buf->arg, NULL, buf->buf);
+  buf->used = 0;
+}
+
+static void mi_buffered_out(const char* msg, void* arg) {
+  buffered_t* buf = (buffered_t*)arg;
+  if (msg==NULL || buf==NULL) return;
+  for (const char* src = msg; *src != 0; src++) {
+    char c = *src;
+    if (buf->used >= buf->count) mi_buffered_flush(buf);
+    mi_assert_internal(buf->used < buf->count);
+    buf->buf[buf->used++] = c;
+    if (c == '\n') mi_buffered_flush(buf);
+  }
+}
+
+//------------------------------------------------------------
+// Print statistics
+//------------------------------------------------------------
+
 static void mi_process_info(mi_msecs_t* utime, mi_msecs_t* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit);

-static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out, void* arg) mi_attr_noexcept {
+static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out0, void* arg0) mi_attr_noexcept {
+  // wrap the output function to be line buffered
+  char buf[256];
+  buffered_t buffer = { out0, arg0, buf, 0, 255 };
+  mi_output_fun* out = &mi_buffered_out;
+  void* arg = &buffer;
+
+  // and print using that
   mi_print_header(out,arg);
 #if MI_STAT>1
   mi_stat_count_t normal = { 0,0,0,0 };
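Note: the wrapper means any output function passed to the stats printer now receives whole lines (up to the 255-character buffer), one call per line, which is convenient for loggers. A sketch using the public entry point (mi_stats_print_out is the real mimalloc API; the logger itself is hypothetical):

  #include <mimalloc.h>
  #include <stdio.h>

  static void line_logger(const char* msg, void* arg) {
    (void)arg;
    fprintf(stderr, "[mimalloc] %s", msg); // msg ends at '\n' or when the buffer fills
  }

  void example(void) {
    mi_stats_print_out(&line_logger, NULL); // each stats line arrives in one call
  }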
@@ -287,7 +329,7 @@ static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun
     _mi_fprintf(out, arg, ", commit charge: ");
     mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s");
   }
   _mi_fprintf(out, arg, "\n");
 }

 static mi_msecs_t mi_time_start; // = 0