merge from dev-reset

commit becf379ecd
daanx, 2023-04-16 12:32:20 -07:00
8 changed files with 54 additions and 57 deletions

include/mimalloc/internal.h

@@ -88,8 +88,10 @@ void _mi_thread_data_collect(void);

 // os.c
 void  _mi_os_init(void);                                            // called from process init
-void* _mi_os_alloc(size_t size, bool* is_zero, mi_stats_t* stats);  // to allocate thread local data
-void  _mi_os_free(void* p, size_t size, mi_stats_t* stats);         // to free thread local data
+void* _mi_os_alloc(size_t size, bool* is_zero, mi_stats_t* stats);
+void  _mi_os_free(void* p, size_t size, mi_stats_t* stats);
+void  _mi_os_free_ex(void* p, size_t size, bool is_committed, mi_stats_t* stats);
 size_t _mi_os_page_size(void);
 size_t _mi_os_good_alloc_size(size_t size);
 bool   _mi_os_has_overcommit(void);
@@ -104,16 +106,16 @@ bool _mi_os_unprotect(void* addr, size_t size);
 bool  _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);

-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, bool* is_zero, mi_stats_t* stats);
-void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, bool* is_zero, mi_stats_t* tld_stats);
-void  _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* stats);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats);
+void  _mi_os_free_aligned_at_offset(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
 void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
 bool  _mi_os_use_large_page(size_t size, size_t alignment);
 size_t _mi_os_large_page_size(void);
-void  _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);

 void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, bool* is_zero);
-void  _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
+void  _mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
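
For orientation, a minimal caller sketch pairing the renamed at-offset allocation and free; the sizes are illustrative, and passing `&_mi_stats_main` follows the usage elsewhere in this diff:

    // allocate 4 MiB such that (p + 64 KiB) is 1 MiB aligned, then free it.
    // illustrative values; allow_large is a plain input, is_large/is_zero are outputs.
    bool is_large = false;
    bool is_zero  = false;
    void* p = _mi_os_alloc_aligned_at_offset(4*MI_MiB, 1*MI_MiB, 64*MI_KiB,
                                             true /* commit */, false /* allow_large */,
                                             &is_large, &is_zero, &_mi_stats_main);
    if (p != NULL) {
      _mi_os_free_aligned_at_offset(p, 4*MI_MiB, 1*MI_MiB, 64*MI_KiB,
                                    true /* was_committed */, &_mi_stats_main);
    }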
@@ -918,6 +920,8 @@ static inline void _mi_memzero(void* dst, size_t n) {
 }
 #endif

+// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
+#define _mi_memzero_var(x)  memset(&x,0,sizeof(x))
+
 // -------------------------------------------------------------------------------
 // The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
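
The macro takes the variable itself (not a pointer), so `sizeof(x)` is a compile-time constant and the `memset` is typically lowered to a few plain stores. A minimal sketch, using a hypothetical struct for illustration:

    #include <string.h>

    typedef struct { int count; void* data; } example_t;  // hypothetical type

    void demo(void) {
      example_t x;
      _mi_memzero_var(x);   // expands to memset(&x, 0, sizeof(x)); constant size
    }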

include/mimalloc/prim.h

@@ -26,8 +26,8 @@ typedef struct mi_os_mem_config_s {
   size_t  large_page_size;     // 2MiB
   size_t  alloc_granularity;   // smallest allocation size (on Windows 64KiB)
   bool    has_overcommit;      // can we reserve more memory than can be actually committed?
-  bool    must_free_whole;     // must allocated blocks free as a whole (false for mmap, true for VirtualAlloc)
-  bool    has_virtual_reserve; // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
+  bool    must_free_whole;     // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
+  bool    has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
 } mi_os_mem_config_t;

 // Initialize
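
The `must_free_whole` distinction mirrors the underlying OS primitives: POSIX `munmap` accepts any page-aligned sub-range of a mapping, while Windows `VirtualFree` with `MEM_RELEASE` must be handed the exact base address of the original reservation and a size of 0. A sketch of the two calls, with hypothetical `base`/`offset`/`part_size` values:

    // POSIX: a sub-range of a mapping can be unmapped on its own.
    munmap((uint8_t*)base + offset, part_size);   // ok if page aligned

    // Windows: the whole reservation is released at its original base.
    VirtualFree(base, 0, MEM_RELEASE);            // size must be 0 for MEM_RELEASE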

include/mimalloc/types.h

@@ -325,6 +325,10 @@ typedef struct mi_page_s {

+// ------------------------------------------------------
+// Mimalloc segments contain mimalloc pages
+// ------------------------------------------------------
+
 typedef enum mi_page_kind_e {
   MI_PAGE_SMALL,   // small blocks go into 64KiB pages inside a segment
   MI_PAGE_MEDIUM,  // medium blocks go into medium pages inside a segment
@@ -393,7 +397,6 @@ typedef struct mi_memid_s {
     mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
   } mem;
   bool    is_pinned;      // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
-  bool    is_large;       // `true` if the memory is in OS large (2MiB) or huge (1GiB) pages. (`is_pinned` will be true)
   bool    was_committed;  // `true` if the memory was originally allocated as committed
   bool    was_zero;       // `true` if the memory was originally zero initialized
   mi_memkind_t memkind;

src/arena.c

@@ -50,9 +50,9 @@ typedef struct mi_arena_s {
   mi_memid_t meta_memid;              // memid of the arena structure itself (OS or static allocation)
   int      numa_node;                 // associated NUMA node
   bool     is_zero_init;              // is the arena zero initialized?
+  bool     allow_decommit;            // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   bool     is_large;                  // large- or huge OS pages (always committed)
   bool     is_huge_alloc;             // huge OS pages allocated by `_mi_os_alloc_huge_pages`
-  bool     allow_decommit;            // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   _Atomic(size_t) search_idx;         // optimization to start the search for free blocks
   _Atomic(mi_msecs_t) purge_expire;   // expiration time when blocks should be decommitted from `blocks_decommit`.
   mi_bitmap_field_t* blocks_dirty;    // are the blocks potentially non-zero?
@@ -96,22 +96,19 @@ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive
   memory id's
----------------------------------------------------------- */

-static mi_memid_t mi_memid_none(void) {
-  mi_memid_t memid;
-  _mi_memzero(&memid, sizeof(memid));
-  memid.memkind = MI_MEM_NONE;
-  return memid;
-}
-
 static mi_memid_t mi_memid_create(mi_memkind_t memkind) {
-  mi_memid_t memid = mi_memid_none();
+  mi_memid_t memid;
+  _mi_memzero_var(memid);
   memid.memkind = memkind;
   return memid;
 }

+static mi_memid_t mi_memid_none(void) {
+  return mi_memid_create(MI_MEM_NONE);
+}
+
 static mi_memid_t mi_memid_create_os(bool committed) {
-  mi_memid_t memid = mi_memid_none();
-  memid.memkind = MI_MEM_OS;
+  mi_memid_t memid = mi_memid_create(MI_MEM_OS);
   memid.was_committed = committed;
   return memid;
 }
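
The reordering is needed because `mi_memid_none` now delegates to `mi_memid_create`, which must therefore be defined first. How the constructors compose, mirroring the calls visible in this diff:

    // every memid starts fully zeroed via _mi_memzero_var; only the
    // kind-specific fields are set afterwards.
    mi_memid_t none = mi_memid_none();           // memkind == MI_MEM_NONE, rest zero
    mi_memid_t os   = mi_memid_create_os(true);  // memkind == MI_MEM_OS, was_committed == true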
@@ -166,11 +163,10 @@ static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index)

 /* -----------------------------------------------------------
   Special static area for mimalloc internal structures
-  to avoid OS calls (for example, for the arena and thread
-  metadata)
+  to avoid OS calls (for example, for the arena metadata)
----------------------------------------------------------- */

-#define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*8*MI_KiB)  // 64 KiB on 64-bit
+#define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*MI_KiB)    // 8 KiB on 64-bit

 static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
 static _Atomic(size_t) mi_arena_static_top;
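
The size arithmetic, spelled out for a 64-bit target where `MI_INTPTR_SIZE` is 8 and `MI_KiB` is 1024:

    // before: MI_INTPTR_SIZE * 8 * MI_KiB = 8 * 8 * 1024 = 65536 bytes (64 KiB)
    // after:  MI_INTPTR_SIZE * MI_KiB     = 8 * 1024     =  8192 bytes ( 8 KiB)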
@@ -212,7 +208,7 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats)
   p = _mi_os_alloc(size, &is_zero, stats);
   if (p != NULL) {
     *memid = mi_memid_create_os(true);
-    if (!is_zero) { _mi_memzero(p, size); }
+    if (!is_zero) { _mi_memzero_aligned(p, size); }
     return p;
   }
@@ -259,7 +255,6 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
   // claimed it!
   void* p = arena->start + mi_arena_block_size(mi_bitmap_index_bit(bitmap_index));
   *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
-  memid->is_large  = arena->is_large;
   memid->is_pinned = (arena->is_large || !arena->allow_decommit);

   // none of the claimed blocks should be scheduled for a decommit
@@ -437,13 +432,12 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
   }

   // finally, fall back to the OS
-  bool os_large = allow_large;
+  bool os_is_large = false;
   bool os_is_zero = false;
-  void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, commit, &os_large, &os_is_zero, tld->stats);
+  void* p = _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, &os_is_large, &os_is_zero, tld->stats);
   if (p != NULL) {
     *memid = mi_memid_create_os(commit);
-    memid->is_large  = os_large;
-    memid->is_pinned = os_large;
+    memid->is_pinned = os_is_large;
     memid->was_zero  = os_is_zero;
     memid->mem.os.alignment    = alignment;
     memid->mem.os.align_offset = align_offset;
@@ -647,7 +641,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats)
       _mi_stat_decrease(&stats->committed, committed_size);
     }
     if (memid.mem.os.align_offset != 0) {
-      _mi_os_free_aligned(p, size, memid.mem.os.alignment, memid.mem.os.align_offset, all_committed, stats);
+      _mi_os_free_aligned_at_offset(p, size, memid.mem.os.alignment, memid.mem.os.align_offset, all_committed, stats);
     }
     else {
       _mi_os_free(p, size, stats);
@@ -729,7 +723,7 @@ static void mi_arenas_unsafe_destroy(void) {
     if (arena->owned && arena->start != NULL) {
       mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
       if (arena->is_huge_alloc) {
-        _mi_os_free_huge_pages(arena->start, mi_arena_size(arena), &_mi_stats_main);
+        _mi_os_free_huge_os_pages(arena->start, mi_arena_size(arena), &_mi_stats_main);
       }
       else {
         _mi_os_free(arena->start, mi_arena_size(arena), &_mi_stats_main);
@@ -860,16 +854,16 @@ static int mi_reserve_os_memory_ex2(size_t size, bool commit, bool allow_large,
 {
   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
   size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
-  bool large = allow_large;
-  bool is_zero;
-  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &is_zero, &_mi_stats_main);
+  bool is_large = false;
+  bool is_zero  = false;
+  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &is_large, &is_zero, &_mi_stats_main);
   if (start==NULL) return ENOMEM;
-  if (!mi_manage_os_memory_ex2(start, size, (large || commit), large, false, is_zero, -1, exclusive, owned, arena_id)) {
+  if (!mi_manage_os_memory_ex2(start, size, (is_large || commit), is_large, false, is_zero, -1, exclusive, owned, arena_id)) {
     _mi_os_free_ex(start, size, commit, &_mi_stats_main);
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
   }
-  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
+  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size,1024), is_large ? " (in large os pages)" : "");
   return 0;
 }
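
This helper backs the public reservation API; a hedged usage sketch, assuming the `mi_reserve_os_memory_ex` entry point from `mimalloc.h` (not part of this diff):

    #include <mimalloc.h>

    // reserve 256 MiB as a committed, non-exclusive arena; allow_large
    // permits the OS to back it with 2 MiB pages when available.
    mi_arena_id_t arena_id;
    int err = mi_reserve_os_memory_ex(256 * 1024 * 1024, true /* commit */,
                                      true /* allow_large */, false /* exclusive */,
                                      &arena_id);
    if (err != 0) { /* ENOMEM on failure */ }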
@@ -943,7 +937,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs,
   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);

   if (!mi_manage_os_memory_ex2(p, hsize, true, true, true, is_zero, numa_node, exclusive, true /* owned */, arena_id)) {
-    _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
+    _mi_os_free_huge_os_pages(p, hsize, &_mi_stats_main);
     return ENOMEM;
   }
   return 0;

src/init.c

@@ -243,7 +243,7 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
   }

   if (td != NULL && !is_zero) {
-    _mi_memzero(td, sizeof(*td));
+    _mi_memzero_aligned(td, sizeof(*td));
   }
   return td;
 }

src/os.c

@@ -264,23 +264,19 @@ void* _mi_os_alloc(size_t size, bool* is_zero, mi_stats_t* tld_stats) {
   return p;
 }

-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, bool* is_zero, mi_stats_t* tld_stats)
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats)
 {
   MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
   MI_UNUSED(tld_stats);
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());
-  bool allow_large = false;
-  if (large != NULL) {
-    allow_large = *large;
-    *large = false;
-  }
-  bool is_largex = false;
-  bool is_zerox = false;
-  void* p = mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, &is_largex, &is_zerox, &_mi_stats_main /*tld->stats*/ );
-  if (large != NULL) { *large = is_largex; }
-  if (is_zero != NULL) { *is_zero = is_zerox; }
+
+  bool os_is_large = false;
+  bool os_is_zero  = false;
+  void* p = mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &_mi_stats_main /*tld->stats*/ );
+  if (is_large != NULL) { *is_large = os_is_large; }
+  if (is_zero != NULL)  { *is_zero  = os_is_zero; }
   return p;
 }
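
The new calling convention in one sketch: `allow_large` is a plain input, and either out-parameter may be NULL when the caller does not need it (values illustrative):

    bool is_large = false;
    // 8 MiB aligned to the segment alignment; large OS pages permitted.
    void* p = _mi_os_alloc_aligned(8*MI_MiB, MI_SEGMENT_ALIGN, true /* commit */,
                                   true /* allow_large */, &is_large,
                                   NULL /* is_zero not needed */, &_mi_stats_main);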
@@ -292,20 +288,20 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, bool* is_zero, mi_stats_t* tld_stats)
   to use the actual start of the memory region.
----------------------------------------------------------- */

-void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t offset, bool commit, bool* large, bool* is_zero, mi_stats_t* tld_stats) {
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) {
   mi_assert(offset <= MI_SEGMENT_SIZE);
   mi_assert(offset <= size);
   mi_assert((alignment % _mi_os_page_size()) == 0);
   if (offset > MI_SEGMENT_SIZE) return NULL;
   if (offset == 0) {
     // regular aligned allocation
-    return _mi_os_alloc_aligned(size, alignment, commit, large, is_zero, tld_stats);
+    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, is_large, is_zero, tld_stats);
   }
   else {
     // overallocate to align at an offset
     const size_t extra = _mi_align_up(offset, alignment) - offset;
     const size_t oversize = size + extra;
-    void* start = _mi_os_alloc_aligned(oversize, alignment, commit, large, is_zero, tld_stats);
+    void* start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, is_large, is_zero, tld_stats);
     if (start == NULL) return NULL;
     void* p = (uint8_t*)start + extra;
     mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
@@ -317,7 +313,7 @@ void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t offset, bool commit, bool* large, bool* is_zero, mi_stats_t* tld_stats) {
   }
 }

-void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats) {
+void _mi_os_free_aligned_at_offset(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats) {
   mi_assert(align_offset <= MI_SEGMENT_SIZE);
   const size_t extra = _mi_align_up(align_offset, alignment) - align_offset;
   void* start = (uint8_t*)p - extra;
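
The offset arithmetic, worked through with illustrative numbers:

    // alignment = 1 MiB, offset = 64 KiB:
    //   extra = _mi_align_up(64 KiB, 1 MiB) - 64 KiB = 1 MiB - 64 KiB = 960 KiB
    //   start is 1 MiB aligned, so p = start + 960 KiB gives
    //   p + offset = start + 1 MiB, which is 1 MiB aligned again.
    // _mi_os_free_aligned_at_offset recomputes the same extra to recover start.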
@@ -594,7 +590,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize, bool* is_zero)

 // free every huge page in a range individually (as we allocated per page)
 // note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) {
+void _mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) {
   if (p==NULL || size==0) return;
   uint8_t* base = (uint8_t*)p;
   while (size >= MI_HUGE_OS_PAGE_SIZE) {

src/segment.c

@@ -842,7 +842,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
   mi_track_mem_undefined(segment, (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE); // todo: should not be necessary?
   segment->memid = memid;
-  segment->allow_decommit = !memid.is_pinned && !memid.is_large;
+  segment->allow_decommit = !memid.is_pinned;
   segment->allow_purge = segment->allow_decommit && mi_option_is_enabled(mi_option_allow_purge);
   segment->segment_size = segment_size;
   segment->commit_mask = commit_mask;
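
Dropping the `is_large` test is sound because, per the assignments earlier in this commit, pinned-ness already covers large OS pages:

    // from the arena and OS fallback paths in this same commit:
    //   memid->is_pinned = (arena->is_large || !arena->allow_decommit);
    //   memid->is_pinned = os_is_large;
    // so is_pinned is true whenever the memory sits in large/huge OS pages,
    // and !memid.is_pinned alone is equivalent to the old two-flag test.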

src/stats.c

@@ -444,7 +444,7 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start) {

 mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept
 {
   mi_process_info_t pinfo;
-  _mi_memzero(&pinfo,sizeof(pinfo));
+  _mi_memzero_var(pinfo);
   pinfo.elapsed = _mi_clock_end(mi_process_start);
   pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
   pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));