Fix whitespace

This mostly deletes trailing spaces.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Author: Johannes Schindelin
Date:   2022-12-03 00:23:43 +01:00
Commit: 745a34f475 (parent: ddc9841019)
66 changed files with 760 additions and 769 deletions
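
The change itself is mechanical. As a rough illustration only (a hypothetical filter sketched here, not the tooling actually used for this commit), trailing blanks can be stripped from each line with a small C program along these lines:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper, not part of this commit: strip trailing spaces and
   tabs from every line read on stdin and write the result to stdout.
   Assumes lines fit in the buffer. */
int main(void) {
  char line[4096];
  while (fgets(line, sizeof(line), stdin) != NULL) {
    size_t len = strlen(line);
    int had_newline = (len > 0 && line[len - 1] == '\n');
    if (had_newline) { len--; }   /* set the newline aside while trimming */
    while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t')) { len--; }
    line[len] = '\0';
    fputs(line, stdout);
    if (had_newline) { fputc('\n', stdout); }
  }
  return 0;
}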


@ -34,7 +34,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
size_t oversize;
if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
// use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
// This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
// This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
// first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
if mi_unlikely(offset != 0) {
// todo: cannot support offset alignment for very large alignments yet
@ -46,7 +46,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
// zero afterwards as only the area from the aligned_p may be committed!
if (p == NULL) return NULL;
if (p == NULL) return NULL;
}
else {
// otherwise over-allocate
@ -61,9 +61,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
mi_assert_internal(adjust < alignment);
void* aligned_p = (void*)((uintptr_t)p + adjust);
if (aligned_p != p) {
mi_page_set_has_aligned(_mi_ptr_page(p), true);
mi_page_set_has_aligned(_mi_ptr_page(p), true);
}
mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
@ -74,10 +74,10 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
ptrdiff_t zsize = mi_page_usable_block_size(_mi_ptr_page(p)) - diff - MI_PADDING_SIZE;
#if MI_PADDING
zsize -= MI_MAX_ALIGN_SIZE;
#endif
#endif
if (zsize > 0) { _mi_memzero(aligned_p, zsize); }
}
#if MI_TRACK_ENABLED
if (p != aligned_p) {
mi_track_free_size(p, oversize);
@ -87,7 +87,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
mi_track_resize(aligned_p, oversize, size);
}
#endif
return aligned_p;
return aligned_p;
}
// Primitive aligned allocation
@ -109,7 +109,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
return NULL;
}
*/
if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
@ -306,4 +306,3 @@ mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t
mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}


@ -19,8 +19,8 @@ terms of the MIT license. A copy of the license can be found in the file
This is done through the malloc zone interface.
It seems to be most robust in combination with interposing
though or otherwise we may get zone errors as there are could
be allocations done by the time we take over the
zone.
be allocations done by the time we take over the
zone.
------------------------------------------------------ */
#include <AvailabilityMacros.h>
@ -215,7 +215,7 @@ static malloc_zone_t mi_malloc_zone = {
.zone_name = "mimalloc",
.batch_malloc = &zone_batch_malloc,
.batch_free = &zone_batch_free,
.introspect = &mi_introspect,
.introspect = &mi_introspect,
#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6)
#if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14)
.version = 10,
@ -242,7 +242,7 @@ static malloc_zone_t mi_malloc_zone = {
#if defined(MI_OSX_INTERPOSE) && defined(MI_SHARED_LIB_EXPORT)
// ------------------------------------------------------
// Override malloc_xxx and malloc_zone_xxx api's to use only
// Override malloc_xxx and malloc_zone_xxx api's to use only
// our mimalloc zone. Since even the loader uses malloc
// on macOS, this ensures that all allocations go through
// mimalloc (as all calls are interposed).
@ -254,7 +254,7 @@ static malloc_zone_t mi_malloc_zone = {
static inline malloc_zone_t* mi_get_default_zone(void)
{
static bool init;
if mi_unlikely(!init) {
if mi_unlikely(!init) {
init = true;
malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see <http://eatmyrandom.blogspot.com/2010/03/mallocfree-interception-on-mac-os-x.html>)
}
@ -272,7 +272,7 @@ static malloc_zone_t* mi_malloc_create_zone(vm_size_t size, unsigned flags) {
return mi_get_default_zone();
}
static malloc_zone_t* mi_malloc_default_zone (void) {
static malloc_zone_t* mi_malloc_default_zone (void) {
return mi_get_default_zone();
}
@ -292,11 +292,11 @@ static kern_return_t mi_malloc_get_all_zones (task_t task, memory_reader_t mr, v
return KERN_SUCCESS;
}
static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) {
static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) {
return (zone == NULL ? mi_malloc_zone.zone_name : zone->zone_name);
}
static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) {
static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) {
MI_UNUSED(zone); MI_UNUSED(name);
}
@ -306,7 +306,7 @@ static int mi_malloc_jumpstart(uintptr_t cookie) {
}
static void mi__malloc_fork_prepare(void) {
// nothing
// nothing
}
static void mi__malloc_fork_parent(void) {
// nothing
@ -367,13 +367,13 @@ __attribute__((used)) static const struct mi_interpose_s _mi_zone_interposes[]
MI_INTERPOSE_MI(malloc_destroy_zone),
MI_INTERPOSE_MI(malloc_get_all_zones),
MI_INTERPOSE_MI(malloc_get_zone_name),
MI_INTERPOSE_MI(malloc_jumpstart),
MI_INTERPOSE_MI(malloc_jumpstart),
MI_INTERPOSE_MI(malloc_printf),
MI_INTERPOSE_MI(malloc_set_zone_name),
MI_INTERPOSE_MI(_malloc_fork_child),
MI_INTERPOSE_MI(_malloc_fork_parent),
MI_INTERPOSE_MI(_malloc_fork_prepare),
MI_INTERPOSE_ZONE(zone_batch_free),
MI_INTERPOSE_ZONE(zone_batch_malloc),
MI_INTERPOSE_ZONE(zone_calloc),
@ -416,7 +416,7 @@ static inline malloc_zone_t* mi_get_default_zone(void)
}
#if defined(__clang__)
__attribute__((constructor(0)))
__attribute__((constructor(0)))
#else
__attribute__((constructor)) // seems not supported by g++-11 on the M1
#endif


@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
#error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)"
#endif
#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32))
#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32))
#if defined(__APPLE__)
#include <AvailabilityMacros.h>
@ -43,8 +43,8 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
#define MI_FORWARD0(fun,x) MI_FORWARD(fun)
#define MI_FORWARD02(fun,x,y) MI_FORWARD(fun)
#else
// otherwise use forwarding by calling our `mi_` function
#define MI_FORWARD1(fun,x) { return fun(x); }
// otherwise use forwarding by calling our `mi_` function
#define MI_FORWARD1(fun,x) { return fun(x); }
#define MI_FORWARD2(fun,x,y) { return fun(x,y); }
#define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); }
#define MI_FORWARD0(fun,x) { fun(x); }
@ -52,8 +52,8 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
#endif
#if defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE)
// define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for
#if defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE)
// define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for
// functions that are interposed (or the interposing does not work)
#define MI_OSX_IS_INTERPOSED
@ -70,7 +70,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
};
#define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun }
#define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun)
__attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) =
{
MI_INTERPOSE_MI(malloc),
@ -84,7 +84,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
MI_INTERPOSE_MI(valloc),
MI_INTERPOSE_FUN(malloc_size,mi_malloc_size_checked),
MI_INTERPOSE_MI(malloc_good_size),
#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
MI_INTERPOSE_MI(aligned_alloc),
#endif
#ifdef MI_OSX_ZONE
@ -128,11 +128,11 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
// cannot override malloc unless using a dll.
// we just override new/delete which does work in a static library.
#else
// On all other systems forward to our API
// On all other systems forward to our API
mi_decl_export void* malloc(size_t size) MI_FORWARD1(mi_malloc, size)
mi_decl_export void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n)
mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize)
mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p)
mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p)
#endif
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__)
@ -174,20 +174,20 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); };
void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
#endif
#elif (defined(__GNUC__) || defined(__clang__))
#elif (defined(__GNUC__) || defined(__clang__))
// ------------------------------------------------------
// Override by defining the mangled C++ names of the operators (as
// used by GCC and CLang).
// See <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
// ------------------------------------------------------
void _ZdlPv(void* p) MI_FORWARD0(mi_free,p) // delete
void _ZdaPv(void* p) MI_FORWARD0(mi_free,p) // delete[]
void _ZdlPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n)
@ -196,12 +196,12 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
void _ZdaPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); }
void _ZdlPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); }
void _ZdaPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); }
#if (MI_INTPTR_SIZE==8)
void* _Znwm(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit
void* _Znam(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit
void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
void* _ZnwmSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
void* _ZnamSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
void* _ZnwmSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); }
@ -210,7 +210,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
void* _Znwj(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit
void* _Znaj(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit
void* _ZnwjRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
void* _ZnwjSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
void* _ZnajSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); }
@ -240,22 +240,22 @@ extern "C" {
// No forwarding here due to aliasing/name mangling issues
void* valloc(size_t size) { return mi_valloc(size); }
void vfree(void* p) { mi_free(p); }
void vfree(void* p) { mi_free(p); }
size_t malloc_good_size(size_t size) { return mi_malloc_good_size(size); }
int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); }
// `aligned_alloc` is only available when __USE_ISOC11 is defined.
// Note: Conda has a custom glibc where `aligned_alloc` is declared `static inline` and we cannot
// override it, but both _ISOC11_SOURCE and __USE_ISOC11 are undefined in Conda GCC7 or GCC9.
// Fortunately, in the case where `aligned_alloc` is declared as `static inline` it
// uses internally `memalign`, `posix_memalign`, or `_aligned_malloc` so we can avoid overriding it ourselves.
#if __USE_ISOC11
#if __USE_ISOC11
void* aligned_alloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
#endif
#endif
// no forwarding here due to aliasing/name mangling issues
void cfree(void* p) { mi_free(p); }
void cfree(void* p) { mi_free(p); }
void* pvalloc(size_t size) { return mi_pvalloc(size); }
void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); }
int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); }


@ -91,7 +91,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size
#endif
return NULL;
}
*/
*/
// C11 also requires alignment to be a power-of-two (and > 0) which is checked in mi_malloc_aligned
void* p = mi_malloc_aligned(size, alignment);
mi_assert_internal(((uintptr_t)p % alignment) == 0);
@ -110,7 +110,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att
errno = EINVAL;
return EINVAL;
}
void** op = (void**)p;
void** op = (void**)p;
void* newp = mi_reallocarray(*op, count, size);
if mi_unlikely(newp == NULL) { return errno; }
*op = newp;


@ -30,7 +30,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* const block = page->free;
if mi_unlikely(block == NULL) {
return _mi_malloc_generic(heap, size, zero, 0);
return _mi_malloc_generic(heap, size, zero, 0);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
@ -38,21 +38,21 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
// allow use of the block internally
// allow use of the block internally
// note: when tracking we need to avoid ever touching the MI_PADDING since
// that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`)
mi_track_mem_undefined(block, mi_page_usable_block_size(page));
// zero the block? note: we need to zero the full block size (issue #63)
if mi_unlikely(zero) {
mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
const size_t zsize = (page->is_zero ? sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size);
_mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
_mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
}
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED
if (!page->is_zero && !zero && !mi_page_is_huge(page)) {
memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
if (!page->is_zero && !zero && !mi_page_is_huge(page)) {
memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
}
#elif (MI_SECURE!=0)
if (!zero) { block->next = 0; } // don't leak internal data
@ -91,7 +91,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
mi_assert(heap != NULL);
#if MI_DEBUG
#if MI_DEBUG
const uintptr_t tid = _mi_thread_id();
mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
#endif
@ -232,9 +232,9 @@ static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* bloc
mi_track_mem_defined(padding,sizeof(mi_padding_t));
*delta = padding->delta;
uint32_t canary = padding->canary;
uintptr_t keys[2];
uintptr_t keys[2];
keys[0] = page->keys[0];
keys[1] = page->keys[1];
keys[1] = page->keys[1];
bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
return ok;
@ -245,7 +245,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
return (ok ? bsize - delta : 0);
}
@ -319,15 +319,15 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
#if (MI_STAT < 2)
MI_UNUSED(block);
#endif
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_usable_block_size(page);
const size_t bsize = mi_page_usable_block_size(page);
#if (MI_STAT>1)
const size_t usize = mi_page_usable_size_of(page, block);
mi_heap_stat_decrease(heap, malloc, usize);
#endif
#endif
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, normal, bsize);
#if (MI_STAT > 1)
@ -352,7 +352,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
}
#endif
#if MI_HUGE_PAGE_ABANDON
#if MI_HUGE_PAGE_ABANDON
#if (MI_STAT>0)
// maintain stats for huge objects
static void mi_stat_huge_free(const mi_page_t* page) {
@ -383,7 +383,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
mi_segment_t* const segment = _mi_page_segment(page);
if (segment->page_kind == MI_PAGE_HUGE) {
#if MI_HUGE_PAGE_ABANDON
@ -392,13 +392,13 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
_mi_segment_huge_page_free(segment, page, block);
return;
#else
// huge pages are special as they occupy the entire segment
// huge pages are special as they occupy the entire segment
// as these are large we reset the memory occupied by the page so it is available to other threads
// (as the owning thread needs to actually free the memory later).
_mi_segment_huge_page_reset(segment, page, block);
#endif
}
#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
@ -484,13 +484,13 @@ void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* p
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
mi_stat_free(page, block); // stat_free may access the padding
mi_track_free(p);
_mi_free_block(page, is_local, block);
_mi_free_block(page, is_local, block);
}
// Get the segment data belonging to a pointer
// This is just a single `and` in assembly but does further checks in debug mode
// (and secure mode) if this was a valid pointer.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
MI_UNUSED(msg);
mi_assert(p != NULL);
@ -524,7 +524,7 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
return segment;
}
// Free a block
// Free a block
// fast path written carefully to prevent spilling on the stack
void mi_free(void* p) mi_attr_noexcept
{
@ -532,10 +532,10 @@ void mi_free(void* p) mi_attr_noexcept
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
const bool is_local= (_mi_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
mi_page_t* const page = _mi_segment_page_of(segment, p);
if mi_likely(is_local) { // thread-local free?
if mi_likely(page->flags.full_aligned == 0) // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
{
{
mi_block_t* const block = (mi_block_t*)p;
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
@ -546,7 +546,7 @@ void mi_free(void* p) mi_attr_noexcept
mi_track_free(p);
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
_mi_page_retire(page);
}
}
@ -558,7 +558,7 @@ void mi_free(void* p) mi_attr_noexcept
else {
// not thread-local; use generic path
_mi_free_generic(segment, page, false, p);
}
}
}
// return true if successful
@ -598,7 +598,7 @@ mi_decl_noinline static size_t mi_page_usable_aligned_size_of(const mi_segment_t
static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
if (p == NULL) return 0;
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
const mi_page_t* const page = _mi_segment_page_of(segment, p);
const mi_page_t* const page = _mi_segment_page_of(segment, p);
if mi_likely(!mi_page_has_aligned(page)) {
const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
@ -679,7 +679,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) m
// Expand (or shrink) in place (or fail)
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
#if MI_PADDING
// we do not shrink/expand with padding enabled
// we do not shrink/expand with padding enabled
MI_UNUSED(p); MI_UNUSED(newsize);
return NULL;
#else
@ -721,7 +721,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
}
mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
return _mi_heap_realloc_zero(heap, p, newsize, false);
}
mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
@ -881,9 +881,9 @@ static bool mi_try_new_handler(bool nothrow) {
#else
std::new_handler h = std::set_new_handler();
std::set_new_handler(h);
#endif
#endif
if (h==NULL) {
_mi_error_message(ENOMEM, "out of memory in 'new'");
_mi_error_message(ENOMEM, "out of memory in 'new'");
if (!nothrow) {
throw std::bad_alloc();
}
@ -914,7 +914,7 @@ static std_new_handler_t mi_get_new_handler() {
static bool mi_try_new_handler(bool nothrow) {
std_new_handler_t h = mi_get_new_handler();
if (h==NULL) {
_mi_error_message(ENOMEM, "out of memory in 'new'");
_mi_error_message(ENOMEM, "out of memory in 'new'");
if (!nothrow) {
abort(); // cannot throw in plain C, use abort
}


@ -18,7 +18,7 @@ which is sometimes needed for embedded devices or shared memory for example.
(We can also employ this with WASI or `sbrk` systems to reserve large arenas
on demand and be able to reuse them efficiently).
The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc-internal.h"
@ -95,7 +95,7 @@ mi_arena_id_t _mi_arena_id_none(void) {
}
static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
(arena_id == req_arena_id));
}
@ -152,7 +152,7 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
----------------------------------------------------------- */
static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
bool* commit, bool* large, bool* is_pinned, bool* is_zero,
bool* commit, bool* large, bool* is_pinned, bool* is_zero,
mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
MI_UNUSED(arena_index);
@ -269,7 +269,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
return NULL;
}
*is_zero = true;
*memid = MI_MEMID_OS;
*memid = MI_MEMID_OS;
void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, *commit, large, tld->stats);
if (p != NULL) { *is_pinned = *large; }
return p;
@ -333,7 +333,7 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
// todo: use reset instead of decommit on windows?
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
}
// and make it available to others again
// and make it available to others again
bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
if (!all_inuse) {
_mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size);
@ -372,8 +372,8 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
mi_assert_internal(is_committed);
is_committed = true;
}
const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
const size_t bitmaps = (is_committed ? 2 : 3);
const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
@ -411,7 +411,7 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
}
// Reserve a range of regular OS memory
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
{
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block


@ -169,15 +169,15 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// between the fields. This is used in arena allocation
//--------------------------------------------------------------------------
// Try to atomically claim a sequence of `count` bits starting from the field
// Try to atomically claim a sequence of `count` bits starting from the field
// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
{
mi_assert_internal(bitmap_idx != NULL);
// check initial trailing zeros
mi_bitmap_field_t* field = &bitmap[idx];
size_t map = mi_atomic_load_relaxed(field);
size_t map = mi_atomic_load_relaxed(field);
const size_t initial = mi_clz(map); // count of initial zeros starting at idx
mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS);
if (initial == 0) return false;
@ -212,14 +212,14 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
newmap = map | initial_mask;
if ((map & initial_mask) != 0) { goto rollback; };
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
// intermediate fields
while (++field < final_field) {
newmap = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
map = 0;
if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
}
// final field
mi_assert_internal(field == final_field);
map = mi_atomic_load_relaxed(field);
@ -232,7 +232,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
*bitmap_idx = mi_bitmap_index_create(idx, MI_BITMAP_FIELD_BITS - initial);
return true;
rollback:
rollback:
// roll back intermediate fields
while (--field > initial_field) {
newmap = 0;
@ -246,7 +246,7 @@ rollback:
mi_assert_internal((map & initial_mask) == initial_mask);
newmap = map & ~initial_mask;
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
}
}
// retry? (we make a recursive call instead of goto to be able to use const declarations)
if (retries < 4) {
return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
@ -311,7 +311,7 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
size_t pre_mask;
size_t mid_mask;
size_t post_mask;
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
bool all_one = true;
mi_bitmap_field_t* field = &bitmap[idx];
size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask);
@ -324,7 +324,7 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
prev = mi_atomic_and_acq_rel(field, ~post_mask);
if ((prev & post_mask) != post_mask) all_one = false;
}
return all_one;
return all_one;
}
// Set `count` bits at `bitmap_idx` to 1 atomically
@ -356,7 +356,7 @@ bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t co
}
// Returns `true` if all `count` bits were 1.
// Returns `true` if all `count` bits were 1.
// `any_ones` is `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
size_t idx = mi_bitmap_index_field(bitmap_idx);
@ -379,7 +379,7 @@ static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_field
prev = mi_atomic_load_relaxed(field);
if ((prev & post_mask) != post_mask) all_ones = false;
if ((prev & post_mask) != 0) any_ones = true;
}
}
if (pany_ones != NULL) *pany_ones = any_ones;
return all_ones;
}


@ -92,7 +92,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
mi_collect_t collect = *((mi_collect_t*)arg_collect);
_mi_page_free_collect(page, collect >= MI_FORCE);
if (mi_page_all_free(page)) {
// no more used blocks, free the page.
// no more used blocks, free the page.
// note: this will free retired pages as well.
_mi_page_free(page, pq, collect >= MI_FORCE);
}
@ -117,7 +117,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
_mi_deferred_free(heap, collect >= MI_FORCE);
// note: never reclaim on collect but leave it to threads that need storage to reclaim
// note: never reclaim on collect but leave it to threads that need storage to reclaim
if (
#ifdef NDEBUG
collect == MI_FORCE
@ -130,7 +130,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
// if all memory is freed by now, all segments should be freed.
_mi_abandoned_reclaim_all(heap, &heap->tld->segments);
}
// if abandoning, mark all pages to no longer add to delayed_free
if (collect == MI_ABANDON) {
mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
@ -240,7 +240,7 @@ static void mi_heap_free(mi_heap_t* heap) {
// remove ourselves from the thread local heaps list
// linear search but we expect the number of heaps to be relatively small
mi_heap_t* prev = NULL;
mi_heap_t* curr = heap->tld->heaps;
mi_heap_t* curr = heap->tld->heaps;
while (curr != heap && curr != NULL) {
prev = curr;
curr = curr->next;
@ -353,8 +353,8 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
// reduce the size of the delayed frees
_mi_heap_delayed_free_partial(from);
// transfer all pages by appending the queues; this will set a new heap field
// transfer all pages by appending the queues; this will set a new heap field
// so threads may do delayed frees in either heap for a while.
// note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
// so after this only the new heap will get delayed frees
@ -367,17 +367,17 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
}
mi_assert_internal(from->page_count == 0);
// and do outstanding delayed frees in the `from` heap
// and do outstanding delayed frees in the `from` heap
// note: be careful here as the `heap` field in all those pages no longer point to `from`,
// turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
// turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
// the regular `_mi_free_delayed_block` which is safe.
_mi_heap_delayed_free_all(from);
_mi_heap_delayed_free_all(from);
#if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
#endif
// and reset the `from` heap
mi_heap_reset_pages(from);
mi_heap_reset_pages(from);
}
// Safe delete a heap without freeing any still allocated blocks in that heap.


@ -173,7 +173,7 @@ typedef struct mi_thread_data_s {
// Thread meta-data is allocated directly from the OS. For
// some programs that do not use thread pools and allocate and
// destroy many OS threads, this may causes too much overhead
// destroy many OS threads, this may causes too much overhead
// per thread so we maintain a small cache of recently freed metadata.
#define TD_CACHE_SIZE (8)
@ -185,7 +185,7 @@ static mi_thread_data_t* mi_thread_data_alloc(void) {
for (int i = 0; i < TD_CACHE_SIZE; i++) {
td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
if (td != NULL) {
td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
if (td != NULL) {
return td;
}
@ -262,7 +262,7 @@ static bool _mi_heap_init(void) {
tld->segments.stats = &tld->stats;
tld->segments.os = &tld->os;
tld->os.stats = &tld->stats;
_mi_heap_set_default_direct(heap);
_mi_heap_set_default_direct(heap);
}
return false;
}
@ -295,9 +295,9 @@ static bool _mi_heap_done(mi_heap_t* heap) {
if (heap != &_mi_heap_main) {
_mi_heap_collect_abandon(heap);
}
// merge stats
_mi_stats_done(&heap->tld->stats);
_mi_stats_done(&heap->tld->stats);
// free if not the main thread
if (heap != &_mi_heap_main) {
@ -305,8 +305,8 @@ static bool _mi_heap_done(mi_heap_t* heap) {
mi_thread_data_free((mi_thread_data_t*)heap);
}
else {
mi_thread_data_collect(); // free cached thread metadata
#if 0
mi_thread_data_collect(); // free cached thread metadata
#if 0
// never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
// there may still be delete/free calls after the mi_fls_done is called. Issue #207
_mi_heap_destroy_pages(heap);
@ -342,7 +342,7 @@ static void _mi_thread_done(mi_heap_t* default_heap);
// use thread local storage keys to detect thread ending
#include <windows.h>
#include <fibersapi.h>
#if (_WIN32_WINNT < 0x600) // before Windows Vista
#if (_WIN32_WINNT < 0x600) // before Windows Vista
WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
@ -397,7 +397,7 @@ void mi_thread_init(void) mi_attr_noexcept
{
// ensure our process has started already
mi_process_init();
// initialize the thread local default heap
// (this will call `_mi_heap_set_default_direct` and thus set the
// fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
@ -418,7 +418,7 @@ static void _mi_thread_done(mi_heap_t* heap) {
// check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
if (heap->thread_id != _mi_thread_id()) return;
// abandon the thread local heap
if (_mi_heap_done(heap)) return; // returns true if already ran
}
@ -509,11 +509,11 @@ static void mi_process_load(void) {
os_preloading = false;
mi_assert_internal(_mi_is_main_thread());
#if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521)
atexit(&mi_process_done);
atexit(&mi_process_done);
#endif
_mi_options_init();
mi_process_setup_auto_thread_done();
mi_process_init();
mi_process_init();
if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");
// show message from the redirector (if present)
@ -578,7 +578,7 @@ void mi_process_init(void) mi_attr_noexcept {
} else {
mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
}
}
}
if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
long ksize = mi_option_get(mi_option_reserve_os_memory);
if (ksize > 0) {
@ -601,7 +601,7 @@ static void mi_cdecl mi_process_done(void) {
#endif
#ifndef MI_SKIP_COLLECT_ON_EXIT
#if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
#if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
// free all memory if possible on process exit. This is not needed for a stand-alone process
// but should be done if mimalloc is statically linked into another shared library which
// is repeatedly loaded/unloaded, see issue #281.
@ -620,7 +620,7 @@ static void mi_cdecl mi_process_done(void) {
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
mi_stats_print(NULL);
}
mi_allocator_done();
mi_allocator_done();
_mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
os_preloading = true; // don't call the C runtime anymore
}
@ -642,7 +642,7 @@ static void mi_cdecl mi_process_done(void) {
if (!mi_is_redirected()) {
mi_thread_done();
}
}
}
return TRUE;
}


@ -106,7 +106,7 @@ void _mi_options_init(void) {
for(int i = 0; i < _mi_option_last; i++ ) {
mi_option_t option = (mi_option_t)i;
long l = mi_option_get(option); MI_UNUSED(l); // initialize
// if (option != mi_option_verbose)
// if (option != mi_option_verbose)
{
mi_option_desc_t* desc = &options[option];
_mi_verbose_message("option '%s': %ld\n", desc->name, desc->value);
@ -177,7 +177,7 @@ static void mi_cdecl mi_out_stderr(const char* msg, void* arg) {
#ifdef _WIN32
// on windows with redirection, the C runtime cannot handle locale dependent output
// after the main thread closes so we use direct console output.
if (!_mi_preloading()) {
if (!_mi_preloading()) {
// _cputs(msg); // _cputs cannot be used at is aborts if it fails to lock the console
static HANDLE hcon = INVALID_HANDLE_VALUE;
if (hcon == INVALID_HANDLE_VALUE) {
@ -281,7 +281,7 @@ static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop
// inside the C runtime causes another message.
// In some cases (like on macOS) the loader already allocates which
// calls into mimalloc; if we then access thread locals (like `recurse`)
// this may crash as the access may call _tlv_bootstrap that tries to
// this may crash as the access may call _tlv_bootstrap that tries to
// (recursively) invoke malloc again to allocate space for the thread local
// variables on demand. This is why we use a _mi_preloading test on such
// platforms. However, C code generator may move the initial thread local address
@ -407,7 +407,7 @@ static _Atomic(void*) mi_error_arg; // = NULL
static void mi_error_default(int err) {
MI_UNUSED(err);
#if (MI_DEBUG>0)
#if (MI_DEBUG>0)
if (err==EFAULT) {
#ifdef _MSC_VER
__debugbreak();
@ -500,23 +500,23 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
return (len > 0 && len < result_size);
}
#elif !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0)
// On Posix systemsr use `environ` to acces environment variables
// On Posix systemsr use `environ` to acces environment variables
// even before the C runtime is initialized.
#if defined(__APPLE__) && defined(__has_include) && __has_include(<crt_externs.h>)
#include <crt_externs.h>
static char** mi_get_environ(void) {
return (*_NSGetEnviron());
}
#else
#else
extern char** environ;
static char** mi_get_environ(void) {
return environ;
}
#endif
static bool mi_getenv(const char* name, char* result, size_t result_size) {
if (name==NULL) return false;
if (name==NULL) return false;
const size_t len = strlen(name);
if (len == 0) return false;
if (len == 0) return false;
char** env = mi_get_environ();
if (env == NULL) return false;
// compare up to 256 entries
@ -530,7 +530,7 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
}
return false;
}
#else
#else
// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime
static bool mi_getenv(const char* name, char* result, size_t result_size) {
// cannot call getenv() when still initializing the C runtime.
@ -558,7 +558,7 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
#endif // !MI_USE_ENVIRON
#endif // !MI_NO_GETENV
static void mi_option_init(mi_option_desc_t* desc) {
static void mi_option_init(mi_option_desc_t* desc) {
// Read option value from the environment
char buf[64+1];
mi_strlcpy(buf, "mimalloc_", sizeof(buf));


@ -99,7 +99,7 @@ static size_t os_alloc_granularity = 4096;
// if non-zero, use large page allocation
static size_t large_os_page_size = 0;
// is memory overcommit allowed?
// is memory overcommit allowed?
// set dynamically in _mi_os_init (and if true we use MAP_NORESERVE)
static bool os_overcommit = true;
@ -150,7 +150,7 @@ typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E {
MiMemExtendedParameterUserPhysicalHandle,
MiMemExtendedParameterAttributeFlags,
MiMemExtendedParameterMax
} MI_MEM_EXTENDED_PARAMETER_TYPE;
} MI_MEM_EXTENDED_PARAMETER_TYPE;
typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S {
struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type;
@ -216,7 +216,7 @@ static bool mi_win_enable_large_os_pages(void)
return (ok!=0);
}
void _mi_os_init(void)
void _mi_os_init(void)
{
os_overcommit = false;
// get the page size
@ -277,9 +277,9 @@ static void os_detect_overcommit(void) {
size_t olen = sizeof(val);
if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) {
os_overcommit = (val != 0);
}
}
#else
// default: overcommit is true
// default: overcommit is true
#endif
}
@ -317,10 +317,10 @@ static int mi_madvise(void* addr, size_t length, int advice) {
static mi_decl_cache_align _Atomic(uintptr_t)aligned_base;
// Return a MI_SEGMENT_SIZE aligned address that is probably available.
// If this returns NULL, the OS will determine the address but on some OS's that may not be
// If this returns NULL, the OS will determine the address but on some OS's that may not be
// properly aligned which can be more costly as it needs to be adjusted afterwards.
// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization;
// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses
// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization;
// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses
// in the middle of the 2TiB - 6TiB address range (see issue #372))
#define MI_HINT_BASE ((uintptr_t)2 << 40) // 2TiB start
@ -394,12 +394,12 @@ static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats
#endif
if (was_committed) { _mi_stat_decrease(&stats->committed, size); }
_mi_stat_decrease(&stats->reserved, size);
return !err;
return !err;
}
/* -----------------------------------------------------------
Raw allocation on Windows (VirtualAlloc)
Raw allocation on Windows (VirtualAlloc)
-------------------------------------------------------------- */
#ifdef _WIN32
@ -414,7 +414,7 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
_mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
// fall through on error
}
}
}
#endif
// on modern Windows try use VirtualAlloc2 for aligned allocation
if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
@ -472,12 +472,12 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
-------------------------------------------------------------- */
#elif defined(MI_USE_SBRK) || defined(__wasi__)
#if defined(MI_USE_SBRK)
#if defined(MI_USE_SBRK)
static void* mi_memory_grow( size_t size ) {
void* p = sbrk(size);
if (p == (void*)(-1)) return NULL;
#if !defined(__wasi__) // on wasi this is always zero initialized already (?)
memset(p,0,size);
memset(p,0,size);
#endif
return p;
}
@ -485,8 +485,8 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
static void* mi_memory_grow( size_t size ) {
size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size()))
: __builtin_wasm_memory_size(0));
if (base == SIZE_MAX) return NULL;
return (void*)(base * _mi_os_page_size());
if (base == SIZE_MAX) return NULL;
return (void*)(base * _mi_os_page_size());
}
#endif
@ -498,7 +498,7 @@ static void* mi_heap_grow(size_t size, size_t try_alignment) {
void* p = NULL;
if (try_alignment <= 1) {
// `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now)
#if defined(MI_USE_PTHREADS)
#if defined(MI_USE_PTHREADS)
pthread_mutex_lock(&mi_heap_grow_mutex);
#endif
p = mi_memory_grow(size);
@ -520,7 +520,7 @@ static void* mi_heap_grow(size_t size, size_t try_alignment) {
if (current != NULL) {
void* aligned_current = mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space
alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size());
base = mi_memory_grow(alloc_size);
base = mi_memory_grow(alloc_size);
}
}
#if defined(MI_USE_PTHREADS)
@ -537,7 +537,7 @@ static void* mi_heap_grow(size_t size, size_t try_alignment) {
}
}
if (p == NULL) {
_mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment);
_mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment);
errno = ENOMEM;
return NULL;
}
@ -548,10 +548,10 @@ static void* mi_heap_grow(size_t size, size_t try_alignment) {
/* -----------------------------------------------------------
Raw allocation on Unix's (mmap)
-------------------------------------------------------------- */
#else
#else
#define MI_OS_USE_MMAP
static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
MI_UNUSED(try_alignment);
MI_UNUSED(try_alignment);
#if defined(MAP_ALIGNED) // BSD
if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
size_t n = mi_bsr(try_alignment);
@ -582,7 +582,7 @@ static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int pr
#endif
// regular mmap
void* p = mmap(addr, size, protect_flags, flags, fd, 0);
if (p!=MAP_FAILED) return p;
if (p!=MAP_FAILED) return p;
// failed to allocate
return NULL;
}
@ -599,7 +599,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
int fd = -1;
if (_mi_os_has_overcommit()) {
flags |= MAP_NORESERVE;
}
}
#if defined(PROT_MAX)
protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
#endif
@ -688,7 +688,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
*is_large = true;
}
}
}
#endif
}
}
@ -756,7 +756,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
// try first with a hint (this will be aligned directly on Win 10+ or BSD)
void* p = mi_os_mem_alloc(size, alignment, commit, allow_large, is_large, stats);
if (p == NULL) return NULL;
// if not aligned, free it, overallocate, and unmap around it
if (((uintptr_t)p % alignment != 0)) {
mi_os_mem_free(p, size, commit, stats);
@ -768,7 +768,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
// over-allocate uncommitted (virtual) memory
p = mi_os_mem_alloc(over_size, 0 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, stats);
if (p == NULL) return NULL;
// set p to the aligned part in the full region
// note: this is dangerous on Windows as VirtualFree needs the actual region pointer
// but in mi_os_mem_free we handle this (hopefully exceptional) situation.
@ -844,7 +844,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* lar
OS aligned allocation with an offset. This is used
for large alignments > MI_ALIGNMENT_MAX. We use a large mimalloc
page where the object can be aligned at an offset from the start of the segment.
As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
to use the actual start of the memory region.
----------------------------------------------------------- */
@ -877,7 +877,7 @@ void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_of
mi_assert(align_offset <= MI_SEGMENT_SIZE);
const size_t extra = _mi_align_up(align_offset, alignment) - align_offset;
void* start = (uint8_t*)p - extra;
_mi_os_free_ex(start, size + extra, was_committed, tld_stats);
_mi_os_free_ex(start, size + extra, was_committed, tld_stats);
}
/* -----------------------------------------------------------
@ -957,7 +957,7 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
// commit: just change the protection
err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
if (err != 0) { err = errno; }
}
}
else {
// decommit: use mmap with MAP_FIXED to discard the existing memory (and reduce rss)
const int fd = mi_unix_mmap_fd();
@ -967,10 +967,10 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
#else
// Linux, macOSX and others.
if (commit) {
// commit: ensure we can access the area
// commit: ensure we can access the area
err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
if (err != 0) { err = errno; }
}
}
else {
#if defined(MADV_DONTNEED) && MI_DEBUG == 0 && MI_SECURE == 0
// decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
@ -1007,7 +1007,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats);
}
bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, true /* conservative */, is_zero, stats);
}
@ -1046,7 +1046,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
int oadvice = (int)mi_atomic_load_relaxed(&advice);
int err;
while ((err = mi_madvise(start, csize, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
// if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED);
err = mi_madvise(start, csize, MADV_DONTNEED);
@ -1079,7 +1079,7 @@ bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stat
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
*is_zero = false;
return mi_os_resetx(addr, size, false, stats);
return mi_os_resetx(addr, size, false, stats);
}
@ -1188,7 +1188,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
params[0].Arg.ULong = (unsigned)numa_node;
return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, params, 1);
}
// otherwise use regular virtual alloc on older windows
return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
}
@ -1337,7 +1337,7 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) {
/* ----------------------------------------------------------------------------
Support NUMA aware allocation
-----------------------------------------------------------------------------*/
#ifdef _WIN32
#ifdef _WIN32
static size_t mi_os_numa_nodex(void) {
USHORT numa_node = 0;
if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) {
@ -1353,7 +1353,7 @@ static size_t mi_os_numa_nodex(void) {
DWORD pnum = GetCurrentProcessorNumber();
UCHAR nnode = 0;
BOOL ok = GetNumaProcessorNode((UCHAR)pnum, &nnode);
if (ok) numa_node = nnode;
if (ok) numa_node = nnode;
}
return numa_node;
}
@ -1457,7 +1457,7 @@ size_t _mi_os_numa_node_count_get(void) {
else {
count = mi_os_numa_node_countx(); // or detect dynamically
if (count == 0) count = 1;
}
}
mi_atomic_store_release(&_mi_numa_node_count, count); // save it
_mi_verbose_message("using %zd numa regions\n", count);
}


@ -76,7 +76,7 @@ static inline uint8_t mi_bin(size_t size) {
bin = MI_BIN_HUGE;
}
else {
#if defined(MI_ALIGN4W)
#if defined(MI_ALIGN4W)
if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
#endif
wsize--;
@ -303,7 +303,7 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue
for (mi_page_t* page = append->first; page != NULL; page = page->next) {
// inline `mi_page_set_heap` to avoid wrong assertion during absorption;
// in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive.
mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
// set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
// side effect that it spins until any DELAYED_FREEING is finished. This ensures
// that after appending only the new heap will be used for delayed free operations.


@ -112,7 +112,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0);
#if MI_HUGE_PAGE_ABANDON
if (segment->page_kind != MI_PAGE_HUGE)
if (segment->page_kind != MI_PAGE_HUGE)
#endif
{
mi_page_queue_t* pq = mi_page_queue_of(page);
@ -134,7 +134,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
mi_thread_free_t tfreex;
mi_delayed_t old_delay;
mi_thread_free_t tfree;
mi_thread_free_t tfree;
size_t yield_count = 0;
do {
tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
@ -262,7 +262,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
mi_assert_internal(pq != NULL);
mi_assert_internal(mi_heap_contains_queue(heap, pq));
mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size);
#endif
#endif
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
@@ -413,7 +413,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_segment_page_free(page, force, segments_tld);
}
#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
#define MI_RETIRE_CYCLES (8)
// Retire a page with no more used blocks
@@ -603,7 +603,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
if (page->capacity >= page->reserved) return;
size_t page_size;
//uint8_t* page_start =
//uint8_t* page_start =
_mi_page_start(_mi_page_segment(page), page, &page_size);
mi_stat_counter_increase(tld->stats.pages_extended, 1);
@@ -615,7 +615,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
mi_assert_internal(max_extend > 0);
if (extend > max_extend) {
// ensure we don't touch memory beyond the page to reduce page commit.
// the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
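For context on the clamp above: `max_extend` bounds how many fresh blocks `mi_page_extend_free` carves out per call, so only a bounded amount of memory past the current capacity is ever touched. A hedged sketch of that bound, with made-up stand-ins for `MI_MIN_EXTEND` and `MI_MAX_EXTEND_SIZE` (the real values live in mimalloc's internal headers):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

// Illustrative stand-ins; the real MI_MIN_EXTEND / MI_MAX_EXTEND_SIZE are
// defined in mimalloc's internal headers.
#define EX_MIN_EXTEND       (1u)
#define EX_MAX_EXTEND_SIZE  (4u*1024u)   // touch at most ~4KiB of new blocks per extension

static size_t max_extend_for(size_t bsize) {
  size_t max_extend = (bsize >= EX_MAX_EXTEND_SIZE ? EX_MIN_EXTEND
                                                   : EX_MAX_EXTEND_SIZE / (uint32_t)bsize);
  if (max_extend < EX_MIN_EXTEND) { max_extend = EX_MIN_EXTEND; }
  return max_extend;
}

int main(void) {
  const size_t sizes[] = { 8, 64, 1024, 8192 };
  for (size_t i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
    printf("block size %5zu -> extend by at most %zu blocks\n", sizes[i], max_extend_for(sizes[i]));
  }
  return 0;
}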
@@ -734,7 +734,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
page = mi_page_fresh(heap, pq);
if (page == NULL && first_try) {
// out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
page = mi_page_queue_find_free_ex(heap, pq, false);
page = mi_page_queue_find_free_ex(heap, pq, false);
}
}
else {
@@ -752,17 +752,17 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
mi_page_queue_t* pq = mi_page_queue(heap,size);
mi_page_t* page = pq->first;
if (page != NULL) {
#if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
#if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
mi_page_extend_free(heap, page, heap->tld);
mi_assert_internal(mi_page_immediate_available(page));
}
else
else
#endif
{
_mi_page_free_collect(page,false);
}
if (mi_page_immediate_available(page)) {
page->retire_expire = 0;
return page; // fast path
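The `MI_SECURE>=3` branch above extends the free list only on a random coin flip, which makes the origin of fresh blocks harder to predict. A toy sketch of that decision, using `rand()` as a stand-in for mimalloc's per-heap `_mi_heap_random_next`; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>

// Toy stand-in for _mi_heap_random_next(); the real generator is a per-heap
// cryptographic RNG, not rand().
static unsigned long toy_random_next(void) { return (unsigned long)rand(); }

// Extend only if reserved capacity remains, and then only half the time
// (decided by the lowest random bit), mirroring the MI_SECURE>=3 branch.
static bool should_extend(size_t capacity, size_t reserved) {
  return (capacity < reserved) && ((toy_random_next() & 1) == 1);
}

int main(void) {
  srand((unsigned)time(NULL));
  int extends = 0;
  for (int i = 0; i < 1000; i++) { if (should_extend(10, 100)) extends++; }
  printf("extended on %d of 1000 tries (roughly half expected)\n", extends);
  return 0;
}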
@@ -825,7 +825,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
mi_page_set_heap(page, NULL);
#endif
#endif
if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
mi_heap_stat_increase(heap, giant, bsize);
@@ -844,7 +844,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
if mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
@@ -863,7 +863,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
// very large requested alignments in which case we use a huge segment.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
{
@@ -891,7 +891,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
}
if mi_unlikely(page == NULL) { // out of memory
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
_mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
return NULL;
}

View file
@@ -169,8 +169,8 @@ If we cannot get good randomness, we fall back to weak randomness based on a tim
#if defined(_WIN32)
#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus)
// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using
// dynamic overriding, we observed it can raise an exception when compiled with C++, and
// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using
// dynamic overriding, we observed it can raise an exception when compiled with C++, and
// sometimes deadlocks when also running under the VS debugger.
// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom.
// To be continued..
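For reference, a minimal sketch of the `BCryptGenRandom` path discussed above; the handle-less call with `BCRYPT_USE_SYSTEM_PREFERRED_RNG` is the documented Windows API, but the wrapper name and the trivial `main` are illustrative, and the RtlGenRandom fallback and dynamic-override caveats are omitted:

#if defined(_WIN32)
#include <windows.h>
#include <bcrypt.h>
#pragma comment(lib, "bcrypt.lib")
#include <stdbool.h>
#include <stdio.h>

// Fill `buf` with `len` cryptographically secure random bytes from the
// system-preferred RNG (no algorithm handle needed).
static bool win_random_buf(void* buf, size_t len) {
  return (BCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)len,
                          BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0);
}

int main(void) {
  unsigned char bytes[8];
  if (win_random_buf(bytes, sizeof(bytes))) {
    for (int i = 0; i < 8; i++) { printf("%02x", bytes[i]); }
    printf("\n");
  }
  return 0;
}
#else
int main(void) { return 0; }   // Windows-only sketch
#endif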
@@ -203,7 +203,7 @@ static bool os_random_buf(void* buf, size_t buf_len) {
static bool os_random_buf(void* buf, size_t buf_len) {
#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
// We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf
// may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
// may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
#else
// fall back on older macOS
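Similarly, a hedged sketch of the macOS path: prefer `CCRandomGenerateBytes`, which reports failure, and fall back to `arc4random_buf` on older SDKs. The wrapper name is illustrative; the availability check mirrors the one above:

#if defined(__APPLE__)
#include <stdlib.h>      // arc4random_buf
#include <stdbool.h>
#include <AvailabilityMacros.h>
#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
#include <CommonCrypto/CommonCryptoError.h>
#include <CommonCrypto/CommonRandom.h>
#endif

static bool macos_random_buf(void* buf, size_t len) {
#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
  // CCRandomGenerateBytes returns kCCSuccess (0) on success, so failures are visible.
  return (CCRandomGenerateBytes(buf, len) == kCCSuccess);
#else
  arc4random_buf(buf, len);  // cannot report failure
  return true;
#endif
}

int main(void) {
  unsigned char bytes[8];
  return macos_random_buf(bytes, sizeof(bytes)) ? 0 : 1;
}
#else
int main(void) { return 0; }   // macOS-only sketch
#endif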
@@ -281,7 +281,7 @@ static bool os_random_buf(void* buf, size_t buf_len) {
uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random
#if defined(_WIN32)
LARGE_INTEGER pcount;
QueryPerformanceCounter(&pcount);
@@ -325,7 +325,7 @@ static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
}
void _mi_random_init(mi_random_ctx_t* ctx) {
mi_random_init_ex(ctx, false);
mi_random_init_ex(ctx, false);
}
void _mi_random_init_weak(mi_random_ctx_t * ctx) {

View file
@@ -16,8 +16,8 @@ We need this memory layer between the raw OS calls because of:
1. on `sbrk` like systems (like WebAssembly) we need our own memory maps in order
to reuse memory effectively.
2. It turns out that for large objects, between 1MiB and 32MiB (?), the cost of
an OS allocation/free is still (much) too expensive relative to the accesses
in that object :-( (`malloc-large` tests this). This means we need a cheaper
an OS allocation/free is still (much) too expensive relative to the accesses
in that object :-( (`malloc-large` tests this). This means we need a cheaper
way to reuse memory.
3. This layer allows for NUMA aware allocation.
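To make the rationale above concrete: the region layer keeps a table of large OS regions and hands out `MI_SEGMENT_SIZE` blocks from them through a bitmap, so most segment allocations and frees never reach the OS. A very reduced, single-region sketch of that idea (plain bit mask, no atomics or commit/dirty/reset tracking; all names are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

// Toy region: 64 blocks tracked in one 64-bit mask. The real layer uses
// atomic bitmap fields, multiple regions, and dirty/commit/reset tracking.
#define TOY_BLOCK_SIZE  (4u*1024u*1024u)   // stand-in for MI_SEGMENT_SIZE

typedef struct toy_region_s {
  uint64_t in_use;   // bit i set => block i is handed out
  uint8_t* start;    // start of the OS-allocated area (not actually mapped here)
} toy_region_t;

// Claim `count` contiguous free blocks; returns the first block index or -1.
static int toy_region_claim(toy_region_t* r, int count) {
  const uint64_t mask = (count >= 64 ? ~UINT64_C(0) : ((UINT64_C(1) << count) - 1));
  for (int i = 0; i + count <= 64; i++) {
    if ((r->in_use & (mask << i)) == 0) { r->in_use |= (mask << i); return i; }
  }
  return -1;  // no free run: the caller would fall back to the OS/arena
}

int main(void) {
  toy_region_t r = { 0, NULL };
  int a = toy_region_claim(&r, 4);
  int b = toy_region_claim(&r, 2);
  printf("claimed block runs at index %d and %d (byte offsets %u and %u)\n",
         a, b, (unsigned)(a * TOY_BLOCK_SIZE), (unsigned)(b * TOY_BLOCK_SIZE));
  return 0;
}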
@@ -59,7 +59,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offs
// Constants
#if (MI_INTPTR_SIZE==8)
#define MI_HEAP_REGION_MAX_SIZE (256 * MI_GiB) // 64KiB for the region map
#define MI_HEAP_REGION_MAX_SIZE (256 * MI_GiB) // 64KiB for the region map
#elif (MI_INTPTR_SIZE==4)
#define MI_HEAP_REGION_MAX_SIZE (3 * MI_GiB) // ~ KiB for the region map
#else
@@ -70,11 +70,11 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offs
#define MI_REGION_SIZE (MI_SEGMENT_SIZE * MI_BITMAP_FIELD_BITS) // 256MiB (64MiB on 32 bits)
#define MI_REGION_MAX (MI_HEAP_REGION_MAX_SIZE / MI_REGION_SIZE) // 1024 (48 on 32 bits)
#define MI_REGION_MAX_OBJ_BLOCKS (MI_REGION_MAX_BLOCKS/4) // 64MiB
#define MI_REGION_MAX_OBJ_SIZE (MI_REGION_MAX_OBJ_BLOCKS*MI_SEGMENT_SIZE)
#define MI_REGION_MAX_OBJ_SIZE (MI_REGION_MAX_OBJ_BLOCKS*MI_SEGMENT_SIZE)
// Region info
// Region info
typedef union mi_region_info_u {
size_t value;
size_t value;
struct {
bool valid; // initialized?
bool is_large:1; // allocated in fixed large/huge OS pages
@@ -88,7 +88,7 @@ typedef union mi_region_info_u {
// a bit map with one bit per MI_SEGMENT_SIZE (4MiB) block.
typedef struct mem_region_s {
_Atomic(size_t) info; // mi_region_info_t.value
_Atomic(void*) start; // start of the memory area
_Atomic(void*) start; // start of the memory area
mi_bitmap_field_t in_use; // bit per in-use block
mi_bitmap_field_t dirty; // track if non-zero per block
mi_bitmap_field_t commit; // track if committed per block
@@ -101,7 +101,7 @@ typedef struct mem_region_s {
static mem_region_t regions[MI_REGION_MAX];
// Allocated regions
static _Atomic(size_t) regions_count; // = 0;
static _Atomic(size_t) regions_count; // = 0;
/* ----------------------------------------------------------------------------
@@ -136,7 +136,7 @@ mi_decl_nodiscard bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
static void* mi_region_blocks_start(const mem_region_t* region, mi_bitmap_index_t bit_idx) {
uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t, &((mem_region_t*)region)->start);
mi_assert_internal(start != NULL);
return (start + (bit_idx * MI_SEGMENT_SIZE));
return (start + (bit_idx * MI_SEGMENT_SIZE));
}
static size_t mi_memid_create(mem_region_t* region, mi_bitmap_index_t bit_idx) {
@@ -205,7 +205,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
_mi_bitmap_claim(&r->in_use, 1, blocks, *bit_idx, NULL);
mi_atomic_store_ptr_release(void,&r->start, start);
// and share it
// and share it
mi_region_info_t info;
info.value = 0; // initialize the full union to zero
info.x.valid = true;
@@ -242,7 +242,7 @@ static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, boo
static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld)
{
// try all regions for a free slot
// try all regions for a free slot
const size_t count = mi_atomic_load_relaxed(&regions_count); // monotonic, so ok to be relaxed
size_t idx = tld->region_idx; // Or start at 0 to reuse low addresses? Starting at 0 seems to increase latency though
for (size_t visited = 0; visited < count; visited++, idx++) {
@@ -276,7 +276,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
return NULL;
}
}
// ------------------------------------------------
// found a region and claimed `blocks` at `bit_idx`, initialize them now
mi_assert_internal(region != NULL);
@@ -288,7 +288,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
mi_assert_internal(!(info.x.is_large && !*large));
mi_assert_internal(start != NULL);
*is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
*is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
*large = info.x.is_large;
*is_pinned = info.x.is_pinned;
*memid = mi_memid_create(region, bit_idx);
@@ -307,20 +307,20 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
return NULL;
}
if (commit_zero) *is_zero = true;
if (commit_zero) *is_zero = true;
}
}
else {
// no need to commit, but check if already fully committed
*commit = _mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx);
}
}
mi_assert_internal(!*commit || _mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx));
// unreset reset blocks
if (_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
// some blocks are still reset
mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
bool reset_zero = false;
@@ -329,13 +329,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
}
}
mi_assert_internal(!_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));
#if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
if (*commit) { ((uint8_t*)p)[0] = 0; }
#endif
// and return the allocation
mi_assert_internal(p != NULL);
// and return the allocation
mi_assert_internal(p != NULL);
return p;
}
@@ -354,7 +354,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t align_offset,
*is_zero = false;
*is_pinned = false;
bool default_large = false;
if (large==NULL) large = &default_large; // ensure `large != NULL`
if (large==NULL) large = &default_large; // ensure `large != NULL`
if (size == 0) return NULL;
size = _mi_align_up(size, _mi_os_page_size());
@@ -363,7 +363,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t align_offset,
size_t arena_memid;
const size_t blocks = mi_region_block_count(size);
if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
if (p == NULL) {
_mi_warning_message("unable to allocate from region: size %zu\n", size);
}
@@ -428,9 +428,9 @@ void _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, s
}
// reset the blocks to reduce the working set.
if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
&& (mi_option_is_enabled(mi_option_eager_commit) ||
mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
{
bool any_unreset;
_mi_bitmap_claim(&region->reset, 1, blocks, bit_idx, &any_unreset);
@@ -438,7 +438,7 @@ void _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, s
_mi_abandoned_await_readers(); // ensure no more pending write (in case reset = decommit)
_mi_mem_reset(p, blocks * MI_SEGMENT_SIZE, tld);
}
}
}
// and unclaim
bool all_unclaimed = mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
@@ -467,7 +467,7 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
memset((void*)&regions[i], 0, sizeof(mem_region_t)); // cast to void* to avoid atomic warning
// and release the whole region
mi_atomic_store_release(&region->info, (size_t)0);
if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
_mi_abandoned_await_readers(); // ensure no pending reads
_mi_arena_free(start, MI_REGION_SIZE, MI_SEGMENT_ALIGN, 0, arena_memid, (~commit == 0), tld->stats);
}

View file
@@ -193,8 +193,8 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
mi_assert_internal(((uintptr_t)segment + segment->segment_info_size) % os_psize == 0);
mi_segment_protect_range((uint8_t*)segment + segment->segment_info_size - os_psize, os_psize, protect);
#if (MI_SECURE >= 2)
if (segment->capacity == 1)
#endif
if (segment->capacity == 1)
#endif
{
// and protect the last (or only) page too
mi_assert_internal(MI_SECURE <= 1 || segment->page_kind >= MI_PAGE_LARGE);
@@ -221,7 +221,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
}
}
}
#endif
#endif
}
}
@@ -404,11 +404,11 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
// for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
size_t adjust = block_size - ((uintptr_t)p % block_size);
if (psize - adjust >= block_size) {
if (adjust < block_size) {
if (adjust < block_size) {
p += adjust;
psize -= adjust;
if (pre_size != NULL) *pre_size = adjust;
}
}
mi_assert_internal((uintptr_t)p % block_size == 0);
}
}
@@ -481,7 +481,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
}
// called by threads that are terminating to free cached segments
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
MI_UNUSED_RELEASE(tld);
#if MI_DEBUG>=2
if (!_mi_is_main_thread()) {
@@ -567,15 +567,15 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
const bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
bool commit = eager; // || (page_kind >= MI_PAGE_LARGE);
bool is_zero = false;
// Allocate the segment from the OS (segment_size can change due to alignment)
mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, pre_size, info_size, &segment_size, &is_zero, &commit, tld, os_tld);
if (segment == NULL) return NULL;
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
// zero the segment info (but not the `mem` fields)
ptrdiff_t ofs = offsetof(mi_segment_t, next);
memset((uint8_t*)segment + ofs, 0, info_size - ofs);
@@ -588,7 +588,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
segment->pages[i].is_committed = commit;
segment->pages[i].is_zero_init = is_zero;
}
// initialize
segment->page_kind = page_kind;
segment->capacity = capacity;
@@ -646,7 +646,7 @@ static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg
// check commit
if (!page->is_committed) {
mi_assert_internal(!segment->mem_is_pinned);
mi_assert_internal(!page->is_reset);
mi_assert_internal(!page->is_reset);
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
@@ -663,7 +663,7 @@ static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg
// check reset
if (page->is_reset) {
mi_assert_internal(!segment->mem_is_pinned);
bool ok = mi_page_unreset(segment, page, 0, tld);
bool ok = mi_page_unreset(segment, page, 0, tld);
if (!ok) {
page->segment_in_use = false;
segment->used--;
@@ -809,7 +809,7 @@ static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // =
static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL
// Maintain these for debug purposes (these counts may be a bit off)
static mi_decl_cache_align _Atomic(size_t) abandoned_count;
static mi_decl_cache_align _Atomic(size_t) abandoned_count;
static mi_decl_cache_align _Atomic(size_t) abandoned_visited_count;
// We also maintain a count of current readers of the abandoned list
@@ -1086,7 +1086,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
{
*reclaimed = false;
mi_segment_t* segment;
long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024); // limit the work to bound allocation times
long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024); // limit the work to bound allocation times
while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
segment->abandoned_visits++;
bool all_pages_free;
@@ -1126,7 +1126,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
{
mi_assert_internal(page_kind <= MI_PAGE_LARGE);
mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
// 1. try to reclaim an abandoned segment
bool reclaimed;
mi_segment_t* segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
@@ -1220,7 +1220,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
#if MI_HUGE_PAGE_ABANDON
segment->thread_id = 0; // huge pages are immediately abandoned
mi_segments_track_size(-(long)segment->segment_size, tld);
#endif
#endif
mi_page_t* page = mi_segment_find_free(segment, tld);
mi_assert_internal(page != NULL);
@@ -1272,8 +1272,8 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
#endif
}
#else
// reset memory of a huge block from another thread
#else
// reset memory of a huge block from another thread
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
mi_assert_internal(segment->page_kind == MI_PAGE_HUGE);
mi_assert_internal(segment == _mi_page_segment(page));
@@ -1282,7 +1282,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
if (!segment->mem_is_pinned && page->is_committed) {
const size_t usize = mi_usable_size(block) - sizeof(mi_block_t);
uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
_mi_os_reset(p, usize, &_mi_stats_main);
_mi_os_reset(p, usize, &_mi_stats_main);
}
}
#endif

View file
@@ -21,7 +21,7 @@ terms of the MIT license. A copy of the license can be found in the file
static bool mi_is_in_main(void* stat) {
return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
&& (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
&& (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
}
static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
@@ -51,7 +51,7 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
}
}
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
if (mi_is_in_main(stat)) {
mi_atomic_addi64_relaxed( &stat->count, 1 );
mi_atomic_addi64_relaxed( &stat->total, (int64_t)amount );
@@ -77,7 +77,7 @@ static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64
mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit);
mi_atomic_addi64_relaxed( &stat->current, src->current * unit);
mi_atomic_addi64_relaxed( &stat->freed, src->freed * unit);
// peak scores do not work across threads..
// peak scores do not work across threads..
mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit);
}
@@ -129,11 +129,11 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
Display statistics
----------------------------------------------------------- */
// unit > 0 : size in binary bytes
// unit > 0 : size in binary bytes
// unit == 0: count as decimal
// unit < 0 : count in binary
static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) {
char buf[32]; buf[0] = 0;
char buf[32]; buf[0] = 0;
int len = 32;
const char* suffix = (unit <= 0 ? " " : "B");
const int64_t base = (unit == 0 ? 1000 : 1024);
@@ -146,7 +146,7 @@ static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void*
}
}
else {
int64_t divider = base;
int64_t divider = base;
const char* magnitude = "K";
if (pos >= divider*base) { divider *= base; magnitude = "M"; }
if (pos >= divider*base) { divider *= base; magnitude = "G"; }
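The divider selection above is the whole trick behind the human-readable sizes in the statistics output: pick base, base squared, or base cubed and the matching K/M/G suffix. A small self-contained sketch of the same formatting logic (simplified output, illustrative function name):

#include <stdio.h>
#include <stdint.h>

// Scale `n` by K/M/G using `base` (1000 for counts, 1024 for byte sizes),
// mirroring the divider selection in mi_printf_amount above.
static void print_amount(int64_t n, int64_t base, const char* suffix) {
  const int64_t pos = (n < 0 ? -n : n);
  if (pos < base) {
    printf("%lld %s\n", (long long)n, suffix);
    return;
  }
  int64_t divider = base;
  const char* magnitude = "K";
  if (pos >= divider*base) { divider *= base; magnitude = "M"; }
  if (pos >= divider*base) { divider *= base; magnitude = "G"; }
  printf("%lld.%lld %s%s\n", (long long)(n / divider),
         (long long)(((n % divider) * 10) / divider), magnitude, suffix);
}

int main(void) {
  print_amount(123, 1000, "");             // small decimal count
  print_amount(5*1024*1024, 1024, "B");    // 5.0 MB in binary units
  print_amount(123456789, 1000, "");       // 123.4 M
  return 0;
}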
@@ -208,7 +208,7 @@ static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64
else {
mi_print_amount(stat->peak, 1, out, arg);
mi_print_amount(stat->allocated, 1, out, arg);
_mi_fprintf(out, arg, "%11s", " "); // no freed
_mi_fprintf(out, arg, "%11s", " "); // no freed
mi_print_amount(stat->current, 1, out, arg);
_mi_fprintf(out, arg, "\n");
}
@@ -225,7 +225,7 @@ static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg
}
static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) {
const int64_t avg_tens = (stat->count == 0 ? 0 : (stat->total*10 / stat->count));
const int64_t avg_tens = (stat->count == 0 ? 0 : (stat->total*10 / stat->count));
const long avg_whole = (long)(avg_tens/10);
const long avg_frac1 = (long)(avg_tens%10);
_mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1);
@@ -265,7 +265,7 @@ typedef struct buffered_s {
mi_output_fun* out; // original output function
void* arg; // and state
char* buf; // local buffer of at least size `count+1`
size_t used; // currently used chars `used <= count`
size_t used; // currently used chars `used <= count`
size_t count; // total chars available for output
} buffered_t;
@@ -336,7 +336,7 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
mi_stat_print(&stats->threads, "threads", -1, out, arg);
mi_stat_counter_print_avg(&stats->searches, "searches", out, arg);
_mi_fprintf(out, arg, "%10s: %7zu\n", "numa nodes", _mi_os_numa_node_count());
mi_msecs_t elapsed;
mi_msecs_t user_time;
mi_msecs_t sys_time;
@@ -354,7 +354,7 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
_mi_fprintf(out, arg, ", commit: ");
mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s");
}
_mi_fprintf(out, arg, "\n");
_mi_fprintf(out, arg, "\n");
}
static mi_msecs_t mi_process_start; // = 0
@@ -414,7 +414,7 @@ static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) {
mfreq.QuadPart = f.QuadPart/1000LL;
if (mfreq.QuadPart == 0) mfreq.QuadPart = 1;
}
return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart);
return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart);
}
mi_msecs_t _mi_clock_now(void) {
@@ -429,7 +429,7 @@ mi_msecs_t _mi_clock_now(void) {
struct timespec t;
#ifdef CLOCK_MONOTONIC
clock_gettime(CLOCK_MONOTONIC, &t);
#else
#else
clock_gettime(CLOCK_REALTIME, &t);
#endif
return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000);
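On POSIX the clock value is turned into milliseconds as seconds*1000 plus nanoseconds/1000000, preferring CLOCK_MONOTONIC when available. A minimal standalone version of that conversion (type and function names are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

typedef int64_t msecs_t;   // illustrative alias for mi_msecs_t

// Current time in milliseconds from a POSIX clock; monotonic when available.
static msecs_t clock_now_msecs(void) {
  struct timespec t;
#ifdef CLOCK_MONOTONIC
  clock_gettime(CLOCK_MONOTONIC, &t);
#else
  clock_gettime(CLOCK_REALTIME, &t);
#endif
  return ((msecs_t)t.tv_sec * 1000) + ((msecs_t)t.tv_nsec / 1000000);
}

int main(void) {
  const msecs_t start = clock_now_msecs();
  // ... work would happen here ...
  printf("elapsed: %lld ms\n", (long long)(clock_now_msecs() - start));
  return 0;
}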
@@ -476,7 +476,7 @@ static mi_msecs_t filetime_msecs(const FILETIME* ftime) {
return msecs;
}
static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults)
static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults)
{
*elapsed = _mi_clock_end(mi_process_start);
FILETIME ct;
@@ -492,7 +492,7 @@ static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msec
*peak_rss = (size_t)info.PeakWorkingSetSize;
*current_commit = (size_t)info.PagefileUsage;
*peak_commit = (size_t)info.PeakPagefileUsage;
*page_faults = (size_t)info.PageFaultCount;
*page_faults = (size_t)info.PageFaultCount;
}
#elif !defined(__wasi__) && (defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__))
@@ -525,7 +525,7 @@ static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msec
// estimate commit using our stats
*peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));
*current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
*current_rss = *current_commit; // estimate
*current_rss = *current_commit; // estimate
#if defined(__HAIKU__)
// Haiku does not have (yet?) a way to
// get these stats per process
@@ -546,7 +546,7 @@ static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msec
}
#else
*peak_rss = rusage.ru_maxrss * 1024; // Linux reports in KiB
#endif
#endif
}
#else
@@ -578,7 +578,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
size_t peak_rss0 = 0;
size_t current_commit0 = 0;
size_t peak_commit0 = 0;
size_t page_faults0 = 0;
size_t page_faults0 = 0;
mi_stat_process_info(&elapsed,&utime, &stime, &current_rss0, &peak_rss0, &current_commit0, &peak_commit0, &page_faults0);
if (elapsed_msecs!=NULL) *elapsed_msecs = (elapsed < 0 ? 0 : (elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)elapsed : PTRDIFF_MAX));
if (user_msecs!=NULL) *user_msecs = (utime < 0 ? 0 : (utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)utime : PTRDIFF_MAX));
@@ -589,4 +589,3 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
if (peak_commit!=NULL) *peak_commit = peak_commit0;
if (page_faults!=NULL) *page_faults = page_faults0;
}