Use __wasm__ instead of __wasi__ for wasm targets

Cheng Shao 2023-08-28 09:33:56 +00:00
parent 10efe291af
commit 1338edf2cf
11 changed files with 109 additions and 114 deletions
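The gist of the change, shown here as a minimal stand-alone sketch rather than an excerpt of the diff below: wasm-specific code is now guarded by the architecture macro __wasm__, which (assuming common compiler behaviour, e.g. clang) is predefined for every WebAssembly target, whereas __wasi__ is only predefined when targeting the WASI OS, so non-WASI wasm targets previously fell through to the generic code paths.

// Minimal sketch of the guard pattern this commit applies throughout the tree;
// not part of the diff itself.
#include <stdio.h>

int main(void) {
#if defined(__wasm__)
  puts("WebAssembly target (WASI or not): take the wasm code path");
#else
  puts("non-wasm target: take the regular OS code path");
#endif
  return 0;
}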

@ -264,7 +264,7 @@ int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p
void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
-#if defined(__wasi__)
+#if defined(__wasm__)
// forward __libc interface (see PR #667)
void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc, size)
void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc, count, size)

@ -58,7 +58,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
else {
_mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
}
}
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
@ -113,7 +113,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
if (size == 0) { size = sizeof(void*); }
#endif
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
@ -346,7 +346,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
MI_UNUSED(block);
#endif
mi_heap_t* const heap = mi_heap_get_default();
@ -354,7 +354,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT>1)
const size_t usize = mi_page_usable_size_of(page, block);
mi_heap_stat_decrease(heap, malloc, usize);
#endif
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, normal, bsize);
#if (MI_STAT > 1)
@ -366,7 +366,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
}
else {
mi_heap_stat_decrease(heap, huge, bsize);
}
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@ -405,7 +405,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
_mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
// huge page segments are always abandoned and can be freed immediately
mi_segment_t* segment = _mi_page_segment(page);
if (segment->kind == MI_SEGMENT_HUGE) {
@ -421,7 +421,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
_mi_segment_huge_page_reset(segment, page, block);
#endif
}
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
@ -823,7 +823,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_
return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
}
-#ifndef __wasi__
+#ifndef __wasm__
// `realpath` using mi_malloc
#ifdef _WIN32
#ifndef PATH_MAX

@ -13,7 +13,7 @@ threads and need to be accessed using atomic operations.
Arenas are used for huge OS page (1GiB) reservations or for reserving
OS memory upfront, which can improve performance or is sometimes needed
-on embedded devices. We can also employ this with WASI or `sbrk` systems
+on embedded devices. We can also employ this with wasm or `sbrk` systems
to reserve large arenas upfront and be able to reuse the memory more effectively.
The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
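(Aside, not part of the diff: a minimal sketch of reserving such an arena upfront from application code, assuming the public mi_reserve_os_memory_ex declaration from mimalloc.h with the argument order used later in this file; the 64 MiB size is an arbitrary example.)

// Sketch: reserve one 64 MiB arena at startup, e.g. on a wasm/sbrk system where
// memory cannot be returned to the OS, so later allocations reuse the arena.
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_arena_id_t arena_id = 0;
  // arguments: size, commit, allow_large, exclusive, out arena id
  if (mi_reserve_os_memory_ex(64 * 1024 * 1024, false, false, false, &arena_id) != 0) {
    fprintf(stderr, "arena reservation failed; falling back to on-demand OS allocation\n");
  }
  void* p = mi_malloc(128);  // can now be served from the reserved arena
  mi_free(p);
  return 0;
}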
@ -48,13 +48,13 @@ typedef struct mi_arena_s {
size_t meta_size; // size of the arena structure itself (including its bitmaps)
mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
int numa_node; // associated NUMA node
bool exclusive; // only allow allocations if specifically for this arena
bool is_large; // memory area consists of large- or huge OS pages (always committed)
_Atomic(size_t) search_idx; // optimization to start the search for free blocks
_Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`.
mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
} mi_arena_t;
@ -103,7 +103,7 @@ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
}
/* -----------------------------------------------------------
Arena allocations get a (currently) 16-bit memory id where the
lower 8 bits are the arena id, and the upper bits the block index.
----------------------------------------------------------- */
@ -211,7 +211,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
{
size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
return true;
};
return false;
@ -231,7 +231,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
mi_bitmap_index_t bitmap_index;
if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
// claimed it!
void* p = mi_arena_block_start(arena, bitmap_index);
*memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
memid->is_pinned = arena->memid.is_pinned;
@ -271,21 +271,21 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
// no need to commit, but check if already fully committed
memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
}
return p;
}
// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
MI_UNUSED_RELEASE(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
const size_t bcount = mi_block_count_of_size(size);
const size_t arena_index = mi_arena_id_index(arena_id);
mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
mi_assert_internal(size <= mi_arena_block_size(bcount));
// Check arena suitability
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
if (arena == NULL) return NULL;
@ -305,7 +305,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large,
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
@ -313,9 +313,9 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
if mi_likely(max_arena == 0) return NULL;
if (req_arena_id != _mi_arena_id_none()) {
// try a specific arena if requested
if (mi_arena_id_index(req_arena_id) < max_arena) {
void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
@ -323,7 +323,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
}
else {
// try numa affine allocation
for (size_t i = 0; i < max_arena; i++) {
void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
@ -351,22 +351,22 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
if (arena_reserve == 0) return false;
if (!_mi_os_has_virtual_reserve()) {
arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for some embedded systems for example)
}
arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
if (arena_count >= 8 && arena_count <= 128) {
arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially
}
if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size
// commit eagerly?
bool arena_commit = false;
if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); }
else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
}
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
@ -381,9 +381,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
// otherwise, try to first eagerly reserve a new arena
if (req_arena_id == _mi_arena_id_none()) {
mi_arena_id_t arena_id = 0;
if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
@ -400,14 +400,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
errno = ENOMEM;
return NULL;
}
// finally, fall back to the OS
if (align_offset > 0) {
return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
}
else {
return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
}
}
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
@ -443,22 +443,22 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
mi_assert_internal(arena->blocks_purge != NULL);
mi_assert_internal(!arena->memid.is_pinned);
const size_t size = mi_arena_block_size(blocks);
void* const p = mi_arena_block_start(arena, bitmap_idx);
bool needs_recommit;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
// all blocks are committed, we can purge freely
needs_recommit = _mi_os_purge(p, size, stats);
}
else {
// some blocks are not committed -- this can happen when a partially committed block is freed
// in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
// and also undo the decommit stats (as it was already adjusted)
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
_mi_stat_increase(&stats->committed, size);
}
// clear the purged blocks
_mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
// update committed bitmap
@ -476,7 +476,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
if (_mi_preloading() || delay == 0) {
// decommit directly
mi_arena_purge(arena, bitmap_idx, blocks, stats);
}
else {
// schedule decommit
@ -518,7 +518,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
}
// returns true if anything was purged
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
{
if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@ -527,10 +527,10 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
// reset expire (if not already set concurrently)
mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
// potential purges scheduled, walk through the bitmap
bool any_purged = false;
bool full_purge = true;
for (size_t i = 0; i < arena->field_count; i++) {
size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
if (purge != 0) {
@ -581,7 +581,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
// allow only one thread to purge at a time
static mi_atomic_guard_t purge_guard;
mi_atomic_guard(&purge_guard)
{
mi_msecs_t now = _mi_clock_now();
size_t max_purge_count = (visit_all ? max_arena : 1);
@ -594,7 +594,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
}
}
}
}
}
@ -608,7 +608,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
if (p==NULL) return;
if (size==0) return;
const bool all_committed = (committed_size == size);
if (mi_memkind_is_os(memid.memkind)) {
// was a direct OS allocation, pass through
if (!all_committed && committed_size > 0) {
@ -626,7 +626,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
mi_assert_internal(arena != NULL);
const size_t blocks = mi_block_count_of_size(size);
// checks
if (arena == NULL) {
_mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
@ -648,7 +648,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
else {
mi_assert_internal(arena->blocks_committed != NULL);
mi_assert_internal(arena->blocks_purge != NULL);
if (!all_committed) {
// mark the entire range as no longer committed (so we recommit the full range when re-using)
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
@ -663,9 +663,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// works (as we should never reset decommitted parts).
}
// (delay) purge the entire range
mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
}
// and make it available to others again
bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
if (!all_inuse) {
@ -690,9 +690,9 @@ static void mi_arenas_unsafe_destroy(void) {
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL) {
if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
_mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
}
else {
new_max_arena = i;
@ -715,7 +715,7 @@ void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
mi_arenas_unsafe_destroy();
_mi_arena_collect(true /* force purge */, stats); // purge non-owned arenas
}
// Is a pointer inside any of our arenas?
@ -723,8 +723,8 @@ bool _mi_arena_contains(const void* p) {
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
return true;
}
}
return false;
@ -768,7 +768,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
mi_memid_t meta_memid;
mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
if (arena == NULL) return false;
// already zero'd due to os_alloc
// _mi_memzero(arena, asize);
arena->id = _mi_arena_id_none();
@ -785,12 +785,12 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
arena->search_idx = 0;
arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
// initialize committed bitmap?
if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
}
// and claim leftover blocks if needed (so we never allocate there)
ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
mi_assert_internal(post >= 0);
@ -939,4 +939,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
return err;
}

@ -14,9 +14,9 @@ terms of the MIT license. A copy of the license can be found in the file
#elif defined(__APPLE__)
#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c)
-#elif defined(__wasi__)
+#elif defined(__wasm__)
#define MI_USE_SBRK
#include "wasi/prim.c" // memory-grow or sbrk (Wasm)
#include "wasm/prim.c" // memory-grow or sbrk (Wasm)
#else
#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)

@ -3,7 +3,7 @@
This is the portability layer where all primitives needed from the OS are defined.
- `include/mimalloc/prim.h`: primitive portability API definition.
-- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform
+- `prim.c`: Selects one of `unix/prim.c`, `wasm/prim.c`, or `windows/prim.c` depending on the host platform
(and on macOS, `osx/prim.c` defers to `unix/prim.c`).
Note: still work in progress, there may still be places in the sources that still depend on OS ifdef's.
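(Aside, not part of the diff: for orientation, a sketch of a few of the primitives every prim.c port implements, with signatures copied from the hunks in this commit; the authoritative and complete list is in include/mimalloc/prim.h, and the two typedefs below are stand-ins so the excerpt is self-contained.)

// Excerpt (not exhaustive) of the per-platform primitive interface.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef int64_t mi_msecs_t;                            // stand-in for the real typedef
typedef struct mi_os_mem_config_s mi_os_mem_config_t;  // opaque here

void       _mi_prim_mem_init(mi_os_mem_config_t* config);          // page size, overcommit, ...
int        _mi_prim_free(void* addr, size_t size);                 // return memory to the OS
int        _mi_prim_commit(void* addr, size_t size, bool* is_zero);
int        _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
int        _mi_prim_reset(void* addr, size_t size);                // contents may be dropped
int        _mi_prim_protect(void* addr, size_t size, bool protect);
mi_msecs_t _mi_prim_clock_now(void);                               // low-resolution clock in msecs
bool       _mi_prim_random_buf(void* buf, size_t buf_len);         // OS randomness, if available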

@ -57,7 +57,7 @@ terms of the MIT license. A copy of the license can be found in the file
//------------------------------------------------------------------------------------
// Use syscalls for some primitives to allow for libraries that override open/read/close etc.
// and do allocation themselves; using syscalls prevents recursion when mimalloc is
// still initializing (issue #713)
//------------------------------------------------------------------------------------
@ -120,7 +120,7 @@ static bool unix_detect_overcommit(void) {
os_overcommit = (val != 0);
}
#else
// default: overcommit is true
#endif
return os_overcommit;
}
@ -168,12 +168,12 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
size_t n = mi_bsr(try_alignment);
if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
int err = errno;
_mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
}
if (p!=MAP_FAILED) return p;
// fall back to regular mmap
}
}
#elif defined(MAP_ALIGN) // Solaris
@ -189,7 +189,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
void* hint = _mi_os_get_aligned_hint(try_alignment, size);
if (hint != NULL) {
p = mmap(hint, size, protect_flags, flags, fd, 0);
if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
#if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly?
int err = 0;
#else
@ -198,7 +198,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
_mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
}
if (p!=MAP_FAILED) return p;
// fall back to regular mmap
}
}
#endif
@ -327,9 +327,9 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(commit || !allow_large);
mi_assert_internal(try_alignment > 0);
*is_zero = true;
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
*addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
return (*addr != NULL ? 0 : errno);
}
@ -357,19 +357,19 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
// was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but
// we sometimes call commit on a range with still partially committed
// memory and `mprotect` does not zero the range.
*is_zero = false;
int err = mprotect(start, size, (PROT_READ | PROT_WRITE));
if (err != 0) {
err = errno;
unix_mprotect_hint(err);
}
return err;
}
int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
int err = 0;
// decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
err = unix_madvise(start, size, MADV_DONTNEED);
#if !MI_DEBUG && !MI_SECURE
*needs_recommit = false;
#else
@ -381,15 +381,15 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
*needs_recommit = true;
const int fd = unix_mmap_fd();
void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
if (p != start) { err = errno; }
*/
return err;
}
int _mi_prim_reset(void* start, size_t size) {
// We try to use `MADV_FREE` as that is the fastest. A drawback though is that it
// will not reduce the `rss` stats in tools like `top` even though the memory is available
// to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
// default `MADV_DONTNEED` is used though.
#if defined(MADV_FREE)
static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
@ -409,7 +409,7 @@ int _mi_prim_reset(void* start, size_t size) {
int _mi_prim_protect(void* start, size_t size, bool protect) {
int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
if (err != 0) { err = errno; }
unix_mprotect_hint(err);
return err;
}
@ -450,7 +450,7 @@ int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bo
if (err != 0) {
err = errno;
_mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
}
}
return (*addr != NULL ? 0 : errno);
}
@ -565,9 +565,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
// low resolution timer
mi_msecs_t _mi_prim_clock_now(void) {
#if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
return (mi_msecs_t)clock();
#elif (CLOCKS_PER_SEC < 1000)
return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
#else
return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
#endif
@ -607,7 +607,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
pinfo->stime = timeval_secs(&rusage.ru_stime);
#if !defined(__HAIKU__)
pinfo->page_faults = rusage.ru_majflt;
#endif
#if defined(__HAIKU__)
// Haiku does not have (yet?) a way to
// get these stats per process
@ -642,7 +642,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
#else
-#ifndef __wasi__
+#ifndef __wasm__
// WebAssembly instances are not processes
#pragma message("define a way to get process info")
#endif
@ -748,7 +748,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
#elif defined(__ANDROID__) || defined(__DragonFly__) || \
defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
defined(__sun)
#include <stdlib.h>
bool _mi_prim_random_buf(void* buf, size_t buf_len) {
@ -840,7 +840,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
}
}
#else
void _mi_prim_thread_init_auto_done(void) {
// nothing

@ -19,7 +19,7 @@ terms of the MIT license. A copy of the license can be found in the file
void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB
config->alloc_granularity = 16;
config->has_overcommit = false;
config->must_free_whole = true;
config->has_virtual_reserve = false;
}
@ -30,7 +30,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
int _mi_prim_free(void* addr, size_t size ) {
MI_UNUSED(addr); MI_UNUSED(size);
-// wasi heap cannot be shrunk
+// wasm linear memory cannot be shrunk
return 0;
}
@ -43,12 +43,12 @@ int _mi_prim_free(void* addr, size_t size ) {
static void* mi_memory_grow( size_t size ) {
void* p = sbrk(size);
if (p == (void*)(-1)) return NULL;
-#if !defined(__wasi__) // on wasi this is always zero initialized already (?)
+#if !defined(__wasm__) // on wasm this is always zero initialized already.
memset(p,0,size);
#endif
return p;
}
-#elif defined(__wasi__)
+#elif defined(__wasm__)
static void* mi_memory_grow( size_t size ) {
size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size()))
: __builtin_wasm_memory_size(0));
@ -129,7 +129,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
//---------------------------------------------
int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
MI_UNUSED(addr); MI_UNUSED(size);
*is_zero = false;
return 0;
}
@ -194,9 +194,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
// low resolution timer
mi_msecs_t _mi_prim_clock_now(void) {
#if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
return (mi_msecs_t)clock();
#elif (CLOCKS_PER_SEC < 1000)
return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
#else
return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
#endif

@ -160,7 +160,7 @@ If we cannot get good randomness, we fall back to weak randomness based on a tim
uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random
x ^= _mi_prim_clock_now();
// and do a few randomization steps
uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
for (uintptr_t i = 0; i < max; i++) {
@ -175,7 +175,7 @@ static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) {
// if we fail to get random data from the OS, we fall back to a
// weak random source based on the current time
-#if !defined(__wasi__)
+#if !defined(__wasm__)
if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); }
#endif
uintptr_t x = _mi_os_random_weak(0);