Use __wasm__ instead of __wasi__ for wasm targets

Cheng Shao 2023-08-28 09:33:56 +00:00
parent 10efe291af
commit 1338edf2cf
11 changed files with 109 additions and 114 deletions
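
Background on the macro switch: with clang, __wasm__ is predefined for every WebAssembly target (wasm32-wasi, wasm32-unknown-unknown, Emscripten), whereas __wasi__ is only predefined when targeting a WASI environment. Checking __wasm__ therefore covers all wasm targets directly, which is also why the Emscripten shim that force-defined __wasi__ (removed from the internal header below) is no longer needed. A minimal sketch of the distinction; the messages are illustrative only, not part of this commit:

#include <stdio.h>

int main(void) {
#if defined(__wasm__)
  // predefined for any WebAssembly target, including Emscripten and WASI
  puts("__wasm__ is defined");
#endif
#if defined(__wasi__)
  // predefined only when targeting a WASI environment (e.g. wasm32-wasi)
  puts("__wasi__ is defined");
#endif
#if defined(__wasm__) && !defined(__wasi__)
  // e.g. Emscripten or wasm32-unknown-unknown: wasm, but not WASI
  puts("wasm target without WASI");
#endif
  return 0;
}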

View file

@@ -304,7 +304,7 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t;
 // Returns true only on the first invocation
 static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
 if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
 uintptr_t expected = 0;
 return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
 }
@@ -373,7 +373,7 @@ static inline void mi_atomic_yield(void) {
 static inline void mi_atomic_yield(void) {
 smt_pause();
 }
-#elif defined(__wasi__)
+#elif defined(__wasm__)
 #include <sched.h>
 static inline void mi_atomic_yield(void) {
 sched_yield();

View file

@@ -40,10 +40,6 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_decl_cache_align
 #endif
-#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
-#define __wasi__
-#endif
 #if defined(__cplusplus)
 #define mi_decl_externc extern "C"
 #else
@@ -51,7 +47,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 // pthreads
-#if !defined(_WIN32) && !defined(__wasi__)
+#if !defined(_WIN32) && !defined(__wasm__)
 #define MI_USE_PTHREADS
 #include <pthread.h>
 #endif
@@ -88,7 +84,7 @@ void _mi_thread_data_collect(void);
 // os.c
 void _mi_os_init(void); // called from process init
 void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
 void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
 void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
@@ -427,7 +423,7 @@ static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
 // Segment belonging to a page
 static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
 mi_segment_t* segment = _mi_ptr_segment(page);
 mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
 return segment;
 }
@@ -729,12 +725,12 @@ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
 #define mi_commit_mask_foreach(cm,idx,count) \
 idx = 0; \
 while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
 #define mi_commit_mask_foreach_end() \
 idx += count; \
 }
 /* -----------------------------------------------------------

View file

@@ -12,7 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // --------------------------------------------------------------------------
 // This file specifies the primitive portability API.
 // Each OS/host needs to implement these primitives, see `src/prim`
-// for implementations on Window, macOS, WASI, and Linux/Unix.
+// for implementations on Window, macOS, wasm, and Linux/Unix.
 //
 // note: on all primitive functions, we always have result parameters != NUL, and:
 // addr != NULL and page aligned
@@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config );
 // Free OS memory
 int _mi_prim_free(void* addr, size_t size );
 // Allocate OS memory. Return NULL on error.
 // The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
 // If `commit` is false, the virtual memory range only needs to be reserved (with no access)
 // which will later be committed explicitly using `_mi_prim_commit`.
 // `is_zero` is set to true if the memory was zero initialized (as on most OS's)
 // pre: !commit => !allow_large
@@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void);
 typedef struct mi_process_info_s {
 mi_msecs_t elapsed;
 mi_msecs_t utime;
 mi_msecs_t stime;
 size_t current_rss;
 size_t peak_rss;
 size_t current_commit;
 size_t peak_commit;
 size_t page_faults;
 } mi_process_info_t;
@@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
 //-------------------------------------------------------------------
 // Thread id: `_mi_prim_thread_id()`
 //
 // Getting the thread id should be performant as it is called in the
 // fast path of `_mi_free` and we specialize for various platforms as
 // inlined definitions. Regular code should call `init.c:_mi_thread_id()`.

View file

@@ -264,7 +264,7 @@ int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p
 void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
 void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
-#if defined(__wasi__)
+#if defined(__wasm__)
 // forward __libc interface (see PR #667)
 void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc, size)
 void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc, count, size)

View file

@@ -58,7 +58,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
 }
 else {
 _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
 }
 }
 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
@@ -113,7 +113,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
 if (size == 0) { size = sizeof(void*); }
 #endif
 mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
 void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
 mi_track_malloc(p,size,zero);
 #if MI_STAT>1
 if (p != NULL) {
@@ -346,7 +346,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
 #if (MI_STAT < 2)
 MI_UNUSED(block);
 #endif
 mi_heap_t* const heap = mi_heap_get_default();
@@ -354,7 +354,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
 #if (MI_STAT>1)
 const size_t usize = mi_page_usable_size_of(page, block);
 mi_heap_stat_decrease(heap, malloc, usize);
 #endif
 if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
 mi_heap_stat_decrease(heap, normal, bsize);
 #if (MI_STAT > 1)
@@ -366,7 +366,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
 }
 else {
 mi_heap_stat_decrease(heap, huge, bsize);
 }
 }
 #else
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@@ -405,7 +405,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
 mi_check_padding(page, block);
 _mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
 // huge page segments are always abandoned and can be freed immediately
 mi_segment_t* segment = _mi_page_segment(page);
 if (segment->kind == MI_SEGMENT_HUGE) {
@@ -421,7 +421,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 _mi_segment_huge_page_reset(segment, page, block);
 #endif
 }
 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
 if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
 memset(block, MI_DEBUG_FREED, mi_usable_size(block));
@@ -823,7 +823,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_
 return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
 }
-#ifndef __wasi__
+#ifndef __wasm__
 // `realpath` using mi_malloc
 #ifdef _WIN32
 #ifndef PATH_MAX

View file

@@ -13,7 +13,7 @@ threads and need to be accessed using atomic operations.
 Arenas are used to for huge OS page (1GiB) reservations or for reserving
 OS memory upfront which can be improve performance or is sometimes needed
-on embedded devices. We can also employ this with WASI or `sbrk` systems
+on embedded devices. We can also employ this with wasm or `sbrk` systems
 to reserve large arenas upfront and be able to reuse the memory more effectively.
 The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
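
For a sense of how such an upfront arena is reserved in practice (a usage note, not part of the diff): mimalloc exposes `mi_reserve_os_memory` in `mimalloc.h`, alongside the `mi_option_arena_reserve` option seen later in this file. A minimal sketch, assuming that standard public API:

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // Reserve 256 MiB of OS memory upfront as an arena (commit lazily,
  // no large/huge OS pages). Returns 0 on success, an errno value otherwise.
  int err = mi_reserve_os_memory(256 * 1024 * 1024, /*commit*/ false, /*allow_large*/ false);
  if (err != 0) {
    fprintf(stderr, "arena reservation failed: %d\n", err);
  }
  void* p = mi_malloc(1024);  // later allocations can be carved out of the arena
  mi_free(p);
  return 0;
}

On sbrk/wasm-style systems this keeps subsequent allocations inside the one pre-grown region instead of repeatedly growing memory.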
@@ -48,13 +48,13 @@ typedef struct mi_arena_s {
 size_t meta_size; // size of the arena structure itself (including its bitmaps)
 mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
 int numa_node; // associated NUMA node
 bool exclusive; // only allow allocations if specifically for this arena
 bool is_large; // memory area consists of large- or huge OS pages (always committed)
 _Atomic(size_t) search_idx; // optimization to start the search for free blocks
 _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`.
 mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
 mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
 mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
 mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
@@ -103,7 +103,7 @@ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
 }
 /* -----------------------------------------------------------
 Arena allocations get a (currently) 16-bit memory id where the
 lower 8 bits are the arena id, and the upper bits the block index.
 ----------------------------------------------------------- */
@@ -211,7 +211,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
 {
 size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
 if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
 mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
 return true;
 };
 return false;
@@ -231,7 +231,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
 mi_bitmap_index_t bitmap_index;
 if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
 // claimed it!
 void* p = mi_arena_block_start(arena, bitmap_index);
 *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
 memid->is_pinned = arena->memid.is_pinned;
@@ -271,21 +271,21 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
 // no need to commit, but check if already fully committed
 memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
 }
 return p;
 }
 // allocate in a speficic arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
 bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
 MI_UNUSED_RELEASE(alignment);
 mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
 const size_t bcount = mi_block_count_of_size(size);
 const size_t arena_index = mi_arena_id_index(arena_id);
 mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
 mi_assert_internal(size <= mi_arena_block_size(bcount));
 // Check arena suitability
 mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
 if (arena == NULL) return NULL;
@@ -305,7 +305,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
 // allocate from an arena with fallback to the OS
 static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
 bool commit, bool allow_large,
 mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
@@ -313,9 +313,9 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
 mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
 const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
 if mi_likely(max_arena == 0) return NULL;
 if (req_arena_id != _mi_arena_id_none()) {
 // try a specific arena if requested
 if (mi_arena_id_index(req_arena_id) < max_arena) {
 void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
 if (p != NULL) return p;
@@ -323,7 +323,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
 }
 else {
 // try numa affine allocation
 for (size_t i = 0; i < max_arena; i++) {
 void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
 if (p != NULL) return p;
 }
@@ -351,22 +351,22 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
 size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
 if (arena_reserve == 0) return false;
 if (!_mi_os_has_virtual_reserve()) {
 arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for some embedded systems for example)
 }
 arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
 if (arena_count >= 8 && arena_count <= 128) {
 arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially
 }
 if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size
 // commit eagerly?
 bool arena_commit = false;
 if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); }
 else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
 return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
 }
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
@@ -381,9 +381,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
 // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
 if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
 void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
 if (p != NULL) return p;
 // otherwise, try to first eagerly reserve a new arena
 if (req_arena_id == _mi_arena_id_none()) {
 mi_arena_id_t arena_id = 0;
 if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
@@ -400,14 +400,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
 errno = ENOMEM;
 return NULL;
 }
 // finally, fall back to the OS
 if (align_offset > 0) {
 return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
 }
 else {
 return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
 }
 }
 void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
@@ -443,22 +443,22 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
 mi_assert_internal(arena->blocks_purge != NULL);
 mi_assert_internal(!arena->memid.is_pinned);
 const size_t size = mi_arena_block_size(blocks);
 void* const p = mi_arena_block_start(arena, bitmap_idx);
 bool needs_recommit;
 if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
 // all blocks are committed, we can purge freely
 needs_recommit = _mi_os_purge(p, size, stats);
 }
 else {
 // some blocks are not committed -- this can happen when a partially committed block is freed
 // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
 // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
 // and also undo the decommit stats (as it was already adjusted)
 mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
 needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
 _mi_stat_increase(&stats->committed, size);
 }
 // clear the purged blocks
 _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
 // update committed bitmap
@@ -476,7 +476,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
 if (_mi_preloading() || delay == 0) {
 // decommit directly
 mi_arena_purge(arena, bitmap_idx, blocks, stats);
 }
 else {
 // schedule decommit
@@ -518,7 +518,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
 }
 // returns true if anything was purged
 static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
 {
 if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
 mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@@ -527,10 +527,10 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
 // reset expire (if not already set concurrently)
 mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
 // potential purges scheduled, walk through the bitmap
 bool any_purged = false;
 bool full_purge = true;
 for (size_t i = 0; i < arena->field_count; i++) {
 size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
 if (purge != 0) {
@@ -581,7 +581,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
 // allow only one thread to purge at a time
 static mi_atomic_guard_t purge_guard;
 mi_atomic_guard(&purge_guard)
 {
 mi_msecs_t now = _mi_clock_now();
 size_t max_purge_count = (visit_all ? max_arena : 1);
@@ -594,7 +594,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
 }
 }
 }
 }
 }
@@ -608,7 +608,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
 if (p==NULL) return;
 if (size==0) return;
 const bool all_committed = (committed_size == size);
 if (mi_memkind_is_os(memid.memkind)) {
 // was a direct OS allocation, pass through
 if (!all_committed && committed_size > 0) {
@@ -626,7 +626,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
 mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
 mi_assert_internal(arena != NULL);
 const size_t blocks = mi_block_count_of_size(size);
 // checks
 if (arena == NULL) {
 _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
@@ -648,7 +648,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
 else {
 mi_assert_internal(arena->blocks_committed != NULL);
 mi_assert_internal(arena->blocks_purge != NULL);
 if (!all_committed) {
 // mark the entire range as no longer committed (so we recommit the full range when re-using)
 _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
@@ -663,9 +663,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
 // works (as we should never reset decommitted parts).
 }
 // (delay) purge the entire range
 mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
 }
 // and make it available to others again
 bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
 if (!all_inuse) {
@@ -690,9 +690,9 @@ static void mi_arenas_unsafe_destroy(void) {
 for (size_t i = 0; i < max_arena; i++) {
 mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
 if (arena != NULL) {
 if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
 mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
 _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
 }
 else {
 new_max_arena = i;
@@ -715,7 +715,7 @@ void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
 void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
 mi_arenas_unsafe_destroy();
 _mi_arena_collect(true /* force purge */, stats); // purge non-owned arenas
 }
 // Is a pointer inside any of our arenas?
@@ -723,8 +723,8 @@ bool _mi_arena_contains(const void* p) {
 const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
 for (size_t i = 0; i < max_arena; i++) {
 mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
 if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
 return true;
 }
 }
 return false;
@@ -768,7 +768,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
 mi_memid_t meta_memid;
 mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
 if (arena == NULL) return false;
 // already zero'd due to os_alloc
 // _mi_memzero(arena, asize);
 arena->id = _mi_arena_id_none();
@@ -785,12 +785,12 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
 arena->search_idx = 0;
 arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
 arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
 arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
 // initialize committed bitmap?
 if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
 memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
 }
 // and claim leftover blocks if needed (so we never allocate there)
 ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
 mi_assert_internal(post >= 0);
@@ -939,4 +939,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
 if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
 return err;
 }

View file

@@ -14,9 +14,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #elif defined(__APPLE__)
 #include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c)
-#elif defined(__wasi__)
+#elif defined(__wasm__)
 #define MI_USE_SBRK
-#include "wasi/prim.c" // memory-grow or sbrk (Wasm)
+#include "wasm/prim.c" // memory-grow or sbrk (Wasm)
 #else
 #include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)

View file

@@ -3,7 +3,7 @@
 This is the portability layer where all primitives needed from the OS are defined.
 - `include/mimalloc/prim.h`: primitive portability API definition.
-- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform
+- `prim.c`: Selects one of `unix/prim.c`, `wasm/prim.c`, or `windows/prim.c` depending on the host platform
   (and on macOS, `osx/prim.c` defers to `unix/prim.c`).
 Note: still work in progress, there may still be places in the sources that still depend on OS ifdef's.

View file

@@ -57,7 +57,7 @@ terms of the MIT license. A copy of the license can be found in the file
 //------------------------------------------------------------------------------------
 // Use syscalls for some primitives to allow for libraries that override open/read/close etc.
 // and do allocation themselves; using syscalls prevents recursion when mimalloc is
 // still initializing (issue #713)
 //------------------------------------------------------------------------------------
@@ -120,7 +120,7 @@ static bool unix_detect_overcommit(void) {
 os_overcommit = (val != 0);
 }
 #else
 // default: overcommit is true
 #endif
 return os_overcommit;
 }
@@ -168,12 +168,12 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
 size_t n = mi_bsr(try_alignment);
 if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
 p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
 if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
 int err = errno;
 _mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
 }
 if (p!=MAP_FAILED) return p;
 // fall back to regular mmap
 }
 }
 #elif defined(MAP_ALIGN) // Solaris
@@ -189,7 +189,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
 void* hint = _mi_os_get_aligned_hint(try_alignment, size);
 if (hint != NULL) {
 p = mmap(hint, size, protect_flags, flags, fd, 0);
 if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
 #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly?
 int err = 0;
 #else
@@ -198,7 +198,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
 _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
 }
 if (p!=MAP_FAILED) return p;
 // fall back to regular mmap
 }
 }
 #endif
@@ -327,9 +327,9 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
 mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
 mi_assert_internal(commit || !allow_large);
 mi_assert_internal(try_alignment > 0);
 *is_zero = true;
 int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
 *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
 return (*addr != NULL ? 0 : errno);
 }
@@ -357,19 +357,19 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
 // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but
 // we sometimes call commit on a range with still partially committed
 // memory and `mprotect` does not zero the range.
 *is_zero = false;
 int err = mprotect(start, size, (PROT_READ | PROT_WRITE));
 if (err != 0) {
 err = errno;
 unix_mprotect_hint(err);
 }
 return err;
 }
 int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
 int err = 0;
 // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
 err = unix_madvise(start, size, MADV_DONTNEED);
 #if !MI_DEBUG && !MI_SECURE
 *needs_recommit = false;
 #else
@@ -381,15 +381,15 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
 *needs_recommit = true;
 const int fd = unix_mmap_fd();
 void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
 if (p != start) { err = errno; }
 */
 return err;
 }
 int _mi_prim_reset(void* start, size_t size) {
 // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it
 // will not reduce the `rss` stats in tools like `top` even though the memory is available
 // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
 // default `MADV_DONTNEED` is used though.
 #if defined(MADV_FREE)
 static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
@@ -409,7 +409,7 @@ int _mi_prim_reset(void* start, size_t size) {
 int _mi_prim_protect(void* start, size_t size, bool protect) {
 int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
 if (err != 0) { err = errno; }
 unix_mprotect_hint(err);
 return err;
 }
@@ -450,7 +450,7 @@ int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bo
 if (err != 0) {
 err = errno;
 _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
 }
 }
 return (*addr != NULL ? 0 : errno);
 }
@@ -565,9 +565,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
 // low resolution timer
 mi_msecs_t _mi_prim_clock_now(void) {
 #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
 return (mi_msecs_t)clock();
 #elif (CLOCKS_PER_SEC < 1000)
 return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
 #else
 return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
 #endif
@@ -607,7 +607,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
 pinfo->stime = timeval_secs(&rusage.ru_stime);
 #if !defined(__HAIKU__)
 pinfo->page_faults = rusage.ru_majflt;
 #endif
 #if defined(__HAIKU__)
 // Haiku does not have (yet?) a way to
 // get these stats per process
@@ -642,7 +642,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
 #else
-#ifndef __wasi__
+#ifndef __wasm__
 // WebAssembly instances are not processes
 #pragma message("define a way to get process info")
 #endif
@@ -748,7 +748,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
 #elif defined(__ANDROID__) || defined(__DragonFly__) || \
 defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
 defined(__sun)
 #include <stdlib.h>
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {
@@ -840,7 +840,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 }
 }
 #else
 void _mi_prim_thread_init_auto_done(void) {
 // nothing

View file

@@ -19,7 +19,7 @@ terms of the MIT license. A copy of the license can be found in the file
 void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
 config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB
 config->alloc_granularity = 16;
 config->has_overcommit = false;
 config->must_free_whole = true;
 config->has_virtual_reserve = false;
 }
@@ -30,7 +30,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
 int _mi_prim_free(void* addr, size_t size ) {
 MI_UNUSED(addr); MI_UNUSED(size);
-// wasi heap cannot be shrunk
+// wasm linear memory cannot be shrunk
 return 0;
 }
@@ -43,12 +43,12 @@ int _mi_prim_free(void* addr, size_t size ) {
 static void* mi_memory_grow( size_t size ) {
 void* p = sbrk(size);
 if (p == (void*)(-1)) return NULL;
-#if !defined(__wasi__) // on wasi this is always zero initialized already (?)
+#if !defined(__wasm__) // on wasm this is always zero initialized already.
 memset(p,0,size);
 #endif
 return p;
 }
-#elif defined(__wasi__)
+#elif defined(__wasm__)
 static void* mi_memory_grow( size_t size ) {
 size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size()))
 : __builtin_wasm_memory_size(0));
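
For background on the builtin used in the hunk above (not part of the diff): with clang, __builtin_wasm_memory_grow(0, delta) grows linear memory 0 by delta 64 KiB pages and returns the previous size in pages, or (size_t)-1 on failure, while __builtin_wasm_memory_size(0) returns the current size in pages. A minimal sketch of turning a grow into a usable pointer; the helper name is illustrative, not mimalloc's:

#include <stddef.h>
#include <stdint.h>

// Grow wasm linear memory by `size` bytes (rounded up to whole 64 KiB pages)
// and return a pointer to the start of the newly added region, or NULL on failure.
static void* wasm_alloc_pages(size_t size) {
  const size_t page_size = 65536;                      // WebAssembly pages are fixed at 64 KiB
  size_t pages = (size + page_size - 1) / page_size;
  size_t prev = __builtin_wasm_memory_grow(0, pages);  // returns old size in pages, or -1
  if (prev == (size_t)-1) return NULL;                 // growing failed (out of memory)
  return (void*)(uintptr_t)(prev * page_size);         // old end of memory = start of new region
}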
@@ -129,7 +129,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
 //---------------------------------------------
 int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
 MI_UNUSED(addr); MI_UNUSED(size);
 *is_zero = false;
 return 0;
 }
@@ -194,9 +194,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
 // low resolution timer
 mi_msecs_t _mi_prim_clock_now(void) {
 #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
 return (mi_msecs_t)clock();
 #elif (CLOCKS_PER_SEC < 1000)
 return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
 #else
 return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
 #endif

View file

@@ -160,7 +160,7 @@ If we cannot get good randomness, we fall back to weak randomness based on a tim
 uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
 uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random
 x ^= _mi_prim_clock_now();
 // and do a few randomization steps
 uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
 for (uintptr_t i = 0; i < max; i++) {
@@ -175,7 +175,7 @@ static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
 if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) {
 // if we fail to get random data from the OS, we fall back to a
 // weak random source based on the current time
-#if !defined(__wasi__)
+#if !defined(__wasm__)
 if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); }
 #endif
 uintptr_t x = _mi_os_random_weak(0);