Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-11 21:58:41 +03:00)
Merge bf8f14054c into 10efe291af
Commit 900b3e1442
12 changed files with 154 additions and 163 deletions
@@ -304,7 +304,7 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t;
 // Returns true only on the first invocation
 static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
   if (mi_atomic_load_relaxed(once) != 0) return false;     // quick test
   uintptr_t expected = 0;
   return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
 }
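The hunk above documents `mi_atomic_once`: every caller does a cheap relaxed load first, and only the first successful compare-and-swap from 0 to 1 returns true. A minimal usage sketch of such a once-guard (the subsystem names and the once variable are illustrative, not part of mimalloc):

```c
#include <stdbool.h>
#include "mimalloc/atomic.h"   // assumed include path for mi_atomic_once_t / mi_atomic_once

static mi_atomic_once_t my_once;   // zero-initialized: the action has not run yet

static void my_one_time_setup(void) {
  // hypothetical one-time initialization work
}

static void my_ensure_initialized(void) {
  if (mi_atomic_once(&my_once)) {  // true only for the first caller that wins the CAS
    my_one_time_setup();
  }
}
```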
@@ -373,7 +373,7 @@ static inline void mi_atomic_yield(void) {
 static inline void mi_atomic_yield(void) {
   smt_pause();
 }
-#elif defined(__wasi__)
+#elif defined(__wasm__)
 #include <sched.h>
 static inline void mi_atomic_yield(void) {
   sched_yield();
@@ -40,10 +40,6 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_decl_cache_align
 #endif
 
-#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
-#define __wasi__
-#endif
-
 #if defined(__cplusplus)
 #define mi_decl_externc extern "C"
 #else
@@ -51,7 +47,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 
 // pthreads
-#if !defined(_WIN32) && !defined(__wasi__)
+#if !defined(_WIN32) && !defined(__wasm__)
 #define MI_USE_PTHREADS
 #include <pthread.h>
 #endif
@@ -88,7 +84,7 @@ void _mi_thread_data_collect(void);
 
 // os.c
 void  _mi_os_init(void);        // called from process init
 void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
 void  _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
 void  _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
@@ -311,6 +307,14 @@ static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
   }
 }
 
+__attribute__((__unused__)) static void* mi_align_up_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_up((uintptr_t)p, alignment);
+}
+
+__attribute__((__unused__)) static void* mi_align_down_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_down((uintptr_t)p, alignment);
+}
+
 // Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
 static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
   mi_assert_internal(divider != 0);
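The comment above pins down `_mi_divide_up` with the contract `s <= _mi_divide_up(s,d)*d < s+d`, i.e. ordinary round-up division. A standalone sketch of that arithmetic (a re-implementation for illustration, not the mimalloc body):

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

// round-up division satisfying  s <= divide_up(s,d)*d < s+d  (for d != 0)
static uintptr_t divide_up(uintptr_t size, size_t divider) {
  return (size + divider - 1) / divider;
}

int main(void) {
  assert(divide_up(10, 4) == 3);  // 3*4 = 12 and 10 <= 12 < 14
  assert(divide_up(12, 4) == 3);  // exact multiples are unchanged
  assert(divide_up(1, 8)  == 1);
  return 0;
}
```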
@@ -427,7 +431,7 @@ static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
 
 // Segment belonging to a page
 static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
   mi_segment_t* segment = _mi_ptr_segment(page);
   mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
   return segment;
 }
@@ -729,12 +733,12 @@ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
 
 #define mi_commit_mask_foreach(cm,idx,count) \
   idx = 0; \
   while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
 
 #define mi_commit_mask_foreach_end() \
   idx += count; \
   }
 
 
 /* -----------------------------------------------------------
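The `mi_commit_mask_foreach` / `mi_commit_mask_foreach_end` macros above expand to a `while` loop over runs of set bits returned by `_mi_commit_mask_next_run`, so a call site brackets its body between the two. A sketch of the call shape (a fragment only; it assumes a `cm` commit mask already in scope, as inside the mimalloc segment code):

```c
// iterate over runs of committed blocks in the commit mask `cm`
size_t idx;
size_t count;
mi_commit_mask_foreach(cm, idx, count) {
  // blocks [idx, idx+count) are set in the mask; process the run here
}
mi_commit_mask_foreach_end()
```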
@@ -12,7 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // --------------------------------------------------------------------------
 // This file specifies the primitive portability API.
 // Each OS/host needs to implement these primitives, see `src/prim`
-// for implementations on Window, macOS, WASI, and Linux/Unix.
+// for implementations on Window, macOS, wasm, and Linux/Unix.
 //
 // note: on all primitive functions, we always have result parameters != NUL, and:
 //  addr != NULL and page aligned
@@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config );
 
 // Free OS memory
 int _mi_prim_free(void* addr, size_t size );
 
 // Allocate OS memory. Return NULL on error.
 // The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
 // If `commit` is false, the virtual memory range only needs to be reserved (with no access)
 // which will later be committed explicitly using `_mi_prim_commit`.
 // `is_zero` is set to true if the memory was zero initialized (as on most OS's)
 // pre: !commit => !allow_large
@@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void);
 typedef struct mi_process_info_s {
   mi_msecs_t  elapsed;
   mi_msecs_t  utime;
   mi_msecs_t  stime;
   size_t      current_rss;
   size_t      peak_rss;
   size_t      current_commit;
   size_t      peak_commit;
   size_t      page_faults;
 } mi_process_info_t;
@@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
 
 //-------------------------------------------------------------------
 // Thread id: `_mi_prim_thread_id()`
 //
 // Getting the thread id should be performant as it is called in the
 // fast path of `_mi_free` and we specialize for various platforms as
 // inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
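The header asks that `_mi_prim_thread_id()` be very cheap because it sits on the `_mi_free` fast path. One common way to get a cheap, stable per-thread value, shown here only as a hedged illustration (this is not the mimalloc definition for any particular platform), is to take the address of a thread-local object:

```c
#include <stdint.h>

static _Thread_local int tls_probe;  // any thread-local object has a distinct per-thread address

static inline uintptr_t my_thread_id(void) {
  return (uintptr_t)&tls_probe;      // unique per thread and cheap to compute
}
```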
@@ -264,7 +264,7 @@ int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p
 void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
 void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
 
-#if defined(__wasi__)
+#if defined(__wasm__)
 // forward __libc interface (see PR #667)
 void* __libc_malloc(size_t size)  MI_FORWARD1(mi_malloc, size)
 void* __libc_calloc(size_t count, size_t size)  MI_FORWARD2(mi_calloc, count, size)
src/alloc.c (16 changed lines)
@@ -58,7 +58,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   }
   else {
     _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
   }
 }
 
 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
@@ -113,7 +113,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   if (size == 0) { size = sizeof(void*); }
   #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
   void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
   mi_track_malloc(p,size,zero);
   #if MI_STAT>1
   if (p != NULL) {
@@ -346,7 +346,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   #if (MI_STAT < 2)
   MI_UNUSED(block);
   #endif
   mi_heap_t* const heap = mi_heap_get_default();
@@ -354,7 +354,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   #if (MI_STAT>1)
   const size_t usize = mi_page_usable_size_of(page, block);
   mi_heap_stat_decrease(heap, malloc, usize);
   #endif
   if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
     mi_heap_stat_decrease(heap, normal, bsize);
     #if (MI_STAT > 1)
@@ -366,7 +366,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   }
   else {
     mi_heap_stat_decrease(heap, huge, bsize);
   }
 }
 #else
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@@ -405,7 +405,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
   _mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
 
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* segment = _mi_page_segment(page);
   if (segment->kind == MI_SEGMENT_HUGE) {
@@ -421,7 +421,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     _mi_segment_huge_page_reset(segment, page, block);
     #endif
   }
 
   #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
   if (segment->kind != MI_SEGMENT_HUGE) {            // not for huge segments as we just reset the content
     memset(block, MI_DEBUG_FREED, mi_usable_size(block));
@@ -823,7 +823,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_
   return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
 }
 
-#ifndef __wasi__
+#ifndef __wasm__
 // `realpath` using mi_malloc
 #ifdef _WIN32
 #ifndef PATH_MAX
src/arena.c (95 changed lines)
@@ -13,7 +13,7 @@ threads and need to be accessed using atomic operations.
 
 Arenas are used to for huge OS page (1GiB) reservations or for reserving
 OS memory upfront which can be improve performance or is sometimes needed
-on embedded devices. We can also employ this with WASI or `sbrk` systems
+on embedded devices. We can also employ this with wasm or `sbrk` systems
 to reserve large arenas upfront and be able to reuse the memory more effectively.
 
 The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
@@ -48,13 +48,13 @@ typedef struct mi_arena_s {
   size_t   meta_size;                  // size of the arena structure itself (including its bitmaps)
   mi_memid_t meta_memid;               // memid of the arena structure itself (OS or static allocation)
   int      numa_node;                  // associated NUMA node
   bool     exclusive;                  // only allow allocations if specifically for this arena
   bool     is_large;                   // memory area consists of large- or huge OS pages (always committed)
   _Atomic(size_t) search_idx;          // optimization to start the search for free blocks
   _Atomic(mi_msecs_t) purge_expire;    // expiration time when blocks should be decommitted from `blocks_decommit`.
   mi_bitmap_field_t* blocks_dirty;     // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t* blocks_purge;     // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
   mi_bitmap_field_t  blocks_inuse[1];  // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
@@ -103,7 +103,7 @@ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
 }
 
 /* -----------------------------------------------------------
   Arena allocations get a (currently) 16-bit memory id where the
   lower 8 bits are the arena id, and the upper bits the block index.
 ----------------------------------------------------------- */
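The comment block above describes the current memory-id encoding: the lower 8 bits select the arena and the remaining bits carry the block index. A hedged sketch of such a packing scheme (helper names and the exact field widths are illustrative, not the mimalloc implementation):

```c
#include <stddef.h>

// pack/unpack an arena id (low 8 bits) and a block index (upper bits)
static size_t arena_memid_pack(size_t arena_id, size_t block_index) {
  return (block_index << 8) | (arena_id & 0xFF);
}

static void arena_memid_unpack(size_t memid, size_t* arena_id, size_t* block_index) {
  *arena_id    = memid & 0xFF;
  *block_index = memid >> 8;
}
```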
@@ -211,7 +211,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
 {
   size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
   if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
     mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
     return true;
   };
   return false;
@@ -231,7 +231,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   mi_bitmap_index_t bitmap_index;
   if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
 
   // claimed it!
   void* p = mi_arena_block_start(arena, bitmap_index);
   *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
   memid->is_pinned = arena->memid.is_pinned;
@@ -271,21 +271,21 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
     // no need to commit, but check if already fully committed
     memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
   }
 
   return p;
 }
 
 // allocate in a speficic arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                       bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED_RELEASE(alignment);
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
   const size_t bcount = mi_block_count_of_size(size);
   const size_t arena_index = mi_arena_id_index(arena_id);
   mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
   mi_assert_internal(size <= mi_arena_block_size(bcount));
 
   // Check arena suitability
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
   if (arena == NULL) return NULL;
@@ -305,7 +305,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
 
 // allocate from an arena with fallback to the OS
 static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
                                                  bool commit, bool allow_large,
                                                  mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
@@ -313,9 +313,9 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
   if mi_likely(max_arena == 0) return NULL;
 
   if (req_arena_id != _mi_arena_id_none()) {
     // try a specific arena if requested
     if (mi_arena_id_index(req_arena_id) < max_arena) {
       void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
       if (p != NULL) return p;
@@ -323,7 +323,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
   }
   else {
     // try numa affine allocation
     for (size_t i = 0; i < max_arena; i++) {
       void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
       if (p != NULL) return p;
     }
@@ -351,22 +351,22 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
   size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
   if (arena_reserve == 0) return false;
 
   if (!_mi_os_has_virtual_reserve()) {
     arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for some embedded systems for example)
   }
   arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
   if (arena_count >= 8 && arena_count <= 128) {
     arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve;  // scale up the arena sizes exponentially
   }
   if (arena_reserve < req_size) return false;  // should be able to at least handle the current allocation size
 
   // commit eagerly?
   bool arena_commit = false;
   if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
   else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
 
   return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
 }
 
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
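In `mi_arena_reserve` above, the reservation grows geometrically with the number of arenas already created: the scale factor is `1 << (arena_count/8)`, so the configured reserve is doubled for the 8th..15th arena, quadrupled for the 16th..23rd, and so on. A standalone sketch of just that scaling step (the 256 MiB base value is an assumed example for the `mi_option_arena_reserve` setting):

```c
#include <stdio.h>
#include <stddef.h>

int main(void) {
  const size_t base_reserve = (size_t)256 * 1024 * 1024;  // assumed option value
  for (size_t arena_count = 0; arena_count <= 32; arena_count += 8) {
    size_t reserve = base_reserve;
    if (arena_count >= 8 && arena_count <= 128) {
      reserve = ((size_t)1 << (arena_count / 8)) * reserve;  // scale up exponentially
    }
    printf("arena_count=%2zu -> reserve=%4zu MiB\n", arena_count, reserve / (1024 * 1024));
  }
  return 0;
}
```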
@@ -381,9 +381,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
     void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
     if (p != NULL) return p;
 
     // otherwise, try to first eagerly reserve a new arena
     if (req_arena_id == _mi_arena_id_none()) {
       mi_arena_id_t arena_id = 0;
       if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
@@ -400,14 +400,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
     errno = ENOMEM;
     return NULL;
   }
 
   // finally, fall back to the OS
   if (align_offset > 0) {
     return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
   }
   else {
     return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
   }
 }
 
 void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
@@ -443,22 +443,22 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
   mi_assert_internal(arena->blocks_purge != NULL);
   mi_assert_internal(!arena->memid.is_pinned);
   const size_t size = mi_arena_block_size(blocks);
   void* const p = mi_arena_block_start(arena, bitmap_idx);
   bool needs_recommit;
   if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
     // all blocks are committed, we can purge freely
     needs_recommit = _mi_os_purge(p, size, stats);
   }
   else {
     // some blocks are not committed -- this can happen when a partially committed block is freed
     // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
     // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
     // and also undo the decommit stats (as it was already adjusted)
     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
     needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
     _mi_stat_increase(&stats->committed, size);
   }
 
   // clear the purged blocks
   _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
   // update committed bitmap
@@ -476,7 +476,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
 
   if (_mi_preloading() || delay == 0) {
     // decommit directly
     mi_arena_purge(arena, bitmap_idx, blocks, stats);
   }
   else {
     // schedule decommit
@@ -518,7 +518,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
 }
 
 // returns true if anything was purged
 static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
 {
   if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@@ -527,10 +527,10 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
 
   // reset expire (if not already set concurrently)
   mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
 
   // potential purges scheduled, walk through the bitmap
   bool any_purged = false;
   bool full_purge = true;
   for (size_t i = 0; i < arena->field_count; i++) {
     size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
     if (purge != 0) {
@@ -581,7 +581,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
 
   // allow only one thread to purge at a time
   static mi_atomic_guard_t purge_guard;
   mi_atomic_guard(&purge_guard)
   {
     mi_msecs_t now = _mi_clock_now();
     size_t max_purge_count = (visit_all ? max_arena : 1);
@@ -594,7 +594,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
         }
       }
     }
   }
 }
 
 
@@ -608,7 +608,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
   if (p==NULL) return;
   if (size==0) return;
   const bool all_committed = (committed_size == size);
 
   if (mi_memkind_is_os(memid.memkind)) {
     // was a direct OS allocation, pass through
     if (!all_committed && committed_size > 0) {
@@ -626,7 +626,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
     mi_assert_internal(arena != NULL);
     const size_t blocks = mi_block_count_of_size(size);
 
     // checks
     if (arena == NULL) {
       _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
@@ -648,7 +648,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
     else {
       mi_assert_internal(arena->blocks_committed != NULL);
       mi_assert_internal(arena->blocks_purge != NULL);
 
       if (!all_committed) {
         // mark the entire range as no longer committed (so we recommit the full range when re-using)
         _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
@@ -663,9 +663,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
         // works (as we should never reset decommitted parts).
       }
       // (delay) purge the entire range
       mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
     }
 
     // and make it available to others again
     bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
     if (!all_inuse) {
@@ -690,9 +690,9 @@ static void mi_arenas_unsafe_destroy(void) {
   for (size_t i = 0; i < max_arena; i++) {
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
     if (arena != NULL) {
       if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
         mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
         _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
       }
       else {
         new_max_arena = i;
@@ -715,7 +715,7 @@ void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
 void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
   mi_arenas_unsafe_destroy();
   _mi_arena_collect(true /* force purge */, stats);  // purge non-owned arenas
 }
 
 // Is a pointer inside any of our arenas?
@@ -723,8 +723,8 @@ bool _mi_arena_contains(const void* p) {
   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
   for (size_t i = 0; i < max_arena; i++) {
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
     if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
       return true;
     }
   }
   return false;
@@ -768,7 +768,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
   mi_memid_t meta_memid;
   mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
   if (arena == NULL) return false;
 
   // already zero'd due to os_alloc
   // _mi_memzero(arena, asize);
   arena->id = _mi_arena_id_none();
@@ -785,12 +785,12 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
   arena->search_idx = 0;
   arena->blocks_dirty = &arena->blocks_inuse[fields];  // just after inuse bitmap
   arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]);  // just after dirty bitmap
   arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]);  // just after committed bitmap
   // initialize committed bitmap?
   if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
     memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t));  // cast to void* to avoid atomic warning
   }
 
   // and claim leftover blocks if needed (so we never allocate there)
   ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
   mi_assert_internal(post >= 0);
@@ -939,4 +939,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
   if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
   return err;
 }
-
src/os.c (52 changed lines)
@@ -29,7 +29,7 @@ bool _mi_os_has_overcommit(void) {
   return mi_os_mem_config.has_overcommit;
 }
 
 bool _mi_os_has_virtual_reserve(void) {
   return mi_os_mem_config.has_virtual_reserve;
 }
 
@@ -73,14 +73,6 @@ void _mi_os_init(void) {
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
 
-static void* mi_align_up_ptr(void* p, size_t alignment) {
-  return (void*)_mi_align_up((uintptr_t)p, alignment);
-}
-
-static void* mi_align_down_ptr(void* p, size_t alignment) {
-  return (void*)_mi_align_down((uintptr_t)p, alignment);
-}
-
 /* -----------------------------------------------------------
   aligned hinting
@@ -173,7 +165,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
     }
   }
   else {
     // nothing to do
     mi_assert(memid.memkind < MI_MEM_OS);
   }
 }
@@ -197,7 +189,7 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
   if (try_alignment == 0) { try_alignment = 1; }  // avoid 0 to ensure there will be no divide by zero when aligning
 
   *is_zero = false;
   void* p = NULL;
   int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
   if (err != 0) {
     _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
@@ -205,14 +197,14 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
   mi_stat_counter_increase(stats->mmap_calls, 1);
   if (p != NULL) {
     _mi_stat_increase(&stats->reserved, size);
     if (commit) {
       _mi_stat_increase(&stats->committed, size);
       // seems needed for asan (or `mimalloc-test-api` fails)
       #ifdef MI_TRACK_ASAN
       if (*is_zero) { mi_track_mem_defined(p,size); }
       else { mi_track_mem_undefined(p,size); }
       #endif
     }
   }
   return p;
 }
@@ -249,7 +241,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     // over-allocate uncommitted (virtual) memory
     p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
     if (p == NULL) return NULL;
 
     // set p to the aligned part in the full region
     // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
     // this is handled though by having the `base` field in the memid's
@@ -265,7 +257,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     // overallocate...
     p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
     if (p == NULL) return NULL;
 
     // and selectively unmap parts around the over-allocated area. (noop on sbrk)
     void* aligned_p = mi_align_up_ptr(p, alignment);
     size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
@@ -276,7 +268,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
     // we can return the aligned pointer on `mmap` (and sbrk) systems
     p = aligned_p;
     *base = aligned_p;  // since we freed the pre part, `*base == p`.
   }
 }
 
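The fallback path of `mi_os_prim_alloc_aligned` above gets an aligned region by over-allocating, rounding the returned pointer up to the requested alignment, and (on `mmap`-style systems) unmapping the unused head and tail. A simplified sketch of the pointer arithmetic involved (helper names are illustrative; error handling and the Windows case, where the original base pointer must be preserved, are omitted):

```c
#include <stdint.h>
#include <stddef.h>

static uintptr_t align_up(uintptr_t v, size_t alignment) {
  return (v + alignment - 1) & ~(uintptr_t)(alignment - 1);  // alignment must be a power of two
}

// Given an over-allocated block [base, base+over_size), find the aligned sub-range of `size`
// bytes and report how much can be trimmed before and after it.
static void* aligned_part(void* base, size_t over_size, size_t size, size_t alignment,
                          size_t* pre_size, size_t* post_size) {
  uint8_t* aligned_p = (uint8_t*)align_up((uintptr_t)base, alignment);
  *pre_size  = (size_t)(aligned_p - (uint8_t*)base);  // head that can be unmapped
  *post_size = over_size - *pre_size - size;          // tail that can be unmapped
  return aligned_p;
}
```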
@@ -300,7 +292,7 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* tld_stats) {
   void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
   if (p != NULL) {
     *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
   }
   return p;
 }
 
@@ -312,7 +304,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());
 
   bool os_is_large = false;
   bool os_is_zero = false;
   void* os_base = NULL;
@@ -390,7 +382,7 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
 
 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
   MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   if (is_zero != NULL) { *is_zero = false; }
   _mi_stat_increase(&stats->committed, size);  // use size for precise commit vs. decommit
   _mi_stat_counter_increase(&stats->commit_calls, 1);
@@ -400,21 +392,21 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
   void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
   if (csize == 0) return true;
 
   // commit
   bool os_is_zero = false;
   int err = _mi_prim_commit(start, csize, &os_is_zero);
   if (err != 0) {
     _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
     return false;
   }
   if (os_is_zero && is_zero != NULL) {
     *is_zero = true;
     mi_assert_expensive(mi_mem_is_zero(start, csize));
   }
   // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
   #ifdef MI_TRACK_ASAN
   if (os_is_zero) { mi_track_mem_defined(start,csize); }
   else { mi_track_mem_undefined(start,csize); }
   #endif
   return true;
 }
@@ -428,11 +420,11 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
   // page align
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
   if (csize == 0) return true;
 
   // decommit
   *needs_recommit = true;
   int err = _mi_prim_decommit(start,csize,needs_recommit);
   if (err != 0) {
     _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }
@@ -450,7 +442,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
 // but may be used later again. This will release physical memory
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
 bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
   // page align conservatively within the range
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
@@ -470,7 +462,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
 }
 
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
 {
@@ -483,7 +475,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   {
     bool needs_recommit = true;
     mi_os_decommit_ex(p, size, &needs_recommit, stats);
     return needs_recommit;
   }
   else {
     if (allow_reset) {  // this can sometimes be not allowed if the range is not fully committed
@@ -493,7 +485,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   }
 }
 
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
   return _mi_os_purge_ex(p, size, true, stats);
@@ -14,9 +14,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #elif defined(__APPLE__)
 #include "osx/prim.c"   // macOSX (actually defers to mmap in unix/prim.c)
 
-#elif defined(__wasi__)
+#elif defined(__wasm__)
 #define MI_USE_SBRK
-#include "wasi/prim.c"  // memory-grow or sbrk (Wasm)
+#include "wasm/prim.c"  // memory-grow or sbrk (Wasm)
 
 #else
 #include "unix/prim.c"  // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)
@@ -3,7 +3,7 @@
 This is the portability layer where all primitives needed from the OS are defined.
 
 - `include/mimalloc/prim.h`: primitive portability API definition.
-- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform
+- `prim.c`: Selects one of `unix/prim.c`, `wasm/prim.c`, or `windows/prim.c` depending on the host platform
   (and on macOS, `osx/prim.c` defers to `unix/prim.c`).
 
 Note: still work in progress, there may still be places in the sources that still depend on OS ifdef's.
@@ -57,7 +57,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 //------------------------------------------------------------------------------------
 // Use syscalls for some primitives to allow for libraries that override open/read/close etc.
 // and do allocation themselves; using syscalls prevents recursion when mimalloc is
 // still initializing (issue #713)
 //------------------------------------------------------------------------------------
 
@@ -120,7 +120,7 @@ static bool unix_detect_overcommit(void) {
     os_overcommit = (val != 0);
   }
 #else
   // default: overcommit is true
 #endif
   return os_overcommit;
 }
@@ -168,12 +168,12 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
     size_t n = mi_bsr(try_alignment);
     if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) {  // alignment is a power of 2 and 4096 <= alignment <= 1GiB
       p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
       if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
         int err = errno;
         _mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
       }
       if (p!=MAP_FAILED) return p;
       // fall back to regular mmap
     }
   }
   #elif defined(MAP_ALIGN)  // Solaris
@@ -189,7 +189,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
     void* hint = _mi_os_get_aligned_hint(try_alignment, size);
     if (hint != NULL) {
       p = mmap(hint, size, protect_flags, flags, fd, 0);
       if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
         #if MI_TRACK_ENABLED  // asan sometimes does not instrument errno correctly?
         int err = 0;
         #else
@@ -198,7 +198,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
         _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
       }
       if (p!=MAP_FAILED) return p;
       // fall back to regular mmap
     }
   }
   #endif
@@ -327,9 +327,9 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
   mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
   mi_assert_internal(commit || !allow_large);
   mi_assert_internal(try_alignment > 0);
 
   *is_zero = true;
   int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
   *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
   return (*addr != NULL ? 0 : errno);
 }
@@ -357,19 +357,19 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
   // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but
   // we sometimes call commit on a range with still partially committed
   // memory and `mprotect` does not zero the range.
   *is_zero = false;
   int err = mprotect(start, size, (PROT_READ | PROT_WRITE));
   if (err != 0) {
     err = errno;
     unix_mprotect_hint(err);
   }
   return err;
 }
 
 int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
   int err = 0;
   // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
   err = unix_madvise(start, size, MADV_DONTNEED);
   #if !MI_DEBUG && !MI_SECURE
   *needs_recommit = false;
   #else
@@ -381,15 +381,15 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
   *needs_recommit = true;
   const int fd = unix_mmap_fd();
   void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
   if (p != start) { err = errno; }
   */
   return err;
 }
 
 int _mi_prim_reset(void* start, size_t size) {
   // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it
   // will not reduce the `rss` stats in tools like `top` even though the memory is available
   // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
   // default `MADV_DONTNEED` is used though.
   #if defined(MADV_FREE)
   static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
@@ -409,7 +409,7 @@ int _mi_prim_reset(void* start, size_t size) {
 
 int _mi_prim_protect(void* start, size_t size, bool protect) {
   int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
   if (err != 0) { err = errno; }
   unix_mprotect_hint(err);
   return err;
 }
@@ -450,7 +450,7 @@ int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bo
     if (err != 0) {
       err = errno;
       _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
     }
   }
   return (*addr != NULL ? 0 : errno);
 }
@@ -565,9 +565,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
 // low resolution timer
 mi_msecs_t _mi_prim_clock_now(void) {
   #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
   return (mi_msecs_t)clock();
   #elif (CLOCKS_PER_SEC < 1000)
   return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
   #else
   return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
   #endif
@@ -607,7 +607,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
   pinfo->stime = timeval_secs(&rusage.ru_stime);
 #if !defined(__HAIKU__)
   pinfo->page_faults = rusage.ru_majflt;
 #endif
 #if defined(__HAIKU__)
   // Haiku does not have (yet?) a way to
   // get these stats per process
@@ -642,7 +642,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
 
 #else
 
-#ifndef __wasi__
+#ifndef __wasm__
 // WebAssembly instances are not processes
 #pragma message("define a way to get process info")
 #endif
@@ -748,7 +748,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
 
 #elif defined(__ANDROID__) || defined(__DragonFly__) || \
       defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
      defined(__sun)
 
 #include <stdlib.h>
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {
@@ -840,7 +840,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
   }
 }
 
 #else
 
 void _mi_prim_thread_init_auto_done(void) {
   // nothing
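`_mi_prim_reset` above prefers `MADV_FREE` (fast, but `rss` in tools like `top` does not drop) and otherwise uses `MADV_DONTNEED`; the real code caches the chosen advice in an atomic after the first failure. A hedged, simplified illustration of that fallback pattern (no atomics, reduced error handling):

```c
#include <sys/mman.h>
#include <errno.h>
#include <stddef.h>

// Advise the kernel that [start, start+size) can be reclaimed: prefer MADV_FREE,
// fall back to MADV_DONTNEED where MADV_FREE is not supported.
static int reset_range(void* start, size_t size) {
#if defined(MADV_FREE)
  if (madvise(start, size, MADV_FREE) == 0) return 0;
  if (errno != EINVAL) return errno;   // a real failure, not just unsupported advice
#endif
  return (madvise(start, size, MADV_DONTNEED) == 0 ? 0 : errno);
}
```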
@@ -7,6 +7,10 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // This file is included in `src/prim/prim.c`
 
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
 #include "mimalloc.h"
 #include "mimalloc/internal.h"
 #include "mimalloc/atomic.h"
@@ -19,7 +23,7 @@ terms of the MIT license. A copy of the license can be found in the file
 void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
   config->page_size = 64*MI_KiB;  // WebAssembly has a fixed page size: 64KiB
   config->alloc_granularity = 16;
   config->has_overcommit = false;
   config->must_free_whole = true;
   config->has_virtual_reserve = false;
 }
@@ -30,32 +34,23 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
 
 int _mi_prim_free(void* addr, size_t size ) {
   MI_UNUSED(addr); MI_UNUSED(size);
-  // wasi heap cannot be shrunk
+  // wasm linear memory cannot be shrunk
   return 0;
 }
 
 //---------------------------------------------
-// Allocation: sbrk or memory_grow
+// Allocation: sbrk
 //---------------------------------------------
 
-#if defined(MI_USE_SBRK)
-  static void* mi_memory_grow( size_t size ) {
-    void* p = sbrk(size);
-    if (p == (void*)(-1)) return NULL;
-    #if !defined(__wasi__)  // on wasi this is always zero initialized already (?)
-    memset(p,0,size);
-    #endif
-    return p;
-  }
-#elif defined(__wasi__)
-  static void* mi_memory_grow( size_t size ) {
-    size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size()))
-                            : __builtin_wasm_memory_size(0));
-    if (base == SIZE_MAX) return NULL;
-    return (void*)(base * _mi_os_page_size());
-  }
+static void* mi_memory_grow( size_t size ) {
+  void* p = sbrk(size);
+  if (p == (void*)(-1)) return NULL;
+  #if !defined(__wasm__)  // on wasm this is always zero initialized already.
+  memset(p,0,size);
+  #endif
+  return p;
+}
 
 #if defined(MI_USE_PTHREADS)
 static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -96,7 +91,7 @@ static void* mi_prim_mem_grow(size_t size, size_t try_alignment) {
     if (base != NULL) {
       p = mi_align_up_ptr(base, try_alignment);
       if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) {
-        // another thread used wasm_memory_grow/sbrk in-between and we do not have enough
+        // another thread used sbrk in-between and we do not have enough
         // space after alignment. Give up (and waste the space as we cannot shrink :-( )
         // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align)
         p = NULL;
@@ -105,7 +100,7 @@ static void* mi_prim_mem_grow(size_t size, size_t try_alignment) {
   }
   /*
   if (p == NULL) {
-    _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment);
+    _mi_warning_message("unable to allocate sbrk OS memory (%zu bytes, %zu alignment)\n", size, try_alignment);
     errno = ENOMEM;
     return NULL;
   }
@@ -114,7 +109,7 @@ static void* mi_prim_mem_grow(size_t size, size_t try_alignment) {
   return p;
 }
 
-// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+// Note: the `try_alignment` argument is respected by over-allocating.
 int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
   MI_UNUSED(allow_large); MI_UNUSED(commit);
   *is_large = false;
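As the comments above note, `sbrk` gives no alignment control, so the wasm path honors `try_alignment` by growing a little extra and rounding the returned break upward; if the aligned pointer would overrun the grown region the allocation is given up (the wasted space cannot be shrunk). A hedged sketch of that idea (names are illustrative; the real `mi_prim_mem_grow` also page-aligns the request and serializes growth with a pthread mutex when available):

```c
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>   // sbrk

// assumes try_alignment is a power of two >= 1
static void* grow_aligned(size_t size, size_t try_alignment) {
  size_t alloc_size = size + try_alignment;            // over-allocate to leave room for rounding up
  uint8_t* base = (uint8_t*)sbrk((intptr_t)alloc_size);
  if (base == (uint8_t*)(-1)) return NULL;
  uintptr_t aligned = ((uintptr_t)base + try_alignment - 1) & ~(uintptr_t)(try_alignment - 1);
  if ((uint8_t*)aligned + size > base + alloc_size) {
    return NULL;  // defensive: not enough room left after aligning (e.g. a concurrent sbrk)
  }
  return (void*)aligned;
}
```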
@@ -129,7 +124,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
 //---------------------------------------------
 
 int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
   MI_UNUSED(addr); MI_UNUSED(size);
   *is_zero = false;
   return 0;
 }
@@ -194,9 +189,9 @@ mi_msecs_t _mi_prim_clock_now(void) {
 // low resolution timer
 mi_msecs_t _mi_prim_clock_now(void) {
   #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
   return (mi_msecs_t)clock();
   #elif (CLOCKS_PER_SEC < 1000)
   return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
   #else
   return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
   #endif
@@ -254,7 +249,8 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
 //----------------------------------------------------------------
 
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {
-  return false;
+  int r = getentropy(buf, buf_len);
+  return r == 0;
 }
 
@@ -160,7 +160,7 @@ If we cannot get good randomness, we fall back to weak randomness based on a tim
 
 uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
   uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed;  // ASLR makes the address random
   x ^= _mi_prim_clock_now();
   // and do a few randomization steps
   uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
   for (uintptr_t i = 0; i < max; i++) {
@@ -175,7 +175,7 @@ static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
   if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) {
     // if we fail to get random data from the OS, we fall back to a
     // weak random source based on the current time
-    #if !defined(__wasi__)
+    #if !defined(__wasm__)
     if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); }
     #endif
     uintptr_t x = _mi_os_random_weak(0);