merge from dev

Daan 2024-03-02 17:26:38 -08:00
commit 7ff4607f6c
16 changed files with 444 additions and 124 deletions

.gitattributes

@@ -10,3 +10,4 @@
*.dll binary
*.lib binary
*.exe binary
+bin export-ignore


@@ -28,6 +28,7 @@ option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clan
option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" OFF)
option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." OFF)
option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version" OFF)
+option(MI_NO_THP "Disable transparent huge pages support on Linux/Android for the mimalloc process only" OFF)
# deprecated options
option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
@@ -128,7 +129,7 @@ endif()
if(MI_SECURE)
  message(STATUS "Set full secure build (MI_SECURE=ON)")
  list(APPEND mi_defines MI_SECURE=4)
endif()
if(MI_TRACK_VALGRIND)
@@ -247,7 +248,7 @@ if(MI_DEBUG_UBSAN)
    message(WARNING "Can only use undefined-behavior sanitizer with clang++ (MI_DEBUG_UBSAN=ON but ignored)")
  endif()
else()
-  message(WARNING "Can only use thread sanitizer with a debug build (CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE})")
+  message(WARNING "Can only use undefined-behavior sanitizer with a debug build (CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE})")
endif()
endif()
@@ -263,10 +264,18 @@ if(MI_USE_CXX)
  endif()
endif()
-if(CMAKE_SYSTEM_NAME MATCHES "Haiku")
-  SET(CMAKE_INSTALL_LIBDIR ~/config/non-packaged/lib)
-  SET(CMAKE_INSTALL_INCLUDEDIR ~/config/non-packaged/headers)
-endif()
+if(CMAKE_SYSTEM_NAME MATCHES "Linux|Android")
+  if(MI_NO_THP)
+    message(STATUS "Disable transparent huge pages support (MI_NO_THP=ON)")
+    list(APPEND mi_defines MI_NO_THP=1)
+  endif()
+endif()
+# On Haiku use `-DCMAKE_INSTALL_PREFIX` instead, issue #788
+# if(CMAKE_SYSTEM_NAME MATCHES "Haiku")
+#  SET(CMAKE_INSTALL_LIBDIR ~/config/non-packaged/lib)
+#  SET(CMAKE_INSTALL_INCLUDEDIR ~/config/non-packaged/headers)
+# endif()
# Compiler flags
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
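(Usage note, not part of the commit; the build-directory path is an assumption.) The new MI_NO_THP option above would typically be enabled at configure time roughly as

  cmake ../.. -DMI_NO_THP=ON

which adds MI_NO_THP=1 to the compile definitions so the Linux/Android primitive code can disable transparent huge pages for the mimalloc process only.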
@@ -468,7 +477,7 @@ if (MI_BUILD_OBJECT)
  set(mimalloc-obj-static "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/mimalloc-obj.dir/src/static.c${CMAKE_C_OUTPUT_EXTENSION}")
  set(mimalloc-obj-out    "${CMAKE_CURRENT_BINARY_DIR}/${mi_basename}${CMAKE_C_OUTPUT_EXTENSION}")
  add_custom_command(OUTPUT ${mimalloc-obj-out} DEPENDS mimalloc-obj COMMAND "${CMAKE_COMMAND}" -E copy "${mimalloc-obj-static}" "${mimalloc-obj-out}")
  add_custom_target(mimalloc-obj-target ALL DEPENDS ${mimalloc-obj-out})
endif()
# the following seems to lead to cmake warnings/errors on some systems, disable for now :-(


@@ -30,14 +30,17 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_noinline        __declspec(noinline)
#define mi_decl_thread          __declspec(thread)
#define mi_decl_cache_align     __declspec(align(MI_CACHE_LINE))
+#define mi_decl_weak
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
#define mi_decl_noinline        __attribute__((noinline))
#define mi_decl_thread          __thread
#define mi_decl_cache_align     __attribute__((aligned(MI_CACHE_LINE)))
+#define mi_decl_weak            __attribute__((weak))
#else
#define mi_decl_noinline
#define mi_decl_thread          __thread  // hope for the best :-)
#define mi_decl_cache_align
+#define mi_decl_weak
#endif
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
@@ -311,6 +314,17 @@ static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
  }
}
+// Align a pointer upwards
+static inline void* mi_align_up_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_up((uintptr_t)p, alignment);
+}
+
+// Align a pointer downwards
+static inline void* mi_align_down_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_down((uintptr_t)p, alignment);
+}
+
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
  mi_assert_internal(divider != 0);
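A small illustration of the new pointer-alignment helpers (internal helpers from the header above; hypothetical usage, not part of the commit), assuming a 64-byte alignment:

// Hypothetical usage of the helpers added above (illustration only).
static void align_example(void* p) {
  void* lo = mi_align_down_ptr(p, 64);   // lo <= p and lo is 64-byte aligned
  void* hi = mi_align_up_ptr(p, 64);     // hi >= p and hi is 64-byte aligned
  // e.g. for p == (void*)0x1234: lo == (void*)0x1200 and hi == (void*)0x1240
  (void)lo; (void)hi;
}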


@@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config );
// Free OS memory
int _mi_prim_free(void* addr, size_t size );
// Allocate OS memory. Return NULL on error.
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// which will later be committed explicitly using `_mi_prim_commit`.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: !commit => !allow_large
@@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void);
typedef struct mi_process_info_s {
  mi_msecs_t  elapsed;
  mi_msecs_t  utime;
  mi_msecs_t  stime;
  size_t      current_rss;
  size_t      peak_rss;
  size_t      current_commit;
  size_t      peak_commit;
  size_t      page_faults;
} mi_process_info_t;
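This internal struct backs the public `mi_process_info()` API; a hedged usage sketch (argument order assumed from `mimalloc.h`):

#include <stdio.h>
#include <mimalloc.h>

// Sketch only: print a few process statistics via the public API that this
// internal struct feeds (mi_process_info signature assumed from mimalloc.h).
int main(void) {
  size_t elapsed, user, sys, current_rss, peak_rss, current_commit, peak_commit, faults;
  mi_process_info(&elapsed, &user, &sys, &current_rss, &peak_rss, &current_commit, &peak_commit, &faults);
  printf("elapsed: %zu ms, peak rss: %zu bytes, page faults: %zu\n", elapsed, peak_rss, faults);
  return 0;
}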
@@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
//-------------------------------------------------------------------
// Thread id: `_mi_prim_thread_id()`
//
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms as
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
@@ -125,33 +125,23 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
// for each thread (unequal to zero).
//-------------------------------------------------------------------
-// defined in `init.c`; do not use these directly
-extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from
-extern bool _mi_process_is_initialized;             // has mi_process_init been called?
-static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
-#if defined(_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
-  // Windows: works on Intel and ARM in both 32- and 64-bit
-  return (uintptr_t)NtCurrentTeb();
-}
-// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
-// both the OS and libc implementation so we use specific tests for each main platform.
+// On some libc + platform combinations we can directly access a thread-local storage (TLS) slot.
+// The TLS layout depends on both the OS and libc implementation so we use specific tests for each main platform.
// If you test on another platform and it works please send a PR :-)
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
-#elif defined(__GNUC__) && ( \
+//
+// Note: on most platforms this is not actually used anymore as we prefer `__builtin_thread_pointer()` nowadays.
+// However, we do still use it with older clang compilers and Apple OS (as we use TLS slot for the default heap there).
+#if defined(__GNUC__) && ( \
       (defined(__GLIBC__)   && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
-   || (defined(__APPLE__)   && (defined(__x86_64__) || defined(__aarch64__))) \
+   || (defined(__APPLE__)   && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \
   || (defined(__BIONIC__)  && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
   || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
   || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
  )
+#define MI_HAS_TLS_SLOT
static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
  void* res;
  const size_t ofs = (slot*sizeof(void*));
@@ -175,6 +165,9 @@ static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
  #endif
  res = tcb[slot];
+#elif defined(__APPLE__) && defined(__POWERPC__)   // ppc, issue #781
+  MI_UNUSED(ofs);
+  res = pthread_getspecific(slot);
#endif
  return res;
}
@@ -202,9 +195,40 @@ static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexce
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
  #endif
  tcb[slot] = value;
+#elif defined(__APPLE__) && defined(__POWERPC__)   // ppc, issue #781
+  MI_UNUSED(ofs);
+  pthread_setspecific(slot, value);
#endif
}
+#endif
+
+// defined in `init.c`; do not use these directly
+extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from
+extern bool _mi_process_is_initialized;             // has mi_process_init been called?
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
+
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+  // Windows: works on Intel and ARM in both 32- and 64-bit
+  return (uintptr_t)NtCurrentTeb();
+}
+
+#elif defined(__has_builtin) && __has_builtin(__builtin_thread_pointer) && \
+      (!defined(__APPLE__)) && /* on apple (M1) the wrong register is read (tpidr_el0 instead of tpidrro_el0) so fall back to TLS slot assembly (<https://github.com/microsoft/mimalloc/issues/343#issuecomment-763272369>) */ \
+      (!defined(__clang_major__) || __clang_major__ >= 14)  // older clang versions emit bad code; fall back to using the TLS slot (<https://lore.kernel.org/linux-arm-kernel/202110280952.352F66D8@keescook/T/>)
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+  // Works on most Unix based platforms
+  return (uintptr_t)__builtin_thread_pointer();
+}
+
+#elif defined(MI_HAS_TLS_SLOT)
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  #if defined(__BIONIC__)
  // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
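For illustration, a hedged stand-alone sketch of the `__builtin_thread_pointer()` approach (only on compilers and targets that provide the builtin; hypothetical program, not part of the commit): each thread sees a distinct, non-zero TLS base address, which is exactly what `_mi_prim_thread_id()` relies on.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

// Prints a per-thread-unique pointer; assumes the compiler provides
// __builtin_thread_pointer() for the current target.
static void* worker(void* arg) {
  (void)arg;
  printf("thread pointer: %p\n", __builtin_thread_pointer());
  return NULL;
}

int main(void) {
  pthread_t t1, t2;
  pthread_create(&t1, NULL, worker, NULL);
  pthread_create(&t2, NULL, worker, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  return 0;
}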
@@ -251,7 +275,6 @@ static inline mi_heap_t* mi_prim_get_default_heap(void);
#if defined(MI_MALLOC_OVERRIDE)
#if defined(__APPLE__) // macOS
  #define MI_TLS_SLOT  89  // seems unused?
-  // #define MI_TLS_RECURSE_GUARD 1
  // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
  // see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
#elif defined(__OpenBSD__)
@@ -269,6 +292,9 @@ static inline mi_heap_t* mi_prim_get_default_heap(void);
#if defined(MI_TLS_SLOT)
+# if !defined(MI_HAS_TLS_SLOT)
+#  error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined"
+# endif
static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);


@@ -259,10 +259,11 @@ extern "C" {
  // no forwarding here due to aliasing/name mangling issues
  void  cfree(void* p)                    { mi_free(p); }
  void* pvalloc(size_t size)              { return mi_pvalloc(size); }
-  void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); }
-  int   reallocarr(void* p, size_t count, size_t size)   { return mi_reallocarr(p, count, size); }
  void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
  void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
+  void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); }
+  // some systems define reallocarr so mark it as a weak symbol (#751)
+  mi_decl_weak int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); }
#if defined(__wasi__)
  // forward __libc interface (see PR #667)
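A minimal stand-alone sketch of why `mi_decl_weak` (i.e. `__attribute__((weak))`) helps here (hypothetical names, not mimalloc code): a strong definition of the same symbol elsewhere, e.g. in libc, wins at link time instead of causing a duplicate-symbol error.

#include <stdio.h>

// Weak definition: only used if no strong definition of this symbol is linked in.
__attribute__((weak)) int my_overridable(void) { return 1; }

int main(void) {
  // Prints 1 here; if another object file supplied a strong my_overridable(),
  // the linker would silently pick that one and drop this weak body.
  printf("%d\n", my_overridable());
  return 0;
}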


@@ -913,9 +913,13 @@ static bool mi_try_new_handler(bool nothrow) {
  #endif
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
+    #if defined(_CPPUNWIND) || defined(__cpp_exceptions)  // exceptions are not always enabled
    if (!nothrow) {
      throw std::bad_alloc();
    }
+    #else
+    MI_UNUSED(nothrow);
+    #endif
    return false;
  }
  else {


@@ -13,7 +13,7 @@ threads and need to be accessed using atomic operations.
Arenas are used for huge OS page (1GiB) reservations or for reserving
OS memory upfront which can improve performance or is sometimes needed
on embedded devices. We can also employ this with WASI or `sbrk` systems
to reserve large arenas upfront and be able to reuse the memory more effectively.
The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
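To make the arena mechanism concrete, a hedged usage sketch (the `mi_reserve_os_memory_ex` signature is taken from its use further down in this file; error handling simplified, not part of the commit):

#include <stdio.h>
#include <mimalloc.h>

// Sketch: reserve a 256 MiB arena up front so later allocations can be served
// from it instead of hitting the OS each time.
int main(void) {
  mi_arena_id_t arena_id;
  if (mi_reserve_os_memory_ex(256 * 1024 * 1024, false /* commit */, true /* allow large pages */,
                              false /* not exclusive */, &arena_id) != 0) {
    fprintf(stderr, "arena reservation failed\n");
    return 1;
  }
  void* p = mi_malloc(1024);   // may now be served from the reserved arena
  mi_free(p);
  return 0;
}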
@@ -48,13 +48,13 @@ typedef struct mi_arena_s {
  size_t meta_size;                     // size of the arena structure itself (including its bitmaps)
  mi_memid_t meta_memid;                // memid of the arena structure itself (OS or static allocation)
  int numa_node;                        // associated NUMA node
  bool exclusive;                       // only allow allocations if specifically for this arena
  bool is_large;                        // memory area consists of large- or huge OS pages (always committed)
  _Atomic(size_t) search_idx;           // optimization to start the search for free blocks
  _Atomic(mi_msecs_t) purge_expire;     // expiration time when blocks should be decommitted from `blocks_decommit`.
  mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
  mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
  mi_bitmap_field_t* blocks_purge;      // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
  mi_bitmap_field_t  blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
} mi_arena_t;
@@ -94,7 +94,7 @@ bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_i
    return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
  }
  else {
-    return mi_arena_id_is_suitable(0, false, request_arena_id);
+    return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id);
  }
}
@@ -103,7 +103,7 @@ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
}

/* -----------------------------------------------------------
  Arena allocations get a (currently) 16-bit memory id where the
  lower 8 bits are the arena id, and the upper bits the block index.
----------------------------------------------------------- */
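Purely as an illustration of that encoding (hypothetical helper names, not mimalloc API): arena id 3 with block index 42 packs into 0x2A03.

#include <stdint.h>
#include <assert.h>

// Sketch of the "lower 8 bits arena id, upper bits block index" scheme.
static uint16_t memid_pack(uint8_t arena_id, uint16_t block_index) {
  assert(block_index < 256);            // only 8 bits left for the block index in this sketch
  return (uint16_t)(arena_id | (block_index << 8));
}
static uint8_t  memid_arena(uint16_t memid) { return (uint8_t)(memid & 0xFF); }
static uint16_t memid_block(uint16_t memid) { return (uint16_t)(memid >> 8); }

int main(void) {
  uint16_t id = memid_pack(3, 42);      // 42*256 + 3 == 0x2A03
  assert(memid_arena(id) == 3 && memid_block(id) == 42);
  return 0;
}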
@@ -165,6 +165,7 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m
  // success
  *memid = _mi_memid_create(MI_MEM_STATIC);
+  memid->initially_zero = true;
  const size_t start = _mi_align_up(oldtop, alignment);
  uint8_t* const p = &mi_arena_static[start];
  _mi_memzero(p, size);
@@ -182,8 +183,10 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* st
  p = _mi_os_alloc(size, memid, stats);
  if (p == NULL) return NULL;
+  // zero the OS memory if needed
  if (!memid->initially_zero) {
    _mi_memzero_aligned(p, size);
+    memid->initially_zero = true;
  }
  return p;
}
@@ -211,7 +214,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
{
  size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx);  // start from last search; ok to be relaxed as the exact start does not matter
  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx));  // start search from found location next time around
    return true;
  };
  return false;
@@ -231,7 +234,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
  mi_bitmap_index_t bitmap_index;
  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;

  // claimed it!
  void* p = mi_arena_block_start(arena, bitmap_index);
  *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
  memid->is_pinned = arena->memid.is_pinned;
@@ -271,21 +274,21 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
    // no need to commit, but check if already fully committed
    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
  }
  return p;
}
// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                      bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
  MI_UNUSED_RELEASE(alignment);
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t bcount = mi_block_count_of_size(size);
  const size_t arena_index = mi_arena_id_index(arena_id);
  mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
  mi_assert_internal(size <= mi_arena_block_size(bcount));

  // Check arena suitability
  mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
  if (arena == NULL) return NULL;
@@ -305,7 +308,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
                                                 bool commit, bool allow_large,
                                                 mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
@@ -313,9 +316,9 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  if mi_likely(max_arena == 0) return NULL;

  if (req_arena_id != _mi_arena_id_none()) {
    // try a specific arena if requested
    if (mi_arena_id_index(req_arena_id) < max_arena) {
      void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;
@@ -323,7 +326,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
  }
  else {
    // try numa affine allocation
    for (size_t i = 0; i < max_arena; i++) {
      void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
      if (p != NULL) return p;
    }
@@ -351,22 +354,22 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
  if (arena_reserve == 0) return false;

  if (!_mi_os_has_virtual_reserve()) {
    arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for some embedded systems for example)
  }
  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
  if (arena_count >= 8 && arena_count <= 128) {
    arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve;  // scale up the arena sizes exponentially
  }
  if (arena_reserve < req_size) return false;  // should be able to at least handle the current allocation size

  // commit eagerly?
  bool arena_commit = false;
  if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
  else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }

  return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
}
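A short worked example of the exponential scaling above (illustration only): the reserve is multiplied by 2^(arena_count/8) once at least 8 arenas exist, so the 8th..15th arenas are 2x the configured reserve, the 16th..23rd are 4x, and so on.

#include <stdio.h>
#include <stddef.h>

// Prints the scale factor applied to mi_option_arena_reserve for a few arena counts.
int main(void) {
  for (size_t arena_count = 0; arena_count <= 32; arena_count += 8) {
    size_t factor = (arena_count >= 8 && arena_count <= 128) ? ((size_t)1 << (arena_count/8)) : 1;
    printf("arena_count %2zu -> reserve scaled by %zux\n", arena_count, factor);
  }
  return 0;
}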
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
@@ -381,9 +384,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
  // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
  if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
    void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
    if (p != NULL) return p;

    // otherwise, try to first eagerly reserve a new arena
    if (req_arena_id == _mi_arena_id_none()) {
      mi_arena_id_t arena_id = 0;
      if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
@@ -400,14 +403,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
    errno = ENOMEM;
    return NULL;
  }

  // finally, fall back to the OS
  if (align_offset > 0) {
    return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
  }
  else {
    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
  }
}
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
@@ -443,22 +446,22 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
  mi_assert_internal(arena->blocks_purge != NULL);
  mi_assert_internal(!arena->memid.is_pinned);
  const size_t size = mi_arena_block_size(blocks);
  void* const p = mi_arena_block_start(arena, bitmap_idx);
  bool needs_recommit;
  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
    // all blocks are committed, we can purge freely
    needs_recommit = _mi_os_purge(p, size, stats);
  }
  else {
    // some blocks are not committed -- this can happen when a partially committed block is freed
    // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
    // and also undo the decommit stats (as it was already adjusted)
    mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
    _mi_stat_increase(&stats->committed, size);
  }

  // clear the purged blocks
  _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
  // update committed bitmap
@@ -476,13 +479,13 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
  if (_mi_preloading() || delay == 0) {
    // decommit directly
    mi_arena_purge(arena, bitmap_idx, blocks, stats);
  }
  else {
    // schedule decommit
    mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
    if (expire != 0) {
-      mi_atomic_addi64_acq_rel(&arena->purge_expire, delay/10);                // add smallish extra delay
+      mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10));  // add smallish extra delay
    }
    else {
      mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
@@ -518,7 +521,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
}

// returns true if anything was purged
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
{
  if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
  mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@@ -526,11 +529,11 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
  if (!force && expire > now) return false;

  // reset expire (if not already set concurrently)
-  mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
+  mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);

  // potential purges scheduled, walk through the bitmap
  bool any_purged = false;
  bool full_purge = true;
  for (size_t i = 0; i < arena->field_count; i++) {
    size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
    if (purge != 0) {
@@ -581,7 +584,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
  // allow only one thread to purge at a time
  static mi_atomic_guard_t purge_guard;
  mi_atomic_guard(&purge_guard)
  {
    mi_msecs_t now = _mi_clock_now();
    size_t max_purge_count = (visit_all ? max_arena : 1);
@@ -594,7 +597,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
        }
      }
    }
  }
}
@@ -608,7 +611,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
  if (p==NULL) return;
  if (size==0) return;
  const bool all_committed = (committed_size == size);

  if (mi_memkind_is_os(memid.memkind)) {
    // was a direct OS allocation, pass through
    if (!all_committed && committed_size > 0) {
@@ -626,7 +629,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
    mi_assert_internal(arena != NULL);
    const size_t blocks = mi_block_count_of_size(size);

    // checks
    if (arena == NULL) {
      _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
@@ -648,7 +651,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
    else {
      mi_assert_internal(arena->blocks_committed != NULL);
      mi_assert_internal(arena->blocks_purge != NULL);

      if (!all_committed) {
        // mark the entire range as no longer committed (so we recommit the full range when re-using)
        _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
@@ -663,9 +666,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
        // works (as we should never reset decommitted parts).
      }

      // (delay) purge the entire range
      mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
    }

    // and make it available to others again
    bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
    if (!all_inuse) {
@@ -690,9 +693,9 @@ static void mi_arenas_unsafe_destroy(void) {
  for (size_t i = 0; i < max_arena; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
    if (arena != NULL) {
      if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
        mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
        _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
      }
      else {
        new_max_arena = i;
@@ -715,7 +718,7 @@ void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
  mi_arenas_unsafe_destroy();
  _mi_arena_collect(true /* force purge */, stats);  // purge non-owned arenas
}

// Is a pointer inside any of our arenas?
// Is a pointer inside any of our arenas? // Is a pointer inside any of our arenas?
@ -723,8 +726,8 @@ bool _mi_arena_contains(const void* p) {
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
for (size_t i = 0; i < max_arena; i++) { for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) { if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
return true; return true;
} }
} }
return false; return false;
@@ -768,7 +771,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
  mi_memid_t meta_memid;
  mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
  if (arena == NULL) return false;

  // already zero'd due to os_alloc
  // _mi_memzero(arena, asize);
  arena->id = _mi_arena_id_none();
@@ -785,12 +788,12 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
  arena->search_idx = 0;
  arena->blocks_dirty = &arena->blocks_inuse[fields];  // just after inuse bitmap
  arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]);  // just after dirty bitmap
  arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]);      // just after committed bitmap

  // initialize committed bitmap?
  if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
    memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
  }

  // and claim leftover blocks if needed (so we never allocate there)
  ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
  mi_assert_internal(post >= 0);


@@ -455,7 +455,7 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
  #if defined(MI_TLS_SLOT)
  mi_prim_tls_slot_set(MI_TLS_SLOT,heap);
  #elif defined(MI_TLS_PTHREAD_SLOT_OFS)
-  *mi_tls_pthread_heap_slot() = heap;
+  *mi_prim_tls_pthread_heap_slot() = heap;
  #elif defined(MI_TLS_PTHREAD)
  // we use _mi_heap_default_key
  #else


@@ -73,14 +73,6 @@ void _mi_os_init(void) {
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);

-static void* mi_align_up_ptr(void* p, size_t alignment) {
-  return (void*)_mi_align_up((uintptr_t)p, alignment);
-}
-
-static void* mi_align_down_ptr(void* p, size_t alignment) {
-  return (void*)_mi_align_down((uintptr_t)p, alignment);
-}
-
/* -----------------------------------------------------------
  aligned hinting

src/prim/emscripten/prim.c (new file)

@@ -0,0 +1,251 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen, Alon Zakai
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
// This file is included in `src/prim/prim.c`
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"
// Design
// ======
//
// mimalloc is built on top of emmalloc. emmalloc is a minimal allocator on top
// of sbrk. The reason for having three layers here is that we want mimalloc to
// be able to allocate and release system memory properly, the same way it would
// when using VirtualAlloc on Windows or mmap on POSIX, and sbrk is too limited.
// Specifically, sbrk can only go up and down, and not "skip" over regions, and
// so we end up either never freeing memory to the system, or we can get stuck
// with holes.
//
// At the moment wasm generally does *not* free memory back to the system: once grown, we do
// not shrink back down (https://github.com/WebAssembly/design/issues/1397).
// However, that is expected to improve
// (https://github.com/WebAssembly/memory-control/blob/main/proposals/memory-control/Overview.md)
// and so we do not want to bake those limitations in here.
//
// Even without that issue, we want our system allocator to handle holes, that
// is, it should merge freed regions and allow allocating new content there of
// the full size, etc., so that we do not waste space. That means that the
// system allocator really does need to handle the general problem of allocating
// and freeing variable-sized chunks of memory in a random order, like malloc/
// free do. And so it makes sense to layer mimalloc on top of such an
// implementation.
//
// emmalloc makes sense for the lower level because it is small and simple while
// still fully handling merging of holes etc. It is not the most efficient
// allocator, but our assumption is that mimalloc needs to be fast while the
// system allocator underneath it is called much less frequently.
//
//---------------------------------------------
// init
//---------------------------------------------
void _mi_prim_mem_init( mi_os_mem_config_t* config) {
config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB
config->alloc_granularity = 16;
config->has_overcommit = false;
config->must_free_whole = true;
config->has_virtual_reserve = false;
}
extern void emmalloc_free(void*);
int _mi_prim_free(void* addr, size_t size) {
MI_UNUSED(size);
emmalloc_free(addr);
return 0;
}
//---------------------------------------------
// Allocation
//---------------------------------------------
extern void* emmalloc_memalign(size_t, size_t);
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
MI_UNUSED(try_alignment); MI_UNUSED(allow_large); MI_UNUSED(commit);
*is_large = false;
// TODO: Track the highest address ever seen; first uses of it are zeroes.
// That assumes no one else uses sbrk but us (they could go up,
// scribble, and then down), but we could assert on that perhaps.
*is_zero = false;
// emmalloc has some limitations on alignment size.
// TODO: Why does mimalloc ask for an align of 4MB? that ends up allocating
// 8, which wastes quite a lot for us in wasm. If that is unavoidable,
// we may want to improve emmalloc to support such alignment. See also
// https://github.com/emscripten-core/emscripten/issues/20645
#define MIN_EMMALLOC_ALIGN 8
#define MAX_EMMALLOC_ALIGN (1024*1024)
if (try_alignment < MIN_EMMALLOC_ALIGN) {
try_alignment = MIN_EMMALLOC_ALIGN;
} else if (try_alignment > MAX_EMMALLOC_ALIGN) {
try_alignment = MAX_EMMALLOC_ALIGN;
}
void* p = emmalloc_memalign(try_alignment, size);
*addr = p;
if (p == 0) {
return ENOMEM;
}
return 0;
}
//---------------------------------------------
// Commit/Reset
//---------------------------------------------
int _mi_prim_commit(void* addr, size_t size, bool* is_zero) {
MI_UNUSED(addr); MI_UNUSED(size);
// See TODO above.
*is_zero = false;
return 0;
}
int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
MI_UNUSED(addr); MI_UNUSED(size);
*needs_recommit = false;
return 0;
}
int _mi_prim_reset(void* addr, size_t size) {
MI_UNUSED(addr); MI_UNUSED(size);
return 0;
}
int _mi_prim_protect(void* addr, size_t size, bool protect) {
MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect);
return 0;
}
//---------------------------------------------
// Huge pages and NUMA nodes
//---------------------------------------------
int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) {
MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node);
*is_zero = true;
*addr = NULL;
return ENOSYS;
}
size_t _mi_prim_numa_node(void) {
return 0;
}
size_t _mi_prim_numa_node_count(void) {
return 1;
}
//----------------------------------------------------------------
// Clock
//----------------------------------------------------------------
#include <emscripten/html5.h>
mi_msecs_t _mi_prim_clock_now(void) {
return emscripten_date_now();
}
//----------------------------------------------------------------
// Process info
//----------------------------------------------------------------
void _mi_prim_process_info(mi_process_info_t* pinfo)
{
// use defaults
MI_UNUSED(pinfo);
}
//----------------------------------------------------------------
// Output
//----------------------------------------------------------------
#include <emscripten/console.h>
void _mi_prim_out_stderr( const char* msg) {
emscripten_console_error(msg);
}
//----------------------------------------------------------------
// Environment
//----------------------------------------------------------------
bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
// For code size reasons, do not support environ customization for now.
MI_UNUSED(name);
MI_UNUSED(result);
MI_UNUSED(result_size);
return false;
}
//----------------------------------------------------------------
// Random
//----------------------------------------------------------------
bool _mi_prim_random_buf(void* buf, size_t buf_len) {
int err = getentropy(buf, buf_len);
return !err;
}
//----------------------------------------------------------------
// Thread init/done
//----------------------------------------------------------------
#ifdef __EMSCRIPTEN_SHARED_MEMORY__
// use pthread local storage keys to detect thread ending
// (and used with MI_TLS_PTHREADS for the default heap)
pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1);
static void mi_pthread_done(void* value) {
if (value!=NULL) {
_mi_thread_done((mi_heap_t*)value);
}
}
void _mi_prim_thread_init_auto_done(void) {
mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1));
pthread_key_create(&_mi_heap_default_key, &mi_pthread_done);
}
void _mi_prim_thread_done_auto_done(void) {
// nothing to do
}
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
if (_mi_heap_default_key != (pthread_key_t)(-1)) {  // can happen during recursive invocation on FreeBSD
pthread_setspecific(_mi_heap_default_key, heap);
}
}
#else
void _mi_prim_thread_init_auto_done(void) {
// nothing
}
void _mi_prim_thread_done_auto_done(void) {
// nothing
}
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
MI_UNUSED(heap);
}
#endif


@@ -18,6 +18,9 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_USE_SBRK
#include "wasi/prim.c"  // memory-grow or sbrk (Wasm)
+#elif defined(__EMSCRIPTEN__)
+#include "emscripten/prim.c"  // emmalloc_*, + pthread support
#else
#include "unix/prim.c"  // mmap() (Linux, macOS, BSD, Illumos, Haiku, DragonFly, etc.)


@@ -27,16 +27,20 @@ terms of the MIT license. A copy of the license can be found in the file
#include <sys/mman.h>  // mmap
#include <unistd.h>    // sysconf
+#include <fcntl.h>     // open, close, read, access

#if defined(__linux__)
  #include <features.h>
-  #include <fcntl.h>
+  #if defined(MI_NO_THP)
+  #include <sys/prctl.h>
+  #endif
  #if defined(__GLIBC__)
  #include <linux/mman.h>  // linux mmap flags
  #else
  #include <sys/mman.h>
  #endif
#elif defined(__APPLE__)
+  #include <AvailabilityMacros.h>
  #include <TargetConditionals.h>
  #if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR
  #include <mach/vm_statistics.h>
@@ -50,17 +54,19 @@ terms of the MIT license. A copy of the license can be found in the file
  #include <sys/sysctl.h>
#endif

-#if !defined(__HAIKU__) && !defined(__APPLE__) && !defined(__CYGWIN__)
+#if !defined(__HAIKU__) && !defined(__APPLE__) && !defined(__CYGWIN__) && !defined(__OpenBSD__) && !defined(__sun)
  #define MI_HAS_SYSCALL_H
  #include <sys/syscall.h>
#endif

//------------------------------------------------------------------------------------
// Use syscalls for some primitives to allow for libraries that override open/read/close etc.
// and do allocation themselves; using syscalls prevents recursion when mimalloc is
// still initializing (issue #713)
//------------------------------------------------------------------------------------

#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access)

static int mi_prim_open(const char* fpath, int open_flags) {
@ -76,7 +82,7 @@ static int mi_prim_access(const char *fpath, int mode) {
return syscall(SYS_access,fpath,mode); return syscall(SYS_access,fpath,mode);
} }
#elif !defined(__APPLE__) // avoid unused warnings #elif (!defined(__APPLE__) || MAC_OS_X_VERSION_MIN_REQUIRED < 1070) && !defined(__sun) // avoid unused warnings on macOS and Solaris
static int mi_prim_open(const char* fpath, int open_flags) { static int mi_prim_open(const char* fpath, int open_flags) {
return open(fpath,open_flags); return open(fpath,open_flags);
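The guarded wrappers above exist because a library that interposes open/read/close may allocate inside those overrides, re-entering mimalloc while it is still initializing (issue #713); issuing the raw syscalls sidesteps the interposed libc entirely. A minimal sketch of the same pattern, assuming a Linux target where SYS_open/SYS_read/SYS_close are defined (the helper name is illustrative, not part of mimalloc):

#include <fcntl.h>        // O_RDONLY
#include <stddef.h>
#include <unistd.h>       // syscall()
#include <sys/syscall.h>  // SYS_open, SYS_read, SYS_close

// Read up to `len` bytes of `path` without going through the (possibly
// interposed) libc wrappers; returns the byte count read, or -1 on error.
static long raw_read_file(const char* path, char* buf, size_t len) {
  long fd = syscall(SYS_open, path, O_RDONLY);
  if (fd < 0) return -1;
  long n = syscall(SYS_read, (int)fd, buf, len);
  syscall(SYS_close, (int)fd);
  return n;
}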
@@ -125,7 +131,8 @@ static bool unix_detect_overcommit(void) {
return os_overcommit; return os_overcommit;
} }
void _mi_prim_mem_init( mi_os_mem_config_t* config ) { void _mi_prim_mem_init( mi_os_mem_config_t* config )
{
long psize = sysconf(_SC_PAGESIZE); long psize = sysconf(_SC_PAGESIZE);
if (psize > 0) { if (psize > 0) {
config->page_size = (size_t)psize; config->page_size = (size_t)psize;
@@ -135,6 +142,17 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
config->has_overcommit = unix_detect_overcommit(); config->has_overcommit = unix_detect_overcommit();
config->must_free_whole = false; // mmap can free in parts config->must_free_whole = false; // mmap can free in parts
config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE) config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE)
// disable transparent huge pages for this process?
#if defined(MI_NO_THP) && (defined(__linux__) || defined(__ANDROID__))
int val = 0;
if (prctl(PR_GET_THP_DISABLE, &val, 0, 0, 0) != 0) {
// Most likely since distros often come with always/madvise settings.
val = 1;
// Disabling only for mimalloc process rather than touching system wide settings
(void)prctl(PR_SET_THP_DISABLE, &val, 0, 0, 0);
}
#endif
} }
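The prctl pair above turns off transparent huge pages for the mimalloc process only; the system-wide policy under /sys/kernel/mm/transparent_hugepage is left untouched, which is exactly what MI_NO_THP is meant to provide. A standalone sketch of the usual idiom (Linux 3.15+; note that PR_GET_THP_DISABLE reports the current flag through prctl's return value):

#include <stdio.h>
#include <sys/prctl.h>  // PR_GET_THP_DISABLE, PR_SET_THP_DISABLE (Linux >= 3.15)

int main(void) {
  int thp_disabled = prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0);  // -1 on error, else the flag
  if (thp_disabled == 0) {
    // a nonzero second argument sets the flag for this process only
    if (prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0) == 0) {
      printf("transparent huge pages disabled for this process\n");
    }
  }
  return 0;
}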
@@ -276,7 +294,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
*is_large = true; *is_large = true;
p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
#ifdef MAP_HUGE_1GB #ifdef MAP_HUGE_1GB
if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) { if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) {
mi_huge_pages_available = false; // don't try huge 1GiB pages again mi_huge_pages_available = false; // don't try huge 1GiB pages again
_mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno); _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno);
lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB); lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
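The tightened compare above matters because MAP_HUGE_1GB is not a single flag bit: Linux encodes the requested huge-page size as a log2 exponent shifted by MAP_HUGE_SHIFT (21 << 26 for 2MiB, 30 << 26 for 1GiB), so a 2MiB request already shares bits with the 1GiB value and a nonzero AND does not prove that 1GiB pages were asked for. A small self-contained illustration (the constants are re-derived here instead of taken from <linux/mman.h>):

#include <stdio.h>

#define HUGE_SHIFT 26u
#define HUGE_2MB   (21u << HUGE_SHIFT)  // log2(2 MiB) = 21
#define HUGE_1GB   (30u << HUGE_SHIFT)  // log2(1 GiB) = 30

int main(void) {
  unsigned lflags = HUGE_2MB;                                  // a 2MiB mapping request
  printf("old check: %d\n", (lflags & HUGE_1GB) != 0);         // prints 1 -> false positive
  printf("new check: %d\n", (lflags & HUGE_1GB) == HUGE_1GB);  // prints 0 -> correct
  return 0;
}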
@@ -310,7 +328,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
#elif defined(__sun) #elif defined(__sun)
if (allow_large && _mi_os_use_large_page(size, try_alignment)) { if (allow_large && _mi_os_use_large_page(size, try_alignment)) {
struct memcntl_mha cmd = {0}; struct memcntl_mha cmd = {0};
cmd.mha_pagesize = large_os_page_size; cmd.mha_pagesize = _mi_os_large_page_size();
cmd.mha_cmd = MHA_MAPSIZE_VA; cmd.mha_cmd = MHA_MAPSIZE_VA;
if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) { if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
*is_large = true; *is_large = true;
@@ -727,28 +745,20 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
// Random // Random
//---------------------------------------------------------------- //----------------------------------------------------------------
#if defined(__APPLE__) #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 && MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
#include <AvailabilityMacros.h>
#if defined(MAC_OS_X_VERSION_10_10) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_10
#include <CommonCrypto/CommonCryptoError.h> #include <CommonCrypto/CommonCryptoError.h>
#include <CommonCrypto/CommonRandom.h> #include <CommonCrypto/CommonRandom.h>
#endif
bool _mi_prim_random_buf(void* buf, size_t buf_len) { bool _mi_prim_random_buf(void* buf, size_t buf_len) {
#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf
// We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf // may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
// may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html> return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
#else
// fall back on older macOS
arc4random_buf(buf, buf_len);
return true;
#endif
} }
#elif defined(__ANDROID__) || defined(__DragonFly__) || \ #elif defined(__ANDROID__) || defined(__DragonFly__) || \
defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
defined(__sun) defined(__sun) || \
(defined(MAC_OS_X_VERSION_10_10) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_10 && MAC_OS_X_VERSION_MIN_REQUIRED >= 1070)
#include <stdlib.h> #include <stdlib.h>
bool _mi_prim_random_buf(void* buf, size_t buf_len) { bool _mi_prim_random_buf(void* buf, size_t buf_len) {
@@ -756,11 +766,10 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
return true; return true;
} }
#elif defined(__linux__) || defined(__HAIKU__) #elif defined(__APPLE__) || defined(__linux__) || defined(__HAIKU__) // for old apple versions < 1070 (issue #829)
#include <sys/types.h> #include <sys/types.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <fcntl.h>
#include <errno.h> #include <errno.h>
bool _mi_prim_random_buf(void* buf, size_t buf_len) { bool _mi_prim_random_buf(void* buf, size_t buf_len) {
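The reshuffled cases above prefer CCRandomGenerateBytes on recent macOS because it reports failure through an error code, keep arc4random_buf for the BSDs, Android, Solaris and 10.10-era macOS, and fall back to reading a random device on Linux, Haiku and very old Apple SDKs (issue #829). A minimal sketch of that last fallback, with an illustrative name and EINTR handling omitted for brevity:

#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

// Fill `buf` with `len` random bytes from /dev/urandom; returns false on any failure.
static bool urandom_buf(void* buf, size_t len) {
  int fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
  if (fd < 0) return false;
  size_t done = 0;
  while (done < len) {
    ssize_t n = read(fd, (char*)buf + done, len - done);
    if (n <= 0) { close(fd); return false; }
    done += (size_t)n;
  }
  close(fd);
  return true;
}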
@@ -831,7 +840,9 @@ void _mi_prim_thread_init_auto_done(void) {
} }
void _mi_prim_thread_done_auto_done(void) { void _mi_prim_thread_done_auto_done(void) {
// nothing to do if (_mi_heap_default_key != (pthread_key_t)(-1)) { // do not leak the key, see issue #809
pthread_key_delete(_mi_heap_default_key);
}
} }
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
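Deleting the key here matters because pthread TLS keys come from a small per-process pool (PTHREAD_KEYS_MAX slots); a key that is created but never released is leaked, which is the leak referenced in issue #809 above. A sketch of the create/delete lifecycle, with illustrative names rather than mimalloc's own:

#include <pthread.h>

static pthread_key_t heap_key = (pthread_key_t)(-1);

static void thread_done(void* value) {
  (void)value;  // per-thread teardown would go here
}

static void process_init(void) {
  pthread_key_create(&heap_key, &thread_done);
}

static void process_done(void) {
  if (heap_key != (pthread_key_t)(-1)) {
    pthread_key_delete(heap_key);  // return the slot to the process-wide pool
    heap_key = (pthread_key_t)(-1);
  }
}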

View file

@@ -12,6 +12,9 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc/atomic.h" #include "mimalloc/atomic.h"
#include "mimalloc/prim.h" #include "mimalloc/prim.h"
#include <stdio.h> // fputs
#include <stdlib.h> // getenv
//--------------------------------------------- //---------------------------------------------
// Initialize // Initialize
//--------------------------------------------- //---------------------------------------------
@@ -40,6 +43,8 @@ int _mi_prim_free(void* addr, size_t size ) {
//--------------------------------------------- //---------------------------------------------
#if defined(MI_USE_SBRK) #if defined(MI_USE_SBRK)
#include <unistd.h> // for sbrk
static void* mi_memory_grow( size_t size ) { static void* mi_memory_grow( size_t size ) {
void* p = sbrk(size); void* p = sbrk(size);
if (p == (void*)(-1)) return NULL; if (p == (void*)(-1)) return NULL;
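On the sbrk path above, the heap is grown by moving the program break: sbrk returns the previous break, i.e. the start of the newly added region, on success and (void*)(-1) on failure, which is what the NULL check converts. A toy use of the same call, only to illustrate the mechanism (not mimalloc's allocator):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>  // sbrk

// Grow the break by `size` bytes and return the start of the fresh region, or NULL.
static void* grow(size_t size) {
  void* p = sbrk((intptr_t)size);
  return (p == (void*)(-1)) ? NULL : p;
}

int main(void) {
  char* block = (char*)grow(4096);   // claim 4 KiB of new heap space
  if (block != NULL) block[0] = 42;  // the region is ours to use
  printf("%p\n", (void*)block);
  return 0;
}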

View file

@@ -29,6 +29,7 @@ terms of the MIT license. A copy of the license can be found in the file
static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments
static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
// note: segment can be invalid or NULL.
mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE? mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE?
if ((uintptr_t)segment >= MI_MAX_ADDRESS) { if ((uintptr_t)segment >= MI_MAX_ADDRESS) {
*bitidx = 0; *bitidx = 0;
@@ -70,8 +71,7 @@ void _mi_segment_map_freed_at(const mi_segment_t* segment) {
// Determine the segment belonging to a pointer or NULL if it is not in a valid segment. // Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
static mi_segment_t* _mi_segment_of(const void* p) { static mi_segment_t* _mi_segment_of(const void* p) {
if (p == NULL) return NULL; if (p == NULL) return NULL;
mi_segment_t* segment = _mi_ptr_segment(p); mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL
mi_assert_internal(segment != NULL);
size_t bitidx; size_t bitidx;
size_t index = mi_segment_map_index_of(segment, &bitidx); size_t index = mi_segment_map_index_of(segment, &bitidx);
// fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge
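The "2KiB per TB" in the comment above is plain arithmetic on one bit per 64MiB segment: 1 TiB / 64 MiB = 16384 segments, so 16384 bits = 2048 bytes = 2 KiB of map per TiB of address space. A sketch of how an address turns into a (word, bit) position in such a bitmap, using illustrative constants rather than mimalloc's internals (assumes a 64-bit build):

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SHIFT 26u                      // 64 MiB segments
#define BITS_PER_WORD (8 * sizeof(uintptr_t))  // one uintptr_t of map covers 64 segments

int main(void) {
  uintptr_t addr = (uintptr_t)0x7f1234567890ULL;  // any pointer into some segment
  size_t seg  = (size_t)(addr >> SEGMENT_SHIFT);  // segment number of the address
  size_t word = seg / BITS_PER_WORD;              // which bitmap word holds its bit
  size_t bit  = seg % BITS_PER_WORD;              // which bit within that word
  printf("segment %zu -> map word %zu, bit %zu\n", seg, word, bit);
  return 0;
}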

View file

@@ -836,7 +836,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) { if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) {
_mi_arena_free(segment,segment_size,0,memid,tld->stats); _mi_arena_free(segment,segment_size,0,memid,tld->stats);
return NULL; return NULL;
} }
} }
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);

View file

@@ -108,7 +108,7 @@ static void various_tests() {
auto tbuf = new unsigned char[sizeof(Test)]; auto tbuf = new unsigned char[sizeof(Test)];
t = new (tbuf) Test(42); t = new (tbuf) Test(42);
t->~Test(); t->~Test();
delete tbuf; delete[] tbuf;
} }
class Static { class Static {