mirror of https://github.com/microsoft/mimalloc.git
synced 2025-07-06 19:38:41 +03:00
merge from dev
commit ad47cab97c
142 changed files with 3326 additions and 1819 deletions
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -43,6 +43,11 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_externc
#endif

#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
#endif

// "options.c"
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
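Note on the hunk above: the new MI_USE_PTHREADS define simply records that pthreads are available on every non-Windows, non-WASI target, so code further down can rely on <pthread.h> having been included. A minimal sketch of how such a guard is typically consumed, with invented demo_* names (this is not code from the commit):

#if !defined(_WIN32) && !defined(__wasi__)
#define DEMO_USE_PTHREADS
#include <pthread.h>
#endif

#if defined(DEMO_USE_PTHREADS)
typedef pthread_mutex_t demo_lock_t;               // real lock when pthreads exist
#define DEMO_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
static inline void demo_lock(demo_lock_t* l)   { pthread_mutex_lock(l); }
static inline void demo_unlock(demo_lock_t* l) { pthread_mutex_unlock(l); }
#else
typedef int demo_lock_t;                           // single-threaded fallback: no-op lock
#define DEMO_LOCK_INIT 0
static inline void demo_lock(demo_lock_t* l)   { (void)l; }
static inline void demo_unlock(demo_lock_t* l) { (void)l; }
#endif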
@@ -156,8 +161,8 @@ bool _mi_page_is_valid(mi_page_t* page);
// ------------------------------------------------------

#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x) __builtin_expect((x),0)
#define mi_likely(x) __builtin_expect((x),1)
#define mi_unlikely(x) __builtin_expect(!!(x),false)
#define mi_likely(x) __builtin_expect(!!(x),true)
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
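Note on the hunk above: switching from __builtin_expect((x),0)/((x),1) to __builtin_expect(!!(x),false)/(!!(x),true) normalizes the condition to 0 or 1 before the hint is applied, so mi_likely stays meaningful when the argument is a pointer or an arbitrary nonzero integer rather than a strict boolean. A standalone illustration (not mimalloc code):

#include <stdio.h>

#define my_unlikely(x)  __builtin_expect(!!(x), 0)
#define my_likely(x)    __builtin_expect(!!(x), 1)

static int sum_nonzero(const int* p, int n) {
  if (my_unlikely(p == NULL)) return 0;   // boolean condition: fine either way
  int s = 0;
  for (int i = 0; i < n; i++) {
    if (my_likely(p[i])) s += p[i];       // plain int condition: !! turns e.g. 5 into 1,
  }                                       // so it can actually match the expected value 1
  return s;
}

int main(void) {
  int a[3] = { 2, 0, 5 };
  printf("%d\n", sum_nonzero(a, 3));      // prints 7
  return 0;
}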
@@ -249,11 +254,6 @@ static inline size_t _mi_wsize_from_size(size_t size) {
  return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}

// Does malloc satisfy the alignment constraints already?
static inline bool mi_malloc_satisfies_alignment(size_t alignment, size_t size) {
  return (alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)));
}

// Overflow detecting multiply
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h>   // UINT_MAX, ULONG_MAX
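Note on the hunk above: the surviving "Overflow detecting multiply" section picks __builtin_umul_overflow when the compiler offers it and otherwise falls back to a divide-based check. A self-contained sketch of that pattern (the helper name is invented, not mimalloc's):

#include <stdbool.h>
#include <stddef.h>

// returns true and stores count*size in *total when the product does not overflow
static inline bool checked_mul(size_t count, size_t size, size_t* total) {
#if (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
  return !__builtin_mul_overflow(count, size, total);   // builtin reports overflow directly
#else
  *total = count * size;                                 // portable fallback: check by division
  return (size == 0 || *total / size == count);
#endif
}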
@@ -304,7 +304,7 @@ We try to circumvent this in an efficient way:
- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
           loader itself calls `malloc` even before the modules are initialized.
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
- DragonFly: the uniqueid use is buggy but kept for reference.
- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323)
------------------------------------------------------------------------------------------- */

extern const mi_heap_t _mi_heap_empty;  // read-only empty heap, initial value of the thread local default heap
@@ -321,16 +321,18 @@ mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing hea
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16)
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
#elif defined(__DragonFly__)
#warning "mimalloc is not working correctly on DragonFly yet."
//#define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
// #elif defined(__DragonFly__)
// #warning "mimalloc is not working correctly on DragonFly yet."
// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#elif defined(__ANDROID__)
// See issue #381
#define MI_TLS_PTHREAD
#endif
#endif

#if defined(MI_TLS_SLOT)
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept; // forward declaration
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
#include <pthread.h>
static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
  pthread_t self = pthread_self();
  #if defined(__DragonFly__)
@@ -342,7 +344,6 @@ static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
  return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}
#elif defined(MI_TLS_PTHREAD)
#include <pthread.h>
extern pthread_key_t _mi_heap_default_key;
#endif
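Note on the hunk above: with MI_TLS_PTHREAD (the Android case a few hunks up), the default heap lives behind a pthread key instead of a __thread variable, and _mi_heap_default_key is that key. A rough sketch of the mechanism with invented names, leaving out mimalloc's initialization details:

#include <pthread.h>

static pthread_key_t demo_heap_key;                 // plays the role of _mi_heap_default_key

static void demo_tls_init(void) {
  pthread_key_create(&demo_heap_key, NULL);         // destructor omitted in this sketch
}
static void demo_set_default_heap(void* heap) {
  pthread_setspecific(demo_heap_key, heap);
}
static void* demo_get_default_heap(void) {
  return pthread_getspecific(demo_heap_key);        // NULL until the thread sets it
}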
@@ -352,11 +353,15 @@ extern pthread_key_t _mi_heap_default_key;
// However, on the Apple M1 we do use the address of this variable as the unique thread-id (issue #356).
extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from


static inline mi_heap_t* mi_get_default_heap(void) {
#if defined(MI_TLS_SLOT)
  mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
  if (mi_unlikely(heap == NULL)) { heap = (mi_heap_t*)&_mi_heap_empty; } //_mi_heap_empty_get(); }
  if (mi_unlikely(heap == NULL)) {
    #ifdef __GNUC__
    __asm(""); // prevent conditional load of the address of _mi_heap_empty
    #endif
    heap = (mi_heap_t*)&_mi_heap_empty;
  }
  return heap;
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
  mi_heap_t* heap = *mi_tls_pthread_heap_slot();
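Note on the hunk above: the empty __asm("") acts as a compiler-level barrier inside the unlikely branch; it keeps the compiler from collapsing the branch into an unconditional (or conditional-move) load of &_mi_heap_empty, which matters while thread-local storage is still being bootstrapped. A reduced illustration of the idiom (not mimalloc code):

static int fallback_value = 42;

int deref_or_fallback(int* p) {
  int* q = p;
  if (q == NULL) {                 // rare slow path
#if defined(__GNUC__)
    __asm__("");                   // opaque to the optimizer: keeps the load of &fallback_value
#endif                             // inside the branch instead of hoisting it out
    q = &fallback_value;
  }
  return *q;
}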
@@ -705,8 +710,10 @@ static inline size_t _mi_os_numa_node_count(void) {
// -------------------------------------------------------------------
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms.
// We only require _mi_threadid() to return a unique id for each thread.
// -------------------------------------------------------------------
#if defined(_WIN32)

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
@@ -714,81 +721,93 @@ static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  return (uintptr_t)NtCurrentTeb();
}

#elif defined(__GNUC__) && \
      (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
// both the OS and libc implementation so we use specific tests for each main platform.
// If you test on another platform and it works please send a PR :-)
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
#elif defined(__GNUC__) && ( \
      (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
   || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
   || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
   || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
      )

// TLS register on x86 is in the FS or GS register, see: https://akkadia.org/drepper/tls.pdf
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept {
  void* res;
  const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
  __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x32 ABI
#elif defined(__x86_64__)
  __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  res = tcb[slot];
#elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0" : "=r" (tcb));
  tcb = (void**)((uintptr_t)tcb & ~0x07UL);  // clear lower 3 bits
#else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#if defined(__i386__)
  __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x32 ABI
#elif defined(__x86_64__)
  __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  res = tcb[slot];
#elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
#else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#endif
  res = tcb[slot];
#endif
  res = tcb[slot];
#endif
  return res;
}

// setting is only used on macOSX for now
// setting a tls slot is only used on macOS for now
static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
  const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
  __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x32 ABI
#elif defined(__x86_64__)
  __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  tcb[slot] = value;
#elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0" : "=r" (tcb));
  tcb = (void**)((uintptr_t)tcb & ~0x07UL);  // clear lower 3 bits
#else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#if defined(__i386__)
  __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 macOS uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x32 ABI
#elif defined(__x86_64__)
  __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  tcb[slot] = value;
#elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
#else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#endif
  tcb[slot] = value;
#endif
  tcb[slot] = value;
#endif
}

static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
#if defined(__BIONIC__) && (defined(__arm__) || defined(__aarch64__))
  // on Android, slot 1 is the thread ID (pointer to pthread internal struct)
  return (uintptr_t)mi_tls_slot(1);
#else
  // in all our other targets, slot 0 is the pointer to the thread control block
  return (uintptr_t)mi_tls_slot(0);
#endif
#if defined(__BIONIC__)
  // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
  // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
  return (uintptr_t)mi_tls_slot(1);
#else
  // in all our other targets, slot 0 is the thread id
  // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
  // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
  return (uintptr_t)mi_tls_slot(0);
#endif
}

#else
// otherwise use standard C
// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  return (uintptr_t)&_mi_heap_default;
}

#endif


// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
// -----------------------------------------------------------------------
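Note on the hunk above: the comments assert that on the main targets TLS slot 0 holds the thread control block pointer, which is why it can double as a unique thread id. A tiny check of that claim, assuming x86_64 with glibc (illustrative only, not part of this commit):

#include <stdio.h>
#include <pthread.h>

static void* tls_slot0(void) {
  void* res;
  __asm__("movq %%fs:0, %0" : "=r" (res));   // word 0 of the TCB, addressed through FS
  return res;
}

int main(void) {
  // on x86_64 glibc both lines print the same address: the TCB self-pointer
  printf("slot 0       = %p\n", tls_slot0());
  printf("pthread_self = %p\n", (void*)pthread_self());
  return 0;
}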
@@ -48,6 +48,7 @@ not accidentally mix pointers from different allocators).
#define valloc(n) mi_valloc(n)
#define pvalloc(n) mi_pvalloc(n)
#define reallocarray(p,s,n) mi_reallocarray(p,s,n)
#define reallocarr(p,s,n) mi_reallocarr(p,s,n)
#define memalign(a,n) mi_memalign(a,n)
#define aligned_alloc(a,n) mi_aligned_alloc(a,n)
#define posix_memalign(p,a,n) mi_posix_memalign(p,a,n)
@@ -175,7 +175,10 @@ typedef int32_t mi_ssize_t;
#define MI_BIN_HUGE (73U)

#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
#error "define more bins"
#error "mimalloc internal: define more bins"
#endif
#if (MI_ALIGNMENT_MAX > MI_SEGMENT_SIZE/2)
#error "mimalloc internal: the max aligned boundary is too large for the segment size"
#endif

// Used as a special value to encode block sizes in 32 bits.
@@ -427,9 +430,15 @@ struct mi_heap_s {
// Debug
// ------------------------------------------------------

#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING (0xDE)
#endif

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
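Note on the hunk above: wrapping the debug fill bytes in #if !defined(...) guards makes them overridable at build time instead of hard-coded. For instance (the values and the translation unit below are only examples), a debug build could pick its own patterns:

// on the compiler command line:
//   cc -DMI_DEBUG=3 -DMI_DEBUG_FREED=0xAB -DMI_DEBUG_UNINIT=0xCD -c src/static.c
// any macro left undefined still gets the guarded defaults above (0xD0 / 0xDF / 0xDE)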
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -166,6 +166,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
// Note that `alignment` always follows `size` for consistency with unaligned
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------
#define MI_ALIGNMENT_MAX (1024*1024UL)    // maximum supported alignment is 1MiB

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
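Note on the hunk above: as the comment says, mi_malloc_aligned takes size first and alignment second, which is the opposite order from aligned_alloc and posix_memalign. A quick side-by-side (illustrative):

#include <stdlib.h>
#include <mimalloc.h>

void alignment_example(void) {
  void* a = mi_malloc_aligned(1024, 64);   // mimalloc: size, then alignment (at most MI_ALIGNMENT_MAX)
  void* b = aligned_alloc(64, 1024);       // C11: alignment, then size
  void* c = NULL;
  int rc = posix_memalign(&c, 64, 1024);   // POSIX: out-pointer, alignment, size
  (void)rc;
  mi_free(a); free(b); free(c);
}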
@@ -253,7 +254,7 @@ typedef struct mi_heap_area_s {
  void*  blocks;      // start of the area containing heap blocks
  size_t reserved;    // bytes reserved for this area (virtual)
  size_t committed;   // current available bytes for this area
  size_t used;        // bytes in use by allocated blocks
  size_t used;        // number of allocated blocks
  size_t block_size;  // size in bytes of each block
} mi_heap_area_t;
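Note on the hunk above: the used field of mi_heap_area_t now counts allocated blocks rather than bytes, so a caller that wants a byte count multiplies by block_size. A sketch using the heap visitor (this assumes the usual mi_heap_visit_blocks / mi_block_visit_fun signatures from mimalloc.h; double-check against the header):

#include <mimalloc.h>
#include <stdio.h>

// with visit_blocks == false the visitor is called once per area with block == NULL
static bool area_bytes(const mi_heap_t* heap, const mi_heap_area_t* area,
                       void* block, size_t block_size, void* arg) {
  (void)heap; (void)block; (void)block_size;
  if (area != NULL) {
    *(size_t*)arg += area->used * area->block_size;   // blocks * bytes-per-block
  }
  return true;  // keep visiting
}

void print_heap_usage(void) {
  size_t bytes = 0;
  mi_heap_visit_blocks(mi_heap_get_default(), false, &area_bytes, &bytes);
  printf("in use: %zu bytes\n", bytes);
}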
@@ -356,6 +357,7 @@ mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size)
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);

mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;
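Note on the hunk above: mi_reallocarr is new here and mirrors NetBSD's reallocarr, so presumably it takes the address of the pointer, returns 0 on success and an errno-style value on failure (including overflow of count*size), and leaves the pointer untouched when it fails. A usage sketch under that assumption:

#include <mimalloc.h>
#include <stdio.h>

void grow_array(void) {
  int* items = NULL;
  if (mi_reallocarr(&items, 100, sizeof(int)) != 0) {   // note: the address of the pointer is passed
    fprintf(stderr, "allocation failed, items is still NULL\n");
    return;
  }
  items[99] = 7;
  if (mi_reallocarr(&items, 200, sizeof(int)) != 0) {   // on failure items stays valid at 100 elements
    mi_free(items);
    return;
  }
  mi_free(items);
}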