Merge branch 'dev' into mimalloc-histogram

Daan 2022-02-02 20:01:13 -08:00 committed by GitHub
commit e5bcfae95f
GPG key ID: 4AEE18F83AFDEB23
152 changed files with 3538 additions and 1962 deletions

View file

@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -42,6 +42,11 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_externc
#endif
#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
#endif
// "options.c"
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
@@ -151,8 +156,8 @@ bool _mi_page_is_valid(mi_page_t* page);
// ------------------------------------------------------
#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x) __builtin_expect((x),0)
#define mi_likely(x) __builtin_expect((x),1)
#define mi_unlikely(x) __builtin_expect(!!(x),false)
#define mi_likely(x) __builtin_expect(!!(x),true)
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
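The switch from `__builtin_expect((x),0)` to `__builtin_expect(!!(x),false)` matters because the builtin hints that the expression equals the given value exactly; `!!(x)` first collapses any non-zero result to 1 so the hint also works for pointer and bitmask tests. A small illustrative sketch (the names below are made up, not mimalloc's):

#include <stdbool.h>

#define my_likely(x)   __builtin_expect(!!(x), true)
#define my_unlikely(x) __builtin_expect(!!(x), false)

// With the old form, __builtin_expect((flags & 0x10), 1) compares the raw
// value 0x10 against 1, so the "likely" hint can never match; normalizing
// with !!(x) makes the comparison 1 == 1 as intended.
static int classify(unsigned flags) {
  if (my_unlikely(flags & 0x10)) return -1;   // hint: flag rarely set
  return 0;
}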
@@ -244,11 +249,6 @@ static inline size_t _mi_wsize_from_size(size_t size) {
return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
// Does malloc satisfy the alignment constraints already?
static inline bool mi_malloc_satisfies_alignment(size_t alignment, size_t size) {
return (alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)));
}
// Overflow detecting multiply
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h> // UINT_MAX, ULONG_MAX
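For reference, a hedged sketch of the overflow-checked multiply pattern this block selects a builtin for (the helper name is illustrative; mimalloc's own helper may differ):

#include <stdbool.h>
#include <stddef.h>

// true when count*size overflows; otherwise *total holds the product
static inline bool checked_mul(size_t count, size_t size, size_t* total) {
#if defined(__GNUC__) && (__GNUC__ >= 5)
  return __builtin_mul_overflow(count, size, total);
#else
  *total = count * size;                          // unsigned wrap is well-defined
  return (size != 0 && (*total / size) != count); // dividing back detects the wrap
#endif
}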
@@ -299,7 +299,7 @@ We try to circumvent this in an efficient way:
- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
loader itself calls `malloc` even before the modules are initialized.
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
- DragonFly: the uniqueid use is buggy but kept for reference.
- DragonFly: defaults are working but seem slow compared to FreeBSD (see PR #323)
------------------------------------------------------------------------------------------- */
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
@@ -316,16 +316,18 @@ mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing hea
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
#elif defined(__DragonFly__)
#warning "mimalloc is not working correctly on DragonFly yet."
//#define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
// #elif defined(__DragonFly__)
// #warning "mimalloc is not working correctly on DragonFly yet."
// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#elif defined(__ANDROID__)
// See issue #381
#define MI_TLS_PTHREAD
#endif
#endif
#if defined(MI_TLS_SLOT)
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept; // forward declaration
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
#include <pthread.h>
static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
pthread_t self = pthread_self();
#if defined(__DragonFly__)
@@ -337,7 +339,6 @@ static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}
#elif defined(MI_TLS_PTHREAD)
#include <pthread.h>
extern pthread_key_t _mi_heap_default_key;
#endif
@@ -347,11 +348,15 @@ extern pthread_key_t _mi_heap_default_key;
// However, on the Apple M1 we do use the address of this variable as the unique thread-id (issue #356).
extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
static inline mi_heap_t* mi_get_default_heap(void) {
#if defined(MI_TLS_SLOT)
mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
if (mi_unlikely(heap == NULL)) { heap = (mi_heap_t*)&_mi_heap_empty; } //_mi_heap_empty_get(); }
if (mi_unlikely(heap == NULL)) {
#ifdef __GNUC__
__asm(""); // prevent conditional load of the address of _mi_heap_empty
#endif
heap = (mi_heap_t*)&_mi_heap_empty;
}
return heap;
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
mi_heap_t* heap = *mi_tls_pthread_heap_slot();
@@ -699,8 +704,10 @@ static inline size_t _mi_os_numa_node_count(void) {
// -------------------------------------------------------------------
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms.
// We only require _mi_thread_id() to return a unique id for each thread.
// -------------------------------------------------------------------
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
@@ -708,81 +715,94 @@ static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
return (uintptr_t)NtCurrentTeb();
}
#elif defined(__GNUC__) && \
(defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
// both the OS and libc implementation so we use specific tests for each main platform.
// If you test on another platform and it works please send a PR :-)
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
#elif defined(__GNUC__) && ( \
(defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
|| (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
|| (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
|| (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|| (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
)
// TLS register on x86 is in the FS or GS register, see: https://akkadia.org/drepper/tls.pdf
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept {
void* res;
const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
__asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
__asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
__asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
#elif defined(__x86_64__)
__asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; MI_UNUSED(ofs);
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
res = tcb[slot];
#elif defined(__aarch64__)
void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
__asm__ volatile ("mrs %0, tpidrro_el0" : "=r" (tcb));
tcb = (void**)((uintptr_t)tcb & ~0x07UL); // clear lower 3 bits
#else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#if defined(__i386__)
__asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
__asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
__asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
#elif defined(__x86_64__)
__asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; MI_UNUSED(ofs);
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
res = tcb[slot];
#elif defined(__aarch64__)
void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
#else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#endif
res = tcb[slot];
#endif
res = tcb[slot];
#endif
return res;
}
// setting is only used on macOSX for now
// setting a tls slot is only used on macOS for now
static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
__asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
__asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
__asm__("movl %1,%%fs:%1" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
#elif defined(__x86_64__)
__asm__("movq %1,%%fs:%1" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; MI_UNUSED(ofs);
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
tcb[slot] = value;
#elif defined(__aarch64__)
void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
__asm__ volatile ("mrs %0, tpidrro_el0" : "=r" (tcb));
tcb = (void**)((uintptr_t)tcb & ~0x07UL); // clear lower 3 bits
#else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#if defined(__i386__)
__asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
__asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
__asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
#elif defined(__x86_64__)
__asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; MI_UNUSED(ofs);
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
tcb[slot] = value;
#elif defined(__aarch64__)
void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
#else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#endif
tcb[slot] = value;
#endif
tcb[slot] = value;
#endif
}
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
#if defined(__BIONIC__) && (defined(__arm__) || defined(__aarch64__))
// on Android, slot 1 is the thread ID (pointer to pthread internal struct)
return (uintptr_t)mi_tls_slot(1);
#else
// in all our other targets, slot 0 is the pointer to the thread control block
return (uintptr_t)mi_tls_slot(0);
#endif
#if defined(__BIONIC__)
// issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
// see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
return (uintptr_t)mi_tls_slot(1);
#else
// in all our other targets, slot 0 is the thread id
// glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
// apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
return (uintptr_t)mi_tls_slot(0);
#endif
}
#else
// otherwise use standard C
// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
return (uintptr_t)&_mi_heap_default;
}
#endif
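The portable fallback above relies on the address of a thread-local variable being distinct per thread. A self-contained demonstration of that idea (names are illustrative, not mimalloc's; build with `-pthread`):

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

static _Thread_local int tls_anchor;           // one instance per thread

static uintptr_t my_thread_id(void) {
  return (uintptr_t)&tls_anchor;               // unique address per thread
}

static void* worker(void* arg) {
  (void)arg;
  printf("worker thread id: %p\n", (void*)my_thread_id());
  return NULL;
}

int main(void) {
  pthread_t t1, t2;
  pthread_create(&t1, NULL, worker, NULL);
  pthread_create(&t2, NULL, worker, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  printf("main thread id:   %p\n", (void*)my_thread_id());
  return 0;
}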
// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
// -----------------------------------------------------------------------
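The convention described here (return the full word size for an all-zero input) can be sketched as follows; this is illustrative only, not mimalloc's exact implementation, which also covers MSVC and generic fallbacks:

#include <stdint.h>

#define MY_INTPTR_BITS ((int)(sizeof(uintptr_t) * 8))

// count trailing zeros, returning the full bit width when x == 0
static inline int my_ctz(uintptr_t x) {
  if (x == 0) return MY_INTPTR_BITS;
#if defined(__GNUC__) || defined(__clang__)
  return __builtin_ctzll((unsigned long long)x);
#else
  int n = 0;
  while ((x & 1) == 0) { x >>= 1; n++; }
  return n;
#endif
}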

View file

@@ -48,6 +48,7 @@ not accidentally mix pointers from different allocators).
#define valloc(n) mi_valloc(n)
#define pvalloc(n) mi_pvalloc(n)
#define reallocarray(p,s,n) mi_reallocarray(p,s,n)
#define reallocarr(p,s,n) mi_reallocarr(p,s,n)
#define memalign(a,n) mi_memalign(a,n)
#define aligned_alloc(a,n) mi_aligned_alloc(a,n)
#define posix_memalign(p,a,n) mi_posix_memalign(p,a,n)
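For context, a minimal usage sketch of these overrides, assuming mimalloc's include directory is on the include path and the program links against the mimalloc library: after the include, the standard names expand to their `mi_` counterparts in this translation unit.

#include <stdio.h>
#include <mimalloc.h>
#include <mimalloc-override.h>   // maps malloc/free/... to mi_malloc/mi_free/...

int main(void) {
  void* p = malloc(64);          // expands to mi_malloc(64)
  p = reallocarray(p, 8, 16);    // expands to mi_reallocarray(p, 8, 16)
  free(p);                       // expands to mi_free(p)
  printf("override sketch done\n");
  return 0;
}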

View file

@@ -158,7 +158,10 @@ typedef int32_t mi_ssize_t;
#define MI_BIN_HUGE (73U)
#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
#error "define more bins"
#error "mimalloc internal: define more bins"
#endif
#if (MI_ALIGNMENT_MAX > MI_SEGMENT_SIZE/2)
#error "mimalloc internal: the max aligned boundary is too large for the segment size"
#endif
// Used as a special value to encode block sizes in 32 bits.
@@ -313,7 +316,7 @@ typedef struct mi_segment_s {
// layout like this to optimize access in `mi_free`
size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
_Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
mi_page_kind_t page_kind; // kind of pages: small, large, or huge
mi_page_kind_t page_kind; // kind of pages: small, medium, large, or huge
mi_page_t pages[1]; // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
} mi_segment_t;
@@ -390,9 +393,15 @@ struct mi_heap_s {
// Debug
// ------------------------------------------------------
#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING (0xDE)
#endif
#if (MI_DEBUG)
// use our own assertion to print without memory allocation
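The three fill constants above give debug builds recognizable byte patterns: 0xD0 for uninitialized memory, 0xDF for freed memory, and 0xDE for padding, so stray reads show up clearly in a debugger. A hedged, stand-alone sketch of the idea (not mimalloc's code):

#include <stddef.h>
#include <string.h>

// mark a freed block so use-after-free reads show the 0xDF pattern
static void debug_mark_freed(void* block, size_t size) {
  memset(block, 0xDF /* MI_DEBUG_FREED */, size);
}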

View file

@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
#define MI_MALLOC_VERSION 173 // major + 2 digits minor
#define MI_MALLOC_VERSION 174 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@@ -166,6 +166,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
// Note that `alignment` always follows `size` for consistency with unaligned
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------
#define MI_ALIGNMENT_MAX (1024*1024UL) // maximum supported alignment is 1MiB
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
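A small usage sketch of the aligned API declared above (link against the mimalloc library); note the `size, alignment` argument order mentioned in the comment, and that the alignment must not exceed MI_ALIGNMENT_MAX:

#include <assert.h>
#include <stdint.h>
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc_aligned(1000, 64);       // 1000 bytes on a 64-byte boundary
  assert(p != NULL && ((uintptr_t)p % 64) == 0);
  mi_free(p);
  return 0;
}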
@@ -253,7 +254,7 @@ typedef struct mi_heap_area_s {
void* blocks; // start of the area containing heap blocks
size_t reserved; // bytes reserved for this area (virtual)
size_t committed; // current available bytes for this area
size_t used; // bytes in use by allocated blocks
size_t used; // number of allocated blocks
size_t block_size; // size in bytes of each block
} mi_heap_area_t;
@@ -271,7 +272,6 @@ mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size
mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
@@ -296,31 +296,31 @@ mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size
// ------------------------------------------------------
// Options, all `false` by default
// Options
// ------------------------------------------------------
typedef enum mi_option_e {
// stable options
mi_option_show_errors,
mi_option_show_stats,
mi_option_show_histogram,
mi_option_verbose,
// the following options are experimental
mi_option_eager_commit,
mi_option_eager_region_commit,
mi_option_show_errors, // print error messages
mi_option_show_stats, // print statistics on termination
mi_option_show_histogram, // print histogram
mi_option_verbose, // print verbose messages
// the following options are experimental (see src/options.h)
mi_option_eager_commit,
mi_option_eager_region_commit,
mi_option_reset_decommits,
mi_option_large_os_pages, // implies eager commit
mi_option_reserve_huge_os_pages,
mi_option_reserve_huge_os_pages_at,
mi_option_reserve_os_memory,
mi_option_segment_cache,
mi_option_page_reset,
mi_option_abandoned_page_reset,
mi_option_large_os_pages, // use large (2MiB) OS pages, implies eager commit
mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB) at startup
mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
mi_option_reserve_os_memory, // reserve specified amount of OS memory at startup
mi_option_segment_cache,
mi_option_page_reset,
mi_option_abandoned_page_reset,
mi_option_segment_reset,
mi_option_eager_commit_delay,
mi_option_reset_delay,
mi_option_use_numa_nodes,
mi_option_limit_os_alloc,
mi_option_use_numa_nodes, // 0 = use available numa nodes, otherwise use at most N nodes.
mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only reserved arenas)
mi_option_os_tag,
mi_option_max_errors,
mi_option_max_warnings,
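A brief usage sketch for these options: they can be set programmatically before the first allocation with `mi_option_set`/`mi_option_enable`, or through environment variables named after the option in upper case with a `MIMALLOC_` prefix (e.g. `MIMALLOC_SHOW_STATS=1`).

#include <mimalloc.h>

int main(void) {
  mi_option_enable(mi_option_show_stats);      // print statistics on termination
  mi_option_set(mi_option_verbose, 1);         // print verbose messages

  void* p = mi_malloc(32);
  mi_free(p);
  return 0;                                    // stats are printed at exit
}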
@@ -358,6 +358,7 @@ mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size)
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;