From f459d576bdd6bb2bc4f771acccfc25a8ff6eac07 Mon Sep 17 00:00:00 2001
From: playX
Date: Tue, 9 Mar 2021 19:12:52 +0300
Subject: [PATCH] Heap local deferred free fun

---
 include/mimalloc-types.h | 390 ++++++++++++-----------
 include/mimalloc.h | 663 ++++++++++++++++++++-------------------
 src/heap.c | 423 +++++++++++++++----------
 src/init.c | 631 ++++++++++++++++++++-----------------
 src/page.c | 596 ++++++++++++++++++++---------------
 5 files changed, 1489 insertions(+), 1214 deletions(-)

diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index 99024679..8fa9065b 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -8,18 +8,18 @@ terms of the MIT license. A copy of the license can be found in the file
 #ifndef MIMALLOC_TYPES_H
 #define MIMALLOC_TYPES_H

-#include <stddef.h> // ptrdiff_t
-#include <stdint.h> // uintptr_t, uint16_t, etc
-#include "mimalloc-atomic.h" // _Atomic
+#include <stddef.h> // ptrdiff_t
+#include <stdint.h> // uintptr_t, uint16_t, etc
+#include "mimalloc-atomic.h" // _Atomic

 #ifdef _MSC_VER
-#pragma warning(disable:4214) // bitfield is not int
-#endif
+#pragma warning(disable : 4214) // bitfield is not int
+#endif

 // Minimal alignment necessary. On most platforms 16 bytes are needed
 // due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
 #ifndef MI_MAX_ALIGN_SIZE
-#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
+#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
 #endif

 // ------------------------------------------------------
@@ -56,15 +56,14 @@ terms of the MIT license. A copy of the license can be found in the file

 // Reserve extra padding at the end of each block to be more resilient against heap block overflows.
 // The padding can detect byte-precise buffer overflow on free.
-#if !defined(MI_PADDING) && (MI_DEBUG>=1)
-#define MI_PADDING 1
+#if !defined(MI_PADDING) && (MI_DEBUG >= 1)
+#define MI_PADDING 1
 #endif
-
 // Encoded free lists allow detection of corrupted free lists
 // and can detect buffer overflows, modification after free, and double `free`s.
-#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0)
-#define MI_ENCODE_FREELIST 1
+#if (MI_SECURE >= 3 || MI_DEBUG >= 1 || MI_PADDING > 0)
+#define MI_ENCODE_FREELIST 1
 #endif

 // ------------------------------------------------------
@@ -84,20 +83,19 @@ terms of the MIT license. A copy of the license can be found in the file
 // ------------------------------------------------------

 #if INTPTR_MAX == 9223372036854775807LL
-# define MI_INTPTR_SHIFT (3)
+#define MI_INTPTR_SHIFT (3)
 #elif INTPTR_MAX == 2147483647LL
-# define MI_INTPTR_SHIFT (2)
+#define MI_INTPTR_SHIFT (2)
 #else
 #error platform must be 32 or 64 bits
 #endif

-#define MI_INTPTR_SIZE (1<<MI_INTPTR_SHIFT)
+#define MI_INTPTR_SIZE (1 << MI_INTPTR_SHIFT)

 #if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
 #error "define more bins"
 #endif

 // Used as a special value to encode block sizes in 32 bits.
-#define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
+#define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)

 // The free lists use encoded next fields
 // (Only actually encodes when MI_ENCODED_FREELIST is defined.)
 typedef uintptr_t mi_encoded_t;

 // free lists contain blocks
-typedef struct mi_block_s {
+typedef struct mi_block_s
+{
 mi_encoded_t next;
 } mi_block_t;
-
 // The delayed flags are used for efficient multi-threaded free-ing
-typedef enum mi_delayed_e {
- MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list
- MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap
- MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
- MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim
+typedef enum mi_delayed_e
+{
+ MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list
+ MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap
+ MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
+ MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim
 } mi_delayed_t;
-
 // The `in_full` and `has_aligned` page flags are put in a union to efficiently
 // test if both are false (`full_aligned == 0`) in the `mi_free` routine.
 #if !MI_TSAN
-typedef union mi_page_flags_s {
+typedef union mi_page_flags_s
+{
 uint8_t full_aligned;
- struct {
+ struct
+ {
 uint8_t in_full : 1;
 uint8_t has_aligned : 1;
 } x;
 } mi_page_flags_t;
 #else
 // under thread sanitizer, use a byte for each flag to suppress warning, issue #130
-typedef union mi_page_flags_s {
+typedef union mi_page_flags_s
+{
 uint16_t full_aligned;
- struct {
+ struct
+ {
 uint8_t in_full;
 uint8_t has_aligned;
 } x;
@@ -201,91 +203,91 @@ typedef uintptr_t mi_thread_free_t;
 // We don't count `freed` (as |free|) but use `used` to reduce
 // the number of memory accesses in the `mi_page_all_free` function(s).
 //
 // Notes:
 // - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
 // - Using `uint16_t` does not seem to slow things down
 // - The size is 8 words on 64-bit which helps the page index calculations
 //   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
 //   and 12 are still good for address calculation)
 // - To limit the structure size, the `xblock_size` is 32-bits only; for
 //   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
 // - `thread_free` uses the bottom bits as delayed-free flags to optimize
 //   concurrent frees where only the first concurrent free adds to the owning
 //   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
 //   The invariant is that no-delayed-free is only set if there is
 //   at least one block that will be added, or has already been added, to
 //   the owning heap `thread_delayed_free` list. This guarantees that pages
 //   will be freed correctly even if only other threads free blocks.
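The delayed-free encoding described in the note above can be made concrete: since blocks are at least pointer-aligned, the low two bits of a `mi_block_t*` are always zero, so `mi_thread_free_t` packs the list head and the `mi_delayed_t` flag into a single word that can be updated with one atomic operation. The following sketch mirrors the `mi_tf_*` accessors in `mimalloc-internal.h` (not part of this patch; shown for orientation only):

static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
  return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed); // flag lives in the low 2 bits
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
  return (mi_delayed_t)(tf & 0x03);                                 // extract the delayed-free flag
}
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~(uintptr_t)0x03);                      // mask the flag off to get the list head
}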
-typedef struct mi_page_s { +typedef struct mi_page_s +{ // "owned" by the segment - uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]` - uint8_t segment_in_use:1; // `true` if the segment allocated this page - uint8_t is_reset:1; // `true` if the page memory was reset - uint8_t is_committed:1; // `true` if the page virtual memory is committed - uint8_t is_zero_init:1; // `true` if the page was zero initialized + uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]` + uint8_t segment_in_use : 1; // `true` if the segment allocated this page + uint8_t is_reset : 1; // `true` if the page memory was reset + uint8_t is_committed : 1; // `true` if the page virtual memory is committed + uint8_t is_zero_init : 1; // `true` if the page was zero initialized // layout like this to optimize access in `mi_malloc` and `mi_free` - uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` - uint16_t reserved; // number of blocks reserved in memory - mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits) - uint8_t is_zero:1; // `true` if the blocks in the free list are zero initialized - uint8_t retire_expire:7; // expiration count for retired blocks + uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` + uint16_t reserved; // number of blocks reserved in memory + mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits) + uint8_t is_zero : 1; // `true` if the blocks in the free list are zero initialized + uint8_t retire_expire : 7; // expiration count for retired blocks - mi_block_t* free; // list of available free blocks (`malloc` allocates from this list) - #ifdef MI_ENCODE_FREELIST - uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) - #endif - uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`) - uint32_t xblock_size; // size available in each block (always `>0`) + mi_block_t *free; // list of available free blocks (`malloc` allocates from this list) +#ifdef MI_ENCODE_FREELIST + uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) +#endif + uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`) + uint32_t xblock_size; // size available in each block (always `>0`) - mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) - _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads - _Atomic(uintptr_t) xheap; - - struct mi_page_s* next; // next page owned by this thread with the same `block_size` - struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` + mi_block_t *local_free; // list of deferred free blocks by this thread (migrates to `free`) + _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads + _Atomic(uintptr_t) xheap; + + struct mi_page_s *next; // next page owned by this thread with the same `block_size` + struct mi_page_s *prev; // previous page owned by this thread with the same `block_size` } mi_page_t; - - -typedef enum mi_page_kind_e { - MI_PAGE_SMALL, // small blocks go into 64kb pages inside a segment - MI_PAGE_MEDIUM, // medium blocks go into 512kb pages inside a segment - MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment - MI_PAGE_HUGE // huge 
blocks (>512kb) are put into a single page in a segment of the exact size (but still 2mb aligned)
+typedef enum mi_page_kind_e
+{
+ MI_PAGE_SMALL, // small blocks go into 64kb pages inside a segment
+ MI_PAGE_MEDIUM, // medium blocks go into 512kb pages inside a segment
+ MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment
+ MI_PAGE_HUGE // huge blocks (>512kb) are put into a single page in a segment of the exact size (but still 2mb aligned)
 } mi_page_kind_t;

 // Segments are large allocated memory blocks (2mb on 64 bit) from
 // the OS. Inside segments we allocate fixed size _pages_ that
 // contain blocks.
-typedef struct mi_segment_s {
+typedef struct mi_segment_s
+{
 // memory fields
- size_t memid; // id for the os-level memory manager
- bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
- bool mem_is_committed; // `true` if the whole segment is eagerly committed
+ size_t memid; // id for the os-level memory manager
+ bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
+ bool mem_is_committed; // `true` if the whole segment is eagerly committed

 // segment fields
- _Atomic(struct mi_segment_s*) abandoned_next;
- struct mi_segment_s* next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
- struct mi_segment_s* prev;
+ _Atomic(struct mi_segment_s *) abandoned_next;
+ struct mi_segment_s *next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
+ struct mi_segment_s *prev;

- size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
- size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
+ size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+ size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)

- size_t used; // count of pages in use (`used <= capacity`)
- size_t capacity; // count of available pages (`#free + used`)
- size_t segment_size; // for huge pages this may be different from `MI_SEGMENT_SIZE`
- size_t segment_info_size;// space we are using from the first page for segment meta-data and possible guard pages.
- uintptr_t cookie; // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
+ size_t used; // count of pages in use (`used <= capacity`)
+ size_t capacity; // count of available pages (`#free + used`)
+ size_t segment_size; // for huge pages this may be different from `MI_SEGMENT_SIZE`
+ size_t segment_info_size; // space we are using from the first page for segment meta-data and possible guard pages.
+ uintptr_t cookie; // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`

 // layout like this to optimize access in `mi_free`
- size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
- _Atomic(uintptr_t) thread_id; // unique id of the thread owning this segment
- mi_page_kind_t page_kind; // kind of pages: small, large, or huge
- mi_page_t pages[1]; // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
+ size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
+ _Atomic(uintptr_t) thread_id; // unique id of the thread owning this segment
+ mi_page_kind_t page_kind; // kind of pages: small, large, or huge
+ mi_page_t pages[1]; // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
 } mi_segment_t;
-
 // ------------------------------------------------------
 // Heaps
 // Provide first-class heaps to allocate from.
@@ -303,81 +305,83 @@ typedef struct mi_segment_s {
 typedef struct mi_tld_s mi_tld_t;

 // Pages of a certain block size are held in a queue.
-typedef struct mi_page_queue_s {
- mi_page_t* first;
- mi_page_t* last;
- size_t block_size;
+typedef struct mi_page_queue_s
+{
+ mi_page_t *first;
+ mi_page_t *last;
+ size_t block_size;
 } mi_page_queue_t;

-#define MI_BIN_FULL (MI_BIN_HUGE+1)
+#define MI_BIN_FULL (MI_BIN_HUGE + 1)

 // Random context
-typedef struct mi_random_cxt_s {
+typedef struct mi_random_cxt_s
+{
 uint32_t input[16];
 uint32_t output[16];
- int output_available;
+ int output_available;
 } mi_random_ctx_t;
-
 // In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
 #if (MI_PADDING)
-typedef struct mi_padding_s {
+typedef struct mi_padding_s
+{
 uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
 uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
 } mi_padding_t;
-#define MI_PADDING_SIZE (sizeof(mi_padding_t))
-#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
+#define MI_PADDING_SIZE (sizeof(mi_padding_t))
+#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
 #else
-#define MI_PADDING_SIZE 0
-#define MI_PADDING_WSIZE 0
+#define MI_PADDING_SIZE 0
+#define MI_PADDING_WSIZE 0
 #endif

-#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
-
+#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)

 // A heap owns a set of pages.
-struct mi_heap_s {
- mi_tld_t* tld;
- mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
- mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
- _Atomic(mi_block_t*) thread_delayed_free;
- uintptr_t thread_id; // thread this heap belongs too
- uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
- uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
- mi_random_ctx_t random; // random number context used for secure allocation
- size_t page_count; // total number of pages in the `pages` queues.
- size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
- size_t page_retired_max; // largest retired index into the `pages` array.
- mi_heap_t* next; // list of heaps per thread
- bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
+struct mi_heap_s
+{
+ mi_tld_t *tld;
+ mi_page_t *pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
+ mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
+ _Atomic(mi_block_t *) thread_delayed_free;
+ uintptr_t thread_id; // thread this heap belongs to
+ uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
+ uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
+ mi_random_ctx_t random; // random number context used for secure allocation
+ size_t page_count; // total number of pages in the `pages` queues.
+ size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
+ size_t page_retired_max; // largest retired index into the `pages` array.
+ mi_heap_t *next; // list of heaps per thread
+ bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
+ void *deferred_free; // heap-local deferred free function (or NULL), see `mi_heap_register_local_deferred_free`
+ void *deferred_arg; // argument passed to the heap-local deferred free function
 };
-
-
 // ------------------------------------------------------
 // Debug
 // ------------------------------------------------------
-#define MI_DEBUG_UNINIT (0xD0)
-#define MI_DEBUG_FREED (0xDF)
-#define MI_DEBUG_PADDING (0xDE)
+#define MI_DEBUG_UNINIT (0xD0)
+#define MI_DEBUG_FREED (0xDF)
+#define MI_DEBUG_PADDING (0xDE)

 #if (MI_DEBUG)
 // use our own assertion to print without memory allocation
-void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
-#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
+void _mi_assert_fail(const char *assertion, const char *fname, unsigned int line, const char *func);
+#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr, __FILE__, __LINE__, __func__))
 #else
 #define mi_assert(x)
 #endif

-#if (MI_DEBUG>1)
-#define mi_assert_internal mi_assert
+#if (MI_DEBUG > 1)
+#define mi_assert_internal mi_assert
 #else
 #define mi_assert_internal(x)
 #endif

-#if (MI_DEBUG>2)
-#define mi_assert_expensive mi_assert
+#if (MI_DEBUG > 2)
+#define mi_assert_expensive mi_assert
 #else
 #define mi_assert_expensive(x)
 #endif
@@ -387,26 +391,29 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
 // ------------------------------------------------------

 #ifndef MI_STAT
-#if (MI_DEBUG>0)
+#if (MI_DEBUG > 0)
 #define MI_STAT 2
 #else
 #define MI_STAT 0
 #endif
 #endif

-typedef struct mi_stat_count_s {
+typedef struct mi_stat_count_s
+{
 int64_t allocated;
 int64_t freed;
 int64_t peak;
 int64_t current;
 } mi_stat_count_t;

-typedef struct mi_stat_counter_s {
+typedef struct mi_stat_counter_s
+{
 int64_t total;
 int64_t count;
 } mi_stat_counter_t;

-typedef struct mi_stats_s {
+typedef struct mi_stats_s
+{
 mi_stat_count_t segments;
 mi_stat_count_t pages;
 mi_stat_count_t reserved;
@@ -429,73 +436,76 @@ typedef struct mi_stats_s {
 mi_stat_counter_t normal_count;
 mi_stat_counter_t huge_count;
 mi_stat_counter_t giant_count;
-#if MI_STAT>1
- mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
+#if MI_STAT > 1
+ mi_stat_count_t normal_bins[MI_BIN_HUGE + 1];
 #endif
 } mi_stats_t;
-
-void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
-void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
-void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
+void _mi_stat_increase(mi_stat_count_t *stat, size_t amount);
+void _mi_stat_decrease(mi_stat_count_t *stat, size_t amount);
+void _mi_stat_counter_increase(mi_stat_counter_t *stat, size_t amount);

 #if (MI_STAT)
-#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
-#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
-#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) +#define mi_stat_increase(stat, amount) _mi_stat_increase(&(stat), amount) +#define mi_stat_decrease(stat, amount) _mi_stat_decrease(&(stat), amount) +#define mi_stat_counter_increase(stat, amount) _mi_stat_counter_increase(&(stat), amount) #else -#define mi_stat_increase(stat,amount) (void)0 -#define mi_stat_decrease(stat,amount) (void)0 -#define mi_stat_counter_increase(stat,amount) (void)0 +#define mi_stat_increase(stat, amount) (void)0 +#define mi_stat_decrease(stat, amount) (void)0 +#define mi_stat_counter_increase(stat, amount) (void)0 #endif -#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) -#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount) -#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_counter_increase(heap, stat, amount) mi_stat_counter_increase((heap)->tld->stats.stat, amount) +#define mi_heap_stat_increase(heap, stat, amount) mi_stat_increase((heap)->tld->stats.stat, amount) +#define mi_heap_stat_decrease(heap, stat, amount) mi_stat_decrease((heap)->tld->stats.stat, amount) // ------------------------------------------------------ // Thread Local data // ------------------------------------------------------ -typedef int64_t mi_msecs_t; +typedef int64_t mi_msecs_t; // Queue of segments -typedef struct mi_segment_queue_s { - mi_segment_t* first; - mi_segment_t* last; +typedef struct mi_segment_queue_s +{ + mi_segment_t *first; + mi_segment_t *last; } mi_segment_queue_t; // OS thread local data -typedef struct mi_os_tld_s { - size_t region_idx; // start point for next allocation - mi_stats_t* stats; // points to tld stats +typedef struct mi_os_tld_s +{ + size_t region_idx; // start point for next allocation + mi_stats_t *stats; // points to tld stats } mi_os_tld_t; // Segments thread local data -typedef struct mi_segments_tld_s { - mi_segment_queue_t small_free; // queue of segments with free small pages - mi_segment_queue_t medium_free; // queue of segments with free medium pages - mi_page_queue_t pages_reset; // queue of freed pages that can be reset - size_t count; // current number of segments; - size_t peak_count; // peak number of segments - size_t current_size; // current size of all segments - size_t peak_size; // peak size of all segments - size_t cache_count; // number of segments in the cache - size_t cache_size; // total size of all segments in the cache - mi_segment_t* cache; // (small) cache of segments - mi_stats_t* stats; // points to tld stats - mi_os_tld_t* os; // points to os stats +typedef struct mi_segments_tld_s +{ + mi_segment_queue_t small_free; // queue of segments with free small pages + mi_segment_queue_t medium_free; // queue of segments with free medium pages + mi_page_queue_t pages_reset; // queue of freed pages that can be reset + size_t count; // current number of segments; + size_t peak_count; // peak number of segments + size_t current_size; // current size of all segments + size_t peak_size; // peak size of all segments + size_t cache_count; // number of segments in the cache + size_t cache_size; // total size of all segments in the cache + mi_segment_t *cache; // (small) cache of segments + mi_stats_t *stats; // points to tld stats + mi_os_tld_t *os; // points to os stats } mi_segments_tld_t; // Thread local data -struct mi_tld_s { - unsigned long long heartbeat; // 
monotonic heartbeat count - bool recurse; // true if deferred was called; used to prevent infinite recursion. - mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) - mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) - mi_segments_tld_t segments; // segment tld - mi_os_tld_t os; // os tld - mi_stats_t stats; // statistics +struct mi_tld_s +{ + unsigned long long heartbeat; // monotonic heartbeat count + bool recurse; // true if deferred was called; used to prevent infinite recursion. + mi_heap_t *heap_backing; // backing heap of this thread (cannot be deleted) + mi_heap_t *heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) + mi_segments_tld_t segments; // segment tld + mi_os_tld_t os; // os tld + mi_stats_t stats; // statistics }; #endif diff --git a/include/mimalloc.h b/include/mimalloc.h index a2835d02..43fd6e7b 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -8,370 +8,366 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MIMALLOC_H #define MIMALLOC_H -#define MI_MALLOC_VERSION 170 // major + 2 digits minor +#define MI_MALLOC_VERSION 170 // major + 2 digits minor // ------------------------------------------------------ // Compiler specific attributes // ------------------------------------------------------ #ifdef __cplusplus - #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 - #define mi_attr_noexcept noexcept - #else - #define mi_attr_noexcept throw() - #endif +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 +#define mi_attr_noexcept noexcept #else - #define mi_attr_noexcept +#define mi_attr_noexcept throw() +#endif +#else +#define mi_attr_noexcept #endif #if defined(__cplusplus) && (__cplusplus >= 201703) - #define mi_decl_nodiscard [[nodiscard]] -#elif (__GNUC__ >= 4) || defined(__clang__) // includes clang, icc, and clang-cl - #define mi_decl_nodiscard __attribute__((warn_unused_result)) +#define mi_decl_nodiscard [[nodiscard]] +#elif (__GNUC__ >= 4) || defined(__clang__) // includes clang, icc, and clang-cl +#define mi_decl_nodiscard __attribute__((warn_unused_result)) #elif (_MSC_VER >= 1700) - #define mi_decl_nodiscard _Check_return_ +#define mi_decl_nodiscard _Check_return_ #else - #define mi_decl_nodiscard +#define mi_decl_nodiscard #endif #if defined(_MSC_VER) || defined(__MINGW32__) - #if !defined(MI_SHARED_LIB) - #define mi_decl_export - #elif defined(MI_SHARED_LIB_EXPORT) - #define mi_decl_export __declspec(dllexport) - #else - #define mi_decl_export __declspec(dllimport) - #endif - #if defined(__MINGW32__) - #define mi_decl_restrict - #define mi_attr_malloc __attribute__((malloc)) - #else - #if (_MSC_VER >= 1900) && !defined(__EDG__) - #define mi_decl_restrict __declspec(allocator) __declspec(restrict) - #else - #define mi_decl_restrict __declspec(restrict) - #endif - #define mi_attr_malloc - #endif - #define mi_cdecl __cdecl - #define mi_attr_alloc_size(s) - #define mi_attr_alloc_size2(s1,s2) - #define mi_attr_alloc_align(p) -#elif defined(__GNUC__) // includes clang and icc - #define mi_cdecl // leads to warnings... 
__attribute__((cdecl))
- #define mi_decl_export __attribute__((visibility("default")))
- #define mi_decl_restrict
- #define mi_attr_malloc __attribute__((malloc))
- #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
- #define mi_attr_alloc_size(s)
- #define mi_attr_alloc_size2(s1,s2)
- #define mi_attr_alloc_align(p)
- #elif defined(__INTEL_COMPILER)
- #define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
- #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
- #define mi_attr_alloc_align(p)
- #else
- #define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
- #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
- #define mi_attr_alloc_align(p) __attribute__((alloc_align(p)))
- #endif
 #else
- #define mi_cdecl
- #define mi_decl_export
- #define mi_decl_restrict
- #define mi_attr_malloc
- #define mi_attr_alloc_size(s)
- #define mi_attr_alloc_size2(s1,s2)
- #define mi_attr_alloc_align(p)
+#if !defined(MI_SHARED_LIB)
+#define mi_decl_export
+#elif defined(MI_SHARED_LIB_EXPORT)
+#define mi_decl_export __declspec(dllexport)
 #else
+#define mi_decl_export __declspec(dllimport)
+#endif
+#if defined(__MINGW32__)
+#define mi_decl_restrict
+#define mi_attr_malloc __attribute__((malloc))
 #else
+#if (_MSC_VER >= 1900) && !defined(__EDG__)
+#define mi_decl_restrict __declspec(allocator) __declspec(restrict)
 #else
+#define mi_decl_restrict __declspec(restrict)
+#endif
+#define mi_attr_malloc
+#endif
+#define mi_cdecl __cdecl
+#define mi_attr_alloc_size(s)
+#define mi_attr_alloc_size2(s1, s2)
+#define mi_attr_alloc_align(p)
+#elif defined(__GNUC__) // includes clang and icc
+#define mi_cdecl // leads to warnings... __attribute__((cdecl))
+#define mi_decl_export __attribute__((visibility("default")))
+#define mi_decl_restrict
+#define mi_attr_malloc __attribute__((malloc))
+#if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
+#define mi_attr_alloc_size(s)
+#define mi_attr_alloc_size2(s1, s2)
+#define mi_attr_alloc_align(p)
+#elif defined(__INTEL_COMPILER)
+#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
+#define mi_attr_alloc_size2(s1, s2) __attribute__((alloc_size(s1, s2)))
+#define mi_attr_alloc_align(p)
+#else
+#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
+#define mi_attr_alloc_size2(s1, s2) __attribute__((alloc_size(s1, s2)))
+#define mi_attr_alloc_align(p) __attribute__((alloc_align(p)))
+#endif
+#else
+#define mi_cdecl
+#define mi_decl_export
+#define mi_decl_restrict
+#define mi_attr_malloc
+#define mi_attr_alloc_size(s)
+#define mi_attr_alloc_size2(s1, s2)
+#define mi_attr_alloc_align(p)
 #endif

 // ------------------------------------------------------
 // Includes
 // ------------------------------------------------------

-#include <stddef.h> // size_t
-#include <stdbool.h> // bool
+#include <stddef.h> // size_t
+#include <stdbool.h> // bool

 #ifdef __cplusplus
-extern "C" {
+extern "C"
+{
 #endif

-// ------------------------------------------------------
-// Standard malloc interface
-// ------------------------------------------------------
+ // ------------------------------------------------------
+ // Standard malloc interface
+ // ------------------------------------------------------

-mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
-mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
-mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t
newsize) mi_attr_noexcept mi_attr_alloc_size(2); -mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1, 2); + mi_decl_nodiscard mi_decl_export void *mi_realloc(void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + mi_decl_export void *mi_expand(void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); -mi_decl_export void mi_free(void* p) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc; -mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; -mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + mi_decl_export void mi_free(void *p) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export mi_decl_restrict char *mi_strdup(const char *s) mi_attr_noexcept mi_attr_malloc; + mi_decl_nodiscard mi_decl_export mi_decl_restrict char *mi_strndup(const char *s, size_t n) mi_attr_noexcept mi_attr_malloc; + mi_decl_nodiscard mi_decl_export mi_decl_restrict char *mi_realpath(const char *fname, char *resolved_name) mi_attr_noexcept mi_attr_malloc; // ------------------------------------------------------ // Extended functionality // ------------------------------------------------------ -#define MI_SMALL_WSIZE_MAX (128) -#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*)) +#define MI_SMALL_WSIZE_MAX (128) +#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX * sizeof(void *)) -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); -mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); -mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1, 2); + mi_decl_nodiscard mi_decl_export void *mi_reallocn(void *p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2, 3); + mi_decl_nodiscard mi_decl_export void *mi_reallocf(void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export size_t 
mi_usable_size(const void* p) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void *p) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + // ------------------------------------------------------ + // Internals + // ------------------------------------------------------ -// ------------------------------------------------------ -// Internals -// ------------------------------------------------------ + typedef void(mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void *arg); + mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg) mi_attr_noexcept; -typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); -mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; + typedef void(mi_cdecl mi_output_fun)(const char *msg, void *arg); + mi_decl_export void mi_register_output(mi_output_fun *out, void *arg) mi_attr_noexcept; -typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); -mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; + typedef void(mi_cdecl mi_error_fun)(int err, void *arg); + mi_decl_export void mi_register_error(mi_error_fun *fun, void *arg); -typedef void (mi_cdecl mi_error_fun)(int err, void* arg); -mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg); + mi_decl_export void mi_collect(bool force) mi_attr_noexcept; + mi_decl_export int mi_version(void) mi_attr_noexcept; + mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; + mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; + mi_decl_export void mi_stats_print(void *out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL + mi_decl_export void mi_stats_print_out(mi_output_fun *out, void *arg) mi_attr_noexcept; -mi_decl_export void mi_collect(bool force) mi_attr_noexcept; -mi_decl_export int mi_version(void) mi_attr_noexcept; -mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; -mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; -mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL -mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + mi_decl_export void mi_process_init(void) mi_attr_noexcept; + mi_decl_export void mi_thread_init(void) mi_attr_noexcept; + mi_decl_export void mi_thread_done(void) mi_attr_noexcept; + mi_decl_export void mi_thread_stats_print_out(mi_output_fun *out, void *arg) mi_attr_noexcept; -mi_decl_export void mi_process_init(void) mi_attr_noexcept; -mi_decl_export void mi_thread_init(void) mi_attr_noexcept; -mi_decl_export void mi_thread_done(void) mi_attr_noexcept; -mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + mi_decl_export void mi_process_info(size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, + size_t *current_rss, size_t *peak_rss, + size_t *current_commit, size_t *peak_commit, size_t *page_faults) mi_attr_noexcept; -mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, - size_t* current_rss, size_t* peak_rss, - size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + // 
------------------------------------------------------------------------------------- + // Aligned allocation + // Note that `alignment` always follows `size` for consistency with unaligned + // allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. + // ------------------------------------------------------------------------------------- -// ------------------------------------------------------------------------------------- -// Aligned allocation -// Note that `alignment` always follows `size` for consistency with unaligned -// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. -// ------------------------------------------------------------------------------------- + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1, 2) mi_attr_alloc_align(3); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1, 2); + mi_decl_nodiscard mi_decl_export void *mi_realloc_aligned(void *p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); + mi_decl_nodiscard mi_decl_export void *mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); -mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); -mi_decl_nodiscard mi_decl_export void* 
mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); + // ------------------------------------------------------------------------------------- + // Heaps: first-class, but can only allocate from the same thread that created it. + // ------------------------------------------------------------------------------------- + struct mi_heap_s; + typedef struct mi_heap_s mi_heap_t; -// ------------------------------------------------------------------------------------- -// Heaps: first-class, but can only allocate from the same thread that created it. -// ------------------------------------------------------------------------------------- + mi_decl_nodiscard mi_decl_export mi_heap_t *mi_heap_new(void); + mi_decl_export void mi_heap_delete(mi_heap_t *heap); + mi_decl_export void mi_heap_destroy(mi_heap_t *heap); + mi_decl_export mi_heap_t *mi_heap_set_default(mi_heap_t *heap); + mi_decl_export mi_heap_t *mi_heap_get_default(void); + mi_decl_export mi_heap_t *mi_heap_get_backing(void); + mi_decl_export void mi_heap_collect(mi_heap_t *heap, bool force) mi_attr_noexcept; + typedef void(mi_local_deferred_free_fun)(mi_heap_t *heap, bool force, unsigned long long heartbeat, void *arg); + mi_decl_export void mi_heap_register_local_deferred_free(mi_heap_t *heap, mi_local_deferred_free_fun *deferred_free, void *arg) mi_attr_noexcept; -struct mi_heap_s; -typedef struct mi_heap_s mi_heap_t; + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_malloc(mi_heap_t *heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_zalloc(mi_heap_t *heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_malloc_small(mi_heap_t *heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void); -mi_decl_export void mi_heap_delete(mi_heap_t* heap); -mi_decl_export void mi_heap_destroy(mi_heap_t* heap); -mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap); -mi_decl_export mi_heap_t* mi_heap_get_default(void); -mi_decl_export mi_heap_t* mi_heap_get_backing(void); -mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export void *mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); + mi_decl_nodiscard mi_decl_export void *mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3, 4); + mi_decl_nodiscard mi_decl_export void *mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* 
mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict char *mi_heap_strdup(mi_heap_t *heap, const char *s) mi_attr_noexcept mi_attr_malloc; + mi_decl_nodiscard mi_decl_export mi_decl_restrict char *mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n) mi_attr_noexcept mi_attr_malloc; + mi_decl_nodiscard mi_decl_export mi_decl_restrict char *mi_heap_realpath(mi_heap_t *heap, const char *fname, char *resolved_name) mi_attr_noexcept mi_attr_malloc; -mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); -mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); -mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_malloc_aligned(mi_heap_t *heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_zalloc_aligned(mi_heap_t *heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_zalloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_heap_calloc_aligned_at(mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); + mi_decl_nodiscard mi_decl_export void *mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); + mi_decl_nodiscard mi_decl_export void *mi_heap_realloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc; -mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; -mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + // -------------------------------------------------------------------------------- + // Zero initialized re-allocation. 
+ // Only valid on memory that was originally allocated with zero initialization too. + // e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. + // see + // -------------------------------------------------------------------------------- -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); -mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); -mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); + mi_decl_nodiscard mi_decl_export void *mi_rezalloc(void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export void *mi_recalloc(void *p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2, 3); + mi_decl_nodiscard mi_decl_export void *mi_rezalloc_aligned(void *p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); + mi_decl_nodiscard mi_decl_export void *mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export void *mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); + mi_decl_nodiscard mi_decl_export void *mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2, 3); -// -------------------------------------------------------------------------------- -// Zero initialized re-allocation. -// Only valid on memory that was originally allocated with zero initialization too. -// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. 
-// see -// -------------------------------------------------------------------------------- + mi_decl_nodiscard mi_decl_export void *mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); + mi_decl_nodiscard mi_decl_export void *mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3, 4); -mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); + mi_decl_nodiscard mi_decl_export void *mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); + mi_decl_nodiscard mi_decl_export void *mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); + mi_decl_nodiscard mi_decl_export void *mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3, 4) mi_attr_alloc_align(5); + mi_decl_nodiscard mi_decl_export void *mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3, 4); -mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); -mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4); -mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3); + // ------------------------------------------------------ + // Analysis + // ------------------------------------------------------ -mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); -mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); + mi_decl_export bool mi_heap_contains_block(mi_heap_t *heap, const void *p); + mi_decl_export bool mi_heap_check_owned(mi_heap_t *heap, const void *p); + mi_decl_export bool mi_check_owned(const void *p); -mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); -mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); -mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5); -mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4); + // An area of heap space 
contains blocks of a single size. + typedef struct mi_heap_area_s + { + void *blocks; // start of the area containing heap blocks + size_t reserved; // bytes reserved for this area (virtual) + size_t committed; // current available bytes for this area + size_t used; // bytes in use by allocated blocks + size_t block_size; // size in bytes of each block + } mi_heap_area_t; + typedef bool(mi_cdecl mi_block_visit_fun)(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg); -// ------------------------------------------------------ -// Analysis -// ------------------------------------------------------ + mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg); -mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p); -mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p); -mi_decl_export bool mi_check_owned(const void* p); + // Experimental + mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void *p) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; -// An area of heap space contains blocks of a single size. -typedef struct mi_heap_area_s { - void* blocks; // start of the area containing heap blocks - size_t reserved; // bytes reserved for this area (virtual) - size_t committed; // current available bytes for this area - size_t used; // bytes in use by allocated blocks - size_t block_size; // size in bytes of each block -} mi_heap_area_t; + mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; + mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; -typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; + mi_decl_export bool mi_manage_os_memory(void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; -mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); + // deprecated + mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t *pages_reserved) mi_attr_noexcept; -// Experimental -mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; + // ------------------------------------------------------ + // Convenience + // ------------------------------------------------------ -mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; -mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; +#define mi_malloc_tp(tp) ((tp *)mi_malloc(sizeof(tp))) +#define mi_zalloc_tp(tp) ((tp *)mi_zalloc(sizeof(tp))) +#define mi_calloc_tp(tp, n) ((tp *)mi_calloc(n, sizeof(tp))) +#define mi_mallocn_tp(tp, n) ((tp *)mi_mallocn(n, sizeof(tp))) +#define mi_reallocn_tp(p, tp, n) ((tp *)mi_reallocn(p, n, sizeof(tp))) +#define mi_recalloc_tp(p, tp, n) ((tp *)mi_recalloc(p, n, sizeof(tp))) -mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; -mi_decl_export bool 
mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; +#define mi_heap_malloc_tp(hp, tp) ((tp *)mi_heap_malloc(hp, sizeof(tp))) +#define mi_heap_zalloc_tp(hp, tp) ((tp *)mi_heap_zalloc(hp, sizeof(tp))) +#define mi_heap_calloc_tp(hp, tp, n) ((tp *)mi_heap_calloc(hp, n, sizeof(tp))) +#define mi_heap_mallocn_tp(hp, tp, n) ((tp *)mi_heap_mallocn(hp, n, sizeof(tp))) +#define mi_heap_reallocn_tp(hp, p, tp, n) ((tp *)mi_heap_reallocn(hp, p, n, sizeof(tp))) +#define mi_heap_recalloc_tp(hp, p, tp, n) ((tp *)mi_heap_recalloc(hp, p, n, sizeof(tp))) + // ------------------------------------------------------ + // Options, all `false` by default + // ------------------------------------------------------ -// deprecated -mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; + typedef enum mi_option_e + { + // stable options + mi_option_show_errors, + mi_option_show_stats, + mi_option_verbose, + // the following options are experimental + mi_option_eager_commit, + mi_option_eager_region_commit, + mi_option_reset_decommits, + mi_option_large_os_pages, // implies eager commit + mi_option_reserve_huge_os_pages, + mi_option_reserve_os_memory, + mi_option_segment_cache, + mi_option_page_reset, + mi_option_abandoned_page_reset, + mi_option_segment_reset, + mi_option_eager_commit_delay, + mi_option_reset_delay, + mi_option_use_numa_nodes, + mi_option_limit_os_alloc, + mi_option_os_tag, + mi_option_max_errors, + mi_option_max_warnings, + _mi_option_last + } mi_option_t; + mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option); + mi_decl_export void mi_option_enable(mi_option_t option); + mi_decl_export void mi_option_disable(mi_option_t option); + mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); + mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); -// ------------------------------------------------------ -// Convenience -// ------------------------------------------------------ + mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); + mi_decl_export void mi_option_set(mi_option_t option, long value); + mi_decl_export void mi_option_set_default(mi_option_t option, long value); -#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) -#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) -#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp))) -#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp))) -#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp))) -#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp))) + // ------------------------------------------------------------------------------------------------------- + // "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions. + // (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.) + // note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing. 
+ // ------------------------------------------------------------------------------------------------------- -#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) -#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) -#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp))) -#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp))) -#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp))) -#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp))) + mi_decl_export void mi_cfree(void *p) mi_attr_noexcept; + mi_decl_export void *mi__expand(void *p, size_t newsize) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void *p) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept; + mi_decl_export int mi_posix_memalign(void **p, size_t alignment, size_t size) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); -// ------------------------------------------------------ -// Options, all `false` by default -// ------------------------------------------------------ + mi_decl_nodiscard mi_decl_export void *mi_reallocarray(void *p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2, 3); + mi_decl_nodiscard mi_decl_export void *mi_aligned_recalloc(void *p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept; + mi_decl_nodiscard mi_decl_export void *mi_aligned_offset_recalloc(void *p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept; -typedef enum mi_option_e { - // stable options - mi_option_show_errors, - mi_option_show_stats, - mi_option_verbose, - // the following options are experimental - mi_option_eager_commit, - mi_option_eager_region_commit, - mi_option_reset_decommits, - mi_option_large_os_pages, // implies eager commit - mi_option_reserve_huge_os_pages, - mi_option_reserve_os_memory, - mi_option_segment_cache, - mi_option_page_reset, - mi_option_abandoned_page_reset, - mi_option_segment_reset, - mi_option_eager_commit_delay, - mi_option_reset_delay, - mi_option_use_numa_nodes, - mi_option_limit_os_alloc, - mi_option_os_tag, - mi_option_max_errors, - mi_option_max_warnings, - _mi_option_last -} mi_option_t; + mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short *mi_wcsdup(const unsigned short *s) mi_attr_noexcept mi_attr_malloc; + mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char *mi_mbsdup(const unsigned char *s) mi_attr_noexcept mi_attr_malloc; + mi_decl_export int mi_dupenv_s(char **buf, size_t *size, const char *name) mi_attr_noexcept; + mi_decl_export int mi_wdupenv_s(unsigned short **buf, size_t *size, const unsigned short *name) mi_attr_noexcept; + mi_decl_export void mi_free_size(void *p, size_t size) mi_attr_noexcept; + mi_decl_export void mi_free_size_aligned(void *p, size_t size, size_t alignment) 
mi_attr_noexcept; + mi_decl_export void mi_free_aligned(void *p, size_t alignment) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option); -mi_decl_export void mi_option_enable(mi_option_t option); -mi_decl_export void mi_option_disable(mi_option_t option); -mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); -mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); - -mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); -mi_decl_export void mi_option_set(mi_option_t option, long value); -mi_decl_export void mi_option_set_default(mi_option_t option, long value); - - -// ------------------------------------------------------------------------------------------------------- -// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions. -// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.) -// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing. -// ------------------------------------------------------------------------------------------------------- - -mi_decl_export void mi_cfree(void* p) mi_attr_noexcept; -mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept; - -mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); - -mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); -mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept; -mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept; - -mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc; -mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc; -mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept; -mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept; - -mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; -mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; -mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; - -// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. 
-// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); -mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2); -mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2); -mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3); + // The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. + // (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); + mi_decl_nodiscard mi_decl_export mi_decl_restrict void *mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2); + mi_decl_nodiscard mi_decl_export void *mi_new_realloc(void *p, size_t newsize) mi_attr_alloc_size(2); + mi_decl_nodiscard mi_decl_export void *mi_new_reallocn(void *p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3); #ifdef __cplusplus } @@ -383,54 +379,77 @@ mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, // --------------------------------------------------------------------------------------------- #ifdef __cplusplus -#include // PTRDIFF_MAX -#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 -#include // std::true_type -#include // std::forward +#include // PTRDIFF_MAX +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 +#include // std::true_type +#include // std::forward #endif -template struct mi_stl_allocator { - typedef T value_type; - typedef std::size_t size_type; - typedef std::ptrdiff_t difference_type; - typedef value_type& reference; - typedef value_type const& const_reference; - typedef value_type* pointer; - typedef value_type const* const_pointer; - template struct rebind { typedef mi_stl_allocator other; }; +template +struct mi_stl_allocator +{ + typedef T value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef value_type &reference; + typedef value_type const &const_reference; + typedef value_type *pointer; + typedef value_type const *const_pointer; + template + struct rebind + { + typedef mi_stl_allocator other; + }; - mi_stl_allocator() mi_attr_noexcept = default; - mi_stl_allocator(const 
mi_stl_allocator&) mi_attr_noexcept = default; - template mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { } - mi_stl_allocator select_on_container_copy_construction() const { return *this; } - void deallocate(T* p, size_type) { mi_free(p); } + mi_stl_allocator() mi_attr_noexcept = default; + mi_stl_allocator(const mi_stl_allocator &) mi_attr_noexcept = default; + template + mi_stl_allocator(const mi_stl_allocator &) mi_attr_noexcept {} + mi_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T *p, size_type) { mi_free(p); } - #if (__cplusplus >= 201703L) // C++17 - mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_new_n(count, sizeof(T))); } - mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } - #else - mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_new_n(count, sizeof(value_type))); } - #endif +#if (__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T *allocate(size_type count) + { + return static_cast(mi_new_n(count, sizeof(T))); + } + mi_decl_nodiscard T *allocate(size_type count, const void *) { return allocate(count); } +#else + mi_decl_nodiscard pointer allocate(size_type count, const void * = 0) + { + return static_cast(mi_new_n(count, sizeof(value_type))); + } +#endif - #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 +#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; - using propagate_on_container_swap = std::true_type; - using is_always_equal = std::true_type; - template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } - template void destroy(U* p) mi_attr_noexcept { p->~U(); } - #else - void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::true_type; + template + void construct(U *p, Args &&...args) { ::new (p) U(std::forward(args)...); } + template + void destroy(U *p) mi_attr_noexcept { p->~U(); } +#else + void construct(pointer p, value_type const &val) + { + ::new (p) value_type(val); + } void destroy(pointer p) { p->~value_type(); } - #endif +#endif - size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); } - pointer address(reference x) const { return &x; } - const_pointer address(const_reference x) const { return &x; } + size_type max_size() const mi_attr_noexcept + { + return (PTRDIFF_MAX / sizeof(value_type)); + } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } }; -template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } -template bool operator!=(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return false; } +template +bool operator==(const mi_stl_allocator &, const mi_stl_allocator &) mi_attr_noexcept { return true; } +template +bool operator!=(const mi_stl_allocator &, const mi_stl_allocator &) mi_attr_noexcept { return false; } #endif // __cplusplus #endif diff --git a/src/heap.c b/src/heap.c index a9799dde..9439f7d3 100644 --- a/src/heap.c +++ b/src/heap.c @@ -9,10 +9,10 @@ terms of the MIT license. 
A copy of the license can be found in the file #include "mimalloc-internal.h" #include "mimalloc-atomic.h" -#include // memset, memcpy +#include // memset, memcpy #if defined(_MSC_VER) && (_MSC_VER < 1920) -#pragma warning(disable:4204) // non-constant aggregate initializer +#pragma warning(disable : 4204) // non-constant aggregate initializer #endif /* ----------------------------------------------------------- @@ -20,26 +20,30 @@ terms of the MIT license. A copy of the license can be found in the file ----------------------------------------------------------- */ // return `true` if ok, `false` to break -typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2); +typedef bool(heap_page_visitor_fun)(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2); // Visit all pages in a heap; returns `false` if break was called. -static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2) +static bool mi_heap_visit_pages(mi_heap_t *heap, heap_page_visitor_fun *fn, void *arg1, void *arg2) { - if (heap==NULL || heap->page_count==0) return 0; + if (heap == NULL || heap->page_count == 0) + return 0; - // visit all pages - #if MI_DEBUG>1 +// visit all pages +#if MI_DEBUG > 1 size_t total = heap->page_count; - #endif +#endif size_t count = 0; - for (size_t i = 0; i <= MI_BIN_FULL; i++) { - mi_page_queue_t* pq = &heap->pages[i]; - mi_page_t* page = pq->first; - while(page != NULL) { - mi_page_t* next = page->next; // save next in case the page gets removed from the queue + for (size_t i = 0; i <= MI_BIN_FULL; i++) + { + mi_page_queue_t *pq = &heap->pages[i]; + mi_page_t *page = pq->first; + while (page != NULL) + { + mi_page_t *next = page->next; // save next in case the page gets removed from the queue mi_assert_internal(mi_page_heap(page) == heap); count++; - if (!fn(heap, pq, page, arg1, arg2)) return false; + if (!fn(heap, pq, page, arg1, arg2)) + return false; page = next; // and continue } } @@ -47,30 +51,28 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void return true; } - -#if MI_DEBUG>=2 -static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { +#if MI_DEBUG >= 2 +static bool mi_heap_page_is_valid(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2) +{ UNUSED(arg1); UNUSED(arg2); UNUSED(pq); mi_assert_internal(mi_page_heap(page) == heap); - mi_segment_t* segment = _mi_page_segment(page); + mi_segment_t *segment = _mi_page_segment(page); mi_assert_internal(segment->thread_id == heap->thread_id); mi_assert_expensive(_mi_page_is_valid(page)); return true; } #endif -#if MI_DEBUG>=3 -static bool mi_heap_is_valid(mi_heap_t* heap) { - mi_assert_internal(heap!=NULL); +#if MI_DEBUG >= 3 +static bool mi_heap_is_valid(mi_heap_t *heap) +{ + mi_assert_internal(heap != NULL); mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL); return true; } #endif - - - /* ----------------------------------------------------------- "Collect" pages by migrating `local_free` and `thread_free` lists and freeing empty pages. 
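stops (and in that case abandons pages if there are still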
This is done when a thread @@ -78,32 +80,36 @@ static bool mi_heap_is_valid(mi_heap_t* heap) { blocks alive) ----------------------------------------------------------- */ -typedef enum mi_collect_e { +typedef enum mi_collect_e +{ MI_NORMAL, MI_FORCE, MI_ABANDON } mi_collect_t; - -static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) { +static bool mi_heap_page_collect(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg_collect, void *arg2) +{ UNUSED(arg2); UNUSED(heap); mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL)); - mi_collect_t collect = *((mi_collect_t*)arg_collect); + mi_collect_t collect = *((mi_collect_t *)arg_collect); _mi_page_free_collect(page, collect >= MI_FORCE); - if (mi_page_all_free(page)) { - // no more used blocks, free the page. + if (mi_page_all_free(page)) + { + // no more used blocks, free the page. // note: this will free retired pages as well. _mi_page_free(page, pq, collect >= MI_FORCE); } - else if (collect == MI_ABANDON) { + else if (collect == MI_ABANDON) + { // still used blocks but the thread is done; abandon the page _mi_page_abandon(page, pq); } return true; // don't break } -static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { +static bool mi_heap_page_never_delayed_free(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2) +{ UNUSED(arg1); UNUSED(arg2); UNUSED(heap); @@ -112,27 +118,29 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq return true; // don't break } -static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) +static void mi_heap_collect_ex(mi_heap_t *heap, mi_collect_t collect) { - if (heap==NULL || !mi_heap_is_initialized(heap)) return; + if (heap == NULL || !mi_heap_is_initialized(heap)) + return; _mi_deferred_free(heap, collect >= MI_FORCE); - // note: never reclaim on collect but leave it to threads that need storage to reclaim + // note: never reclaim on collect but leave it to threads that need storage to reclaim if ( - #ifdef NDEBUG +#ifdef NDEBUG collect == MI_FORCE - #else +#else collect >= MI_FORCE - #endif - && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim) +#endif + && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim) { // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments. // if all memory is freed by now, all segments should be freed. 
_mi_abandoned_reclaim_all(heap, &heap->tld->segments); } - + // if abandoning, mark all pages to no longer add to delayed_free - if (collect == MI_ABANDON) { + if (collect == MI_ABANDON) + { mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL); } @@ -145,74 +153,85 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) // collect all pages owned by this thread mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL); - mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL ); + mi_assert_internal(collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t, &heap->thread_delayed_free) == NULL); // collect segment caches - if (collect >= MI_FORCE) { + if (collect >= MI_FORCE) + { _mi_segment_thread_collect(&heap->tld->segments); } // collect regions on program-exit (or shared library unload) - if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) { + if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) + { _mi_mem_collect(&heap->tld->os); } } -void _mi_heap_collect_abandon(mi_heap_t* heap) { +void _mi_heap_collect_abandon(mi_heap_t *heap) +{ mi_heap_collect_ex(heap, MI_ABANDON); } -void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept { +void mi_heap_collect(mi_heap_t *heap, bool force) mi_attr_noexcept +{ mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL)); } -void mi_collect(bool force) mi_attr_noexcept { +void mi_collect(bool force) mi_attr_noexcept +{ mi_heap_collect(mi_get_default_heap(), force); } - /* ----------------------------------------------------------- Heap new ----------------------------------------------------------- */ -mi_heap_t* mi_heap_get_default(void) { +mi_heap_t *mi_heap_get_default(void) +{ mi_thread_init(); return mi_get_default_heap(); } -mi_heap_t* mi_heap_get_backing(void) { - mi_heap_t* heap = mi_heap_get_default(); - mi_assert_internal(heap!=NULL); - mi_heap_t* bheap = heap->tld->heap_backing; - mi_assert_internal(bheap!=NULL); +mi_heap_t *mi_heap_get_backing(void) +{ + mi_heap_t *heap = mi_heap_get_default(); + mi_assert_internal(heap != NULL); + mi_heap_t *bheap = heap->tld->heap_backing; + mi_assert_internal(bheap != NULL); mi_assert_internal(bheap->thread_id == _mi_thread_id()); return bheap; } -mi_heap_t* mi_heap_new(void) { - mi_heap_t* bheap = mi_heap_get_backing(); - mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode? - if (heap==NULL) return NULL; +mi_heap_t *mi_heap_new(void) +{ + mi_heap_t *bheap = mi_heap_get_backing(); + mi_heap_t *heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode? 
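+ // note: the heap structure itself is allocated from the thread's backing heap, so this can fail under out-of-memory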
+ if (heap == NULL)
+ return NULL;
 _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
 heap->tld = bheap->tld;
 heap->thread_id = _mi_thread_id();
 _mi_random_split(&bheap->random, &heap->random);
- heap->cookie = _mi_heap_random_next(heap) | 1;
+ heap->cookie = _mi_heap_random_next(heap) | 1;
 heap->keys[0] = _mi_heap_random_next(heap);
 heap->keys[1] = _mi_heap_random_next(heap);
- heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe
+ heap->no_reclaim = true; // don't reclaim abandoned pages (otherwise `mi_heap_destroy` is unsafe)
 // push on the thread local heaps list
 heap->next = heap->tld->heaps;
 heap->tld->heaps = heap;
+ heap->deferred_free = NULL;
 return heap;
}

-uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
+uintptr_t _mi_heap_random_next(mi_heap_t *heap)
+{
 return _mi_random_next(&heap->random);
}

// zero out the page queues
-static void mi_heap_reset_pages(mi_heap_t* heap) {
+static void mi_heap_reset_pages(mi_heap_t *heap)
+{
 mi_assert_internal(heap != NULL);
 mi_assert_internal(mi_heap_is_initialized(heap));
 // TODO: copy full empty heap instead?
@@ -226,29 +245,41 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
-static void mi_heap_free(mi_heap_t* heap) {
+static void mi_heap_free(mi_heap_t *heap)
+{
 mi_assert(heap != NULL);
 mi_assert_internal(mi_heap_is_initialized(heap));
- if (heap==NULL || !mi_heap_is_initialized(heap)) return;
- if (mi_heap_is_backing(heap)) return; // dont free the backing heap
+ if (heap == NULL || !mi_heap_is_initialized(heap))
+ return;
+ if (mi_heap_is_backing(heap))
+ return; // don't free the backing heap

 // reset default
- if (mi_heap_is_default(heap)) {
+ if (mi_heap_is_default(heap))
+ {
 _mi_heap_set_default_direct(heap->tld->heap_backing);
 }

 // remove ourselves from the thread local heaps list
 // linear search but we expect the number of heaps to be relatively small
- mi_heap_t* prev = NULL;
- mi_heap_t* curr = heap->tld->heaps;
- while (curr != heap && curr != NULL) {
+ mi_heap_t *prev = NULL;
+ mi_heap_t *curr = heap->tld->heaps;
+ while (curr != heap && curr != NULL)
+ {
 prev = curr;
 curr = curr->next;
 }
 mi_assert_internal(curr == heap);
- if (curr == heap) {
- if (prev != NULL) { prev->next = heap->next; }
- else { heap->tld->heaps = heap->next; }
+ if (curr == heap)
+ {
+ if (prev != NULL)
+ {
+ prev->next = heap->next;
+ }
+ else
+ {
+ heap->tld->heaps = heap->next;
+ }
 }
 mi_assert_internal(heap->tld->heaps != NULL);
@@ -256,12 +287,12 @@ static void mi_heap_free(mi_heap_t* heap) {
 mi_free(heap);
}

-
/* -----------------------------------------------------------
 Heap destroy
----------------------------------------------------------- */

-static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+static bool _mi_heap_page_destroy(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
+{
 UNUSED(arg1);
 UNUSED(arg2);
 UNUSED(heap);
@@ -272,24 +303,28 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
 // stats
 const size_t bsize = mi_page_block_size(page);
- if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
- if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
+ if (bsize > MI_LARGE_OBJ_SIZE_MAX)
+ {
+ if (bsize > MI_HUGE_OBJ_SIZE_MAX)
+ {
 mi_heap_stat_decrease(heap, giant, bsize);
 }
- else {
+ else
+ {
 mi_heap_stat_decrease(heap, huge, bsize);
 }
 }
#if (MI_STAT)
- _mi_page_free_collect(page, false); // update used count
+ _mi_page_free_collect(page, false); // update used count
 const size_t inuse = page->used;
- if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
+ {
 mi_heap_stat_decrease(heap, normal, bsize * inuse);
-#if (MI_STAT>1)
+#if (MI_STAT > 1)
 mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
 #endif
 }
- mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
+ mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
 #endif

 /// pretend it is all free now
@@ -300,146 +335,163 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
 // mi_page_free(page,false);
 page->next = NULL;
 page->prev = NULL;
- _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);
+ _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);
 return true; // keep going
}

-void _mi_heap_destroy_pages(mi_heap_t* heap) {
+void _mi_heap_destroy_pages(mi_heap_t *heap)
+{
 mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
 mi_heap_reset_pages(heap);
}

-void mi_heap_destroy(mi_heap_t* heap) {
+void mi_heap_destroy(mi_heap_t *heap)
+{
 mi_assert(heap != NULL);
 mi_assert(mi_heap_is_initialized(heap));
 mi_assert(heap->no_reclaim);
 mi_assert_expensive(mi_heap_is_valid(heap));
- if (heap==NULL || !mi_heap_is_initialized(heap)) return;
- if (!heap->no_reclaim) {
+ if (heap == NULL || !mi_heap_is_initialized(heap))
+ return;
+ if (!heap->no_reclaim)
+ {
 // don't free in case it may contain reclaimed pages
 mi_heap_delete(heap);
 }
- else {
+ else
+ {
 // free all pages
 _mi_heap_destroy_pages(heap);
 mi_heap_free(heap);
 }
}

-
-
/* -----------------------------------------------------------
 Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
-static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
- mi_assert_internal(heap!=NULL);
- if (from==NULL || from->page_count == 0) return;
+static void mi_heap_absorb(mi_heap_t *heap, mi_heap_t *from)
+{
+ mi_assert_internal(heap != NULL);
+ if (from == NULL || from->page_count == 0)
+ return;

 // reduce the size of the delayed frees
 _mi_heap_delayed_free(from);
-
- // transfer all pages by appending the queues; this will set a new heap field
+
+ // transfer all pages by appending the queues; this will set a new heap field
 // so threads may do delayed frees in either heap for a while.
 // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
 // so after this only the new heap will get delayed frees
- for (size_t i = 0; i <= MI_BIN_FULL; i++) {
- mi_page_queue_t* pq = &heap->pages[i];
- mi_page_queue_t* append = &from->pages[i];
+ for (size_t i = 0; i <= MI_BIN_FULL; i++)
+ {
+ mi_page_queue_t *pq = &heap->pages[i];
+ mi_page_queue_t *append = &from->pages[i];
 size_t pcount = _mi_page_queue_append(heap, pq, append);
 heap->page_count += pcount;
 from->page_count -= pcount;
 }
 mi_assert_internal(from->page_count == 0);

- // and do outstanding delayed frees in the `from` heap
+ // and do outstanding delayed frees in the `from` heap
 // note: be careful here as the `heap` field in all those pages no longer points to `from`,
- // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
+ // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
 // the regular `_mi_free_delayed_block` which is safe.
- _mi_heap_delayed_free(from);
- mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
+ _mi_heap_delayed_free(from);
+ mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t, &from->thread_delayed_free) == NULL);

 // and reset the `from` heap
- mi_heap_reset_pages(from);
+ mi_heap_reset_pages(from);
}

// Safely delete a heap without freeing any still-allocated blocks in that heap.
-void mi_heap_delete(mi_heap_t* heap)
+void mi_heap_delete(mi_heap_t *heap)
{
 mi_assert(heap != NULL);
 mi_assert(mi_heap_is_initialized(heap));
 mi_assert_expensive(mi_heap_is_valid(heap));
- if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+ if (heap == NULL || !mi_heap_is_initialized(heap))
+ return;

- if (!mi_heap_is_backing(heap)) {
+ if (!mi_heap_is_backing(heap))
+ {
 // transfer still-used pages to the backing heap
 mi_heap_absorb(heap->tld->heap_backing, heap);
 }
- else {
+ else
+ {
 // the backing heap abandons its pages
 _mi_heap_collect_abandon(heap);
 }
- mi_assert_internal(heap->page_count==0);
+ mi_assert_internal(heap->page_count == 0);
 mi_heap_free(heap);
}

-mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
+mi_heap_t *mi_heap_set_default(mi_heap_t *heap)
+{
 mi_assert(heap != NULL);
 mi_assert(mi_heap_is_initialized(heap));
- if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
+ if (heap == NULL || !mi_heap_is_initialized(heap))
+ return NULL;
 mi_assert_expensive(mi_heap_is_valid(heap));
- mi_heap_t* old = mi_get_default_heap();
+ mi_heap_t *old = mi_get_default_heap();
 _mi_heap_set_default_direct(heap);
 return old;
}

-
-
-
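For reference, a minimal sketch of the lifecycle these functions implement (illustrative user code, not part of this patch; `example_heap_lifecycle` is a hypothetical name):

#include <mimalloc.h>

// Create a private heap, allocate from it, and delete it safely.
// mi_heap_delete migrates still-live blocks to the backing heap via
// mi_heap_absorb; mi_heap_destroy would instead free them outright.
void example_heap_lifecycle(void) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return;
  void* p = mi_heap_malloc(heap, 64);
  mi_free(p);           // individual blocks can be freed as usual
  mi_heap_delete(heap); // remaining live blocks migrate to the backing heap
}

Because deletion absorbs rather than frees, blocks allocated from the deleted heap stay valid and can still be freed later.

/* -----------------------------------------------------------
 Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.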
-static mi_heap_t* mi_heap_of_block(const void* p) { - if (p == NULL) return NULL; - mi_segment_t* segment = _mi_ptr_segment(p); +static mi_heap_t *mi_heap_of_block(const void *p) +{ + if (p == NULL) + return NULL; + mi_segment_t *segment = _mi_ptr_segment(p); bool valid = (_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(valid); - if (mi_unlikely(!valid)) return NULL; - return mi_page_heap(_mi_segment_page_of(segment,p)); + if (mi_unlikely(!valid)) + return NULL; + return mi_page_heap(_mi_segment_page_of(segment, p)); } -bool mi_heap_contains_block(mi_heap_t* heap, const void* p) { +bool mi_heap_contains_block(mi_heap_t *heap, const void *p) +{ mi_assert(heap != NULL); - if (heap==NULL || !mi_heap_is_initialized(heap)) return false; + if (heap == NULL || !mi_heap_is_initialized(heap)) + return false; return (heap == mi_heap_of_block(p)); } - -static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) { +static bool mi_heap_page_check_owned(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *p, void *vfound) +{ UNUSED(heap); UNUSED(pq); - bool* found = (bool*)vfound; - mi_segment_t* segment = _mi_page_segment(page); - void* start = _mi_page_start(segment, page, NULL); - void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page)); + bool *found = (bool *)vfound; + mi_segment_t *segment = _mi_page_segment(page); + void *start = _mi_page_start(segment, page, NULL); + void *end = (uint8_t *)start + (page->capacity * mi_page_block_size(page)); *found = (p >= start && p < end); return (!*found); // continue if not found } -bool mi_heap_check_owned(mi_heap_t* heap, const void* p) { +bool mi_heap_check_owned(mi_heap_t *heap, const void *p) +{ mi_assert(heap != NULL); - if (heap==NULL || !mi_heap_is_initialized(heap)) return false; - if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers + if (heap == NULL || !mi_heap_is_initialized(heap)) + return false; + if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) + return false; // only aligned pointers bool found = false; - mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found); + mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void *)p, &found); return found; } -bool mi_check_owned(const void* p) { +bool mi_check_owned(const void *p) +{ return mi_heap_check_owned(mi_get_default_heap(), p); } @@ -450,46 +502,53 @@ bool mi_check_owned(const void* p) { ----------------------------------------------------------- */ // Separate struct to keep `mi_page_t` out of the public interface -typedef struct mi_heap_area_ex_s { +typedef struct mi_heap_area_ex_s +{ mi_heap_area_t area; - mi_page_t* page; + mi_page_t *page; } mi_heap_area_ex_t; -static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) { +static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t *xarea, mi_block_visit_fun *visitor, void *arg) +{ mi_assert(xarea != NULL); - if (xarea==NULL) return true; - const mi_heap_area_t* area = &xarea->area; - mi_page_t* page = xarea->page; + if (xarea == NULL) + return true; + const mi_heap_area_t *area = &xarea->area; + mi_page_t *page = xarea->page; mi_assert(page != NULL); - if (page == NULL) return true; + if (page == NULL) + return true; - _mi_page_free_collect(page,true); + _mi_page_free_collect(page, true); mi_assert_internal(page->local_free == NULL); - if (page->used == 0) return true; + if (page->used == 0) + return true; const size_t bsize = 
mi_page_block_size(page); - size_t psize; - uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize); + size_t psize; + uint8_t *pstart = _mi_page_start(_mi_page_segment(page), page, &psize); - if (page->capacity == 1) { + if (page->capacity == 1) + { // optimize page with one block mi_assert_internal(page->used == 1 && page->free == NULL); return visitor(mi_page_heap(page), area, pstart, bsize, arg); } - // create a bitmap of free blocks. - #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*)) +// create a bitmap of free blocks. +#define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void *)) uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)]; memset(free_map, 0, sizeof(free_map)); size_t free_count = 0; - for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { + for (mi_block_t *block = page->free; block != NULL; block = mi_block_next(page, block)) + { free_count++; - mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize)); - size_t offset = (uint8_t*)block - pstart; + mi_assert_internal((uint8_t *)block >= pstart && (uint8_t *)block < (pstart + psize)); + size_t offset = (uint8_t *)block - pstart; mi_assert_internal(offset % bsize == 0); - size_t blockidx = offset / bsize; // Todo: avoid division? - mi_assert_internal( blockidx < MI_MAX_BLOCKS); + size_t blockidx = offset / bsize; // Todo: avoid division? + mi_assert_internal(blockidx < MI_MAX_BLOCKS); size_t bitidx = (blockidx / sizeof(uintptr_t)); size_t bit = blockidx - (bitidx * sizeof(uintptr_t)); free_map[bitidx] |= ((uintptr_t)1 << bit); @@ -498,30 +557,34 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v // walk through all blocks skipping the free ones size_t used_count = 0; - for (size_t i = 0; i < page->capacity; i++) { + for (size_t i = 0; i < page->capacity; i++) + { size_t bitidx = (i / sizeof(uintptr_t)); size_t bit = i - (bitidx * sizeof(uintptr_t)); uintptr_t m = free_map[bitidx]; - if (bit == 0 && m == UINTPTR_MAX) { + if (bit == 0 && m == UINTPTR_MAX) + { i += (sizeof(uintptr_t) - 1); // skip a run of free blocks } - else if ((m & ((uintptr_t)1 << bit)) == 0) { + else if ((m & ((uintptr_t)1 << bit)) == 0) + { used_count++; - uint8_t* block = pstart + (i * bsize); - if (!visitor(mi_page_heap(page), area, block, bsize, arg)) return false; + uint8_t *block = pstart + (i * bsize); + if (!visitor(mi_page_heap(page), area, block, bsize, arg)) + return false; } } mi_assert_internal(page->used == used_count); return true; } -typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); +typedef bool(mi_heap_area_visit_fun)(const mi_heap_t *heap, const mi_heap_area_ex_t *area, void *arg); - -static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) { +static bool mi_heap_visit_areas_page(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *vfun, void *arg) +{ UNUSED(heap); UNUSED(pq); - mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun; + mi_heap_area_visit_fun *fun = (mi_heap_area_visit_fun *)vfun; mi_heap_area_ex_t xarea; const size_t bsize = mi_page_block_size(page); xarea.page = page; @@ -534,31 +597,45 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa } // Visit all heap pages as areas -static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) { - if (visitor == NULL) return false; - return 
mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
+static bool mi_heap_visit_areas(const mi_heap_t *heap, mi_heap_area_visit_fun *visitor, void *arg)
+{
+ if (visitor == NULL)
+ return false;
+ return mi_heap_visit_pages((mi_heap_t *)heap, &mi_heap_visit_areas_page, (void *)(visitor), arg); // note: function pointer to void* :-{
}

// Just to pass arguments
-typedef struct mi_visit_blocks_args_s {
- bool visit_blocks;
- mi_block_visit_fun* visitor;
- void* arg;
+typedef struct mi_visit_blocks_args_s
+{
+ bool visit_blocks;
+ mi_block_visit_fun *visitor;
+ void *arg;
} mi_visit_blocks_args_t;

-static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
- mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
- if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
- if (args->visit_blocks) {
+static bool mi_heap_area_visitor(const mi_heap_t *heap, const mi_heap_area_ex_t *xarea, void *arg)
+{
+ mi_visit_blocks_args_t *args = (mi_visit_blocks_args_t *)arg;
+ if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg))
+ return false;
+ if (args->visit_blocks)
+ {
 return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
 }
- else {
+ else
+ {
 return true;
 }
}

// Visit all blocks in a heap
-bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
- mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
+bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_blocks, mi_block_visit_fun *visitor, void *arg)
+{
+ mi_visit_blocks_args_t args = {visit_blocks, visitor, arg};
 return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
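+
+// Register a heap-local deferred free function and its argument for this heap.
+void mi_heap_register_local_deferred_free(mi_heap_t *heap, mi_local_deferred_free_fun *deferred_free, void *arg)
+{
+ heap->deferred_free = (void *)deferred_free;
+ heap->deferred_arg = arg;
+}
\ No newline at end of file

The new hook above only records the callback and argument on the heap; the matching `mi_local_deferred_free_fun` typedef and export live in the mimalloc.h part of this patch, which is not shown in this hunk. A minimal usage sketch, assuming the callback mirrors the shape of mimalloc's existing global `mi_deferred_free_fun(bool force, unsigned long long heartbeat, void* arg)`; `gc_deferred_free` and `example_register` are hypothetical names:

#include <mimalloc.h>

// Hypothetical heap-local deferred-free hook: the allocator can invoke it
// during allocation/collection with the `arg` supplied at registration,
// letting a runtime (e.g. a GC) return garbage blocks to this heap lazily.
static void gc_deferred_free(bool force, unsigned long long heartbeat, void* arg) {
  (void)force; (void)heartbeat; (void)arg;
  // drain a queue of dead objects owned by this heap, e.g. stored in `arg`
}

void example_register(mi_heap_t* heap) {
  mi_heap_register_local_deferred_free(heap, &gc_deferred_free, NULL);
}

For the visitor API completed above, a small sketch as well (illustrative user code; `count_used_blocks` and `example_count` are hypothetical names):

#include <mimalloc.h>

// Count used blocks: the visitor is called once per area with block==NULL,
// then (because visit_all_blocks is true) once per used block in that area.
static bool count_used_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                              void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) (*(size_t*)arg)++;
  return true; // keep visiting
}

size_t example_count(mi_heap_t* heap) {
  size_t n = 0;
  mi_heap_visit_blocks(heap, true, &count_used_blocks, &n);
  return n;
}

diff --git a/src/init.c b/src/init.c
index f635cf98..fe14ecf2 100644
--- a/src/init.c
+++ b/src/init.c
@@ -7,76 +7,92 @@ terms of the MIT license.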
A copy of the license can be found in the file #include "mimalloc.h" #include "mimalloc-internal.h" -#include // memcpy, memset -#include // atexit +#include // memcpy, memset +#include // atexit // Empty page used to initialize the small free pages array const mi_page_t _mi_page_empty = { - 0, false, false, false, false, - 0, // capacity - 0, // reserved capacity - { 0 }, // flags - false, // is_zero - 0, // retire_expire - NULL, // free - #if MI_ENCODE_FREELIST - { 0, 0 }, - #endif - 0, // used - 0, // xblock_size - NULL, // local_free - ATOMIC_VAR_INIT(0), // xthread_free - ATOMIC_VAR_INIT(0), // xheap - NULL, NULL -}; + 0, false, false, false, false, + 0, // capacity + 0, // reserved capacity + {0}, // flags + false, // is_zero + 0, // retire_expire + NULL, // free +#if MI_ENCODE_FREELIST + {0, 0}, +#endif + 0, // used + 0, // xblock_size + NULL, // local_free + ATOMIC_VAR_INIT(0), // xthread_free + ATOMIC_VAR_INIT(0), // xheap + NULL, + NULL}; -#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty) +#define MI_PAGE_EMPTY() ((mi_page_t *)&_mi_page_empty) -#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8) -#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() } -#elif (MI_PADDING>0) -#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() } +#if (MI_PADDING > 0) && (MI_INTPTR_SIZE >= 8) +#define MI_SMALL_PAGES_EMPTY \ + { \ + MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() \ + } +#elif (MI_PADDING > 0) +#define MI_SMALL_PAGES_EMPTY \ + { \ + MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() \ + } #else -#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() } +#define MI_SMALL_PAGES_EMPTY \ + { \ + MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() \ + } #endif - // Empty page queues for every bin -#define QNULL(sz) { NULL, NULL, (sz)*sizeof(uintptr_t) } -#define MI_PAGE_QUEUES_EMPTY \ - { QNULL(1), \ - QNULL( 1), QNULL( 2), QNULL( 3), QNULL( 4), QNULL( 5), QNULL( 6), QNULL( 7), QNULL( 8), /* 8 */ \ - QNULL( 10), QNULL( 12), QNULL( 14), QNULL( 16), QNULL( 20), QNULL( 24), QNULL( 28), QNULL( 32), /* 16 */ \ - QNULL( 40), QNULL( 48), QNULL( 56), QNULL( 64), QNULL( 80), QNULL( 96), QNULL( 112), QNULL( 128), /* 24 */ \ - QNULL( 160), QNULL( 192), QNULL( 224), QNULL( 256), QNULL( 320), QNULL( 384), QNULL( 448), QNULL( 512), /* 32 */ \ - QNULL( 640), QNULL( 768), QNULL( 896), QNULL( 1024), QNULL( 1280), QNULL( 1536), QNULL( 1792), QNULL( 2048), /* 40 */ \ - QNULL( 2560), QNULL( 3072), QNULL( 3584), QNULL( 4096), QNULL( 5120), QNULL( 6144), QNULL( 7168), QNULL( 8192), /* 48 */ \ - QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \ - QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \ - QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \ - QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \ - QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ } +#define QNULL(sz) \ + { \ + NULL, NULL, (sz) * sizeof(uintptr_t) \ + } +#define MI_PAGE_QUEUES_EMPTY \ + { \ + QNULL(1), \ + QNULL(1), QNULL(2), QNULL(3), QNULL(4), QNULL(5), QNULL(6), QNULL(7), QNULL(8), /* 8 */ \ + QNULL(10), QNULL(12), QNULL(14), QNULL(16), QNULL(20), QNULL(24), QNULL(28), QNULL(32), /* 16 */ \ + QNULL(40), QNULL(48), QNULL(56), QNULL(64), QNULL(80), QNULL(96), 
QNULL(112), QNULL(128), /* 24 */ \ + QNULL(160), QNULL(192), QNULL(224), QNULL(256), QNULL(320), QNULL(384), QNULL(448), QNULL(512), /* 32 */ \ + QNULL(640), QNULL(768), QNULL(896), QNULL(1024), QNULL(1280), QNULL(1536), QNULL(1792), QNULL(2048), /* 40 */ \ + QNULL(2560), QNULL(3072), QNULL(3584), QNULL(4096), QNULL(5120), QNULL(6144), QNULL(7168), QNULL(8192), /* 48 */ \ + QNULL(10240), QNULL(12288), QNULL(14336), QNULL(16384), QNULL(20480), QNULL(24576), QNULL(28672), QNULL(32768), /* 56 */ \ + QNULL(40960), QNULL(49152), QNULL(57344), QNULL(65536), QNULL(81920), QNULL(98304), QNULL(114688), QNULL(131072), /* 64 */ \ + QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \ + QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \ + QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ \ + } -#define MI_STAT_COUNT_NULL() {0,0,0,0} +#define MI_STAT_COUNT_NULL() \ + { \ + 0, 0, 0, 0 \ + } // Empty statistics -#if MI_STAT>1 -#define MI_STAT_COUNT_END_NULL() , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) } +#if MI_STAT > 1 +#define MI_STAT_COUNT_END_NULL() \ + , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) } #else #define MI_STAT_COUNT_END_NULL() #endif -#define MI_STATS_NULL \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \ - MI_STAT_COUNT_END_NULL() +#define MI_STATS_NULL \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + {0, 0}, {0, 0}, {0, 0}, {0, 0}, \ + {0, 0}, {0, 0}, {0, 0}, {0, 0} MI_STAT_COUNT_END_NULL() // -------------------------------------------------------- // Statically allocate an empty heap as the initial @@ -88,58 +104,58 @@ const mi_page_t _mi_page_empty = { // -------------------------------------------------------- mi_decl_cache_align const mi_heap_t _mi_heap_empty = { - NULL, - MI_SMALL_PAGES_EMPTY, - MI_PAGE_QUEUES_EMPTY, - ATOMIC_VAR_INIT(NULL), - 0, // tid - 0, // cookie - { 0, 0 }, // keys - { {0}, {0}, 0 }, - 0, // page count - MI_BIN_FULL, 0, // page retired min/max - NULL, // next - false -}; + NULL, + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY, + ATOMIC_VAR_INIT(NULL), + 0, // tid + 0, // cookie + {0, 0}, // keys + {{0}, {0}, 0}, + 0, // page count + MI_BIN_FULL, + 0, // page retired min/max + NULL, // next + false, + 0, + 0}; // the thread-local default heap for allocation -mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; +mi_decl_thread mi_heap_t *_mi_heap_default = (mi_heap_t *)&_mi_heap_empty; extern mi_heap_t _mi_heap_main; static mi_tld_t tld_main = { - 0, false, - &_mi_heap_main, &_mi_heap_main, - { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0}, - 0, 0, 0, 0, 0, 0, NULL, - &tld_main.stats, &tld_main.os - }, // segments - { 0, &tld_main.stats }, // os - { MI_STATS_NULL } // stats + 0, false, &_mi_heap_main, &_mi_heap_main, {{NULL, NULL}, {NULL, NULL}, {NULL, NULL, 0}, 0, 0, 0, 0, 0, 
0, NULL, &tld_main.stats, &tld_main.os}, // segments + {0, &tld_main.stats}, // os + {MI_STATS_NULL} // stats }; mi_heap_t _mi_heap_main = { - &tld_main, - MI_SMALL_PAGES_EMPTY, - MI_PAGE_QUEUES_EMPTY, - ATOMIC_VAR_INIT(NULL), - 0, // thread id - 0, // initial cookie - { 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!) - { {0x846ca68b}, {0}, 0 }, // random - 0, // page count - MI_BIN_FULL, 0, // page retired min/max - NULL, // next heap - false // can reclaim -}; + &tld_main, + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY, + ATOMIC_VAR_INIT(NULL), + 0, // thread id + 0, // initial cookie + {0, 0}, // the key of the main heap can be fixed (unlike page keys that need to be secure!) + {{0x846ca68b}, {0}, 0}, // random + 0, // page count + MI_BIN_FULL, + 0, // page retired min/max + NULL, // next heap + false, // can reclaim + 0, + 0}; -bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. +bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. -mi_stats_t _mi_stats_main = { MI_STATS_NULL }; +mi_stats_t _mi_stats_main = {MI_STATS_NULL}; - -static void mi_heap_main_init(void) { - if (_mi_heap_main.cookie == 0) { +static void mi_heap_main_init(void) +{ + if (_mi_heap_main.cookie == 0) + { _mi_heap_main.thread_id = _mi_thread_id(); _mi_heap_main.cookie = _os_random_weak((uintptr_t)&mi_heap_main_init); _mi_random_init(&_mi_heap_main.random); @@ -148,51 +164,58 @@ static void mi_heap_main_init(void) { } } -mi_heap_t* _mi_heap_main_get(void) { +mi_heap_t *_mi_heap_main_get(void) +{ mi_heap_main_init(); return &_mi_heap_main; } - /* ----------------------------------------------------------- Initialization and freeing of the thread local heaps ----------------------------------------------------------- */ // note: in x64 in release build `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size). -typedef struct mi_thread_data_s { - mi_heap_t heap; // must come first due to cast in `_mi_heap_done` - mi_tld_t tld; +typedef struct mi_thread_data_s +{ + mi_heap_t heap; // must come first due to cast in `_mi_heap_done` + mi_tld_t tld; } mi_thread_data_t; // Initialize the thread local default heap, called from `mi_thread_init` -static bool _mi_heap_init(void) { - if (mi_heap_is_initialized(mi_get_default_heap())) return true; - if (_mi_is_main_thread()) { +static bool _mi_heap_init(void) +{ + if (mi_heap_is_initialized(mi_get_default_heap())) + return true; + if (_mi_is_main_thread()) + { // mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization // the main heap is statically allocated mi_heap_main_init(); _mi_heap_set_default_direct(&_mi_heap_main); //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_get_default_heap()); } - else { + else + { // use `_mi_os_alloc` to allocate directly from the OS - mi_thread_data_t* td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); // Todo: more efficient allocation? - if (td == NULL) { + mi_thread_data_t *td = (mi_thread_data_t *)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); // Todo: more efficient allocation? + if (td == NULL) + { // if this fails, try once more. 
(issue #257) - td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); - if (td == NULL) { + td = (mi_thread_data_t *)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); + if (td == NULL) + { // really out of memory _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t)); return false; } } // OS allocated so already zero initialized - mi_tld_t* tld = &td->tld; - mi_heap_t* heap = &td->heap; + mi_tld_t *tld = &td->tld; + mi_heap_t *heap = &td->heap; _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap)); heap->thread_id = _mi_thread_id(); _mi_random_init(&heap->random); - heap->cookie = _mi_heap_random_next(heap) | 1; + heap->cookie = _mi_heap_random_next(heap) | 1; heap->keys[0] = _mi_heap_random_next(heap); heap->keys[1] = _mi_heap_random_next(heap); heap->tld = tld; @@ -201,27 +224,32 @@ static bool _mi_heap_init(void) { tld->segments.stats = &tld->stats; tld->segments.os = &tld->os; tld->os.stats = &tld->stats; - _mi_heap_set_default_direct(heap); + _mi_heap_set_default_direct(heap); } return false; } // Free the thread local default heap (called from `mi_thread_done`) -static bool _mi_heap_done(mi_heap_t* heap) { - if (!mi_heap_is_initialized(heap)) return true; +static bool _mi_heap_done(mi_heap_t *heap) +{ + if (!mi_heap_is_initialized(heap)) + return true; // reset default heap - _mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t*)&_mi_heap_empty); + _mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t *)&_mi_heap_empty); // switch to backing heap heap = heap->tld->heap_backing; - if (!mi_heap_is_initialized(heap)) return false; + if (!mi_heap_is_initialized(heap)) + return false; // delete all non-backing heaps in this thread - mi_heap_t* curr = heap->tld->heaps; - while (curr != NULL) { - mi_heap_t* next = curr->next; // save `next` as `curr` will be freed - if (curr != heap) { + mi_heap_t *curr = heap->tld->heaps; + while (curr != NULL) + { + mi_heap_t *next = curr->next; // save `next` as `curr` will be freed + if (curr != heap) + { mi_assert_internal(!mi_heap_is_backing(curr)); mi_heap_delete(curr); } @@ -231,15 +259,17 @@ static bool _mi_heap_done(mi_heap_t* heap) { mi_assert_internal(mi_heap_is_backing(heap)); // collect if not the main thread - if (heap != &_mi_heap_main) { + if (heap != &_mi_heap_main) + { _mi_heap_collect_abandon(heap); } - + // merge stats - _mi_stats_done(&heap->tld->stats); + _mi_stats_done(&heap->tld->stats); // free if not the main thread - if (heap != &_mi_heap_main) { + if (heap != &_mi_heap_main) + { mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id()); _mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main); } @@ -254,8 +284,6 @@ static bool _mi_heap_done(mi_heap_t* heap) { return false; } - - // -------------------------------------------------------- // Try to run `mi_thread_done()` automatically so any memory // owned by the thread but not yet released can be abandoned @@ -272,7 +300,7 @@ static bool _mi_heap_done(mi_heap_t* heap) { // to set up the thread local keys. 
// -------------------------------------------------------- -static void _mi_thread_done(mi_heap_t* default_heap); +static void _mi_thread_done(mi_heap_t *default_heap); #ifdef __wasi__ // no pthreads in the WebAssembly Standard Interface @@ -281,54 +309,60 @@ static void _mi_thread_done(mi_heap_t* default_heap); #endif #if defined(_WIN32) && defined(MI_SHARED_LIB) - // nothing to do as it is done in DllMain +// nothing to do as it is done in DllMain #elif defined(_WIN32) && !defined(MI_SHARED_LIB) - // use thread local storage keys to detect thread ending - #include - #include - #if (_WIN32_WINNT < 0x600) // before Windows Vista - WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); - WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); - WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); - WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); - #endif - static DWORD mi_fls_key = (DWORD)(-1); - static void NTAPI mi_fls_done(PVOID value) { - if (value!=NULL) _mi_thread_done((mi_heap_t*)value); - } +// use thread local storage keys to detect thread ending +#include +#include +#if (_WIN32_WINNT < 0x600) // before Windows Vista +WINBASEAPI DWORD WINAPI FlsAlloc(_In_opt_ PFLS_CALLBACK_FUNCTION lpCallback); +WINBASEAPI PVOID WINAPI FlsGetValue(_In_ DWORD dwFlsIndex); +WINBASEAPI BOOL WINAPI FlsSetValue(_In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData); +WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); +#endif +static DWORD mi_fls_key = (DWORD)(-1); +static void NTAPI mi_fls_done(PVOID value) +{ + if (value != NULL) + _mi_thread_done((mi_heap_t *)value); +} #elif defined(MI_USE_PTHREADS) - // use pthread local storage keys to detect thread ending - // (and used with MI_TLS_PTHREADS for the default heap) - #include - pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); - static void mi_pthread_done(void* value) { - if (value!=NULL) _mi_thread_done((mi_heap_t*)value); - } +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +#include +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); +static void mi_pthread_done(void *value) +{ + if (value != NULL) + _mi_thread_done((mi_heap_t *)value); +} #elif defined(__wasi__) // no pthreads in the WebAssembly Standard Interface #else - #pragma message("define a way to call mi_thread_done when a thread is done") +#pragma message("define a way to call mi_thread_done when a thread is done") #endif // Set up handlers so `mi_thread_done` is called automatically -static void mi_process_setup_auto_thread_done(void) { +static void mi_process_setup_auto_thread_done(void) +{ static bool tls_initialized = false; // fine if it races - if (tls_initialized) return; + if (tls_initialized) + return; tls_initialized = true; - #if defined(_WIN32) && defined(MI_SHARED_LIB) - // nothing to do as it is done in DllMain - #elif defined(_WIN32) && !defined(MI_SHARED_LIB) - mi_fls_key = FlsAlloc(&mi_fls_done); - #elif defined(MI_USE_PTHREADS) - mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); - pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); - #endif +#if defined(_WIN32) && defined(MI_SHARED_LIB) + // nothing to do as it is done in DllMain +#elif defined(_WIN32) && !defined(MI_SHARED_LIB) + mi_fls_key = FlsAlloc(&mi_fls_done); +#elif defined(MI_USE_PTHREADS) + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +#endif 
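// (illustration) The pthread branch above uses the standard TLS-destructor
// pattern: a key created with pthread_key_create runs its destructor at thread
// exit for every thread whose key value is non-NULL. A minimal standalone
// sketch of that pattern (plain pthreads, not mimalloc code):
/*
#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void on_thread_exit(void *value)
{
  // runs automatically at thread exit because the value below is non-NULL
  printf("thread done: %s\n", (const char *)value);
}

static void *worker(void *arg)
{
  (void)arg;
  pthread_setspecific(key, "worker"); // a non-NULL value arms the destructor
  return NULL;
}

int main(void)
{
  pthread_key_create(&key, &on_thread_exit);
  pthread_t t;
  pthread_create(&t, NULL, &worker, NULL);
  pthread_join(t, NULL);
  return 0;
}
*/
// mimalloc stores the default heap pointer as the key value (see
// _mi_heap_set_default_direct below), so one mechanism both publishes the
// heap and triggers _mi_thread_done.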
_mi_heap_set_default_direct(&_mi_heap_main); } - -bool _mi_is_main_thread(void) { - return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id()); +bool _mi_is_main_thread(void) +{ + return (_mi_heap_main.thread_id == 0 || _mi_heap_main.thread_id == _mi_thread_id()); } // This is called from the `mi_malloc_generic` @@ -336,125 +370,144 @@ void mi_thread_init(void) mi_attr_noexcept { // ensure our process has started already mi_process_init(); - + // initialize the thread local default heap // (this will call `_mi_heap_set_default_direct` and thus set the // fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called) - if (_mi_heap_init()) return; // returns true if already initialized + if (_mi_heap_init()) + return; // returns true if already initialized _mi_stat_increase(&_mi_stats_main.threads, 1); //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id()); } -void mi_thread_done(void) mi_attr_noexcept { +void mi_thread_done(void) mi_attr_noexcept +{ _mi_thread_done(mi_get_default_heap()); } -static void _mi_thread_done(mi_heap_t* heap) { +static void _mi_thread_done(mi_heap_t *heap) +{ _mi_stat_decrease(&_mi_stats_main.threads, 1); // check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps... - if (heap->thread_id != _mi_thread_id()) return; - + if (heap->thread_id != _mi_thread_id()) + return; + // abandon the thread local heap - if (_mi_heap_done(heap)) return; // returns true if already ran + if (_mi_heap_done(heap)) + return; // returns true if already ran } -void _mi_heap_set_default_direct(mi_heap_t* heap) { +void _mi_heap_set_default_direct(mi_heap_t *heap) +{ mi_assert_internal(heap != NULL); - #if defined(MI_TLS_SLOT) - mi_tls_slot_set(MI_TLS_SLOT,heap); - #elif defined(MI_TLS_PTHREAD_SLOT_OFS) +#if defined(MI_TLS_SLOT) + mi_tls_slot_set(MI_TLS_SLOT, heap); +#elif defined(MI_TLS_PTHREAD_SLOT_OFS) *mi_tls_pthread_heap_slot() = heap; - #elif defined(MI_TLS_PTHREAD) - // we use _mi_heap_default_key - #else +#elif defined(MI_TLS_PTHREAD) +// we use _mi_heap_default_key +#else _mi_heap_default = heap; - #endif +#endif - // ensure the default heap is passed to `_mi_thread_done` - // setting to a non-NULL value also ensures `mi_thread_done` is called. - #if defined(_WIN32) && defined(MI_SHARED_LIB) - // nothing to do as it is done in DllMain - #elif defined(_WIN32) && !defined(MI_SHARED_LIB) - mi_assert_internal(mi_fls_key != 0); - FlsSetValue(mi_fls_key, heap); - #elif defined(MI_USE_PTHREADS) - if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD +// ensure the default heap is passed to `_mi_thread_done` +// setting to a non-NULL value also ensures `mi_thread_done` is called. 
+#if defined(_WIN32) && defined(MI_SHARED_LIB) + // nothing to do as it is done in DllMain +#elif defined(_WIN32) && !defined(MI_SHARED_LIB) + mi_assert_internal(mi_fls_key != 0); + FlsSetValue(mi_fls_key, heap); +#elif defined(MI_USE_PTHREADS) + if (_mi_heap_default_key != (pthread_key_t)(-1)) + { // can happen during recursive invocation on freeBSD pthread_setspecific(_mi_heap_default_key, heap); } - #endif +#endif } - // -------------------------------------------------------- // Run functions on process init/done, and thread init/done // -------------------------------------------------------- static void mi_process_done(void); -static bool os_preloading = true; // true until this module is initialized -static bool mi_redirected = false; // true if malloc redirects to mi_malloc +static bool os_preloading = true; // true until this module is initialized +static bool mi_redirected = false; // true if malloc redirects to mi_malloc // Returns true if this module has not been initialized; Don't use C runtime routines until it returns false. -bool _mi_preloading(void) { +bool _mi_preloading(void) +{ return os_preloading; } -bool mi_is_redirected(void) mi_attr_noexcept { +bool mi_is_redirected(void) mi_attr_noexcept +{ return mi_redirected; } // Communicate with the redirection module on Windows #if defined(_WIN32) && defined(MI_SHARED_LIB) #ifdef __cplusplus -extern "C" { +extern "C" +{ #endif -mi_decl_export void _mi_redirect_entry(DWORD reason) { - // called on redirection; careful as this may be called before DllMain - if (reason == DLL_PROCESS_ATTACH) { - mi_redirected = true; + mi_decl_export void _mi_redirect_entry(DWORD reason) + { + // called on redirection; careful as this may be called before DllMain + if (reason == DLL_PROCESS_ATTACH) + { + mi_redirected = true; + } + else if (reason == DLL_PROCESS_DETACH) + { + mi_redirected = false; + } + else if (reason == DLL_THREAD_DETACH) + { + mi_thread_done(); + } } - else if (reason == DLL_PROCESS_DETACH) { - mi_redirected = false; - } - else if (reason == DLL_THREAD_DETACH) { - mi_thread_done(); - } -} -__declspec(dllimport) bool mi_allocator_init(const char** message); -__declspec(dllimport) void mi_allocator_done(void); + __declspec(dllimport) bool mi_allocator_init(const char **message); + __declspec(dllimport) void mi_allocator_done(void); #ifdef __cplusplus } #endif #else -static bool mi_allocator_init(const char** message) { - if (message != NULL) *message = NULL; +static bool mi_allocator_init(const char **message) +{ + if (message != NULL) + *message = NULL; return true; } -static void mi_allocator_done(void) { +static void mi_allocator_done(void) +{ // nothing to do } #endif // Called once by the process loader -static void mi_process_load(void) { +static void mi_process_load(void) +{ mi_heap_main_init(); - #if defined(MI_TLS_RECURSE_GUARD) - volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true; +#if defined(MI_TLS_RECURSE_GUARD) + volatile mi_heap_t *dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true; UNUSED(dummy); - #endif +#endif os_preloading = false; atexit(&mi_process_done); _mi_options_init(); mi_process_init(); //mi_stats_reset();- - if (mi_redirected) _mi_verbose_message("malloc is redirected.\n"); + if (mi_redirected) + _mi_verbose_message("malloc is redirected.\n"); // show message from the redirector (if present) - const char* msg = NULL; + const char *msg = NULL; mi_allocator_init(&msg); - if (msg != NULL && 
(mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) { - _mi_fputs(NULL,NULL,NULL,msg); + if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) + { + _mi_fputs(NULL, NULL, NULL, msg); } } @@ -462,22 +515,26 @@ static void mi_process_load(void) { #include mi_decl_cache_align bool _mi_cpu_has_fsrm = false; -static void mi_detect_cpu_features(void) { +static void mi_detect_cpu_features(void) +{ // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) int32_t cpu_info[4]; __cpuid(cpu_info, 7); _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see } #else -static void mi_detect_cpu_features(void) { +static void mi_detect_cpu_features(void) +{ // nothing } #endif // Initialize the process; called by thread_init or the process loader -void mi_process_init(void) mi_attr_noexcept { +void mi_process_init(void) mi_attr_noexcept +{ // ensure we are called once - if (_mi_process_is_initialized) return; + if (_mi_process_is_initialized) + return; _mi_process_is_initialized = true; mi_process_setup_auto_thread_done(); @@ -485,99 +542,113 @@ void mi_process_init(void) mi_attr_noexcept { mi_detect_cpu_features(); _mi_os_init(); mi_heap_main_init(); - #if (MI_DEBUG) +#if (MI_DEBUG) _mi_verbose_message("debug level : %d\n", MI_DEBUG); - #endif +#endif _mi_verbose_message("secure level: %d\n", MI_SECURE); mi_thread_init(); - mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL) + mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL) - if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) + { size_t pages = mi_option_get(mi_option_reserve_huge_os_pages); - mi_reserve_huge_os_pages_interleave(pages, 0, pages*500); - } - if (mi_option_is_enabled(mi_option_reserve_os_memory)) { + mi_reserve_huge_os_pages_interleave(pages, 0, pages * 500); + } + if (mi_option_is_enabled(mi_option_reserve_os_memory)) + { long ksize = mi_option_get(mi_option_reserve_os_memory); - if (ksize > 0) mi_reserve_os_memory((size_t)ksize*KiB, true, true); + if (ksize > 0) + mi_reserve_os_memory((size_t)ksize * KiB, true, true); } } // Called when the process is done (through `at_exit`) -static void mi_process_done(void) { +static void mi_process_done(void) +{ // only shutdown if we were initialized - if (!_mi_process_is_initialized) return; + if (!_mi_process_is_initialized) + return; // ensure we are called once static bool process_done = false; - if (process_done) return; + if (process_done) + return; process_done = true; - #if defined(_WIN32) && !defined(MI_SHARED_LIB) - FlsSetValue(mi_fls_key, NULL); // don't call main-thread callback - FlsFree(mi_fls_key); // call thread-done on all threads to prevent dangling callback pointer if statically linked with a DLL; Issue #208 - #endif - - #if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB) +#if defined(_WIN32) && !defined(MI_SHARED_LIB) + FlsSetValue(mi_fls_key, NULL); // don't call main-thread callback + FlsFree(mi_fls_key); // call thread-done on all threads to prevent dangling callback pointer if statically linked with a DLL; Issue #208 +#endif + +#if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB) // free all memory if possible on process exit. This is not needed for a stand-alone process // but should be done if mimalloc is statically linked into another shared library which // is repeatedly loaded/unloaded, see issue #281. 
- mi_collect(true /* force */ ); - #endif + mi_collect(true /* force */); +#endif - if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) { + if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) + { mi_stats_print(NULL); } - mi_allocator_done(); + mi_allocator_done(); _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id); os_preloading = true; // don't call the C runtime anymore } - - #if defined(_WIN32) && defined(MI_SHARED_LIB) - // Windows DLL: easy to hook into process_init and thread_done - __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { - UNUSED(reserved); - UNUSED(inst); - if (reason==DLL_PROCESS_ATTACH) { - mi_process_load(); - } - else if (reason==DLL_THREAD_DETACH) { - if (!mi_is_redirected()) mi_thread_done(); - } - return TRUE; +// Windows DLL: easy to hook into process_init and thread_done +__declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) +{ + UNUSED(reserved); + UNUSED(inst); + if (reason == DLL_PROCESS_ATTACH) + { + mi_process_load(); } + else if (reason == DLL_THREAD_DETACH) + { + if (!mi_is_redirected()) + mi_thread_done(); + } + return TRUE; +} #elif defined(__cplusplus) - // C++: use static initialization to detect process start - static bool _mi_process_init(void) { - mi_process_load(); - return (_mi_heap_main.thread_id != 0); - } - static bool mi_initialized = _mi_process_init(); +// C++: use static initialization to detect process start +static bool _mi_process_init(void) +{ + mi_process_load(); + return (_mi_heap_main.thread_id != 0); +} +static bool mi_initialized = _mi_process_init(); #elif defined(__GNUC__) || defined(__clang__) - // GCC,Clang: use the constructor attribute - static void __attribute__((constructor)) _mi_process_init(void) { - mi_process_load(); - } +// GCC,Clang: use the constructor attribute +static void __attribute__((constructor)) _mi_process_init(void) +{ + mi_process_load(); +} #elif defined(_MSC_VER) - // MSVC: use data section magic for static libraries - // See - static int _mi_process_init(void) { - mi_process_load(); - return 0; - } - typedef int(*_crt_cb)(void); - #ifdef _M_X64 - __pragma(comment(linker, "/include:" "_mi_msvc_initu")) - #pragma section(".CRT$XIU", long, read) - #else - __pragma(comment(linker, "/include:" "__mi_msvc_initu")) - #endif - #pragma data_seg(".CRT$XIU") - _crt_cb _mi_msvc_initu[] = { &_mi_process_init }; - #pragma data_seg() +// MSVC: use data section magic for static libraries +// See +static int _mi_process_init(void) +{ + mi_process_load(); + return 0; +} +typedef int (*_crt_cb)(void); +#ifdef _M_X64 +__pragma(comment(linker, "/include:" + "_mi_msvc_initu")) +#pragma section(".CRT$XIU", long, read) +#else +__pragma(comment(linker, "/include:" + "__mi_msvc_initu")) +#endif +#pragma data_seg(".CRT$XIU") + _crt_cb _mi_msvc_initu[] = {&_mi_process_init}; +#pragma data_seg() #else #pragma message("define a way to call mi_process_load on your platform") diff --git a/src/page.c b/src/page.c index 4b7e9ffb..a86b0bb2 100644 --- a/src/page.c +++ b/src/page.c @@ -23,26 +23,28 @@ terms of the MIT license. 
A copy of the license can be found in the file #include "page-queue.c" #undef MI_IN_PAGE_C - /* ----------------------------------------------------------- Page helpers ----------------------------------------------------------- */ // Index a block in a page -static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) { +static inline mi_block_t *mi_page_block_at(const mi_page_t *page, void *page_start, size_t block_size, size_t i) +{ UNUSED(page); mi_assert_internal(page != NULL); mi_assert_internal(i <= page->reserved); - return (mi_block_t*)((uint8_t*)page_start + (i * block_size)); + return (mi_block_t *)((uint8_t *)page_start + (i * block_size)); } -static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); -static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld); +static void mi_page_init(mi_heap_t *heap, mi_page_t *page, size_t size, mi_tld_t *tld); +static void mi_page_extend_free(mi_heap_t *heap, mi_page_t *page, mi_tld_t *tld); -#if (MI_DEBUG>=3) -static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) { +#if (MI_DEBUG >= 3) +static size_t mi_page_list_count(mi_page_t *page, mi_block_t *head) +{ size_t count = 0; - while (head != NULL) { + while (head != NULL) + { mi_assert_internal(page == _mi_ptr_page(head)); count++; head = mi_block_next(page, head); @@ -57,41 +59,47 @@ static inline uint8_t* mi_page_area(const mi_page_t* page) { } */ -static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { +static bool mi_page_list_is_valid(mi_page_t *page, mi_block_t *p) +{ size_t psize; - uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize); - mi_block_t* start = (mi_block_t*)page_area; - mi_block_t* end = (mi_block_t*)(page_area + psize); - while(p != NULL) { - if (p < start || p >= end) return false; + uint8_t *page_area = _mi_page_start(_mi_page_segment(page), page, &psize); + mi_block_t *start = (mi_block_t *)page_area; + mi_block_t *end = (mi_block_t *)(page_area + psize); + while (p != NULL) + { + if (p < start || p >= end) + return false; p = mi_block_next(page, p); } return true; } -static bool mi_page_is_valid_init(mi_page_t* page) { +static bool mi_page_is_valid_init(mi_page_t *page) +{ mi_assert_internal(page->xblock_size > 0); mi_assert_internal(page->used <= page->capacity); mi_assert_internal(page->capacity <= page->reserved); const size_t bsize = mi_page_block_size(page); - mi_segment_t* segment = _mi_page_segment(page); - uint8_t* start = _mi_page_start(segment,page,NULL); - mi_assert_internal(start == _mi_segment_page_start(segment,page,bsize,NULL,NULL)); + mi_segment_t *segment = _mi_page_segment(page); + uint8_t *start = _mi_page_start(segment, page, NULL); + mi_assert_internal(start == _mi_segment_page_start(segment, page, bsize, NULL, NULL)); //mi_assert_internal(start + page->capacity*page->block_size == page->top); - mi_assert_internal(mi_page_list_is_valid(page,page->free)); - mi_assert_internal(mi_page_list_is_valid(page,page->local_free)); + mi_assert_internal(mi_page_list_is_valid(page, page->free)); + mi_assert_internal(mi_page_list_is_valid(page, page->local_free)); - #if MI_DEBUG>3 // generally too expensive to check this - if (page->flags.is_zero) { - for(mi_block_t* block = page->free; block != NULL; mi_block_next(page,block)) { +#if MI_DEBUG > 3 // generally too expensive to check this + if (page->flags.is_zero) + { + for (mi_block_t *block = page->free; block != NULL; mi_block_next(page, block)) + { 
mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t))); } } - #endif +#endif - mi_block_t* tfree = mi_page_thread_free(page); + mi_block_t *tfree = mi_page_thread_free(page); mi_assert_internal(mi_page_list_is_valid(page, tfree)); //size_t tfree_count = mi_page_list_count(page, tfree); //mi_assert_internal(tfree_count <= page->thread_freed + 1); @@ -102,41 +110,49 @@ static bool mi_page_is_valid_init(mi_page_t* page) { return true; } -bool _mi_page_is_valid(mi_page_t* page) { +bool _mi_page_is_valid(mi_page_t *page) +{ mi_assert_internal(mi_page_is_valid_init(page)); - #if MI_SECURE +#if MI_SECURE mi_assert_internal(page->keys[0] != 0); - #endif - if (mi_page_heap(page)!=NULL) { - mi_segment_t* segment = _mi_page_segment(page); - mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0); - if (segment->page_kind != MI_PAGE_HUGE) { - mi_page_queue_t* pq = mi_page_queue_of(page); +#endif + if (mi_page_heap(page) != NULL) + { + mi_segment_t *segment = _mi_page_segment(page); + mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id == 0); + if (segment->page_kind != MI_PAGE_HUGE) + { + mi_page_queue_t *pq = mi_page_queue_of(page); mi_assert_internal(mi_page_queue_contains(pq, page)); - mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page)); - mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq)); + mi_assert_internal(pq->block_size == mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page)); + mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page), pq)); } } return true; } #endif -void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { +void _mi_page_use_delayed_free(mi_page_t *page, mi_delayed_t delay, bool override_never) +{ mi_thread_free_t tfreex; - mi_delayed_t old_delay; - mi_thread_free_t tfree; - do { + mi_delayed_t old_delay; + mi_thread_free_t tfree; + do + { tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS; tfreex = mi_tf_set_delayed(tfree, delay); old_delay = mi_tf_delayed(tfree); - if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) { + if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) + { mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail } - else if (delay == old_delay) { + else if (delay == old_delay) + { break; // avoid atomic operation if already equal } - else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) { + else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) + { break; // leave never-delayed flag set } } while ((old_delay == MI_DELAYED_FREEING) || @@ -151,63 +167,73 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid // Note: The exchange must be done atomically as this is used right after // moving to the full list in `mi_page_collect_ex` and we need to // ensure that there was no race where the page became unfull just before the move. 
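// (illustration) Stripped of mimalloc's encoded blocks and flag bits, the
// routine below is the classic "detach, then own" idiom: producers push with
// a CAS loop, and the consumer takes the whole chain in one atomic step. A
// minimal C11 sketch (names are ours, not mimalloc's):
/*
#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s { struct node_s *next; } node_t;

static _Atomic(node_t *) shared_head;

// producer: lock-free push (compare the multi-threaded free path)
static void push(node_t *n)
{
  node_t *old = atomic_load_explicit(&shared_head, memory_order_relaxed);
  do
  {
    n->next = old;
  } while (!atomic_compare_exchange_weak_explicit(
      &shared_head, &old, n, memory_order_release, memory_order_relaxed));
}

// consumer: detach the entire chain at once, then walk it privately
static node_t *take_all(void)
{
  return atomic_exchange_explicit(&shared_head, NULL, memory_order_acq_rel);
}
*/
// mimalloc uses a CAS loop rather than a plain exchange below because the same
// word also carries the two delayed-free flag bits, which must be preserved.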
-static void _mi_page_thread_free_collect(mi_page_t* page) +static void _mi_page_thread_free_collect(mi_page_t *page) { - mi_block_t* head; + mi_block_t *head; mi_thread_free_t tfreex; mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); - do { + do + { head = mi_tf_block(tfree); - tfreex = mi_tf_set_block(tfree,NULL); + tfreex = mi_tf_set_block(tfree, NULL); } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex)); // return if the list is empty - if (head == NULL) return; + if (head == NULL) + return; // find the tail -- also to get a proper count (without data races) uint32_t max_count = page->capacity; // cannot collect more than capacity uint32_t count = 1; - mi_block_t* tail = head; - mi_block_t* next; - while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) { + mi_block_t *tail = head; + mi_block_t *next; + while ((next = mi_block_next(page, tail)) != NULL && count <= max_count) + { count++; tail = next; } // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free) - if (count > max_count) { + if (count > max_count) + { _mi_error_message(EFAULT, "corrupted thread-free list\n"); return; // the thread-free items cannot be freed } // and append the current local free list - mi_block_set_next(page,tail, page->local_free); + mi_block_set_next(page, tail, page->local_free); page->local_free = head; // update counts now page->used -= count; } -void _mi_page_free_collect(mi_page_t* page, bool force) { - mi_assert_internal(page!=NULL); +void _mi_page_free_collect(mi_page_t *page, bool force) +{ + mi_assert_internal(page != NULL); // collect the thread free list - if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation + if (force || mi_page_thread_free(page) != NULL) + { // quick test to avoid an atomic operation _mi_page_thread_free_collect(page); } // and the local free list - if (page->local_free != NULL) { - if (mi_likely(page->free == NULL)) { + if (page->local_free != NULL) + { + if (mi_likely(page->free == NULL)) + { // usual case page->free = page->local_free; page->local_free = NULL; page->is_zero = false; } - else if (force) { + else if (force) + { // append -- only on shutdown (force) as this is a linear operation - mi_block_t* tail = page->local_free; - mi_block_t* next; - while ((next = mi_block_next(page, tail)) != NULL) { + mi_block_t *tail = page->local_free; + mi_block_t *next; + while ((next = mi_block_next(page, tail)) != NULL) + { tail = next; } mi_block_set_next(page, tail, page->free); @@ -220,50 +246,54 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { mi_assert_internal(!force || page->local_free == NULL); } - - /* ----------------------------------------------------------- Page fresh and retire ----------------------------------------------------------- */ // called from segments when reclaiming abandoned pages -void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { +void _mi_page_reclaim(mi_heap_t *heap, mi_page_t *page) +{ mi_assert_expensive(mi_page_is_valid_init(page)); mi_assert_internal(mi_page_heap(page) == heap); mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE); mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE); mi_assert_internal(!page->is_reset); // TODO: push on full queue immediately if it is full? 
- mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); + mi_page_queue_t *pq = mi_page_queue(heap, mi_page_block_size(page)); mi_page_queue_push(heap, pq, page); mi_assert_expensive(_mi_page_is_valid(page)); } // allocate a fresh page from a segment -static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) { - mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq)); - mi_assert_internal(pq==NULL||block_size == pq->block_size); - mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os); - if (page == NULL) { +static mi_page_t *mi_page_fresh_alloc(mi_heap_t *heap, mi_page_queue_t *pq, size_t block_size) +{ + mi_assert_internal(pq == NULL || mi_heap_contains_queue(heap, pq)); + mi_assert_internal(pq == NULL || block_size == pq->block_size); + mi_page_t *page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os); + if (page == NULL) + { // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue) return NULL; } // a fresh page was found, initialize it - mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE); + mi_assert_internal(pq == NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE); mi_page_init(heap, page, block_size, heap->tld); _mi_stat_increase(&heap->tld->stats.pages, 1); - if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL + if (pq != NULL) + mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL mi_assert_expensive(_mi_page_is_valid(page)); return page; } // Get a fresh page to use -static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) { +static mi_page_t *mi_page_fresh(mi_heap_t *heap, mi_page_queue_t *pq) +{ mi_assert_internal(mi_heap_contains_queue(heap, pq)); - mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size); - if (page==NULL) return NULL; - mi_assert_internal(pq->block_size==mi_page_block_size(page)); - mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page))); + mi_page_t *page = mi_page_fresh_alloc(heap, pq, pq->block_size); + if (page == NULL) + return NULL; + mi_assert_internal(pq->block_size == mi_page_block_size(page)); + mi_assert_internal(pq == mi_page_queue(heap, mi_page_block_size(page))); return page; } @@ -271,22 +301,28 @@ static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) { Do any delayed frees (put there by other threads if they deallocated in a full page) ----------------------------------------------------------- */ -void _mi_heap_delayed_free(mi_heap_t* heap) { +void _mi_heap_delayed_free(mi_heap_t *heap) +{ // take over the list (note: no atomic exchange since it is often NULL) - mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); - while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ }; + mi_block_t *block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) + { /* nothing */ + }; // and free them all - while(block != NULL) { - mi_block_t* next = mi_block_nextx(heap,block, heap->keys); + while (block != NULL) + { + mi_block_t *next = mi_block_nextx(heap, block, heap->keys); // use internal free instead of regular one to keep stats etc correct - if (!_mi_free_delayed_block(block)) { + if (!_mi_free_delayed_block(block)) + { // we might already 
start delayed freeing while another thread has not yet // reset the delayed_freeing flag; in that case delay it further by reinserting. - mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); - do { + mi_block_t *dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + do + { mi_block_set_nextx(heap, block, dfree, heap->keys); - } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); + } while (!mi_atomic_cas_ptr_weak_release(mi_block_t, &heap->thread_delayed_free, &dfree, block)); } block = next; } @@ -297,87 +333,92 @@ void _mi_heap_delayed_free(mi_heap_t* heap) { ----------------------------------------------------------- */ // Move a page from the full list back to a regular list -void _mi_page_unfull(mi_page_t* page) { +void _mi_page_unfull(mi_page_t *page) +{ mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(mi_page_is_in_full(page)); - if (!mi_page_is_in_full(page)) return; + if (!mi_page_is_in_full(page)) + return; - mi_heap_t* heap = mi_page_heap(page); - mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL]; + mi_heap_t *heap = mi_page_heap(page); + mi_page_queue_t *pqfull = &heap->pages[MI_BIN_FULL]; mi_page_set_in_full(page, false); // to get the right queue - mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_page_queue_t *pq = mi_heap_page_queue_of(heap, page); mi_page_set_in_full(page, true); mi_page_queue_enqueue_from(pq, pqfull, page); } -static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { +static void mi_page_to_full(mi_page_t *page, mi_page_queue_t *pq) +{ mi_assert_internal(pq == mi_page_queue_of(page)); mi_assert_internal(!mi_page_immediate_available(page)); mi_assert_internal(!mi_page_is_in_full(page)); - if (mi_page_is_in_full(page)) return; + if (mi_page_is_in_full(page)) + return; mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page); - _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set + _mi_page_free_collect(page, false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set } - // Abandon a page with used blocks at the end of a thread. // Note: only call if it is ensured that no references exist from // the `page->heap->thread_delayed_free` into this page. // Currently only called through `mi_heap_collect_ex` which ensures this. -void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { +void _mi_page_abandon(mi_page_t *page, mi_page_queue_t *pq) +{ mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(pq == mi_page_queue_of(page)); mi_assert_internal(mi_page_heap(page) != NULL); - mi_heap_t* pheap = mi_page_heap(page); + mi_heap_t *pheap = mi_page_heap(page); // remove from our page list - mi_segments_tld_t* segments_tld = &pheap->tld->segments; + mi_segments_tld_t *segments_tld = &pheap->tld->segments; mi_page_queue_remove(pq, page); // page is no longer associated with our heap - mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_thread_free_flag(page) == MI_NEVER_DELAYED_FREE); mi_page_set_heap(page, NULL); -#if MI_DEBUG>1 +#if MI_DEBUG > 1 // check there are no references left.. 
- for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) { + for (mi_block_t *block = (mi_block_t *)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) + { mi_assert_internal(_mi_ptr_page(block) != page); } #endif // and abandon it mi_assert_internal(mi_page_heap(page) == NULL); - _mi_segment_page_abandon(page,segments_tld); + _mi_segment_page_abandon(page, segments_tld); } - // Free a page with no more free blocks -void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { +void _mi_page_free(mi_page_t *page, mi_page_queue_t *pq, bool force) +{ mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(pq == mi_page_queue_of(page)); mi_assert_internal(mi_page_all_free(page)); - mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING); + mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING); // no more aligned blocks in here mi_page_set_has_aligned(page, false); // remove from the page list // (no need to do _mi_heap_delayed_free first as all blocks are already free) - mi_segments_tld_t* segments_tld = &mi_page_heap(page)->tld->segments; + mi_segments_tld_t *segments_tld = &mi_page_heap(page)->tld->segments; mi_page_queue_remove(pq, page); // and free it - mi_page_set_heap(page,NULL); + mi_page_set_heap(page, NULL); _mi_segment_page_free(page, force, segments_tld); } -#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX -#define MI_RETIRE_CYCLES (8) +#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX +#define MI_RETIRE_CYCLES (8) // Retire a page with no more used blocks // Important to not retire too quickly though as new @@ -385,7 +426,8 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { // Note: called from `mi_free` and benchmarks often // trigger this due to freeing everything and then // allocating again so careful when changing this. -void _mi_page_retire(mi_page_t* page) { +void _mi_page_retire(mi_page_t *page) +{ mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(mi_page_all_free(page)); @@ -398,17 +440,21 @@ void _mi_page_retire(mi_page_t* page) { // is the only page left with free blocks. It is not clear // how to check this efficiently though... // for now, we don't retire if it is the only page left of this size class. - mi_page_queue_t* pq = mi_page_queue_of(page); - if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) { - if (pq->last==page && pq->first==page) { // the only page in the queue? - mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); - page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); - mi_heap_t* heap = mi_page_heap(page); + mi_page_queue_t *pq = mi_page_queue_of(page); + if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) + { + if (pq->last == page && pq->first == page) + { // the only page in the queue? + mi_stat_counter_increase(_mi_stats_main.page_no_retire, 1); + page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? 
MI_RETIRE_CYCLES : MI_RETIRE_CYCLES / 4); + mi_heap_t *heap = mi_page_heap(page); mi_assert_internal(pq >= heap->pages); const size_t index = pq - heap->pages; mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE); - if (index < heap->page_retired_min) heap->page_retired_min = index; - if (index > heap->page_retired_max) heap->page_retired_max = index; + if (index < heap->page_retired_min) + heap->page_retired_min = index; + if (index > heap->page_retired_max) + heap->page_retired_max = index; mi_assert_internal(mi_page_all_free(page)); return; // don't free after all } @@ -419,25 +465,34 @@ void _mi_page_retire(mi_page_t* page) { // free retired pages: we don't need to look at the entire queues // since we only retire pages that are at the head position in a queue. -void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { +void _mi_heap_collect_retired(mi_heap_t *heap, bool force) +{ size_t min = MI_BIN_FULL; size_t max = 0; - for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) { - mi_page_queue_t* pq = &heap->pages[bin]; - mi_page_t* page = pq->first; - if (page != NULL && page->retire_expire != 0) { - if (mi_page_all_free(page)) { + for (size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) + { + mi_page_queue_t *pq = &heap->pages[bin]; + mi_page_t *page = pq->first; + if (page != NULL && page->retire_expire != 0) + { + if (mi_page_all_free(page)) + { page->retire_expire--; - if (force || page->retire_expire == 0) { + if (force || page->retire_expire == 0) + { _mi_page_free(pq->first, pq, force); } - else { + else + { // keep retired, update min/max - if (bin < min) min = bin; - if (bin > max) max = bin; + if (bin < min) + min = bin; + if (bin > max) + max = bin; } } - else { + else + { page->retire_expire = 0; } } @@ -446,93 +501,100 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { heap->page_retired_max = max; } - /* ----------------------------------------------------------- Initialize the initial free list in a page. In secure mode we initialize a randomized list by alternating between slices.
----------------------------------------------------------- */ -#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices -#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT) -#define MI_MIN_SLICES (2) +#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices +#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT) +#define MI_MIN_SLICES (2) -static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { +static void mi_page_free_list_extend_secure(mi_heap_t *const heap, mi_page_t *const page, const size_t bsize, const size_t extend, mi_stats_t *const stats) +{ UNUSED(stats); - #if (MI_SECURE<=2) +#if (MI_SECURE <= 2) mi_assert_internal(page->free == NULL); mi_assert_internal(page->local_free == NULL); - #endif +#endif mi_assert_internal(page->capacity + extend <= page->reserved); mi_assert_internal(bsize == mi_page_block_size(page)); - void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL); + void *const page_area = _mi_page_start(_mi_page_segment(page), page, NULL); // initialize a randomized free list // set up `slice_count` slices to alternate between size_t shift = MI_MAX_SLICE_SHIFT; - while ((extend >> shift) == 0) { + while ((extend >> shift) == 0) + { shift--; } const size_t slice_count = (size_t)1U << shift; const size_t slice_extend = extend / slice_count; mi_assert_internal(slice_extend >= 1); - mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice - size_t counts[MI_MAX_SLICES]; // available objects in the slice - for (size_t i = 0; i < slice_count; i++) { - blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend); + mi_block_t *blocks[MI_MAX_SLICES]; // current start of the slice + size_t counts[MI_MAX_SLICES]; // available objects in the slice + for (size_t i = 0; i < slice_count; i++) + { + blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i * slice_extend); counts[i] = slice_extend; } - counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?) + counts[slice_count - 1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?) 
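// (illustration) The loop that follows threads one free list through the
// slices set up above. The same idea on a toy page, as a self-contained
// sketch: xorshift stands in for _mi_random_shuffle, and NBLOCKS/NSLICES are
// our own parameters (NSLICES must be a power of two that divides NBLOCKS):
/*
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NBLOCKS 16
#define NSLICES 4

typedef struct block_s { struct block_s *next; } block_t;

static uint64_t state = 0x9e3779b97f4a7c15ULL;
static uint64_t rnd(void)
{ // xorshift64
  state ^= state << 13; state ^= state >> 7; state ^= state << 17;
  return state;
}

int main(void)
{
  static block_t page[NBLOCKS];
  block_t *cursor[NSLICES]; // next unlinked block of each slice
  size_t left[NSLICES];     // blocks remaining in each slice
  for (size_t i = 0; i < NSLICES; i++)
  {
    cursor[i] = &page[i * (NBLOCKS / NSLICES)];
    left[i] = NBLOCKS / NSLICES;
  }
  size_t cur = (size_t)(rnd() % NSLICES); // random starting slice
  left[cur]--;
  block_t *head = cursor[cur];
  for (size_t i = 1; i < NBLOCKS; i++)
  {
    size_t nxt = (size_t)(rnd() & (NSLICES - 1)); // pick a random slice
    while (left[nxt] == 0)
    { // skip exhausted slices
      nxt = (nxt + 1) % NSLICES;
    }
    left[nxt]--;
    block_t *b = cursor[cur];
    cursor[cur] = b + 1;   // bump past the block we just consumed
    b->next = cursor[nxt]; // link it to the front of the chosen slice
    cur = nxt;
  }
  cursor[cur]->next = NULL; // terminate the list
  for (block_t *b = head; b != NULL; b = b->next)
    printf("%d ", (int)(b - page));
  printf("\n");
  return 0;
}
*/
// Reading one freed block thus reveals little about where the next allocation
// will land, at the cost of worse locality than the sequential variant below.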
// and initialize the free list by randomly threading through them // set up first element const uintptr_t r = _mi_heap_random_next(heap); size_t current = r % slice_count; counts[current]--; - mi_block_t* const free_start = blocks[current]; + mi_block_t *const free_start = blocks[current]; // and iterate through the rest; use `random_shuffle` for performance - uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0 - for (size_t i = 1; i < extend; i++) { + uintptr_t rnd = _mi_random_shuffle(r | 1); // ensure not 0 + for (size_t i = 1; i < extend; i++) + { // call random_shuffle only every INTPTR_SIZE rounds - const size_t round = i%MI_INTPTR_SIZE; - if (round == 0) rnd = _mi_random_shuffle(rnd); + const size_t round = i % MI_INTPTR_SIZE; + if (round == 0) + rnd = _mi_random_shuffle(rnd); // select a random next slice index - size_t next = ((rnd >> 8*round) & (slice_count-1)); - while (counts[next]==0) { // ensure it still has space + size_t next = ((rnd >> 8 * round) & (slice_count - 1)); + while (counts[next] == 0) + { // ensure it still has space next++; - if (next==slice_count) next = 0; + if (next == slice_count) + next = 0; } // and link the current block to it counts[next]--; - mi_block_t* const block = blocks[current]; - blocks[current] = (mi_block_t*)((uint8_t*)block + bsize); // bump to the following block - mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next` + mi_block_t *const block = blocks[current]; + blocks[current] = (mi_block_t *)((uint8_t *)block + bsize); // bump to the following block + mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next` current = next; } // prepend to the free list (usually NULL) - mi_block_set_next(page, blocks[current], page->free); // end of the list + mi_block_set_next(page, blocks[current], page->free); // end of the list page->free = free_start; } -static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) +static mi_decl_noinline void mi_page_free_list_extend(mi_page_t *const page, const size_t bsize, const size_t extend, mi_stats_t *const stats) { UNUSED(stats); - #if (MI_SECURE <= 2) +#if (MI_SECURE <= 2) mi_assert_internal(page->free == NULL); mi_assert_internal(page->local_free == NULL); - #endif +#endif mi_assert_internal(page->capacity + extend <= page->reserved); mi_assert_internal(bsize == mi_page_block_size(page)); - void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL ); + void *const page_area = _mi_page_start(_mi_page_segment(page), page, NULL); - mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity); + mi_block_t *const start = mi_page_block_at(page, page_area, bsize, page->capacity); // initialize a sequential free list - mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1); - mi_block_t* block = start; - while(block <= last) { - mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize); - mi_block_set_next(page,block,next); + mi_block_t *const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1); + mi_block_t *block = start; + while (block <= last) + { + mi_block_t *next = (mi_block_t *)((uint8_t *)block + bsize); + mi_block_set_next(page, block, next); block = next; } // prepend to free list (usually `NULL`) @@ -544,11 +606,11 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co Page initialize and extend the 
capacity ----------------------------------------------------------- */ -#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well. -#if (MI_SECURE>0) -#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many +#define MI_MAX_EXTEND_SIZE (4 * 1024) // heuristic, one OS page seems to work well. +#if (MI_SECURE > 0) +#define MI_MIN_EXTEND (8 * MI_SECURE) // extend at least by this many #else -#define MI_MIN_EXTEND (1) +#define MI_MIN_EXTEND (1) #endif // Extend the capacity (up to reserved) by initializing a free list @@ -556,40 +618,47 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co // Note: we also experimented with "bump" allocation on the first // allocations but this did not speed up any benchmark (due to an // extra test in malloc? or cache effects?) -static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { +static void mi_page_extend_free(mi_heap_t *heap, mi_page_t *page, mi_tld_t *tld) +{ mi_assert_expensive(mi_page_is_valid_init(page)); - #if (MI_SECURE<=2) +#if (MI_SECURE <= 2) mi_assert(page->free == NULL); mi_assert(page->local_free == NULL); - if (page->free != NULL) return; - #endif - if (page->capacity >= page->reserved) return; + if (page->free != NULL) + return; +#endif + if (page->capacity >= page->reserved) + return; size_t page_size; - //uint8_t* page_start = + //uint8_t* page_start = _mi_page_start(_mi_page_segment(page), page, &page_size); mi_stat_counter_increase(tld->stats.pages_extended, 1); // calculate the extend count const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size); size_t extend = page->reserved - page->capacity; - size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize); - if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND; + size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE / (uint32_t)bsize); + if (max_extend < MI_MIN_EXTEND) + max_extend = MI_MIN_EXTEND; - if (extend > max_extend) { + if (extend > max_extend) + { // ensure we don't touch memory beyond the page to reduce page commit. // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%. - extend = (max_extend==0 ? 1 : max_extend); + extend = (max_extend == 0 ? 
1 : max_extend); } mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved); - mi_assert_internal(extend < (1UL<<16)); + mi_assert_internal(extend < (1UL << 16)); // and append the extend the free list - if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) { - mi_page_free_list_extend(page, bsize, extend, &tld->stats ); + if (extend < MI_MIN_SLICES || MI_SECURE == 0) + { //!mi_option_is_enabled(mi_option_secure)) { + mi_page_free_list_extend(page, bsize, extend, &tld->stats); } - else { + else + { mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats); } // enable the new free list @@ -597,16 +666,18 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) mi_stat_increase(tld->stats.page_committed, extend * bsize); // extension into zero initialized memory preserves the zero'd free list - if (!page->is_zero_init) { + if (!page->is_zero_init) + { page->is_zero = false; } mi_assert_expensive(mi_page_is_valid_init(page)); } // Initialize a fresh page -static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) { +static void mi_page_init(mi_heap_t *heap, mi_page_t *page, size_t block_size, mi_tld_t *tld) +{ mi_assert(page != NULL); - mi_segment_t* segment = _mi_page_segment(page); + mi_segment_t *segment = _mi_page_segment(page); mi_assert(segment != NULL); mi_assert_internal(block_size > 0); // set fields @@ -614,12 +685,12 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi size_t page_size; _mi_segment_page_start(segment, page, block_size, &page_size, NULL); page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); - mi_assert_internal(page_size / block_size < (1L<<16)); + mi_assert_internal(page_size / block_size < (1L << 16)); page->reserved = (uint16_t)(page_size / block_size); - #ifdef MI_ENCODE_FREELIST +#ifdef MI_ENCODE_FREELIST page->keys[0] = _mi_heap_random_next(heap); page->keys[1] = _mi_heap_random_next(heap); - #endif +#endif page->is_zero = page->is_zero_init; mi_assert_internal(page->capacity == 0); @@ -630,43 +701,44 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi mi_assert_internal(page->prev == NULL); mi_assert_internal(page->retire_expire == 0); mi_assert_internal(!mi_page_has_aligned(page)); - #if (MI_ENCODE_FREELIST) +#if (MI_ENCODE_FREELIST) mi_assert_internal(page->keys[0] != 0); mi_assert_internal(page->keys[1] != 0); - #endif +#endif mi_assert_expensive(mi_page_is_valid_init(page)); // initialize an initial free list - mi_page_extend_free(heap,page,tld); + mi_page_extend_free(heap, page, tld); mi_assert(mi_page_immediate_available(page)); } - /* ----------------------------------------------------------- Find pages with free blocks -------------------------------------------------------------*/ // Find a page with free blocks of `page->block_size`. -static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) +static mi_page_t *mi_page_queue_find_free_ex(mi_heap_t *heap, mi_page_queue_t *pq, bool first_try) { // search through the pages in "next fit" order size_t count = 0; - mi_page_t* page = pq->first; + mi_page_t *page = pq->first; while (page != NULL) { - mi_page_t* next = page->next; // remember next + mi_page_t *next = page->next; // remember next count++; // 0. collect freed blocks by us and other threads _mi_page_free_collect(page, false); // 1. 
if the page contains free blocks, we are done - if (mi_page_immediate_available(page)) { - break; // pick this one + if (mi_page_immediate_available(page)) + { + break; // pick this one } // 2. Try to extend - if (page->capacity < page->reserved) { + if (page->capacity < page->reserved) + { mi_page_extend_free(heap, page, heap->tld); mi_assert_internal(mi_page_immediate_available(page)); break; @@ -682,15 +754,18 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p mi_stat_counter_increase(heap->tld->stats.searches, count); - if (page == NULL) { + if (page == NULL) + { _mi_heap_collect_retired(heap, false); // perhaps make a page available page = mi_page_fresh(heap, pq); - if (page == NULL && first_try) { + if (page == NULL && first_try) + { // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again - page = mi_page_queue_find_free_ex(heap, pq, false); + page = mi_page_queue_find_free_ex(heap, pq, false); } } - else { + else + { mi_assert(pq->first == page); page->retire_expire = 0; } @@ -698,25 +773,27 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p return page; } - - // Find a page with free blocks of `size`. -static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { - mi_page_queue_t* pq = mi_page_queue(heap,size); - mi_page_t* page = pq->first; - if (page != NULL) { - #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness - if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) { +static inline mi_page_t *mi_find_free_page(mi_heap_t *heap, size_t size) +{ + mi_page_queue_t *pq = mi_page_queue(heap, size); + mi_page_t *page = pq->first; + if (page != NULL) + { +#if (MI_SECURE >= 3) // in secure mode, we extend half the time to increase randomness + if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) + { mi_page_extend_free(heap, page, heap->tld); mi_assert_internal(mi_page_immediate_available(page)); } - else - #endif + else +#endif { - _mi_page_free_collect(page,false); + _mi_page_free_collect(page, false); } - - if (mi_page_immediate_available(page)) { + + if (mi_page_immediate_available(page)) + { page->retire_expire = 0; return page; // fast path } @@ -724,7 +801,6 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { return mi_page_queue_find_free_ex(heap, pq, true); } - /* ----------------------------------------------------------- Users can register a deferred free function called when the `free` list is empty. Since the `local_free` @@ -732,24 +808,32 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { a certain number of allocations. 
----------------------------------------------------------- */ -static mi_deferred_free_fun* volatile deferred_free = NULL; -static _Atomic(void*) deferred_arg; // = NULL +static mi_deferred_free_fun *volatile deferred_free = NULL; +static _Atomic(void *) deferred_arg; // = NULL -void _mi_deferred_free(mi_heap_t* heap, bool force) { +void _mi_deferred_free(mi_heap_t *heap, bool force) +{ heap->tld->heartbeat++; - if (deferred_free != NULL && !heap->tld->recurse) { + if (heap->deferred_free != NULL && !heap->tld->recurse) + { heap->tld->recurse = true; - deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg)); + ((mi_local_deferred_free_fun *)heap->deferred_free)(heap, force, heap->tld->heartbeat, heap->deferred_arg); + heap->tld->recurse = false; + } + if (deferred_free != NULL && !heap->tld->recurse) + { + heap->tld->recurse = true; + deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void, &deferred_arg)); heap->tld->recurse = false; } } -void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept { +void mi_register_deferred_free(mi_deferred_free_fun *fn, void *arg) mi_attr_noexcept +{ deferred_free = fn; - mi_atomic_store_ptr_release(void,&deferred_arg, arg); + mi_atomic_store_ptr_release(void, &deferred_arg, arg); } - /* ----------------------------------------------------------- General allocation ----------------------------------------------------------- */ @@ -758,24 +842,28 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex // Because huge pages contain just one block, and the segment contains // just that page, we always treat them as abandoned and any thread // that frees the block can free the whole page and segment directly. -static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) { +static mi_page_t *mi_huge_page_alloc(mi_heap_t *heap, size_t size) +{ size_t block_size = _mi_os_good_alloc_size(size); mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE); - mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size); - if (page != NULL) { - const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding already + mi_page_t *page = mi_page_fresh_alloc(heap, NULL, block_size); + if (page != NULL) + { + const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding already mi_assert_internal(bsize >= size); mi_assert_internal(mi_page_immediate_available(page)); - mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE); - mi_assert_internal(_mi_page_segment(page)->used==1); - mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue + mi_assert_internal(_mi_page_segment(page)->page_kind == MI_PAGE_HUGE); + mi_assert_internal(_mi_page_segment(page)->used == 1); + mi_assert_internal(_mi_page_segment(page)->thread_id == 0); // abandoned, not in the huge queue mi_page_set_heap(page, NULL); - if (bsize > MI_HUGE_OBJ_SIZE_MAX) { + if (bsize > MI_HUGE_OBJ_SIZE_MAX) + { _mi_stat_increase(&heap->tld->stats.giant, bsize); _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1); } - else { + else + { _mi_stat_increase(&heap->tld->stats.huge, bsize); _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1); } @@ -783,22 +871,26 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) { return page; } - // Allocate a page // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. 
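// (illustration) The req_size correction in mi_find_page below undoes that
// possible overflow: e.g. on 64-bit, a bogus request of SIZE_MAX has
// MI_PADDING_SIZE added on the way in and wraps around to a small value;
// subtracting MI_PADDING_SIZE wraps it back to SIZE_MAX, which then trips the
// req_size > PTRDIFF_MAX check instead of silently allocating a tiny block.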
-static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept { +static mi_page_t *mi_find_page(mi_heap_t *heap, size_t size) mi_attr_noexcept +{ // huge allocation? - const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` - if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) { - if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see ) + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE))) + { + if (mi_unlikely(req_size > PTRDIFF_MAX)) + { // we don't allocate more than PTRDIFF_MAX (see ) _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); return NULL; } - else { - return mi_huge_page_alloc(heap,size); + else + { + return mi_huge_page_alloc(heap, size); } } - else { + else + { // otherwise find a page with free blocks in our size segregated queues mi_assert_internal(size >= MI_PADDING_SIZE); return mi_find_free_page(heap, size); @@ -807,15 +899,19 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept { // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed. // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. -void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept +void *_mi_malloc_generic(mi_heap_t *heap, size_t size) mi_attr_noexcept { mi_assert_internal(heap != NULL); // initialize if necessary - if (mi_unlikely(!mi_heap_is_initialized(heap))) { + if (mi_unlikely(!mi_heap_is_initialized(heap))) + { mi_thread_init(); // calls `_mi_heap_init` in turn heap = mi_get_default_heap(); - if (mi_unlikely(!mi_heap_is_initialized(heap))) { return NULL; } + if (mi_unlikely(!mi_heap_is_initialized(heap))) + { + return NULL; + } } mi_assert_internal(mi_heap_is_initialized(heap)); @@ -826,14 +922,16 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept _mi_heap_delayed_free(heap); // find (or allocate) a page of the right size - mi_page_t* page = mi_find_page(heap, size); - if (mi_unlikely(page == NULL)) { // first time out of memory, try to collect and retry the allocation once more + mi_page_t *page = mi_find_page(heap, size); + if (mi_unlikely(page == NULL)) + { // first time out of memory, try to collect and retry the allocation once more mi_heap_collect(heap, true /* force */); page = mi_find_page(heap, size); } - if (mi_unlikely(page == NULL)) { // out of memory - const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + if (mi_unlikely(page == NULL)) + { // out of memory + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size); return NULL; }
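// (illustration) Returning to the heart of this patch: _mi_deferred_free above
// now consults a per-heap hook before the process-wide one. The public
// registration API lives in mimalloc.h/heap.c (outside this excerpt), so the
// sketch below infers the callback shape from the call site and pokes the new
// heap fields directly; my_heap_deferred and example are hypothetical user
// code, not part of the patch:
/*
#include <stdbool.h>
#include <stddef.h>
#include "mimalloc.h"
#include "mimalloc-types.h" // internal header, for the mi_heap_t fields below

// shape inferred from the call:
//   ((mi_local_deferred_free_fun *)heap->deferred_free)(heap, force,
//       heap->tld->heartbeat, heap->deferred_arg);
static void my_heap_deferred(mi_heap_t *heap, bool force,
                             unsigned long long heartbeat, void *arg)
{
  (void)heap; (void)arg;
  // invoked from the allocation slow path of this heap only; a GC could,
  // for example, sweep garbage belonging to this heap here
  if (force || (heartbeat & 0xFF) == 0)
  {
    // ... release deferred blocks with mi_free ...
  }
}

static void example(mi_heap_t *heap)
{
  // direct field assignment for illustration only; the patch presumably adds
  // a registration helper so user code need not touch mi_heap_t internals
  heap->deferred_free = (void *)&my_heap_deferred;
  heap->deferred_arg = NULL;
}
*/
// Compared to the existing global hook (the deferred_free/deferred_arg statics
// above), the per-heap variant lets, say, one garbage-collected heap drive its
// own collection off mimalloc's heartbeat without imposing the callback on
// every other heap in the process. Both hooks honour the same tld->recurse
// guard, so neither callback can re-enter itself through an allocation it
// performs.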