merge from dev3

daanx 2024-12-21 15:52:15 -08:00
commit 5de5550c63
18 changed files with 835 additions and 653 deletions

View file

@@ -279,7 +279,7 @@ mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_commit
mi_decl_export void mi_debug_show_arenas(bool show_pages, bool show_inuse, bool show_committed) mi_attr_noexcept;
// Experimental: heaps associated with specific memory arena's
typedef int mi_arena_id_t;
typedef void* mi_arena_id_t;
mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
@@ -326,7 +326,13 @@ mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min,
//mi_decl_export void mi_os_decommit(void* p, size_t size);
mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t* accessed_size, size_t* size);
mi_decl_export bool mi_arena_reload(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, mi_arena_id_t* arena_id);
mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* arena_id);
mi_decl_export bool mi_heap_reload(mi_heap_t* heap, mi_arena_id_t arena);
mi_decl_export void mi_heap_unload(mi_heap_t* heap);
// Is a pointer contained in the given arena area?
mi_decl_export bool mi_arena_contains(mi_arena_id_t arena_id, const void* p);
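(A minimal sketch of how these experimental exports could be combined, e.g. to detach an exclusive arena together with a heap living in it and re-attach both later. The call sequence and the validity of old pointers after reload are assumptions for illustration, not taken from this commit:)

#include <mimalloc.h>

// hypothetical sketch: unload an exclusive arena plus a heap inside it, then reload both
static void arena_unload_reload_sketch(void) {
  mi_arena_id_t arena_id;
  mi_reserve_os_memory_ex(64 * 1024 * 1024, true /* commit */, false /* allow_large */,
                          true /* exclusive */, &arena_id);
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
  void* p = mi_heap_malloc(heap, 128);

  mi_heap_unload(heap);                                       // detach the heap from this thread
  void* base = NULL; size_t accessed_size = 0, size = 0;
  mi_arena_unload(arena_id, &base, &accessed_size, &size);    // remember the arena memory range

  // ... later, once the memory at `base` is mapped again at the same address ...
  mi_arena_reload(base, size, &arena_id);
  mi_heap_reload(heap, arena_id);
  if (mi_arena_contains(arena_id, p)) { mi_free(p); }         // the old block lies in the reloaded arena
}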
// ------------------------------------------------------
// Convenience

View file

@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
Copyright (c) 2018-2024 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -407,19 +407,45 @@ static inline void mi_atomic_yield(void) {
// ----------------------------------------------------------------------
// Locks are only used for abandoned segment visiting in `arena.c`
// Locks
// These should be light-weight in-process only locks.
// Only used for reserving arena's and to maintain the abandoned list.
// ----------------------------------------------------------------------
#if _MSC_VER
#pragma warning(disable:26110) // unlock with holding lock
#endif
#define mi_lock(lock) for(bool _go = (mi_lock_acquire(lock),true); _go; (mi_lock_release(lock), _go=false) )
#if defined(_WIN32)
#if 1
#define mi_lock_t SRWLOCK // slim reader-writer lock
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return TryAcquireSRWLockExclusive(lock);
}
static inline void mi_lock_acquire(mi_lock_t* lock) {
AcquireSRWLockExclusive(lock);
}
static inline void mi_lock_release(mi_lock_t* lock) {
ReleaseSRWLockExclusive(lock);
}
static inline void mi_lock_init(mi_lock_t* lock) {
InitializeSRWLock(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
#else
#define mi_lock_t CRITICAL_SECTION
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return TryEnterCriticalSection(lock);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
static inline void mi_lock_acquire(mi_lock_t* lock) {
EnterCriticalSection(lock);
return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
LeaveCriticalSection(lock);
@@ -431,16 +457,22 @@ static inline void mi_lock_done(mi_lock_t* lock) {
DeleteCriticalSection(lock);
}
#endif
#elif defined(MI_USE_PTHREADS)
void _mi_error_message(int err, const char* fmt, ...);
#define mi_lock_t pthread_mutex_t
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return (pthread_mutex_trylock(lock) == 0);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
return (pthread_mutex_lock(lock) == 0);
static inline void mi_lock_acquire(mi_lock_t* lock) {
const int err = pthread_mutex_lock(lock);
if (err != 0) {
_mi_error_message(err, "internal error: lock cannot be acquired\n");
}
}
static inline void mi_lock_release(mi_lock_t* lock) {
pthread_mutex_unlock(lock);
@@ -452,18 +484,16 @@ static inline void mi_lock_done(mi_lock_t* lock) {
pthread_mutex_destroy(lock);
}
/*
#elif defined(__cplusplus)
#include <mutex>
#define mi_lock_t std::mutex
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return lock->lock_try_acquire();
return lock->try_lock();
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
static inline void mi_lock_acquire(mi_lock_t* lock) {
lock->lock();
return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
lock->unlock();
@@ -474,7 +504,6 @@ static inline void mi_lock_init(mi_lock_t* lock) {
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
*/
#else
@@ -487,12 +516,11 @@ static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
static inline void mi_lock_acquire(mi_lock_t* lock) {
for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
if (mi_lock_try_acquire(lock)) return true;
if (mi_lock_try_acquire(lock)) return;
mi_atomic_yield();
}
return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
mi_atomic_store_release(lock, (uintptr_t)0);
@@ -507,6 +535,4 @@ static inline void mi_lock_done(mi_lock_t* lock) {
#endif
#endif // MI_ATOMIC_H
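(The `mi_lock(lock) { ... }` macro defined above turns any of these implementations into a scoped critical section via a one-iteration `for` loop. A small usage sketch, with made-up names:)

static mi_lock_t demo_lock;   // initialized once with mi_lock_init(&demo_lock)
static size_t    demo_count;

static void demo_add(void) {
  mi_lock(&demo_lock) {
    demo_count++;             // protected; mi_lock_release runs when the body falls through
  }
  // note: a `return` or `break` inside the body would skip the release in this expansion
}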

View file

@@ -90,7 +90,6 @@ uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
void _mi_process_load(void);
void mi_cdecl _mi_process_done(void);
@@ -101,8 +100,10 @@ bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
bool _mi_preloading(void); // true while the C runtime is not initialized yet
void _mi_thread_done(mi_heap_t* heap);
mi_tld_t* _mi_tld(void); // current tld: `_mi_tld() == _mi_heap_get_default()->tld`
mi_tld_t* _mi_tld(void); // current tld: `_mi_tld() == _mi_heap_get_default()->tld`
mi_subproc_t* _mi_subproc(void);
mi_subproc_t* _mi_subproc_main(void);
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
size_t _mi_thread_seq_id(void) mi_attr_noexcept;
@@ -142,10 +143,12 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t m
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
void _mi_arena_init(void);
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, size_t tseq, mi_memid_t* memid);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, size_t tseq, mi_memid_t* memid);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
mi_arena_t* _mi_arena_from_id(mi_arena_id_t id);
void* _mi_arena_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
void* _mi_arena_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
void _mi_arena_free(void* p, size_t size, mi_memid_t memid);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena);
bool _mi_arena_contains(const void* p);
void _mi_arenas_collect(bool force_purge);
void _mi_arena_unsafe_destroy_all(void);
@@ -201,6 +204,7 @@ void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
void _mi_stats_merge_from(mi_stats_t* to, mi_stats_t* from);
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
@@ -418,11 +422,11 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
return (heap != &_mi_heap_empty);
}
static inline uintptr_t _mi_ptr_cookie(const void* p) {
extern mi_heap_t _mi_heap_main;
mi_assert_internal(_mi_heap_main.cookie != 0);
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
//static inline uintptr_t _mi_ptr_cookie(const void* p) {
// extern mi_heap_t _mi_heap_main;
// mi_assert_internal(_mi_heap_main.cookie != 0);
// return ((uintptr_t)p ^ _mi_heap_main.cookie);
//}
/* -----------------------------------------------------------
@@ -524,7 +528,7 @@ static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
if (heap != NULL) {
page->heap = heap;
page->heap_tag = heap->tag;
mi_atomic_store_release(&page->xthread_id, heap->thread_id);
mi_atomic_store_release(&page->xthread_id, heap->tld->thread_id);
}
else {
page->heap = NULL;

View file

@@ -243,9 +243,6 @@ typedef size_t mi_page_flags_t;
// atomically in `free.c:mi_free_block_mt`.
typedef uintptr_t mi_thread_free_t;
// Sub processes are used to keep memory separate between them (e.g. multiple interpreters in CPython)
typedef struct mi_subproc_s mi_subproc_t;
// A heap can serve only specific objects signified by its heap tag (e.g. various object types in CPython)
typedef uint8_t mi_heaptag_t;
@@ -296,10 +293,9 @@ typedef struct mi_page_s {
uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
#endif
mi_heap_t* heap; // heap this threads belong to.
mi_heap_t* heap; // the heap owning this page (or NULL for abandoned pages)
struct mi_page_s* next; // next page owned by the heap with the same `block_size`
struct mi_page_s* prev; // previous page owned by the heap with the same `block_size`
mi_subproc_t* subproc; // sub-process of this heap
mi_memid_t memid; // provenance of the page memory
} mi_page_t;
@@ -380,7 +376,7 @@ typedef struct mi_random_cxt_s {
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
#if MI_PADDING
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
@@ -397,19 +393,14 @@ typedef struct mi_padding_s {
// A heap owns a set of pages.
struct mi_heap_s {
mi_tld_t* tld;
// _Atomic(mi_block_t*) thread_delayed_free;
mi_threadid_t thread_id; // thread this heap belongs too
mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
mi_tld_t* tld; // thread-local data
mi_arena_t* exclusive_arena; // if the heap should only allocate from a specific arena (or NULL)
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
size_t page_count; // total number of pages in the `pages` queues.
size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread
mi_memid_t memid; // provenance of the heap struct itseft (meta or os)
long generic_count;
long full_page_retain; // how many full pages can be retained per queue (before abondoning them)
bool allow_page_reclaim; // `true` if this heap should not reclaim abandoned pages
bool allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint
@@ -422,7 +413,8 @@ struct mi_heap_s {
size_t guarded_sample_count; // current sample count (counting down to 0)
#endif
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
mi_page_queue_t pages[MI_BIN_COUNT]; // queue of pages for each size class (or "bin")
mi_memid_t memid; // provenance of the heap struct itself (meta or os)
};
@@ -451,18 +443,18 @@ typedef struct mi_stat_counter_s {
} mi_stat_counter_t;
typedef struct mi_stats_s {
mi_stat_count_t pages;
mi_stat_count_t reserved;
mi_stat_count_t committed;
mi_stat_count_t reset;
mi_stat_count_t purged;
mi_stat_count_t page_committed;
mi_stat_count_t pages_abandoned;
mi_stat_count_t threads;
mi_stat_count_t normal;
mi_stat_count_t huge;
mi_stat_count_t giant;
mi_stat_count_t malloc;
mi_stat_count_t pages;
mi_stat_count_t reserved;
mi_stat_count_t committed;
mi_stat_count_t reset;
mi_stat_count_t purged;
mi_stat_count_t page_committed;
mi_stat_count_t pages_abandoned;
mi_stat_count_t threads;
mi_stat_count_t normal;
mi_stat_count_t huge;
mi_stat_count_t giant;
mi_stat_count_t malloc;
mi_stat_counter_t pages_extended;
mi_stat_counter_t pages_reclaim_on_alloc;
mi_stat_counter_t pages_reclaim_on_free;
@@ -480,53 +472,89 @@ typedef struct mi_stats_s {
mi_stat_counter_t arena_count;
mi_stat_counter_t guarded_alloc_count;
#if MI_STAT>1
mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
mi_stat_count_t normal_bins[MI_BIN_COUNT];
#endif
} mi_stats_t;
// add to stat keeping track of the peak
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void __mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void __mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void __mi_stat_increase_mt(mi_stat_count_t* stat, size_t amount);
void __mi_stat_decrease_mt(mi_stat_count_t* stat, size_t amount);
// adjust stat in special cases to compensate for double counting
void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount, bool on_alloc);
void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount, bool on_free);
void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount, bool on_alloc);
void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount, bool on_free);
void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount, bool on_alloc);
void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount, bool on_free);
// counters can just be increased
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
void __mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount);
#if (MI_STAT)
#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#define mi_stat_adjust_increase(stat,amnt,b) _mi_stat_adjust_increase( &(stat), amnt, b)
#define mi_stat_adjust_decrease(stat,amnt,b) _mi_stat_adjust_decrease( &(stat), amnt, b)
#define mi_debug_stat_increase(stat,amount) __mi_stat_increase( &(stat), amount)
#define mi_debug_stat_decrease(stat,amount) __mi_stat_decrease( &(stat), amount)
#define mi_debug_stat_counter_increase(stat,amount) __mi_stat_counter_increase( &(stat), amount)
#define mi_debug_stat_increase_mt(stat,amount) __mi_stat_increase_mt( &(stat), amount)
#define mi_debug_stat_decrease_mt(stat,amount) __mi_stat_decrease_mt( &(stat), amount)
#define mi_debug_stat_counter_increase_mt(stat,amount) __mi_stat_counter_increase_mt( &(stat), amount)
#define mi_debug_stat_adjust_increase_mt(stat,amnt,b) __mi_stat_adjust_increase_mt( &(stat), amnt, b)
#define mi_debug_stat_adjust_decrease_mt(stat,amnt,b) __mi_stat_adjust_decrease_mt( &(stat), amnt, b)
#else
#define mi_stat_increase(stat,amount) ((void)0)
#define mi_stat_decrease(stat,amount) ((void)0)
#define mi_stat_counter_increase(stat,amount) ((void)0)
#define mi_stat_adjuct_increase(stat,amnt,b) ((void)0)
#define mi_stat_adjust_decrease(stat,amnt,b) ((void)0)
#define mi_debug_stat_increase(stat,amount) ((void)0)
#define mi_debug_stat_decrease(stat,amount) ((void)0)
#define mi_debug_stat_counter_increase(stat,amount) ((void)0)
#define mi_debug_stat_increase_mt(stat,amount) ((void)0)
#define mi_debug_stat_decrease_mt(stat,amount) ((void)0)
#define mi_debug_stat_counter_increase_mt(stat,amount) ((void)0)
#define mi_debug_stat_adjust_increase(stat,amnt,b) ((void)0)
#define mi_debug_stat_adjust_decrease(stat,amnt,b) ((void)0)
#endif
#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
#define mi_subproc_stat_counter_increase(subproc,stat,amount) __mi_stat_counter_increase_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_increase(subproc,stat,amount) __mi_stat_increase_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_decrease(subproc,stat,amount) __mi_stat_decrease_mt( &(subproc)->stats.stat, amount)
#define mi_subproc_stat_adjust_increase(subproc,stat,amnt,b) __mi_stat_adjust_increase_mt( &(subproc)->stats.stat, amnt, b)
#define mi_subproc_stat_adjust_decrease(subproc,stat,amnt,b) __mi_stat_adjust_decrease_mt( &(subproc)->stats.stat, amnt, b)
#define mi_os_stat_counter_increase(stat,amount) mi_subproc_stat_counter_increase(_mi_subproc(),stat,amount)
#define mi_os_stat_increase(stat,amount) mi_subproc_stat_increase(_mi_subproc(),stat,amount)
#define mi_os_stat_decrease(stat,amount) mi_subproc_stat_decrease(_mi_subproc(),stat,amount)
#define mi_heap_stat_counter_increase(heap,stat,amount) __mi_stat_counter_increase( &(heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount) __mi_stat_increase( &(heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount) __mi_stat_decrease( &(heap)->tld->stats.stat, amount)
#define mi_debug_heap_stat_counter_increase(heap,stat,amount) mi_debug_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_debug_heap_stat_increase(heap,stat,amount) mi_debug_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_debug_heap_stat_decrease(heap,stat,amount) mi_debug_stat_decrease( (heap)->tld->stats.stat, amount)
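(The naming convention above splits plain, thread-local updates from `_mt` atomic updates: heap statistics live in the owning thread's `tld` and use the plain variants, sub-process statistics are shared and use the `_mt` variants, and the OS statistics macros route to the current sub-process. Illustrative expansions, paraphrased from the definitions above:)

mi_heap_stat_increase(heap, malloc, 64);
  // -> __mi_stat_increase(&heap->tld->stats.malloc, 64)                (thread-local, non-atomic)
mi_subproc_stat_increase(_mi_subproc(), committed, 4096);
  // -> __mi_stat_increase_mt(&_mi_subproc()->stats.committed, 4096)    (shared, atomic)
mi_os_stat_increase(reserved, 4096);
  // -> mi_subproc_stat_increase(_mi_subproc(), reserved, 4096)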
// ------------------------------------------------------
// Sub processes do not reclaim or visit segments
// from other sub processes
// Sub processes use separate arena's and no heaps/pages/blocks
// are shared between sub processes.
// The subprocess structure contains essentially all static variables (except per subprocess :-))
//
// Each thread should belong to one sub-process only
// ------------------------------------------------------
struct mi_subproc_s {
_Atomic(size_t) abandoned_count[MI_BIN_COUNT]; // count of abandoned pages for this sub-process
_Atomic(size_t) abandoned_os_list_count; // count of abandoned pages in the os-list
mi_lock_t abandoned_os_lock; // lock for the abandoned os pages list (outside of arena's) (this lock protect list operations)
mi_lock_t abandoned_os_visit_lock; // ensure only one thread per subproc visits the abandoned os list
mi_page_t* abandoned_os_list; // doubly-linked list of abandoned pages outside of arena's (in OS allocated memory)
mi_page_t* abandoned_os_list_tail; // the tail-end of the list
mi_memid_t memid; // provenance of this memory block
};
#define MI_MAX_ARENAS (160) // Limited for now (and takes up .bss).. but arena's scale up exponentially (see `mi_arena_reserve`)
// 160 arenas is enough for ~2 TiB memory
typedef struct mi_subproc_s {
_Atomic(size_t) arena_count; // current count of arena's
_Atomic(mi_arena_t*) arenas[MI_MAX_ARENAS]; // arena's of this sub-process
mi_lock_t arena_reserve_lock; // lock to ensure arena's get reserved one at a time
_Atomic(int64_t) purge_expire; // expiration is set if any arenas can be purged
_Atomic(size_t) abandoned_count[MI_BIN_COUNT]; // total count of abandoned pages for this sub-process
mi_page_t* os_abandoned_pages; // list of pages that OS allocated and not in an arena (only used if `mi_option_visit_abandoned` is on)
mi_lock_t os_abandoned_pages_lock; // lock for the os abandoned pages list (this lock protects list operations)
mi_memid_t memid; // provenance of this memory block (meta or OS)
mi_stats_t stats; // sub-process statistics (tld stats are merged in on thread termination)
} mi_subproc_t;
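(This structure backs the pre-existing public sub-process API in `mimalloc.h`; a minimal usage sketch follows, with API names from the public header and an illustrative scenario:)

#include <mimalloc.h>

static void subproc_sketch(void) {
  mi_subproc_id_t sub = mi_subproc_new();   // fresh sub-process with its own arenas
  mi_subproc_add_current_thread(sub);       // this (otherwise unused) thread now allocates in `sub`
  void* p = mi_malloc(100);                 // served from the sub-process' arenas
  mi_free(p);
  mi_subproc_delete(sub);                   // safe to delete once all its threads are done
}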
// ------------------------------------------------------
// Thread Local data
@@ -535,20 +563,21 @@ struct mi_subproc_s {
// Milliseconds as in `int64_t` to avoid overflows
typedef int64_t mi_msecs_t;
// Thread local data
struct mi_tld_s {
unsigned long long heartbeat; // monotonic heartbeat count
mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
mi_subproc_t* subproc; // sub-process this thread belongs to.
size_t tseq; // thread sequence id
mi_memid_t memid; // provenance of the tld memory itself (meta or OS)
bool recurse; // true if deferred was called; used to prevent infinite recursion.
bool is_in_threadpool; // true if this thread is part of a threadpool (and can run arbitrary tasks)
mi_stats_t stats; // statistics
mi_threadid_t thread_id; // thread id of this thread
size_t thread_seq; // thread sequence id (linear count of created threads)
mi_subproc_t* subproc; // sub-process this thread belongs to.
mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
unsigned long long heartbeat; // monotonic heartbeat count
bool recurse; // true if deferred was called; used to prevent infinite recursion.
bool is_in_threadpool; // true if this thread is part of a threadpool (and can run arbitrary tasks)
mi_stats_t stats; // statistics
mi_memid_t memid; // provenance of the tld memory itself (meta or OS)
};
/* -----------------------------------------------------------
Error codes passed to `_mi_fatal_error`
All are recoverable but EFAULT is a serious error and aborts by default in secure mode.