fix recursive tls access on macOS <= 14

commit dd1b37c9f8, parent f605cb73e5
mirror of https://github.com/microsoft/mimalloc.git, synced 2025-05-06 23:39:31 +03:00

6 changed files with 41 additions and 35 deletions
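Background for this change (not part of the commit itself): on macOS 14 and earlier, `__thread` variables are initialized lazily on first access, so reading or writing one from a pthread destructor can re-initialize the thread-local storage that is currently being torn down and recurse back into TLS setup; the new comments in src/init.c describe exactly this for `thread_tld`. A minimal stand-alone C sketch of the hazard, with all names hypothetical:

    // tls_hazard.c -- illustrative sketch only, not code from this commit
    #include <pthread.h>
    #include <stdio.h>

    static __thread int tls_value;    // lazily initialized TLS on older macOS
    static pthread_key_t exit_key;

    static void on_thread_exit(void* arg) {
      (void)arg;
      tls_value = 0;   // touching `__thread` state during pthread shutdown may
                       // re-initialize the TLS area being destroyed (the bug class fixed here)
      printf("destructor ran\n");
    }

    static void* worker(void* arg) {
      (void)arg;
      pthread_setspecific(exit_key, (void*)1);  // arrange for the destructor to run
      tls_value = 42;
      return NULL;
    }

    int main(void) {
      pthread_key_create(&exit_key, &on_thread_exit);
      pthread_t t;
      pthread_create(&t, NULL, &worker, NULL);
      pthread_join(t, NULL);
      return 0;
    }

The commit therefore threads a `mi_tld_t*` through the call chains that can run during thread exit, instead of letting each callee re-read thread local storage.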
include/mimalloc/internal.h

@@ -101,7 +101,6 @@ size_t _mi_current_thread_count(void);
 bool  _mi_preloading(void);  // true while the C runtime is not initialized yet
 void  _mi_thread_done(mi_heap_t* heap);

-mi_tld_t* _mi_tld(void);  // current tld: `_mi_tld() == _mi_heap_get_default()->tld`
 mi_subproc_t* _mi_subproc(void);
 mi_subproc_t* _mi_subproc_main(void);
 mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
@@ -148,8 +147,8 @@ void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit,
 void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
 void  _mi_arenas_free(void* p, size_t size, mi_memid_t memid);
 bool  _mi_arenas_contain(const void* p);
-void  _mi_arenas_collect(bool force_purge);
-void  _mi_arenas_unsafe_destroy_all(void);
+void  _mi_arenas_collect(bool force_purge, mi_tld_t* tld);
+void  _mi_arenas_unsafe_destroy_all(mi_tld_t* tld);

 mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
 void  _mi_arenas_page_free(mi_page_t* page);
src/arena.c (15 changes)
@@ -923,7 +923,7 @@ void _mi_arenas_page_unabandon(mi_page_t* page) {
   Arena free
 ----------------------------------------------------------- */
 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices);
-static void mi_arenas_try_purge(bool force, bool visit_all);
+static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld);

 void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
   if (p==NULL) return;
@@ -979,12 +979,12 @@ void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
   }

   // try to purge expired decommits
-  mi_arenas_try_purge(false, false);
+  // mi_arenas_try_purge(false, false, NULL);
 }

 // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
-void _mi_arenas_collect(bool force_purge) {
-  mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
+void _mi_arenas_collect(bool force_purge, mi_tld_t* tld) {
+  mi_arenas_try_purge(force_purge, force_purge /* visit all? */, tld);
 }

@@ -1038,9 +1038,9 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {

 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
-void _mi_arenas_unsafe_destroy_all(void) {
+void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) {
   mi_arenas_unsafe_destroy(_mi_subproc());
-  _mi_arenas_collect(true /* force purge */);  // purge non-owned arenas
+  _mi_arenas_collect(true /* force purge */, tld);  // purge non-owned arenas
 }

@@ -1551,13 +1551,12 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
 }


-static void mi_arenas_try_purge(bool force, bool visit_all)
+static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld)
 {
   const long delay = mi_arena_purge_delay();
   if (_mi_preloading() || delay <= 0) return;  // nothing will be scheduled

   // check if any arena needs purging?
-  mi_tld_t* tld = _mi_tld();
   mi_subproc_t* subproc = tld->subproc;
   const mi_msecs_t now = _mi_clock_now();
   mi_msecs_t arenas_expire = mi_atomic_load_acquire(&subproc->purge_expire);
src/heap.c

@@ -120,7 +120,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);

   // collect arenas (this is program wide so don't force purges on abandonment of threads)
-  _mi_arenas_collect(collect == MI_FORCE /* force purge? */);
+  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, heap->tld);
 }

 void _mi_heap_collect_abandon(mi_heap_t* heap) {
@@ -503,8 +503,8 @@ bool mi_heap_reload(mi_heap_t* heap, mi_arena_id_t arena_id) {

   mi_assert_internal(heap->page_count==0);

-  // re-associate from the current thread-local and static state
-  heap->tld = _mi_tld();
+  // re-associate with the current thread-local and static state
+  heap->tld = mi_heap_get_default()->tld;

   // reinit direct pages (as we may be in a different process)
   mi_assert_internal(heap->page_count == 0);
src/init.c (24 changes)
@@ -309,17 +309,21 @@ static mi_tld_t* mi_tld_alloc(void) {

 #define MI_TLD_INVALID  ((mi_tld_t*)1)

-mi_decl_noinline static void mi_tld_free(void) {
-  mi_tld_t* tld = _mi_tld();
+mi_decl_noinline static void mi_tld_free(mi_tld_t* tld) {
   if (tld != NULL && tld != MI_TLD_INVALID) {
     _mi_stats_done(&tld->stats);
     _mi_meta_free(tld, sizeof(mi_tld_t), tld->memid);
   }
-  tld = MI_TLD_INVALID;
+  #if 0
+  // do not read/write to `thread_tld` on older macOS <= 14 as that will re-initialize the thread local storage
+  // (since we are calling this during pthread shutdown)
+  // (and this could happen on other systems as well, so let's never do it)
+  thread_tld = MI_TLD_INVALID;
+  #endif
   mi_atomic_decrement_relaxed(&thread_count);
 }

-mi_decl_noinline mi_tld_t* _mi_tld(void) {
+static mi_tld_t* mi_tld(void) {
   mi_tld_t* tld = thread_tld;
   if (tld == MI_TLD_INVALID) {
     _mi_error_message(EFAULT, "internal error: tld is accessed after the thread terminated\n");
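The hunk above captures the whole fix pattern: `mi_tld_free` now receives the tld from its caller instead of re-reading `thread_tld`, and even the tombstone write to `thread_tld` is compiled out. A hedged sketch of that caller-passes-the-pointer pattern, with simplified types that are not mimalloc's actual API:

    // tld_pattern.c -- illustrative only
    #include <stddef.h>

    typedef struct tld_s { long thread_id; } tld_t;
    static __thread tld_t* thread_tld;   // unsafe to touch during pthread shutdown

    // before (sketch): the callee re-read TLS itself
    //   static void tld_free(void) { tld_t* tld = thread_tld; ... }

    // after (sketch): the caller captures the pointer once, while it is still
    // safe, and no TLS access happens on the shutdown path
    static void tld_free(tld_t* tld) {
      if (tld != NULL) {
        // release tld resources here
      }
    }

    static void thread_done(tld_t* tld_from_heap) {
      tld_free(tld_from_heap);   // fine even inside a thread-exit destructor
    }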
@@ -337,11 +341,11 @@ mi_subproc_t* _mi_subproc(void) {
   // on such systems we can check for this with the _mi_prim_get_default_heap as those are protected (by being
   // stored in a TLS slot for example)
   mi_heap_t* heap = mi_prim_get_default_heap();
-  if (heap == NULL || heap == &_mi_heap_empty) {
+  if (heap == NULL) {
     return _mi_subproc_main();
   }
   else {
-    return thread_tld->subproc;  // don't call `_mi_tld()`
+    return heap->tld->subproc;   // avoid using thread local storage (`thread_tld`)
   }
 }

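Why reading the default heap is safe where reading `thread_tld` is not: as the comment in the hunk notes, on the affected systems the default heap pointer lives in a protected place such as an explicit TLS slot, and a slot read does not go through the lazy `__thread` initialization path. A hedged sketch of slot-based access (the key and helper names are made up):

    // tls_slot.c -- illustrative only
    #include <pthread.h>

    static pthread_key_t heap_key;   // created once at process startup

    static void* prim_get_default_heap(void) {
      // pthread_getspecific is a plain per-thread table lookup: it returns
      // NULL for an unset or already-destroyed slot rather than
      // re-initializing any thread-local state
      return pthread_getspecific(heap_key);
    }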
@@ -395,7 +399,7 @@ void mi_subproc_delete(mi_subproc_id_t subproc_id) {
 }

 void mi_subproc_add_current_thread(mi_subproc_id_t subproc_id) {
-  mi_tld_t* tld = _mi_tld();
+  mi_tld_t* tld = mi_tld();
   if (tld == NULL) return;
   mi_assert(tld->subproc == &subproc_main);
   if (tld->subproc != &subproc_main) return;
@@ -553,10 +557,12 @@ void _mi_thread_done(mi_heap_t* heap)
   if (heap->tld->thread_id != _mi_prim_thread_id()) return;

   // abandon the thread local heap
+  // note: we store the tld as we should avoid reading `thread_tld` at this point (to avoid reinitializing the thread local storage)
+  mi_tld_t* tld = heap->tld;
   _mi_thread_heap_done(heap);  // returns true if already ran

   // free thread local data
-  mi_tld_free();
+  mi_tld_free(tld);
 }

 void _mi_heap_set_default_direct(mi_heap_t* heap) {
@@ -713,7 +719,7 @@ void mi_cdecl _mi_process_done(void) {
   if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
     mi_collect(true /* force */);
     _mi_heap_unsafe_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
-    _mi_arenas_unsafe_destroy_all();
+    _mi_arenas_unsafe_destroy_all(&tld_main);
   }

   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
src/page.c

@@ -356,8 +356,10 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) {
   mi_page_queue_remove(pq, page);

   // and free it
+  mi_heap_t* heap = page->heap;
   mi_page_set_heap(page,NULL);
   _mi_arenas_page_free(page);
+  _mi_arenas_collect(false, heap->tld);  // allow purging
 }

 #define MI_MAX_RETIRE_SIZE  MI_LARGE_OBJ_SIZE_MAX  // should be less than size for MI_BIN_HUGE
src/stats.c

@@ -408,7 +408,7 @@ static mi_msecs_t mi_process_start; // = 0

 // return thread local stats
 static mi_stats_t* mi_get_tld_stats(void) {
-  return &_mi_tld()->stats;
+  return &mi_heap_get_default()->tld->stats;
 }

 void mi_stats_reset(void) mi_attr_noexcept {