diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index d686044b..414efa57 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -88,6 +88,7 @@ mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 mi_heap_t* _mi_heap_main_get(void);  // statically allocated main backing heap
 void _mi_thread_done(mi_heap_t* heap);
 void _mi_thread_data_collect(void);
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);

 // os.c
 void _mi_os_init(void);  // called from process init
@@ -186,6 +187,7 @@ size_t _mi_bin_size(uint8_t bin);  // for stats
 uint8_t _mi_bin(size_t size);  // for stats

 // "heap.c"
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id);
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h
index 9561335a..126ea7cd 100644
--- a/include/mimalloc/prim.h
+++ b/include/mimalloc/prim.h
@@ -222,7 +222,13 @@ extern bool _mi_process_is_initialized;  // has mi_process_init been
 static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;

 // Get a unique id for the current thread.
-#if defined(_WIN32)
+#if defined(MI_PRIM_THREAD_ID)
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+  return MI_PRIM_THREAD_ID();  // used for example by CPython for a free threaded build (see python/cpython#115488)
+}
+
+#elif defined(_WIN32)

 #ifndef WIN32_LEAN_AND_MEAN
 #define WIN32_LEAN_AND_MEAN
diff --git a/src/heap.c b/src/heap.c
index 6c56edd6..7853db31 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -128,6 +128,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   const bool force = (collect >= MI_FORCE);
   _mi_deferred_free(heap, force);

+  // python/cpython#112532: we may be called from a thread that is not the owner of the heap
+  const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
+
   // note: never reclaim on collect but leave it to threads that need storage to reclaim
   const bool force_main =
     #ifdef NDEBUG
@@ -135,7 +138,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
     #else
     collect >= MI_FORCE
     #endif
-    && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim;
+    && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

   if (force_main) {
     // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
@@ -164,7 +167,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

   // if forced, collect thread data cache on program-exit (or shared library unload)
-  if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
+  if (force && is_main_thread && mi_heap_is_backing(heap)) {
     _mi_thread_data_collect();  // collect thread data cache
   }

@@ -208,19 +211,29 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }

+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id) {
+  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
+  heap->tld = tld;
+  heap->thread_id = _mi_thread_id();
+  heap->arena_id = arena_id;
+  if (heap == tld->heap_backing) {
+    _mi_random_init(&heap->random);
+  }
+  else {
+    _mi_random_split(&tld->heap_backing->random, &heap->random);
+  }
+  heap->cookie = _mi_heap_random_next(heap) | 1;
+  heap->keys[0] = _mi_heap_random_next(heap);
+  heap->keys[1] = _mi_heap_random_next(heap);
+}
+
 mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
   if (heap == NULL) return NULL;
-  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
-  heap->tld = bheap->tld;
-  heap->thread_id = _mi_thread_id();
-  heap->arena_id = arena_id;
-  _mi_random_split(&bheap->random, &heap->random);
-  heap->cookie = _mi_heap_random_next(heap) | 1;
-  heap->keys[0] = _mi_heap_random_next(heap);
-  heap->keys[1] = _mi_heap_random_next(heap);
-  heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
+  _mi_heap_init(heap, bheap->tld, arena_id);
+  // don't reclaim abandoned pages or otherwise destroy is unsafe
+  heap->no_reclaim = true;
   // push on the thread local heaps list
   heap->next = heap->tld->heaps;
   heap->tld->heaps = heap;
diff --git a/src/init.c b/src/init.c
index 946e9e4b..aaa9137f 100644
--- a/src/init.c
+++ b/src/init.c
@@ -286,7 +286,7 @@ void _mi_thread_data_collect(void) {
 }

 // Initialize the thread local default heap, called from `mi_thread_init`
-static bool _mi_heap_init(void) {
+static bool _mi_thread_heap_init(void) {
   if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
   if (_mi_is_main_thread()) {
     // mi_assert_internal(_mi_heap_main.thread_id != 0);  // can happen on freeBSD where alloc is called before any initialization
@@ -302,26 +302,25 @@ static bool _mi_heap_init(void) {
     mi_tld_t* tld = &td->tld;
     mi_heap_t* heap = &td->heap;
-    _mi_memcpy_aligned(tld, &tld_empty, sizeof(*tld));
-    _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap));
-    heap->thread_id = _mi_thread_id();
-    _mi_random_init(&heap->random);
-    heap->cookie = _mi_heap_random_next(heap) | 1;
-    heap->keys[0] = _mi_heap_random_next(heap);
-    heap->keys[1] = _mi_heap_random_next(heap);
-    heap->tld = tld;
-    tld->heap_backing = heap;
-    tld->heaps = heap;
-    tld->segments.stats = &tld->stats;
-    tld->segments.os = &tld->os;
-    tld->os.stats = &tld->stats;
-    _mi_heap_set_default_direct(heap);
+    _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
+    _mi_heap_init(heap, tld, _mi_arena_id_none());
+    _mi_heap_set_default_direct(heap);
   }
   return false;
 }

+// initialize thread local data
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
+  _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t));
+  tld->heap_backing = bheap;
+  tld->heaps = bheap;
+  tld->segments.stats = &tld->stats;
+  tld->segments.os = &tld->os;
+  tld->os.stats = &tld->stats;
+}
+
 // Free the thread local default heap (called from `mi_thread_done`)
-static bool _mi_heap_done(mi_heap_t* heap) {
+static bool _mi_thread_heap_done(mi_heap_t* heap) {
   if (!mi_heap_is_initialized(heap)) return true;

   // reset default heap
@@ -418,7 +417,7 @@ void mi_thread_init(void) mi_attr_noexcept
   // initialize the thread local default heap
   // (this will call `_mi_heap_set_default_direct` and thus set the
   //  fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
-  if (_mi_heap_init()) return;  // returns true if already initialized
+  if (_mi_thread_heap_init()) return;  // returns true if already initialized

   _mi_stat_increase(&_mi_stats_main.threads, 1);
   mi_atomic_increment_relaxed(&thread_count);
@@ -450,7 +449,7 @@ void _mi_thread_done(mi_heap_t* heap)
   if (heap->thread_id != _mi_thread_id()) return;

   // abandon the thread local heap
-  if (_mi_heap_done(heap)) return;  // returns true if already ran
+  if (_mi_thread_heap_done(heap)) return;  // returns true if already ran
 }

 void _mi_heap_set_default_direct(mi_heap_t* heap) {
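
Note: below is a minimal sketch of how an embedder might drive the new `MI_PRIM_THREAD_ID` hook added in include/mimalloc/prim.h above. The header name `embedder_prim.h`, the `embedder_thread_id` helper, and the build invocation are hypothetical illustrations, not taken from the CPython patch (see python/cpython#115488 for the real usage).

// embedder_prim.h -- hypothetical embedder-provided header, not part of mimalloc.
// The hook only requires that MI_PRIM_THREAD_ID() expands to an expression
// yielding a stable, per-thread-unique value convertible to mi_threadid_t.
#include <stdint.h>

static inline uintptr_t embedder_thread_id(void) {
  // the address of a thread-local variable is unique per live thread;
  // any cheap, stable per-thread value would work here
  static _Thread_local char probe;
  return (uintptr_t)&probe;
}

Building mimalloc with, for example, `cc -DMI_PRIM_THREAD_ID=embedder_thread_id -include embedder_prim.h -c src/static.c` then makes `_mi_prim_thread_id()` return `embedder_thread_id()`. The define has to be visible wherever prim.h is included, since `_mi_prim_thread_id()` is a `static inline` that the rest of mimalloc calls directly.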