add initial support for visiting abandoned segments per subprocess, upstream for python/cpython#114133

daanx 2024-06-02 07:47:08 -07:00
parent f93fb900b7
commit 8f874555d5
8 changed files with 206 additions and 88 deletions

include/mimalloc/atomic.h

@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#elif !defined(_WIN32) && (defined(__EMSCRIPTEN_SHARED_MEMORY__) || !defined(__wasi__))
#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__))
#define MI_USE_PTHREADS
#include <pthread.h>
#endif
@@ -35,9 +35,9 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_atomic(name) std::atomic_##name
#define mi_memory_order(name) std::memory_order_##name
#if (__cplusplus >= 202002L) // c++20, see issue #571
#define MI_ATOMIC_VAR_INIT(x) x
#elif !defined(ATOMIC_VAR_INIT)
#define MI_ATOMIC_VAR_INIT(x) x
#else
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
#endif
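
For context: `ATOMIC_VAR_INIT` is deprecated in C17 and C++20, so the shim above expands to plain initialization whenever the standard allows it. A minimal usage sketch (the variable name is hypothetical, not from this commit):

static _Atomic(uintptr_t) example_flag = MI_ATOMIC_VAR_INIT(0);  // portable static atomic init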
@@ -337,6 +337,7 @@ typedef _Atomic(uintptr_t) mi_atomic_guard_t;
// ----------------------------------------------------------------------
// Yield
// ----------------------------------------------------------------------
#if defined(__cplusplus)
#include <thread>
static inline void mi_atomic_yield(void) {
@@ -401,59 +402,73 @@ static inline void mi_atomic_yield(void) {
// ----------------------------------------------------------------------
// Locks are only used for abandoned segment visiting
// Locks are only used for abandoned segment visiting in `arena.c`
// ----------------------------------------------------------------------
#if defined(_WIN32)
#define mi_lock_t CRITICAL_SECTION
static inline bool _mi_prim_lock(mi_lock_t* lock) {
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return TryEnterCriticalSection(lock);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
EnterCriticalSection(lock);
return true;
}
static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
return TryEnterCriticalSection(lock);
}
static inline void _mi_prim_unlock(mi_lock_t* lock) {
static inline void mi_lock_release(mi_lock_t* lock) {
LeaveCriticalSection(lock);
}
static inline void mi_lock_init(mi_lock_t* lock) {
InitializeCriticalSection(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
DeleteCriticalSection(lock);
}
#elif defined(MI_USE_PTHREADS)
#define mi_lock_t pthread_mutex_t
static inline bool _mi_prim_lock(mi_lock_t* lock) {
return (pthread_mutex_lock(lock) == 0);
}
static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return (pthread_mutex_trylock(lock) == 0);
}
static inline void _mi_prim_unlock(mi_lock_t* lock) {
static inline bool mi_lock_acquire(mi_lock_t* lock) {
return (pthread_mutex_lock(lock) == 0);
}
static inline void mi_lock_release(mi_lock_t* lock) {
pthread_mutex_unlock(lock);
}
static inline void mi_lock_init(mi_lock_t* lock) {
(void)(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
#elif defined(__cplusplus)
#include <mutex>
#define mi_lock_t std::mutex
static inline bool _mi_prim_lock(mi_lock_t* lock) {
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return lock->try_lock();
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
lock->lock();
return true;
}
static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
return lock->try_lock();
}
static inline void _mi_prim_unlock(mi_lock_t* lock) {
static inline void mi_lock_release(mi_lock_t* lock) {
lock->unlock();
}
static inline void mi_lock_init(mi_lock_t* lock) {
(void)(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
#else
@@ -462,22 +477,26 @@ static inline void _mi_prim_unlock(mi_lock_t* lock) {
#define mi_lock_t _Atomic(uintptr_t)
static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
}
static inline bool _mi_prim_lock(mi_lock_t* lock) {
static inline bool mi_lock_acquire(mi_lock_t* lock) {
for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
if (_mi_prim_try_lock(lock)) return true;
if (mi_lock_try_acquire(lock)) return true;
mi_atomic_yield();
}
return true;
}
static inline void _mi_prim_unlock(mi_lock_t* lock) {
static inline void mi_lock_release(mi_lock_t* lock) {
mi_atomic_store_release(lock, (uintptr_t)0);
}
static inline void mi_lock_init(mi_lock_t* lock) {
mi_lock_release(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
#endif
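
Taken together, these entry points form one small portable lock API: `mi_lock_init`/`mi_lock_done`, `mi_lock_acquire`/`mi_lock_try_acquire`, and `mi_lock_release`. A hedged sketch of the intended call pattern follows; the guarded state and the function names in it are illustrative only, not part of this commit:

// Sketch only: protect some shared state with the mi_lock_* primitives.
static mi_lock_t example_lock;                    // hypothetical lock instance

static void example_init(void) {
  mi_lock_init(&example_lock);                    // InitializeCriticalSection / no-op / store 0
}

static void example_visit(void) {
  if (!mi_lock_acquire(&example_lock)) return;    // pthread_mutex_lock can fail
  // ... safely walk the shared state here ...
  mi_lock_release(&example_lock);
}

static void example_done(void) {
  mi_lock_done(&example_lock);                    // DeleteCriticalSection / no-op
}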

include/mimalloc/internal.h

@@ -79,11 +79,12 @@ extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
bool _mi_preloading(void); // true while the C runtime is not initialized yet
void _mi_thread_done(mi_heap_t* heap);
void _mi_thread_data_collect(void);
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
// os.c
void _mi_os_init(void); // called from process init
@@ -136,7 +137,7 @@ typedef struct mi_arena_field_cursor_s { // abstract struct
mi_subproc_t* subproc;
} mi_arena_field_cursor_t;
void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, mi_arena_field_cursor_t* current);
mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous);
mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous, bool visit_all);
// "segment-map.c"
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
@@ -158,6 +159,7 @@ void _mi_segments_collect(bool force, mi_segments_tld_t* tld);
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(void);
bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
@@ -189,6 +191,8 @@ void _mi_heap_set_default_direct(mi_heap_t* heap);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
void _mi_heap_unsafe_destroy_all(void);
mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
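
Read together, the cursor declarations above suggest the iteration shape for visiting the abandoned segments of one subprocess. The sketch below is an assumption-laden illustration, not the commit's implementation: the negative heap-tag convention and the need to re-mark visited segments are inferred, and `heap`, `subproc`, `visitor`, and `arg` are assumed to come from the caller:

// Sketch: visit all blocks in the abandoned segments of one subprocess.
static bool visit_abandoned_sketch(mi_heap_t* heap, mi_subproc_t* subproc,
                                   mi_block_visit_fun* visitor, void* arg) {
  mi_arena_field_cursor_t cursor;
  _mi_arena_field_cursor_init(heap, subproc, &cursor);
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&cursor, true /* visit_all */)) != NULL) {
    bool ok = _mi_segment_visit_blocks(segment, -1 /* assumed: any heap tag */,
                                       true /* visit_blocks */, visitor, arg);
    // real code must hand the segment back (re-mark it abandoned), not shown in this excerpt
    if (!ok) return false;
  }
  return true;
}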