add numa-affine allocation, and per-heap numa affinity

Daan 2025-03-03 18:08:57 -08:00
parent 30dfe97f5b
commit c1cbe71836
8 changed files with 75 additions and 63 deletions

@@ -266,7 +266,7 @@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_
 mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
-// Experimental
+// Advanced
 mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
 mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;
@@ -279,7 +279,7 @@ mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_commi
 mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;
 mi_decl_export void mi_arenas_print(void) mi_attr_noexcept;
-// Experimental: heaps associated with specific memory arena's
+// Advanced: heaps associated with specific memory arena's
 typedef void* mi_arena_id_t;
 mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
 mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
@@ -292,7 +292,7 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t a
 #endif
-// Experimental: allow sub-processes whose memory areas stay separated (and no reclamation between them)
+// Advanced: allow sub-processes whose memory areas stay separated (and no reclamation between them)
 // Used for example for separate interpreters in one process.
 typedef void* mi_subproc_id_t;
 mi_decl_export mi_subproc_id_t mi_subproc_main(void);
@@ -300,10 +300,15 @@ mi_decl_export mi_subproc_id_t mi_subproc_new(void);
 mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
 mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
-// Experimental: visit abandoned heap areas (that are not owned by a specific heap)
+// Advanced: visit abandoned heap areas (that are not owned by a specific heap)
 mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+// Experimental: set numa-affinity of a heap
+mi_decl_export void mi_heap_set_numa_affinity(mi_heap_t* heap, int numa_node);
 // Experimental: objects followed by a guard page.
 // Setting the sample rate on a specific heap can be used to test parts of the program more
 // specifically (in combination with `mi_heap_set_default`).
 // A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
 // A seed of 0 uses a random start point. Only objects within the size bound are eligable for guard pages.
 mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
@@ -324,13 +329,6 @@ mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexce
 // experimental
-//mi_decl_export void* mi_os_alloc(size_t size, bool commit, size_t* full_size);
-//mi_decl_export void* mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, void** base, size_t* full_size);
-//mi_decl_export void* mi_os_alloc_aligned_allow_large(size_t size, size_t alignment, bool commit, bool* is_committed, bool* is_pinned, void** base, size_t* full_size);
-//mi_decl_export void mi_os_free(void* p, size_t size);
-//mi_decl_export void mi_os_commit(void* p, size_t size);
-//mi_decl_export void mi_os_decommit(void* p, size_t size);
 mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t* accessed_size, size_t* size);
 mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* arena_id);
 mi_decl_export bool mi_heap_reload(mi_heap_t* heap, mi_arena_id_t arena);
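The only new public entry point in this header is mi_heap_set_numa_affinity. A minimal usage sketch follows; only the declaration and the "-1 means no preference" convention come from this commit, the per-worker setup around it is an illustrative assumption:

#include <mimalloc.h>

// Give a worker thread a heap that prefers its local NUMA node.
// mi_heap_new and mi_heap_set_default are existing mimalloc APIs;
// mi_heap_set_numa_affinity is the call added by this commit.
static mi_heap_t* worker_heap_init(int numa_node) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return NULL;
  mi_heap_set_numa_affinity(heap, numa_node);  // pass -1 for "no preference"
  mi_heap_set_default(heap);                   // route this thread's mi_malloc() to it
  return heap;
}

Allocations from such a heap would then presumably be satisfied from arena memory on the requested node when available.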

@@ -159,6 +159,8 @@ bool _mi_os_secure_guard_page_set_before(void* addr, bool is_pinned);
 bool _mi_os_secure_guard_page_reset_at(void* addr);
 bool _mi_os_secure_guard_page_reset_before(void* addr);
+int _mi_os_numa_node(void);
+size_t _mi_os_numa_node_count(void);
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
 void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
@@ -174,8 +176,8 @@ mi_arena_id_t _mi_arena_id_none(void);
 mi_arena_t* _mi_arena_from_id(mi_arena_id_t id);
 bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena);
-void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
-void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
+void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid);
+void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid);
 void _mi_arenas_free(void* p, size_t size, mi_memid_t memid);
 bool _mi_arenas_contain(const void* p);
 void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld);
@@ -1026,24 +1028,6 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
   return x;
 }
-// -------------------------------------------------------------------
-// Optimize numa node access for the common case (= one node)
-// -------------------------------------------------------------------
-int _mi_os_numa_node_get(void);
-size_t _mi_os_numa_node_count_get(void);
-extern mi_decl_hidden _Atomic(size_t) _mi_numa_node_count;
-static inline int _mi_os_numa_node(void) {
-  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
-  else return _mi_os_numa_node_get();
-}
-static inline size_t _mi_os_numa_node_count(void) {
-  const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
-  if mi_likely(count > 0) { return count; }
-  else return _mi_os_numa_node_count_get();
-}
 // ---------------------------------------------------------------------------------
 // Provide our own `_mi_memcpy` for potential performance optimizations.
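The single-node fast path removed above is matched by the plain _mi_os_numa_node/_mi_os_numa_node_count declarations added near line 159 of the same header, so the caching presumably moves out of line into the OS layer. A sketch of what that could look like, reusing the helper names from the removed block together with mimalloc's internal atomic macros; the body and its placement are assumptions:

// Assumed out-of-line home for the former inline wrappers (e.g. in the OS layer).
static _Atomic(size_t) _mi_numa_node_count;   // 0 = not yet determined

size_t _mi_os_numa_node_count(void) {
  size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
  if (count == 0) {
    count = _mi_os_numa_node_count_get();      // query the OS once
    mi_atomic_store_release(&_mi_numa_node_count, count);
  }
  return count;
}

int _mi_os_numa_node(void) {
  if (_mi_os_numa_node_count() == 1) return 0; // common case: a single node
  return _mi_os_numa_node_get();
}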

@@ -424,6 +424,7 @@ typedef struct mi_padding_s {
 struct mi_heap_s {
   mi_tld_t* tld; // thread-local data
   mi_arena_t* exclusive_arena; // if the heap should only allocate from a specific arena (or NULL)
+  int numa_node; // preferred numa node (or -1 for no preference)
   uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
   mi_random_ctx_t random; // random number context used for secure allocation
   size_t page_count; // total number of pages in the `pages` queues.
@@ -485,6 +486,7 @@ typedef int64_t mi_msecs_t;
 struct mi_tld_s {
   mi_threadid_t thread_id; // thread id of this thread
   size_t thread_seq; // thread sequence id (linear count of created threads)
+  int numa_node; // thread preferred numa node
   mi_subproc_t* subproc; // sub-process this thread belongs to.
   mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
   mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
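Taken together with the new int numa_node parameter on _mi_arenas_alloc and _mi_arenas_alloc_aligned, these two fields suggest a straightforward resolution order at the allocation call site. The precedence below is an assumption (the diff does not include the call sites); only the field names and the "-1 means no preference" meaning come from this commit:

// Hypothetical helper: choose the NUMA node to request from the arenas.
static int mi_heap_numa_node(const mi_heap_t* heap) {
  if (heap->numa_node >= 0) return heap->numa_node;            // explicit per-heap affinity
  if (heap->tld->numa_node >= 0) return heap->tld->numa_node;  // thread's preferred node
  return _mi_os_numa_node();                                   // fall back to the current node
}

The result would then be passed as the numa_node argument of _mi_arenas_alloc or _mi_arenas_alloc_aligned.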