add numa-affine allocation, and per-heap numa affinity

Daan 2025-03-03 18:08:57 -08:00
parent 30dfe97f5b
commit c1cbe71836
8 changed files with 75 additions and 63 deletions

src/os.c

@@ -694,18 +694,18 @@ static void mi_os_free_huge_os_pages(void* p, size_t size) {
   Support NUMA aware allocation
 -----------------------------------------------------------------------------*/
 
-_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count
+static _Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count
 
-size_t _mi_os_numa_node_count_get(void) {
+size_t _mi_os_numa_node_count(void) {
   size_t count = mi_atomic_load_acquire(&_mi_numa_node_count);
-  if (count <= 0) {
+  if mi_unlikely(count <= 0) {
     long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
     if (ncount > 0) {
       count = (size_t)ncount;
     }
     else {
       count = _mi_prim_numa_node_count(); // or detect dynamically
-      if (count == 0) count = 1;
+      if (count == 0) { count = 1; }
     }
     mi_atomic_store_release(&_mi_numa_node_count, count); // save it
     _mi_verbose_message("using %zd numa regions\n", count);
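The count is detected once and then cached: the first caller probes the platform (or honors mi_option_use_numa_nodes), publishes the result with a release store, and every later call pays only an acquire load. A minimal standalone sketch of that lazy-caching pattern, using C11 stdatomic in place of mimalloc's internal atomic wrappers; detect_node_count is a hypothetical stand-in for _mi_prim_numa_node_count:

#include <stdatomic.h>
#include <stddef.h>

// Cached node count; 0 means "not yet detected".
static _Atomic(size_t) node_count;

// Hypothetical stand-in for a platform probe such as _mi_prim_numa_node_count().
static size_t detect_node_count(void) { return 2; }

// Detect once, publish with a release store, reread with an acquire load.
size_t numa_node_count(void) {
  size_t count = atomic_load_explicit(&node_count, memory_order_acquire);
  if (count == 0) {
    count = detect_node_count();
    if (count == 0) { count = 1; }  // always report at least one node
    // Benign race: concurrent first callers all compute and store the same value.
    atomic_store_explicit(&node_count, count, memory_order_release);
  }
  return count;
}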
@@ -713,7 +713,8 @@ size_t _mi_os_numa_node_count_get(void) {
   return count;
 }
 
-int _mi_os_numa_node_get(void) {
+static int mi_os_numa_node_get(void) {
   size_t numa_count = _mi_os_numa_node_count();
   if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
   // never more than the node count and >= 0
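The comment above refers to clamping: a thread can report a node id at or beyond the detected count (for example when the count was capped via mi_option_use_numa_nodes), so the id is wrapped back into range before use. A small sketch of that wrap-around, with a hypothetical helper name:

#include <stddef.h>

// Wrap a raw node id into [0, numa_count) so it never indexes past
// the detected (or configured) node count; hypothetical helper.
static int clamp_numa_node(size_t raw_node, size_t numa_count) {
  if (raw_node >= numa_count) { raw_node = raw_node % numa_count; }
  return (int)raw_node;
}

For example, a raw node id of 5 with a detected count of 4 maps to node 1.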
@@ -722,6 +723,13 @@ int _mi_os_numa_node_get(void) {
   return (int)numa_node;
 }
 
+int _mi_os_numa_node(void) {
+  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
+  else return mi_os_numa_node_get();
+}
+
 /* ----------------------------------------------------------------------------
   Public API
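The added _mi_os_numa_node wrapper keeps single-node machines on a fast path: a relaxed load of the cached count answers the common case with node 0, and only systems with more than one node fall through to mi_os_numa_node_get. A sketch of how a numa-affine allocation path could consume it; numa_affine_alloc and region_for_node are hypothetical names, not mimalloc API:

#include <stddef.h>

int _mi_os_numa_node(void);  // returns 0 immediately on single-node systems

// Hypothetical node-local backing store; not part of mimalloc's API.
void* region_for_node(int numa_node, size_t size);

// Ask which node the current thread runs on, then serve the
// request from that node's region.
void* numa_affine_alloc(size_t size) {
  const int node = _mi_os_numa_node();
  return region_for_node(node, size);
}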