mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-07 03:48:42 +03:00)
add numa-affine allocation, and per-heap numa affinity
parent 30dfe97f5b
commit c1cbe71836

8 changed files with 75 additions and 63 deletions
@@ -64,11 +64,11 @@ static void* mi_meta_block_start( mi_meta_page_t* mpage, size_t block_idx ) {
 // allocate a fresh meta page and add it to the global list.
 static mi_meta_page_t* mi_meta_page_zalloc(void) {
   // allocate a fresh arena slice
-  // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again..
+  // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again.. (same with _mi_os_numa_node()...)
   mi_memid_t memid;
   uint8_t* base = (uint8_t*)_mi_arenas_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0,
                                                      true /* commit*/, (MI_SECURE==0) /* allow large? */,
-                                                     NULL /* req arena */, 0 /* thread_seq */, &memid);
+                                                     NULL /* req arena */, 0 /* thread_seq */, -1 /* numa node */, &memid);
   if (base == NULL) return NULL;
   mi_assert_internal(_mi_is_aligned(base,MI_META_PAGE_ALIGN));
   if (!memid.initially_zero) {

src/arena.c (53 changes)
@@ -335,12 +335,13 @@ static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_
   Arena iteration
 ----------------------------------------------------------- */

-static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena, int numa_node, bool allow_pinned) {
+static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena, bool match_numa, int numa_node, bool allow_pinned) {
   if (!allow_pinned && arena->memid.is_pinned) return false;
   if (!mi_arena_id_is_suitable(arena, req_arena)) return false;
   if (req_arena == NULL) { // if not specific, check numa affinity
     const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
-    if (!numa_suitable) return false;
+    if (match_numa) { if (!numa_suitable) return false; }
+    else { if (numa_suitable) return false; }
   }
   return true;
 }
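The numa check above only runs when no specific arena was requested (req_arena == NULL). The inverted use of numa_suitable when match_numa is false is what lets a second pass visit exactly the arenas a first, NUMA-matching pass skipped. A standalone sketch of the same predicate, with a hypothetical helper name rather than the mimalloc internals:

#include <stdbool.h>

// Pass 1 (match_numa = true):  keep arenas on the preferred node, or whose node is unknown.
// Pass 2 (match_numa = false): keep exactly the arenas that pass 1 rejected.
static bool numa_filter(int arena_numa_node, int preferred_node, bool match_numa) {
  const bool numa_suitable =
    (preferred_node < 0 || arena_numa_node < 0 || arena_numa_node == preferred_node);
  return (match_numa ? numa_suitable : !numa_suitable);
}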
@@ -375,9 +376,9 @@ static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena
   } \
   }

-#define mi_forall_suitable_arenas(subproc, req_arena, tseq, allow_large, name_arena) \
+#define mi_forall_suitable_arenas(subproc, req_arena, tseq, match_numa, numa_node, allow_large, name_arena) \
   mi_forall_arenas(subproc, req_arena,tseq,name_arena) { \
-    if (mi_arena_is_suitable(name_arena, req_arena, -1 /* todo: numa node */, allow_large)) { \
+    if (mi_arena_is_suitable(name_arena, req_arena, match_numa, numa_node, allow_large)) { \

 #define mi_forall_suitable_arenas_end() \
   }} \
@@ -390,19 +391,28 @@ static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena
 // allocate slices from the arenas
 static mi_decl_noinline void* mi_arenas_try_find_free(
   mi_subproc_t* subproc, size_t slice_count, size_t alignment,
-  bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
+  bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
 {
   mi_assert_internal(slice_count <= mi_slice_count_of_size(MI_ARENA_MAX_OBJ_SIZE));
   mi_assert(alignment <= MI_ARENA_SLICE_ALIGN);
   if (alignment > MI_ARENA_SLICE_ALIGN) return NULL;

-  // search arena's
-  mi_forall_suitable_arenas(subproc, req_arena, tseq, allow_large, arena)
+  // search arena's
+  mi_forall_suitable_arenas(subproc, req_arena, tseq, true /* only numa matching */, numa_node, allow_large, arena)
   {
     void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid);
     if (p != NULL) return p;
   }
   mi_forall_suitable_arenas_end();
+  if (numa_node < 0) return NULL;
+
+  // search again but now regardless of preferred numa affinity
+  mi_forall_suitable_arenas(subproc, req_arena, tseq, false /* numa non-matching now */, numa_node, allow_large, arena)
+  {
+    void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid);
+    if (p != NULL) return p;
+  }
+  mi_forall_suitable_arenas_end();
   return NULL;
 }

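mi_arenas_try_find_free now searches the arenas in two passes: first only those that match the preferred NUMA node, and, only when that fails and a node was actually requested (numa_node >= 0), the remaining arenas. A minimal standalone sketch of this two-pass pattern, using hypothetical types and a caller-supplied allocation callback instead of mimalloc's internal macros:

#include <stddef.h>
#include <stdbool.h>

typedef struct arena_s { int numa_node; /* -1 if unknown */ } arena_t;
typedef void* (*try_alloc_fun)(arena_t* arena, size_t size);

// Pass 1 visits only NUMA-suitable arenas; pass 2 visits the rest. The second
// pass is skipped when no node was requested, since then every arena was
// already suitable in pass 1.
static void* try_find_free(arena_t** arenas, size_t count, size_t size,
                           int numa_node, try_alloc_fun try_alloc) {
  for (int pass = 0; pass < 2; pass++) {
    const bool match_numa = (pass == 0);
    for (size_t i = 0; i < count; i++) {
      const bool suitable = (numa_node < 0 || arenas[i]->numa_node < 0 ||
                             arenas[i]->numa_node == numa_node);
      if (match_numa ? !suitable : suitable) continue;
      void* p = try_alloc(arenas[i], size);
      if (p != NULL) return p;
    }
    if (numa_node < 0) break;  // no preferred node: a second pass has nothing new to visit
  }
  return NULL;
}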
@@ -411,14 +421,14 @@ static mi_decl_noinline void* mi_arenas_try_alloc(
   mi_subproc_t* subproc,
   size_t slice_count, size_t alignment,
   bool commit, bool allow_large,
-  mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
+  mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
 {
   mi_assert(slice_count <= MI_ARENA_MAX_OBJ_SLICES);
   mi_assert(alignment <= MI_ARENA_SLICE_ALIGN);
   void* p;

   // try to find free slices in the arena's
-  p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, memid);
+  p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid);
   if (p != NULL) return p;

   // did we need a specific arena?
@@ -441,7 +451,7 @@ static mi_decl_noinline void* mi_arenas_try_alloc(
   }
   // try once more to allocate in the new arena
   mi_assert_internal(req_arena == NULL);
-  p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, memid);
+  p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid);
   if (p != NULL) return p;

   return NULL;
@@ -472,21 +482,18 @@ static void* mi_arena_os_alloc_aligned(
 void* _mi_arenas_alloc_aligned( mi_subproc_t* subproc,
   size_t size, size_t alignment, size_t align_offset,
   bool commit, bool allow_large,
-  mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
+  mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
 {
   mi_assert_internal(memid != NULL);
   mi_assert_internal(size > 0);

-  // *memid = _mi_memid_none();
-  // const int numa_node = _mi_os_numa_node(&tld->os); // current numa node
-
   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) &&  // is arena allocation allowed?
       size >= MI_ARENA_MIN_OBJ_SIZE && size <= MI_ARENA_MAX_OBJ_SIZE &&  // and not too small/large
       alignment <= MI_ARENA_SLICE_ALIGN && align_offset == 0)  // and good alignment
   {
     const size_t slice_count = mi_slice_count_of_size(size);
-    void* p = mi_arenas_try_alloc(subproc,slice_count, alignment, commit, allow_large, req_arena, tseq, memid);
+    void* p = mi_arenas_try_alloc(subproc,slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid);
     if (p != NULL) return p;
   }

@@ -495,9 +502,9 @@ void* _mi_arenas_alloc_aligned( mi_subproc_t* subproc,
   return p;
 }

-void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid)
+void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
 {
-  return _mi_arenas_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, memid);
+  return _mi_arenas_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, numa_node, memid);
 }

@@ -547,7 +554,9 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_

   // search arena's
   const bool allow_large = true;
-  mi_forall_suitable_arenas(subproc, req_arena, tseq, allow_large, arena)
+  const int any_numa = -1;
+  const bool match_numa = true;
+  mi_forall_suitable_arenas(subproc, req_arena, tseq, match_numa, any_numa, allow_large, arena)
   {
     size_t slice_index;
     mi_bitmap_t* const bitmap = arena->pages_abandoned[bin];
@@ -582,7 +591,7 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_

 // Allocate a fresh page
 static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment,
-                                             mi_arena_t* req_arena, size_t tseq, bool commit)
+                                             mi_arena_t* req_arena, size_t tseq, int numa_node, bool commit)
 {
   const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
@@ -596,7 +605,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
       !os_align &&  // not large alignment
       slice_count <= MI_ARENA_MAX_OBJ_SLICES)  // and not too large
   {
-    page = (mi_page_t*)mi_arenas_try_alloc(subproc, slice_count, page_alignment, commit, allow_large, req_arena, tseq, &memid);
+    page = (mi_page_t*)mi_arenas_try_alloc(subproc, slice_count, page_alignment, commit, allow_large, req_arena, tseq, numa_node, &memid);
     if (page != NULL) {
       mi_assert_internal(mi_bitmap_is_clearN(memid.mem.arena.arena->pages, memid.mem.arena.slice_index, memid.mem.arena.slice_count));
       mi_bitmap_set(memid.mem.arena.arena->pages, memid.mem.arena.slice_index);
@@ -727,7 +736,7 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
   const long commit_on_demand = mi_option_get(mi_option_page_commit_on_demand);
   const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) ||  // always commit small pages
                        (commit_on_demand == 2 && _mi_os_has_overcommit()) || (commit_on_demand == 0));
-  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq, commit);
+  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq, heap->numa_node, commit);
   if (page != NULL) {
     mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
     _mi_page_init(heap, page);
@@ -749,7 +758,7 @@ static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_s
   const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, _mi_os_secure_guard_page_size()) + _mi_os_secure_guard_page_size());
   #endif

-  mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq, true /* commit singletons always */);
+  mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq, heap->numa_node, true /* commit singletons always */);
   if (page == NULL) return NULL;

   mi_assert(page->reserved == 1);

src/heap.c (12 changes)
@@ -182,12 +182,13 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   mi_memid_t memid = heap->memid;
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->memid = memid;
-  heap->tld = tld; // avoid reading the thread-local tld during initialization
+  heap->tld = tld; // avoid reading the thread-local tld during initialization
+  heap->tag = heap_tag;
+  heap->numa_node = tld->numa_node;
   heap->exclusive_arena = _mi_arena_from_id(arena_id);
   heap->allow_page_reclaim = (!allow_destroy && mi_option_get(mi_option_page_reclaim_on_free) >= 0);
   heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0);
   heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
-  heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
     // this is checked in `free.c:mi_free_try_collect_mt`
@@ -227,7 +228,7 @@ mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena
   else {
     // heaps associated wita a specific arena are allocated in that arena
     // note: takes up at least one slice which is quite wasteful...
-    heap = (mi_heap_t*)_mi_arenas_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, &memid);
+    heap = (mi_heap_t*)_mi_arenas_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, tld->numa_node, &memid);
   }
   if (heap==NULL) {
     _mi_error_message(ENOMEM, "unable to allocate heap meta-data\n");
@@ -261,6 +262,11 @@ uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
   return _mi_random_next(&heap->random);
 }

+void mi_heap_set_numa_affinity(mi_heap_t* heap, int numa_node) {
+  if (heap == NULL) return;
+  heap->numa_node = (numa_node < 0 ? -1 : numa_node % _mi_os_numa_node_count());
+}
+
 // zero out the page queues
 static void mi_heap_reset_pages(mi_heap_t* heap) {
   mi_assert_internal(heap != NULL);
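The new mi_heap_set_numa_affinity lets a heap prefer a NUMA node other than the one its creating thread started on; the node is reduced modulo the detected node count, and a negative value clears the preference. A minimal usage sketch, assuming the function is declared in mimalloc.h alongside the existing heap API:

#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  mi_heap_set_numa_affinity(heap, 1);         // prefer NUMA node 1; pass -1 to clear the preference
  void* p = mi_heap_malloc(heap, 64 * 1024);  // fresh pages for this heap are allocated NUMA-affine when possible
  mi_free(p);
  mi_heap_delete(heap);
  return 0;
}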

src/init.c
@@ -104,6 +104,7 @@ static mi_decl_cache_align mi_subproc_t subproc_main
 static mi_decl_cache_align mi_tld_t tld_empty = {
   0,              // thread_id
   0,              // thread_seq
+  0,              // default numa node
   &subproc_main,  // subproc
   NULL,           // heap_backing
   NULL,           // heaps list
@@ -117,6 +118,7 @@ static mi_decl_cache_align mi_tld_t tld_empty = {
 mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   &tld_empty,     // tld
   NULL,           // exclusive_arena
+  0,              // preferred numa node
   0,              // cookie
   //{ 0, 0 },     // keys
   { {0}, {0}, 0, true },  // random
@@ -141,6 +143,7 @@ extern mi_decl_hidden mi_decl_cache_align mi_heap_t heap_main;
 static mi_decl_cache_align mi_tld_t tld_main = {
   0,              // thread_id
   0,              // thread_seq
+  0,              // numa node
   &subproc_main,  // subproc
   &heap_main,     // heap_backing
   &heap_main,     // heaps list
@@ -154,6 +157,7 @@ static mi_decl_cache_align mi_tld_t tld_main = {
 mi_decl_cache_align mi_heap_t heap_main = {
   &tld_main,      // thread local data
   NULL,           // exclusive arena
+  0,              // preferred numa node
   0,              // initial cookie
   //{ 0, 0 },     // the key of the main heap can be fixed (unlike page keys that need to be secure!)
   { {0x846ca68b}, {0}, 0, true },  // random
@@ -306,6 +310,7 @@ static mi_tld_t* mi_tld_alloc(void) {
     tld->heap_backing = NULL;
     tld->heaps = NULL;
     tld->subproc = &subproc_main;
+    tld->numa_node = _mi_os_numa_node();
     tld->thread_id = _mi_prim_thread_id();
     tld->thread_seq = mi_atomic_add_acq_rel(&thread_total_count, 1);
     tld->is_in_threadpool = _mi_prim_thread_is_in_threadpool();

src/os.c (18 changes)
@@ -694,18 +694,18 @@ static void mi_os_free_huge_os_pages(void* p, size_t size) {
   Support NUMA aware allocation
 -----------------------------------------------------------------------------*/

-_Atomic(size_t) _mi_numa_node_count; // = 0   // cache the node count
+static _Atomic(size_t) _mi_numa_node_count; // = 0   // cache the node count

-size_t _mi_os_numa_node_count_get(void) {
+size_t _mi_os_numa_node_count(void) {
   size_t count = mi_atomic_load_acquire(&_mi_numa_node_count);
-  if (count <= 0) {
+  if mi_unlikely(count <= 0) {
     long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
     if (ncount > 0) {
       count = (size_t)ncount;
     }
     else {
       count = _mi_prim_numa_node_count(); // or detect dynamically
-      if (count == 0) count = 1;
+      if (count == 0) { count = 1; }
     }
     mi_atomic_store_release(&_mi_numa_node_count, count); // save it
     _mi_verbose_message("using %zd numa regions\n", count);
@@ -713,7 +713,8 @@ size_t _mi_os_numa_node_count_get(void) {
   return count;
 }

-int _mi_os_numa_node_get(void) {
+
+static int mi_os_numa_node_get(void) {
   size_t numa_count = _mi_os_numa_node_count();
   if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
   // never more than the node count and >= 0
@@ -722,6 +723,13 @@ int _mi_os_numa_node_get(void) {
   return (int)numa_node;
 }

+int _mi_os_numa_node(void) {
+  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
+  else return mi_os_numa_node_get();
+}
+
+
+

 /* ----------------------------------------------------------------------------
   Public API
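_mi_os_numa_node keeps the common case cheap: once the cached node count is known to be 1, it returns node 0 without calling into the OS. The count itself is detected at most once and then published through the atomic. A standalone sketch of that lazy, atomically cached detection, with hypothetical names and plain C11 atomics instead of mimalloc's wrappers:

#include <stdatomic.h>
#include <stddef.h>

// Stand-in for the platform query (hypothetical).
static size_t detect_numa_node_count(void) { return 1; }

static _Atomic size_t cached_node_count;  // 0 means "not detected yet"

// Detect once and publish with a release store; later calls only do an acquire load.
static size_t numa_node_count(void) {
  size_t count = atomic_load_explicit(&cached_node_count, memory_order_acquire);
  if (count == 0) {
    count = detect_numa_node_count();
    if (count == 0) count = 1;  // always report at least one node
    atomic_store_explicit(&cached_node_count, count, memory_order_release);
  }
  return count;
}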