mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 07:29:30 +03:00)

merge from dev
commit 26e4632287
4 changed files with 32 additions and 26 deletions
@@ -116,6 +116,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,(tp*)des)
 #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des)  mi_atomic_cas_strong_acq_rel(p,exp,(tp*)des)
 #define mi_atomic_exchange_ptr_relaxed(tp,p,x)          mi_atomic_exchange_relaxed(p,(tp*)x)
 #define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,(tp*)x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,(tp*)x)
@@ -125,6 +126,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,des)
 #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,des)
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des)  mi_atomic_cas_strong_acq_rel(p,exp,des)
 #define mi_atomic_exchange_ptr_relaxed(tp,p,x)          mi_atomic_exchange_relaxed(p,x)
 #define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,x)
@@ -309,6 +311,7 @@ static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p,
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
 #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des)  mi_atomic_cas_strong_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
 #define mi_atomic_exchange_ptr_relaxed(tp,p,x)          (tp*)mi_atomic_exchange_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
 #define mi_atomic_exchange_ptr_release(tp,p,x)          (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
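Note: the commit adds mi_atomic_cas_ptr_strong_acq_rel to all three branches of the header (C11, C++ std::atomic, and the MSVC fallback), giving a strong compare-and-swap on an atomic pointer with acquire-release ordering next to the existing weak and release-only variants. As a rough illustration that is not part of this commit, here is a minimal C11 sketch of a Treiber-stack push using the same orderings the C11 branch of the macro expands to; node_t and stack_push are invented for the example.

#include <stdatomic.h>

typedef struct node_s {
  struct node_s* next;
  int value;
} node_t;

// Push onto a Treiber-style stack whose head is an atomic pointer.
// The strong CAS with acq_rel/acquire orderings mirrors what
// mi_atomic_cas_ptr_strong_acq_rel expands to in the C11 branch.
static void stack_push(_Atomic(node_t*)* head, node_t* n) {
  node_t* expected = atomic_load_explicit(head, memory_order_relaxed);
  do {
    n->next = expected;   // link to the current head snapshot
  } while (!atomic_compare_exchange_strong_explicit(
               head, &expected, n,
               memory_order_acq_rel,    // success ordering
               memory_order_acquire));  // failure ordering: expected is refreshed
}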
@@ -168,9 +168,9 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size
 void*  _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
 bool   _mi_os_use_large_page(size_t size, size_t alignment);
 size_t _mi_os_large_page_size(void);

 void*  _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
 mi_arena_t*   _mi_arena_from_id(mi_arena_id_t id);
27  src/os.c
@@ -718,31 +718,32 @@ static void mi_os_free_huge_os_pages(void* p, size_t size) {
   }
 }

 /* ----------------------------------------------------------------------------
 Support NUMA aware allocation
 -----------------------------------------------------------------------------*/

-static _Atomic(int) _mi_numa_node_count; // = 0 // cache the node count
+static _Atomic(size_t) mi_numa_node_count; // = 0 // cache the node count

 int _mi_os_numa_node_count(void) {
-  int count = mi_atomic_load_acquire(&_mi_numa_node_count);
-  if mi_unlikely(count <= 0) {
+  size_t count = mi_atomic_load_acquire(&mi_numa_node_count);
+  if mi_unlikely(count == 0) {
     long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
     if (ncount > 0 && ncount < INT_MAX) {
-      count = (int)ncount;
+      count = (size_t)ncount;
     }
     else {
       const size_t n = _mi_prim_numa_node_count(); // or detect dynamically
       if (n == 0 || n > INT_MAX) { count = 1; }
-      else { count = (int)n; }
+      else { count = n; }
     }
-    mi_atomic_store_release(&_mi_numa_node_count, count); // save it
+    mi_atomic_store_release(&mi_numa_node_count, count); // save it
     _mi_verbose_message("using %zd numa regions\n", count);
   }
-  return count;
+  mi_assert_internal(count > 0 && count <= INT_MAX);
+  return (int)count;
 }


 static int mi_os_numa_node_get(void) {
   int numa_count = _mi_os_numa_node_count();
   if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
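In this hunk the node count is now cached in a size_t (mi_numa_node_count) and published with release/acquire ordering, with the int conversion deferred to the return. A standalone sketch of that cached-detection pattern follows; detect_node_count stands in for the option lookup and _mi_prim_numa_node_count, and cached_count stands in for the static in os.c.

#include <stdatomic.h>
#include <limits.h>

static _Atomic(size_t) cached_count;   // 0 means "not detected yet"

// Stand-in for mi_option_get(...) / _mi_prim_numa_node_count() in the diff.
static size_t detect_node_count(void) { return 2; }

// Same shape as _mi_os_numa_node_count after this commit: load with acquire,
// detect once, clamp to [1, INT_MAX], publish with release. If two threads
// race, both compute the same value, so the duplicate store is harmless.
static int numa_node_count(void) {
  size_t count = atomic_load_explicit(&cached_count, memory_order_acquire);
  if (count == 0) {
    const size_t n = detect_node_count();
    count = (n == 0 || n > INT_MAX) ? 1 : n;
    atomic_store_explicit(&cached_count, count, memory_order_release);
  }
  return (int)count;
}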
@@ -754,13 +755,15 @@ static int mi_os_numa_node_get(void) {
 }

 int _mi_os_numa_node(void) {
-  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
-  else return mi_os_numa_node_get();
+  if mi_likely(mi_atomic_load_relaxed(&mi_numa_node_count) == 1) {
+    return 0;
+  }
+  else {
+    return mi_os_numa_node_get();
+  }
 }


 /* ----------------------------------------------------------------------------
 Public API
 -----------------------------------------------------------------------------*/

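The final hunk reshapes _mi_os_numa_node but keeps its fast path: a relaxed load of the cached count, returning node 0 whenever only one node was detected. A small sketch of that shape, building on the previous one, with query_current_node as a made-up stand-in for mi_os_numa_node_get:

#include <stdatomic.h>

static _Atomic(size_t) cached_node_count;           // cached as in the sketch above

static int query_current_node(void) { return 0; }   // stand-in for mi_os_numa_node_get()

// Fast path: a relaxed load suffices for the "single node" check, since the
// value is only a hint; the slow path asks the OS for the current node.
static int current_numa_node(void) {
  if (atomic_load_explicit(&cached_node_count, memory_order_relaxed) == 1) {
    return 0;
  }
  else {
    return query_current_node();
  }
}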