Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-06 19:38:41 +03:00)

Commit 26e4632287: merge from dev
4 changed files with 32 additions and 26 deletions
src/arena.c (22 changes)

@@ -385,7 +385,7 @@ static mi_decl_noinline void* mi_arenas_try_find_free(
   mi_assert(alignment <= MI_ARENA_SLICE_ALIGN);
   if (alignment > MI_ARENA_SLICE_ALIGN) return NULL;

   // search arena's
   mi_forall_suitable_arenas(subproc, req_arena, tseq, true /* only numa matching */, numa_node, allow_large, arena)
   {
     void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid);
@@ -400,7 +400,7 @@ static mi_decl_noinline void* mi_arenas_try_find_free(
     void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid);
     if (p != NULL) return p;
   }
   mi_forall_suitable_arenas_end();
   return NULL;
 }

@@ -543,7 +543,7 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_
   // search arena's
   const bool allow_large = true;
   const int any_numa = -1;
   const bool match_numa = true;
   mi_forall_suitable_arenas(subproc, req_arena, tseq, match_numa, any_numa, allow_large, arena)
   {
     size_t slice_index;
@@ -611,7 +611,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
       page = (mi_page_t*)mi_arena_os_alloc_aligned(alloc_size, page_alignment, 0 /* align offset */, commit, allow_large, req_arena, &memid);
     }
   }

   if (page == NULL) return NULL;
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(!os_align || _mi_is_aligned((uint8_t*)page + page_alignment, block_alignment));
@@ -1147,7 +1147,7 @@ static mi_bbitmap_t* mi_arena_bbitmap_init(size_t slice_count, uint8_t** base) {
 }


 static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t size, int numa_node, bool exclusive,
                                     mi_memid_t memid, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept
 {
   mi_assert(_mi_is_aligned(start,MI_ARENA_SLICE_SIZE));
@@ -1309,7 +1309,7 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
   Debugging
 ----------------------------------------------------------- */

 // Return idx of the slice past the last used slice
 static size_t mi_arena_used_slices(mi_arena_t* arena) {
   size_t idx;
   if (mi_bitmap_bsr(arena->pages, &idx)) {
@@ -1412,16 +1412,16 @@ static size_t mi_debug_show_page_bfield(mi_bfield_t field, char* buf, size_t* k,
   return bit_set_count;
 }

 static size_t mi_debug_show_chunks(const char* header1, const char* header2, const char* header3,
                                    size_t slice_count, size_t chunk_count,
                                    mi_bchunk_t* chunks, mi_bchunkmap_t* chunk_bins, bool invert, mi_arena_t* arena, bool narrow)
 {
   _mi_raw_message("\x1B[37m%s%s%s (use/commit: \x1B[31m0 - 25%%\x1B[33m - 50%%\x1B[36m - 75%%\x1B[32m - 100%%\x1B[0m)\n", header1, header2, header3);
   const size_t fields_per_line = (narrow ? 2 : 4);
   const size_t used_slice_count = mi_arena_used_slices(arena);
   size_t bit_count = 0;
   size_t bit_set_count = 0;
   for (size_t i = 0; i < chunk_count && bit_count < slice_count; i++) {
     char buf[5*MI_BCHUNK_BITS + 64]; _mi_memzero(buf, sizeof(buf));
     if (bit_count > used_slice_count && i+2 < chunk_count) {
       const size_t diff = chunk_count - 1 - i;
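The loop in the hunk above prints per-chunk usage but, once it has passed the last used slice, skips the remaining all-free chunks instead of printing them (the `const size_t diff = chunk_count - 1 - i;` computes how many it can jump over). A rough standalone illustration of that skip-the-free-tail idea, with hypothetical names and simplified output rather than mimalloc's actual mi_debug_show_chunks, might look like:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// Print a per-chunk usage map, collapsing the all-free tail into a single
// summary line once we are past the last used chunk (illustrative sketch).
static void debug_show_chunks(const bool* chunk_used, size_t chunk_count, size_t last_used_chunk) {
  for (size_t i = 0; i < chunk_count; i++) {
    if (i > last_used_chunk && i + 2 < chunk_count) {
      // everything from chunk i up to chunk_count-2 is free: summarize and jump
      printf("... %zu free chunks omitted ...\n", chunk_count - 1 - i);
      i = chunk_count - 2;   // resume at the final chunk on the next iteration
      continue;
    }
    printf("chunk %4zu: %s\n", i, chunk_used[i] ? "used" : "free");
  }
}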
@@ -1882,7 +1882,7 @@ mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t*
 }

   // find accessed size
   const size_t asize = mi_size_of_slices(mi_arena_used_slices(arena));
   if (base != NULL) { *base = (void*)arena; }
   if (full_size != NULL) { *full_size = arena->memid.mem.os.size; }
   if (accessed_size != NULL) { *accessed_size = asize; }

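To make the control flow of these arena.c hunks easier to follow, here is a minimal, self-contained sketch of a first-fit search over a list of arenas; arena_t, try_alloc, and arenas_try_find_free are illustrative stand-ins, not mimalloc's actual types or macros.

#include <stddef.h>

// Hypothetical arena descriptor with a per-arena allocation callback.
typedef struct arena_s {
  struct arena_s* next;
  int   numa_node;
  void* (*try_alloc)(struct arena_s* arena, size_t slice_count);
} arena_t;

// Walk the arena list, optionally restricting to arenas on a matching NUMA
// node, and return the first successful allocation (or NULL if none succeeds).
static void* arenas_try_find_free(arena_t* arenas, size_t slice_count,
                                  int numa_node, int match_numa) {
  for (arena_t* arena = arenas; arena != NULL; arena = arena->next) {
    if (match_numa && numa_node >= 0 && arena->numa_node != numa_node) continue;
    void* p = arena->try_alloc(arena, slice_count);
    if (p != NULL) return p;   // first fit wins
  }
  return NULL;                 // caller falls back, e.g. to the OS
}

In the hunks above the same loop shape appears with different NUMA policies: the free-slice search at line 385 passes true /* only numa matching */, while the abandoned-page search at line 543 passes any_numa = -1 with match_numa = true, so the restriction is effectively disabled there.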
src/os.c (29 changes)

@@ -109,7 +109,7 @@ size_t _mi_os_secure_guard_page_size(void) {
   return 0;
   #endif
 }

 // In secure mode, try to decommit an area and output a warning if this fails.
 bool _mi_os_secure_guard_page_set_at(void* addr, mi_memid_t memid) {
   if (addr == NULL) return true;
@@ -718,31 +718,32 @@ static void mi_os_free_huge_os_pages(void* p, size_t size) {
   }
 }

 /* ----------------------------------------------------------------------------
   Support NUMA aware allocation
 -----------------------------------------------------------------------------*/

-static _Atomic(int) _mi_numa_node_count; // = 0 // cache the node count
+static _Atomic(size_t) mi_numa_node_count; // = 0 // cache the node count

 int _mi_os_numa_node_count(void) {
-  int count = mi_atomic_load_acquire(&_mi_numa_node_count);
-  if mi_unlikely(count <= 0) {
+  size_t count = mi_atomic_load_acquire(&mi_numa_node_count);
+  if mi_unlikely(count == 0) {
     long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
     if (ncount > 0 && ncount < INT_MAX) {
-      count = (int)ncount;
+      count = (size_t)ncount;
     }
     else {
       const size_t n = _mi_prim_numa_node_count(); // or detect dynamically
       if (n == 0 || n > INT_MAX) { count = 1; }
-      else { count = (int)n; }
+      else { count = n; }
     }
-    mi_atomic_store_release(&_mi_numa_node_count, count); // save it
+    mi_atomic_store_release(&mi_numa_node_count, count); // save it
     _mi_verbose_message("using %zd numa regions\n", count);
   }
-  return count;
+  mi_assert_internal(count > 0 && count <= INT_MAX);
+  return (int)count;
 }


 static int mi_os_numa_node_get(void) {
   int numa_count = _mi_os_numa_node_count();
   if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
@@ -754,13 +755,15 @@ static int mi_os_numa_node_get(void) {
 }

 int _mi_os_numa_node(void) {
-  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
-  else return mi_os_numa_node_get();
+  if mi_likely(mi_atomic_load_relaxed(&mi_numa_node_count) == 1) {
+    return 0;
+  }
+  else {
+    return mi_os_numa_node_get();
+  }
 }


 /* ----------------------------------------------------------------------------
   Public API
 -----------------------------------------------------------------------------*/
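The @@ -718 hunk widens the cached node count from a signed _Atomic(int) to an _Atomic(size_t) and validates it before returning. The underlying pattern is a lazily initialized cache published with release/acquire atomics. A standalone sketch of that pattern in plain C11, where detect_numa_node_count() is a placeholder for the platform query rather than mimalloc's _mi_prim_numa_node_count, is:

#include <limits.h>
#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t numa_node_count;   // 0 means "not yet detected"

// Placeholder for the platform-specific query (sysfs, GetNumaHighestNodeNumber, ...).
static size_t detect_numa_node_count(void) { return 1; }

int os_numa_node_count(void) {
  size_t count = atomic_load_explicit(&numa_node_count, memory_order_acquire);
  if (count == 0) {                                    // first caller detects and publishes
    size_t n = detect_numa_node_count();
    count = (n == 0 || n > (size_t)INT_MAX) ? 1 : n;   // clamp to a sane non-zero value
    atomic_store_explicit(&numa_node_count, count, memory_order_release);
  }
  return (int)count;                                   // always in [1, INT_MAX]
}

Storing with release after the detection and loading with acquire on every call means any thread that observes a non-zero count also observes it fully computed, so in the worst case a few racing threads redo the detection once and store the same value.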