Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-05 15:09:31 +03:00)
use exponentially sized arenas
parent 8d56c155f9
commit cb0369452d
7 changed files with 56 additions and 29 deletions
include/mimalloc/internal.h

@@ -92,6 +92,7 @@ void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to f
 size_t _mi_os_page_size(void);
 size_t _mi_os_good_alloc_size(size_t size);
 bool   _mi_os_has_overcommit(void);
+bool   _mi_os_has_virtual_reserve(void);
 
 bool   _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
 bool   _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
include/mimalloc/prim.h

@@ -27,6 +27,7 @@ typedef struct mi_os_mem_config_s {
   size_t  alloc_granularity;   // smallest allocation size (on Windows 64KiB)
   bool    has_overcommit;      // can we reserve more memory than can be actually committed?
   bool    must_free_whole;     // must allocated blocks free as a whole (false for mmap, true for VirtualAlloc)
+  bool    has_virtual_reserve; // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
 } mi_os_mem_config_t;
 
 // Initialize
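
The new has_virtual_reserve flag records whether the OS can hand out address
space without committing physical memory or swap behind it. As a rough
illustration of the primitives involved (a hedged sketch only, not mimalloc's
implementation; reserve_virtual is a made-up helper name):

  #include <stddef.h>

  #if defined(_WIN32)
  #include <windows.h>
  static void* reserve_virtual(size_t size) {
    // MEM_RESERVE claims address space only; committing happens later.
    return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
  }
  #else
  #include <sys/mman.h>
  #ifndef MAP_ANONYMOUS
  #define MAP_ANONYMOUS MAP_ANON
  #endif
  #ifndef MAP_NORESERVE
  #define MAP_NORESERVE 0    // not defined on all mmap systems
  #endif
  static void* reserve_virtual(size_t size) {
    // PROT_NONE + MAP_NORESERVE: the range is inaccessible and no swap is
    // charged until the pages are later committed (e.g. via mprotect).
    void* p = mmap(NULL, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    return (p == MAP_FAILED ? NULL : p);
  }
  #endif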
src/arena.c (54 changed lines)
@@ -1,7 +1,5 @@
-
-
 /* ----------------------------------------------------------------------------
-Copyright (c) 2019-2022, Microsoft Research, Daan Leijen
+Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -38,7 +36,7 @@ The arena allocation needs to be thread safe and we use an atomic bitmap to allo
 typedef uintptr_t mi_block_info_t;
 #define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 64MiB  (must be at least MI_SEGMENT_ALIGN)
 #define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 32MiB
-#define MI_MAX_ARENAS         (64)                     // not more than 126 (since we use 7 bits in the memid and an arena index + 1)
+#define MI_MAX_ARENAS         (112)                    // not more than 126 (since we use 7 bits in the memid and an arena index + 1)
 
 // A memory arena descriptor
 typedef struct mi_arena_s {
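
The 126 limit in the comment above comes from the memid encoding: an arena
index is stored as index + 1 in a 7-bit field, so 0 can mean "not from an
arena". A hypothetical packing (the real mimalloc memid carries more fields)
makes the arithmetic concrete:

  #include <assert.h>
  #include <stddef.h>

  #define ARENA_ID_BITS 7
  #define ARENA_ID_MASK ((1u << ARENA_ID_BITS) - 1)  // 7 bits: values 0..127

  static size_t memid_from_arena(size_t arena_index) {
    assert(arena_index <= 125);        // index + 1 stays within 1..126
    return (arena_index + 1) & ARENA_ID_MASK;
  }

  static int memid_arena_index(size_t memid, size_t* arena_index) {
    size_t id = memid & ARENA_ID_MASK;
    if (id == 0) return 0;             // 0 encodes a plain OS allocation
    *arena_index = id - 1;
    return 1;
  }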
@@ -277,6 +275,35 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
   return NULL;
 }
 
+// try to reserve a fresh arena
+static bool mi_arena_reserve(size_t size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
+{
+  if (_mi_preloading()) return false;
+  if (req_arena_id != _mi_arena_id_none()) return false;
+
+  const size_t arena_count = mi_atomic_load_relaxed(&mi_arena_count);
+  if (arena_count > (MI_MAX_ARENAS - 4)) return false;
+
+  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
+  if (arena_reserve == 0) return false;
+
+  if (!_mi_os_has_virtual_reserve()) {
+    arena_reserve = arena_reserve/4;  // be conservative if virtual reserve is not supported (for some embedded systems for example)
+  }
+  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
+  if (arena_count >= 8 && arena_count <= 128) {
+    arena_reserve = (1<<(arena_count/8)) * arena_reserve;  // scale up the arena sizes exponentially
+  }
+  if (arena_reserve < size) return false;
+
+  // commit eagerly?
+  bool arena_commit = false;
+  if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
+  else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
+
+  return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
+}
+
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
                               mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
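
The effect of the new scaling rule is easiest to see by tabulating it: the
reserve size doubles for every 8 existing arenas via (1 << (arena_count/8)).
Below is a standalone sketch of that schedule (the 1 GiB base is only an
example value for mi_option_arena_reserve, not necessarily the default):

  #include <stdio.h>
  #include <stddef.h>

  int main(void) {
    const size_t base = (size_t)1 << 30;  // example base reserve: 1 GiB
    size_t total = 0;
    for (size_t arena_count = 0; arena_count < 40; arena_count++) {
      size_t reserve = base;
      if (arena_count >= 8 && arena_count <= 128) {
        reserve = ((size_t)1 << (arena_count/8)) * reserve;  // 2x every 8 arenas
      }
      total += reserve;   // total if each arena so far used its scheduled size
      if (arena_count % 8 == 0) {
        printf("arena %2zu: reserve %4zu GiB (cumulative %5zu GiB)\n",
               arena_count, reserve >> 30, total >> 30);
      }
    }
    return 0;
  }

With a 1 GiB base this prints 1, 2, 4, 8, and 16 GiB reserves at arena counts
0, 8, 16, 24, and 32, so a handful of arenas can cover a large heap without
creating many small mappings.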
@@ -296,26 +323,13 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
     if (p != NULL) return p;
 
     // otherwise, try to first eagerly reserve a new arena
-    size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
-    arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
-    if (arena_reserve > 0 && arena_reserve >= size &&                    // eager reserve enabled and large enough?
-        req_arena_id == _mi_arena_id_none() &&                           // not exclusive?
-        mi_atomic_load_relaxed(&mi_arena_count) < 3*(MI_MAX_ARENAS/4) && // not too many arenas already?
-        !_mi_preloading() )                                              // and not before main runs
-    {
     mi_arena_id_t arena_id = 0;
-
-    // commit eagerly?
-    bool arena_commit = false;
-    if (mi_option_get(mi_option_arena_eager_commit) == 2)      { arena_commit = _mi_os_has_overcommit(); }
-    else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
-
-    if (mi_reserve_os_memory_ex(arena_reserve, arena_commit /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
+    if (mi_arena_reserve(size,*large,req_arena_id,&arena_id)) {
+      // and try allocate in there
       p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       if (p != NULL) return p;
     }
   }
-  }
 
   // finally, fall back to the OS
   if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
@@ -334,6 +348,7 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, b
   return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
 }
 
+
 void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
   if (size != NULL) *size = 0;
   size_t arena_index = mi_arena_id_index(arena_id);
@@ -344,6 +359,7 @@ void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
   return arena->start;
 }
 
+
 /* -----------------------------------------------------------
   Arena purge
 ----------------------------------------------------------- */
src/os.c (8 changed lines)
@@ -21,13 +21,19 @@ static mi_os_mem_config_t mi_os_mem_config = {
   0,     // large page size (usually 2MiB)
   4096,  // allocation granularity
   true,  // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
-  false  // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
+  false, // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
+  true   // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
 };
 
 bool _mi_os_has_overcommit(void) {
   return mi_os_mem_config.has_overcommit;
 }
 
+bool _mi_os_has_virtual_reserve(void) {
+  return mi_os_mem_config.has_virtual_reserve;
+}
+
 // OS (small) page size
 size_t _mi_os_page_size(void) {
   return mi_os_mem_config.page_size;
src/prim/unix/prim.c

@@ -134,6 +134,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
   config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this?
   config->has_overcommit = unix_detect_overcommit();
   config->must_free_whole = false;    // mmap can free in parts
+  config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE)
 }
 
src/prim/wasi/prim.c

@@ -21,6 +21,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
   config->alloc_granularity = 16;
   config->has_overcommit = false;
   config->must_free_whole = true;
+  config->has_virtual_reserve = false;
 }
 
 //---------------------------------------------
src/prim/windows/prim.c

@@ -113,6 +113,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
 {
   config->has_overcommit = false;
   config->must_free_whole = true;
+  config->has_virtual_reserve = true;
   // get the page size
   SYSTEM_INFO si;
   GetSystemInfo(&si);