From cb0369452d93054d89008f3b3edb6254207a2d13 Mon Sep 17 00:00:00 2001 From: daanx Date: Tue, 4 Apr 2023 17:54:33 -0700 Subject: [PATCH] use exponentially sized arenas --- include/mimalloc/internal.h | 1 + include/mimalloc/prim.h | 11 ++++--- src/arena.c | 62 +++++++++++++++++++++++-------------- src/os.c | 8 ++++- src/prim/unix/prim.c | 1 + src/prim/wasi/prim.c | 1 + src/prim/windows/prim.c | 1 + 7 files changed, 56 insertions(+), 29 deletions(-) diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h index 9b73c92c..155fd862 100644 --- a/include/mimalloc/internal.h +++ b/include/mimalloc/internal.h @@ -92,6 +92,7 @@ void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to f size_t _mi_os_page_size(void); size_t _mi_os_good_alloc_size(size_t size); bool _mi_os_has_overcommit(void); +bool _mi_os_has_virtual_reserve(void); bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats); bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h index f07bb4bd..094d7ab9 100644 --- a/include/mimalloc/prim.h +++ b/include/mimalloc/prim.h @@ -22,11 +22,12 @@ terms of the MIT license. A copy of the license can be found in the file // OS memory configuration typedef struct mi_os_mem_config_s { - size_t page_size; // 4KiB - size_t large_page_size; // 2MiB - size_t alloc_granularity; // smallest allocation size (on Windows 64KiB) - bool has_overcommit; // can we reserve more memory than can be actually committed? - bool must_free_whole; // must allocated blocks free as a whole (false for mmap, true for VirtualAlloc) + size_t page_size; // 4KiB + size_t large_page_size; // 2MiB + size_t alloc_granularity; // smallest allocation size (on Windows 64KiB) + bool has_overcommit; // can we reserve more memory than can be actually committed? 
+ bool must_free_whole; // must allocated blocks free as a whole (false for mmap, true for VirtualAlloc) + bool has_virtual_reserve; // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory) } mi_os_mem_config_t; // Initialize diff --git a/src/arena.c b/src/arena.c index 5a3dfb91..6ad9a5a1 100644 --- a/src/arena.c +++ b/src/arena.c @@ -1,7 +1,5 @@ - - /* ---------------------------------------------------------------------------- -Copyright (c) 2019-2022, Microsoft Research, Daan Leijen +Copyright (c) 2019-2023, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -25,7 +23,7 @@ The arena allocation needs to be thread safe and we use an atomic bitmap to allo #include "mimalloc/atomic.h" #include // memset -#include // ENOMEM +#include // ENOMEM #include "bitmap.h" // atomic bitmap @@ -38,7 +36,7 @@ The arena allocation needs to be thread safe and we use an atomic bitmap to allo typedef uintptr_t mi_block_info_t; #define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN) #define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB -#define MI_MAX_ARENAS (64) // not more than 126 (since we use 7 bits in the memid and an arena index + 1) +#define MI_MAX_ARENAS (112) // not more than 126 (since we use 7 bits in the memid and an arena index + 1) // A memory arena descriptor typedef struct mi_arena_s { @@ -277,6 +275,35 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size return NULL; } +// try to reserve a fresh arena +static bool mi_arena_reserve(size_t size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id) +{ + if (_mi_preloading()) return false; + if (req_arena_id != _mi_arena_id_none()) return false; + + const size_t arena_count = 
mi_atomic_load_relaxed(&mi_arena_count); + if (arena_count > (MI_MAX_ARENAS - 4)) return false; + + size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve); + if (arena_reserve == 0) return false; + + if (!_mi_os_has_virtual_reserve()) { + arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for some embedded systems for example) + } + arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE); + if (arena_count >= 8 && arena_count <= 128) { + arena_reserve = (1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially + } + if (arena_reserve < size) return false; + + // commit eagerly? + bool arena_commit = false; + if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } + else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } + + return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0); +} + void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld) { @@ -296,24 +323,11 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset if (p != NULL) return p; // otherwise, try to first eagerly reserve a new arena - size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve); - arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE); - if (arena_reserve > 0 && arena_reserve >= size && // eager reserve enabled and large enough? - req_arena_id == _mi_arena_id_none() && // not exclusive? - mi_atomic_load_relaxed(&mi_arena_count) < 3*(MI_MAX_ARENAS/4) && // not too many arenas already? - !_mi_preloading() ) // and not before main runs - { - mi_arena_id_t arena_id = 0; - - // commit eagerly? 
- bool arena_commit = false; - if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } - else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } - - if (mi_reserve_os_memory_ex(arena_reserve, arena_commit /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) { - p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); - if (p != NULL) return p; - } + mi_arena_id_t arena_id = 0; + if (mi_arena_reserve(size,*large,req_arena_id,&arena_id)) { + // and try allocate in there + p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); + if (p != NULL) return p; } } @@ -334,6 +348,7 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, b return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); } + void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) { if (size != NULL) *size = 0; size_t arena_index = mi_arena_id_index(arena_id); @@ -344,6 +359,7 @@ void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) { return arena->start; } + /* ----------------------------------------------------------- Arena purge ----------------------------------------------------------- */ diff --git a/src/os.c b/src/os.c index e639c751..4710b809 100644 --- a/src/os.c +++ b/src/os.c @@ -21,13 +21,19 @@ static mi_os_mem_config_t mi_os_mem_config = { 0, // large page size (usually 2MiB) 4096, // allocation granularity true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems) - false // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span) + false, // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span) + true // has virtual reserve? 
(if true we can reserve virtual address space without using commit or physical memory) }; bool _mi_os_has_overcommit(void) { return mi_os_mem_config.has_overcommit; } +bool _mi_os_has_virtual_reserve(void) { + return mi_os_mem_config.has_virtual_reserve; +} + + // OS (small) page size size_t _mi_os_page_size(void) { return mi_os_mem_config.page_size; diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c index eec6ca6d..e3a6f8a9 100644 --- a/src/prim/unix/prim.c +++ b/src/prim/unix/prim.c @@ -134,6 +134,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) { config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this? config->has_overcommit = unix_detect_overcommit(); config->must_free_whole = false; // mmap can free in parts + config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE) } diff --git a/src/prim/wasi/prim.c b/src/prim/wasi/prim.c index 3f2659dd..bf78a258 100644 --- a/src/prim/wasi/prim.c +++ b/src/prim/wasi/prim.c @@ -21,6 +21,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) { config->alloc_granularity = 16; config->has_overcommit = false; config->must_free_whole = true; + config->has_virtual_reserve = false; } //--------------------------------------------- diff --git a/src/prim/windows/prim.c b/src/prim/windows/prim.c index 514fe647..af6af5fe 100644 --- a/src/prim/windows/prim.c +++ b/src/prim/windows/prim.c @@ -113,6 +113,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) { config->has_overcommit = false; config->must_free_whole = true; + config->has_virtual_reserve = true; // get the page size SYSTEM_INFO si; GetSystemInfo(&si);