mirror of
https://github.com/microsoft/mimalloc.git
synced 2025-05-07 07:49:31 +03:00
small fixes; max object size 1/8th of a page
This commit is contained in:
parent 9a4c264e76
commit 3746bf79ed
6 changed files with 21 additions and 17 deletions
@@ -321,8 +321,8 @@ typedef struct mi_page_s {
 // The max object size are checked to not waste more than 12.5% internally over the page sizes.
 // (Except for large pages since huge objects are allocated in 4MiB chunks)
-#define MI_SMALL_MAX_OBJ_SIZE    ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/6)   // < 11 KiB
-#define MI_MEDIUM_MAX_OBJ_SIZE   ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4)  // < 128 KiB
+#define MI_SMALL_MAX_OBJ_SIZE    ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)   // < 11 KiB
+#define MI_MEDIUM_MAX_OBJ_SIZE   ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)  // < 128 KiB
 #define MI_LARGE_MAX_OBJ_SIZE    ((MI_LARGE_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4)   // < 1 MiB
 #define MI_LARGE_MAX_OBJ_WSIZE   (MI_LARGE_MAX_OBJ_SIZE/MI_SIZE_SIZE)
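As a rough sanity check of the 12.5% bound above: if the largest block in a size class is at most one eighth of the usable page size, a page always holds at least eight blocks and the unused tail is smaller than one block, i.e. under 12.5% of the page. The sketch below works this out numerically, assuming 64 KiB small pages and 512 KiB medium pages and ignoring MI_PAGE_INFO_SIZE (both assumptions, not taken from this diff):

#include <stdio.h>
#include <stddef.h>

int main(void) {
  const size_t small_page  = 64  * 1024;        // assumed MI_SMALL_PAGE_SIZE
  const size_t medium_page = 512 * 1024;        // assumed MI_MEDIUM_PAGE_SIZE
  const size_t max_small   = small_page  / 8;   // largest small object (~8 KiB)
  const size_t max_medium  = medium_page / 8;   // largest medium object (~64 KiB)
  // the leftover tail of a page is always smaller than one block, so the
  // internal waste is bounded by block_size/page_size = 1/8 = 12.5%
  printf("small:  max obj %zu bytes, waste bound %.1f%%\n",
         max_small,  100.0 * (double)max_small  / (double)small_page);
  printf("medium: max obj %zu bytes, waste bound %.1f%%\n",
         max_medium, 100.0 * (double)max_medium / (double)medium_page);
  return 0;
}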
@@ -313,7 +313,7 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
   if (arena_count >= 1 && arena_count <= 128) {
     // scale up the arena sizes exponentially every 4 entries
-    const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/4, 0, 16);
+    const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/2, 0, 16);
     size_t reserve = 0;
     if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) {
       arena_reserve = reserve;
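For intuition, the multiplier is a power of two derived from the arena count, so the reserve size grows exponentially as more arenas are created; dividing the count by a smaller step makes the ramp-up faster. The following standalone sketch (with a stand-in clamp helper and an assumed 64 MiB base reserve, neither taken from mimalloc) prints how the two divisors behave:

#include <stdio.h>
#include <stddef.h>

// stand-in for _mi_clamp: restrict x to the range [lo, hi]
static size_t clamp(size_t x, size_t lo, size_t hi) {
  return (x < lo ? lo : (x > hi ? hi : x));
}

// reserve multiplier: doubles every `step` arenas, capped at 2^16
static size_t multiplier_for(size_t arena_count, size_t step) {
  return (size_t)1 << clamp(arena_count / step, 0, 16);
}

int main(void) {
  const size_t base_mib = 64;   // assumed base arena reserve in MiB
  for (size_t count = 0; count <= 12; count += 2) {
    printf("arenas=%2zu  /4 -> %6zu MiB   /2 -> %6zu MiB\n",
           count,
           base_mib * multiplier_for(count, 4),
           base_mib * multiplier_for(count, 2));
  }
  return 0;
}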
src/bitmap.c (10 changed lines)
@@ -352,7 +352,7 @@ static inline bool mi_bchunk_clearN(mi_bchunk_t* chunk, size_t cidx, size_t n, b
   if (n==1) return mi_bchunk_clear(chunk, cidx, pmaybe_all_clear);
   if (n==MI_BFIELD_BITS) return mi_bchunk_clearX(chunk, cidx, pmaybe_all_clear);
   if (n <MI_BFIELD_BITS) return mi_bchunk_clearNX(chunk, cidx, n, pmaybe_all_clear);
-  return mi_bchunk_xsetN_(MI_BIT_CLEAR, chunk, cidx, n, NULL, pmaybe_all_clear);
+  return mi_bchunk_xsetN_(MI_BIT_CLEAR, chunk, cidx, n, NULL, pmaybe_all_clear);
 }
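The dispatch above picks specialized paths for clearing a single bit, a whole bitfield, or a run inside one bitfield. A minimal illustration of the within-one-field case, with a hand-written mask helper in place of mi_bfield_mask (illustration only, not mimalloc's code):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

// build a mask of n consecutive set bits (1 <= n <= 64) starting at `shift`
static inline uint64_t bfield_mask(size_t n, size_t shift) {
  const uint64_t ones = (n >= 64 ? ~UINT64_C(0) : ((UINT64_C(1) << n) - 1));
  return ones << shift;
}

// clear n bits starting at index idx inside one 64-bit field;
// returns whether all n bits were set beforehand
static inline bool bfield_clear_n(uint64_t* b, size_t idx, size_t n) {
  const uint64_t mask = bfield_mask(n, idx);
  const bool were_all_set = ((*b & mask) == mask);
  *b &= ~mask;
  return were_all_set;
}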
@@ -596,7 +596,7 @@ static inline bool mi_bchunk_try_find_and_clear_1(mi_bchunk_t* chunk, size_t n,
   return mi_bchunk_try_find_and_clear(chunk, pidx);
 }
 
-#if !MI_OPT_SIMD
+#if !(MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==512))
 static inline bool mi_bchunk_try_find_and_clear8_at(mi_bchunk_t* chunk, size_t chunk_idx, size_t* pidx, bool allow_all_set) {
   const mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[chunk_idx]);
   if (!allow_all_set && (~b == 0)) return false;
@@ -1277,18 +1277,18 @@ bool _mi_bitmap_forall_setc_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* vis
     size_t rngcount = 0;
     #endif
     size_t bidx;
-    while (mi_bfield_find_least_bit(b, &bidx)) {
+    while (mi_bfield_find_least_bit(b, &bidx)) {
       const size_t rng = mi_ctz(~(b>>bidx)); // all the set bits from bidx
       #if MI_DEBUG > 1
       rngcount += rng;
-      #endif
+      #endif
       mi_assert_internal(rng>=1 && rng<=MI_BFIELD_BITS);
       const size_t idx = base_idx + bidx;
       mi_assert_internal((idx % MI_BFIELD_BITS) + rng <= MI_BFIELD_BITS);
       mi_assert_internal((idx / MI_BCHUNK_BITS) < mi_bitmap_chunk_count(bitmap));
       if (!visit(idx, rng, arena, arg)) return false;
       // clear rng bits in b
-      b = b & ~mi_bfield_mask(rng, bidx);
+      b = b & ~mi_bfield_mask(rng, bidx);
     }
     mi_assert_internal(rngcount == bpopcount);
   }
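The line `const size_t rng = mi_ctz(~(b>>bidx))` relies on a small bit trick: after shifting the field so the least set bit lands at position 0, the number of trailing zeros of the complement equals the length of the run of consecutive set bits. A self-contained illustration, using __builtin_ctzll as a stand-in for mi_ctz and mi_bfield_find_least_bit (assumes a GCC/Clang style compiler):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t b = 0x00000000000F0F00ULL;   // two runs of four set bits (at bit 8 and bit 16)
  while (b != 0) {
    const unsigned bidx = (unsigned)__builtin_ctzll(b);             // index of least set bit
    const unsigned rng  = (unsigned)__builtin_ctzll(~(b >> bidx));  // run length starting at bidx
    printf("run of %u set bits starting at index %u\n", rng, bidx);
    // clear the visited run (guard against a run covering all 64 bits)
    const uint64_t mask = (rng >= 64 ? ~UINT64_C(0) : ((UINT64_C(1) << rng) - 1)) << bidx;
    b &= ~mask;
  }
  return 0;
}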
@@ -166,8 +166,12 @@ void mi_collect(bool force) mi_attr_noexcept {
 ----------------------------------------------------------- */
 
 mi_heap_t* mi_heap_get_default(void) {
-  mi_thread_init();
-  return mi_prim_get_default_heap();
+  mi_heap_t* heap = mi_prim_get_default_heap();
+  if mi_unlikely(!mi_heap_is_initialized(heap)) {
+    mi_thread_init();
+    heap = mi_prim_get_default_heap();
+  }
+  return heap;
 }
 
 static bool mi_heap_is_default(const mi_heap_t* heap) {
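With this change, mi_heap_get_default runs the thread initialization path only when the calling thread's default heap is not yet set up, instead of calling mi_thread_init unconditionally. For reference, a minimal program that exercises the public API and goes through this path (link against mimalloc; illustrative, not part of the diff):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_heap_t* heap = mi_heap_get_default();   // lazily initializes the thread if needed
  void* p = mi_heap_malloc(heap, 100);       // allocate from the default heap
  printf("allocated %p from the default heap\n", p);
  mi_free(p);
  return 0;
}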
src/init.c (12 changed lines)
@@ -157,7 +157,7 @@ mi_decl_cache_align mi_heap_t _mi_heap_main = {
   MI_BIN_FULL, 0,   // page retired min/max
   NULL,             // next heap
   MI_MEMID_STATIC,  // memid
-  0,
+  0,
   2,                // full page retain
   true,             // allow page reclaim
   true,             // allow page abandon
@@ -289,7 +289,7 @@ mi_decl_noinline mi_tld_t* _mi_tld(void) {
   }
   if (mi_tld==NULL) {
     mi_tld = mi_tld_alloc();
-  }
+  }
   return mi_tld;
 }
@@ -361,11 +361,11 @@ static bool _mi_thread_heap_init(void) {
     //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap());
   }
   else {
-    // allocates tld data
-    // note: we cannot access thread-locals yet as that can cause (recursive) allocation
-    mi_tld_t* tld = mi_tld_alloc();
-
+    // allocates tld data
+    // note: we cannot access thread-locals yet as that can cause (recursive) allocation
+    // (on macOS <= 14 for example where the loader allocates thread-local data on demand).
+    mi_tld_t* tld = mi_tld_alloc();
+
     // allocate and initialize the heap
     mi_heap_t* heap = _mi_heap_create(0 /* default tag */, false /* allow destroy? */, _mi_arena_id_none(), tld);
@@ -870,7 +870,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
     if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
   }
   mi_assert_internal(mi_heap_is_initialized(heap));
-
+
   // collect every N generic mallocs
   if (heap->generic_count++ > 10000) {
     heap->generic_count = 0;
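The generic_count check above amortizes heavier maintenance over many allocations: the counter is bumped on every generic allocation and the slow path runs only once it passes a threshold. A generic sketch of the pattern (names are illustrative; only the counter and threshold come from the hunk itself):

#include <stddef.h>

typedef struct { size_t generic_count; } heap_counters_t;

static void maybe_do_maintenance(heap_counters_t* c) {
  if (c->generic_count++ > 10000) {   // slow path roughly once per 10k calls
    c->generic_count = 0;
    // ... run deferred, expensive maintenance here (e.g. releasing cached pages) ...
  }
}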