From 58b726be6f814906870738ac225e119350c7e243 Mon Sep 17 00:00:00 2001 From: Daan Leijen Date: Tue, 17 Dec 2024 18:57:00 -0800 Subject: [PATCH] better stats for commit on overcommit systems (by not counting on-demand commit upfront) --- include/mimalloc/types.h | 12 ++++++------ src/arena.c | 39 +++++++++++++++++++++++++++++++++------ src/stats.c | 22 ++++++++++++---------- 3 files changed, 51 insertions(+), 22 deletions(-) diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h index 4c998f90..0cf909d0 100644 --- a/include/mimalloc/types.h +++ b/include/mimalloc/types.h @@ -488,8 +488,8 @@ typedef struct mi_stats_s { void _mi_stat_increase(mi_stat_count_t* stat, size_t amount); void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); // adjust stat in special cases to compensate for double counting -void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount); -void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount); +void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount, bool on_alloc); +void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount, bool on_free); // counters can just be increased void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); @@ -497,14 +497,14 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); #define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) #define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) #define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) -#define mi_stat_adjust_increase(stat,amount) _mi_stat_adjust_increase( &(stat), amount) -#define mi_stat_adjust_decrease(stat,amount) _mi_stat_adjust_decrease( &(stat), amount) +#define mi_stat_adjust_increase(stat,amnt,b) _mi_stat_adjust_increase( &(stat), amnt, b) +#define mi_stat_adjust_decrease(stat,amnt,b) _mi_stat_adjust_decrease( &(stat), amnt, b) #else #define mi_stat_increase(stat,amount) ((void)0) #define 
mi_stat_decrease(stat,amount) ((void)0) #define mi_stat_counter_increase(stat,amount) ((void)0) -#define mi_stat_adjuct_increase(stat,amount) ((void)0) -#define mi_stat_adjust_decrease(stat,amount) ((void)0) +#define mi_stat_adjust_increase(stat,amnt,b) ((void)0) +#define mi_stat_adjust_decrease(stat,amnt,b) ((void)0) #endif #define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) diff --git a/src/arena.c b/src/arena.c index e7564cd6..29279b86 100644 --- a/src/arena.c +++ b/src/arena.c @@ -222,9 +222,13 @@ static mi_decl_noinline void* mi_arena_try_alloc_at( *memid = mi_memid_create_arena(arena, slice_index, slice_count); memid->is_pinned = arena->memid.is_pinned; - // set the dirty bits + // set the dirty bits and track which slices become accessible + size_t touched_slices = slice_count; if (arena->memid.initially_zero) { - memid->initially_zero = mi_bitmap_setN(arena->slices_dirty, slice_index, slice_count, NULL); + size_t already_dirty = 0; + memid->initially_zero = mi_bitmap_setN(arena->slices_dirty, slice_index, slice_count, &already_dirty); + mi_assert_internal(already_dirty <= touched_slices); + touched_slices -= already_dirty; } // set commit state @@ -239,7 +243,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at( mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count); // adjust the stats so we don't double count the commits if (already_committed_count > 0) { - _mi_stat_adjust_decrease(&_mi_stats_main.committed, mi_size_of_slices(already_committed_count)); + _mi_stat_adjust_decrease(&_mi_stats_main.committed, mi_size_of_slices(already_committed_count), true /* on alloc */); } // now actually commit bool commit_zero = false; @@ -263,6 +267,15 @@ static mi_decl_noinline void* mi_arena_try_alloc_at( #endif } } + else { + // already fully committed. 
+ // if the OS has overcommit, and this is the first time we access these pages, then + // count the commit now (as at arena reserve we didn't count those commits as these are on-demand) + if (_mi_os_has_overcommit() && touched_slices > 0) { + _mi_stat_increase(&_mi_stats_main.committed, mi_size_of_slices(touched_slices)); + } + } + // tool support if (memid->initially_zero) { mi_track_mem_defined(p, slice_count * MI_ARENA_SLICE_SIZE); } @@ -324,17 +337,25 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re // commit eagerly? bool arena_commit = false; - if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } + const bool overcommit = _mi_os_has_overcommit(); + if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = overcommit; } else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } + // on an OS with overcommit (Linux) we don't count the commit yet as it is on-demand. Once a slice + // is actually allocated for the first time it will be counted. + const bool adjust = (overcommit && arena_commit); + if (adjust) { _mi_stat_adjust_decrease(&_mi_stats_main.committed, arena_reserve, true /* on alloc */); } // and try to reserve the arena int err = mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id); if (err != 0) { + if (adjust) { _mi_stat_adjust_increase(&_mi_stats_main.committed, arena_reserve, true); } // roll back // failed, try a smaller size? const size_t small_arena_reserve = (MI_SIZE_BITS == 32 ? 128*MI_MiB : 1*MI_GiB); + if (adjust) { _mi_stat_adjust_decrease(&_mi_stats_main.committed, small_arena_reserve, true); } if (arena_reserve > small_arena_reserve) { // try again err = mi_reserve_os_memory_ex(small_arena_reserve, arena_commit, allow_large, false /* exclusive? 
*/, arena_id); + if (err != 0 && adjust) { _mi_stat_adjust_increase(&_mi_stats_main.committed, small_arena_reserve, true); } // roll back } } return (err==0); @@ -851,7 +872,7 @@ bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page) { } else { _mi_stat_counter_increase(&_mi_stats_main.pages_reabandon_full, 1); - _mi_stat_adjust_decrease(&_mi_stats_main.pages_abandoned, 1); // adjust as we are not abandoning fresh + _mi_stat_adjust_decrease(&_mi_stats_main.pages_abandoned, 1, true /* on alloc */); // adjust as we are not abandoning fresh _mi_arena_page_abandon(page); return true; } @@ -1402,7 +1423,13 @@ static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_c const size_t size = mi_size_of_slices(slice_count); void* const p = mi_arena_slice_start(arena, slice_index); - const bool all_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count); + //const bool all_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count); + size_t already_committed; + mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed); + const bool all_committed = (already_committed == slice_count); + if (mi_option_is_enabled(mi_option_purge_decommits)) { + _mi_stat_adjust_increase(&_mi_stats_main.committed, mi_size_of_slices(already_committed), false /* on freed */); + } const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */); // update committed bitmap diff --git a/src/stats.c b/src/stats.c index 53937330..bb17b936 100644 --- a/src/stats.c +++ b/src/stats.c @@ -54,21 +54,23 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { // Adjust stats to compensate; for example before committing a range, // first adjust downwards with parts that were already committed so // we avoid double counting. 
-static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) { +static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount, bool on_alloc) { if (amount == 0) return; if mi_unlikely(mi_is_in_main(stat)) { // adjust atomically mi_atomic_addi64_relaxed(&stat->current, amount); - mi_atomic_addi64_relaxed(&stat->allocated, amount); - mi_atomic_addi64_relaxed(&stat->freed, amount); + mi_atomic_addi64_relaxed((on_alloc ? &stat->allocated : &stat->freed), amount); } else { // don't affect the peak stat->current += amount; - // add to both - stat->allocated += amount; - stat->freed += amount; + if (on_alloc) { + stat->allocated += amount; + } + else { + stat->freed += amount; + } } } @@ -91,12 +93,12 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { mi_stat_update(stat, -((int64_t)amount)); } -void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount) { - mi_stat_adjust(stat, (int64_t)amount); +void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount, bool on_alloc) { + mi_stat_adjust(stat, (int64_t)amount, on_alloc); } -void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) { - mi_stat_adjust(stat, -((int64_t)amount)); +void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount, bool on_alloc) { + mi_stat_adjust(stat, -((int64_t)amount), on_alloc); } // must be thread safe as it is called from stats_merge