improve page commit on demand

Author: daanx
Date:   2024-12-24 11:37:52 -08:00
Commit: d21114b5f2 (parent ba68810333)
7 changed files with 101 additions and 56 deletions

src/arena.c

@@ -207,12 +207,12 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
   size_t already_committed_count = 0;
   mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count);
   // adjust the stats so we don't double count the commits
-  if (already_committed_count > 0) {
-    mi_subproc_stat_adjust_decrease(arena->subproc, committed, mi_size_of_slices(already_committed_count), true /* on alloc */);
-  }
+  //if (already_committed_count > 0) {
+  //  mi_subproc_stat_adjust_decrease(arena->subproc, committed, mi_size_of_slices(already_committed_count), true /* on alloc */);
+  //}
   // now actually commit
   bool commit_zero = false;
-  if (!_mi_os_commit(p, mi_size_of_slices(slice_count), &commit_zero)) {
+  if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed_count))) {
     // failed to commit (todo: give warning?)
     if (already_committed_count > 0) {
       mi_subproc_stat_increase(arena->subproc, committed, mi_size_of_slices(already_committed_count));
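Note: the switch from `_mi_os_commit` to `_mi_os_commit_ex` is what makes the commented-out stat adjustment unnecessary. Judging by the call site, the extra argument is the number of bytes to account in the commit statistics, so re-committing already committed slices is no longer double counted. A self-contained sketch of that assumed contract (the helper names are stand-ins, not mimalloc's):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static size_t stat_committed = 0;   // stand-in for the subproc `committed` statistic

    // stub for the raw OS commit (mprotect/VirtualAlloc in a real allocator)
    static bool os_commit_prim(void* p, size_t size, bool* is_zero) {
      (void)p; (void)size; *is_zero = false;
      return true;
    }

    // assumed contract of _mi_os_commit_ex: commit `size` bytes at `p`, but
    // account only `stat_size` bytes, i.e. the part not already committed
    static bool commit_ex(void* p, size_t size, bool* is_zero, size_t stat_size) {
      if (!os_commit_prim(p, size, is_zero)) return false;
      stat_committed += stat_size;
      return true;
    }

    int main(void) {
      static char slices[4 * 64 * 1024];  // four 64 KiB "slices"
      bool is_zero;
      // two slices were already committed: account only the other two
      commit_ex(slices, sizeof(slices), &is_zero, 2 * 64 * 1024);
      printf("committed stat: %zu bytes\n", stat_committed);  // 131072, not 262144
      return 0;
    }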
@@ -686,7 +686,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
   page->reserved = (uint16_t)reserved;
   page->page_start = (uint8_t*)page + block_start;
   page->block_size = block_size;
-  page->page_committed = (commit_size == 0 ? 0 : commit_size - block_start); mi_assert(commit_size == 0 || commit_size >= block_start + block_size);
+  page->slice_committed = commit_size;
   page->memid = memid;
   page->free_is_zero = memid.initially_zero;
   if (block_size > 0 && _mi_is_power_of_two(block_size)) {
@@ -720,8 +720,10 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
   }
   // 2. find a free block, potentially allocating a new arena
-  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq,
-                                    !mi_option_is_enabled(mi_option_page_commit_on_demand));
+  const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) ||  // always commit small pages
+                       _mi_os_has_overcommit() ||  // no need to commit on demand on an OS that already does this for us
+                       !mi_option_is_enabled(mi_option_page_commit_on_demand));
+  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq, commit);
   if (page != NULL) {
     mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
     _mi_page_init(heap, page);
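The new `commit` flag spells out when a fresh page is still committed eagerly even though on-demand commit is available: small pages (up to `MI_PAGE_MIN_COMMIT_SIZE`), an OS that overcommits anyway, or the `mi_option_page_commit_on_demand` option being off. A toy version of the same predicate (constants and stubs are assumed; only the structure mirrors the patch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SLICE_SIZE       (64 * 1024)        // stand-in for MI_ARENA_SLICE_SIZE
    #define MIN_COMMIT_SIZE  (16 * SLICE_SIZE)  // stand-in for MI_PAGE_MIN_COMMIT_SIZE

    static bool os_has_overcommit(void)        { return false; } // e.g. true with Linux heuristic overcommit
    static bool commit_on_demand_enabled(void) { return true; }

    static bool should_commit_eagerly(size_t slice_count) {
      return (slice_count <= MIN_COMMIT_SIZE / SLICE_SIZE  // always commit small pages
              || os_has_overcommit()                       // OS already commits lazily for us
              || !commit_on_demand_enabled());             // on-demand commit switched off
    }

    int main(void) {
      printf("8 slices  -> eager=%d\n", should_commit_eagerly(8));   // 1: small page
      printf("64 slices -> eager=%d\n", should_commit_eagerly(64));  // 0: commit on demand
      return 0;
    }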
@@ -818,13 +820,18 @@ void _mi_arenas_page_free(mi_page_t* page) {
   if (page->memid.memkind == MI_MEM_ARENA) {
     mi_arena_t* arena = page->memid.mem.arena.arena;
     mi_bitmap_clear(arena->pages, page->memid.mem.arena.slice_index);
-    if (page->page_committed > 0) {
+    if (page->slice_committed > 0) {
       // if committed on-demand, set the commit bits to account commit properly
-      const size_t total_committed = (page->page_start - (uint8_t*)page) + page->page_committed;
-      mi_assert_internal(mi_memid_size(page->memid) >= total_committed);
-      const size_t total_slices = _mi_divide_up(total_committed, MI_ARENA_SLICE_SIZE);
+      mi_assert_internal(mi_memid_size(page->memid) >= page->slice_committed);
+      const size_t total_slices = page->slice_committed / MI_ARENA_SLICE_SIZE;  // conservative
       mi_assert_internal(mi_bitmap_is_clearN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices));
-      mi_bitmap_setN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices, NULL);
+      mi_assert_internal(page->memid.mem.arena.slice_count >= total_slices);
+      if (total_slices > 0) {
+        mi_bitmap_setN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices, NULL);
+      }
     }
+    else {
+      mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, page->memid.mem.arena.slice_index, page->memid.mem.arena.slice_count));
+    }
   }
   _mi_arenas_free(page, mi_memid_size(page->memid), page->memid);
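Two things change in the free path: the committed prefix is now tracked as `slice_committed` (bytes from the start of the page's slice range rather than relative to `page_start`), and the slice count is rounded down instead of up. The floor is the conservative direction for the `slices_committed` bitmap: a partially committed trailing slice must not be marked committed, or a later allocation could skip a commit it actually needs. A small check of that rounding rule (the slice size is an assumed stand-in):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SLICE_SIZE (64 * 1024)  // stand-in for MI_ARENA_SLICE_SIZE

    // only slices committed in full may be marked committed in the bitmap
    static size_t fully_committed_slices(size_t slice_committed) {
      return slice_committed / SLICE_SIZE;  // conservative: drop the partial tail
    }

    int main(void) {
      assert(fully_committed_slices(0) == 0);
      assert(fully_committed_slices(SLICE_SIZE) == 1);
      assert(fully_committed_slices(SLICE_SIZE + 1) == 1);      // partial 2nd slice not counted
      assert(fully_committed_slices(2 * SLICE_SIZE - 1) == 1);
      printf("ok\n");
      return 0;
    }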
@@ -1005,8 +1012,8 @@ void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
 }

 // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
-void _mi_arenas_collect(bool force_purge, mi_tld_t* tld) {
-  mi_arenas_try_purge(force_purge, force_purge /* visit all? */, tld);
+void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld) {
+  mi_arenas_try_purge(force_purge, visit_all, tld);
 }
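Previously `visit_all` was simply tied to `force_purge`; splitting it lets a caller walk every arena without also forcing unexpired ranges out (or vice versa). A compile-and-run sketch of the resulting call shapes (the stub bodies are mine; the signature is the patch's):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct tld_s { int unused; } tld_t;  // stand-in for mi_tld_t

    static void arenas_try_purge(bool force, bool visit_all, tld_t* tld) {
      (void)tld;
      printf("purge: force=%d visit_all=%d\n", force, visit_all);
    }

    // per the patch: _mi_arenas_collect now forwards both flags unchanged
    static void arenas_collect(bool force_purge, bool visit_all, tld_t* tld) {
      arenas_try_purge(force_purge, visit_all, tld);
    }

    int main(void) {
      tld_t tld = {0};
      arenas_collect(false, true, &tld);  // e.g. a full collect: visit all, don't force
      arenas_collect(true,  true, &tld);  // e.g. unsafe destroy-all: force and visit all
      return 0;
    }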
@@ -1062,7 +1069,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
 void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) {
   mi_arenas_unsafe_destroy(_mi_subproc());
-  _mi_arenas_collect(true /* force purge */, tld);  // purge non-owned arenas
+  _mi_arenas_collect(true /* force purge */, true /* visit all */, tld);  // purge non-owned arenas
 }
@@ -1462,15 +1469,23 @@ static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_c
   void* const p = mi_arena_slice_start(arena, slice_index);
   //const bool all_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count);
   size_t already_committed;
-  mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed);
+  mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed);  // pretend all committed.. (as we lack a clearN call that counts the already set bits..)
   const bool all_committed = (already_committed == slice_count);
-  const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */);
+  const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */, mi_size_of_slices(already_committed));

   // update committed bitmap
   if (needs_recommit) {
-    mi_subproc_stat_adjust_decrease( arena->subproc, committed, mi_size_of_slices(slice_count - already_committed), false /* on freed */);
     // no longer committed
     mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
+    // we just counted in the purge to decommit all, but some part was not committed so adjust that here
+    // mi_os_stat_decrease(committed, mi_size_of_slices(slice_count - already_committed));
   }
+  else if (!all_committed) {
+    // we cannot assume any of these are committed any longer (even with reset, since we did setN and may have marked uncommitted slices as committed)
+    mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
+    // we adjust the commit count as parts will be re-committed
+    // mi_os_stat_decrease(committed, mi_size_of_slices(already_committed));
+  }
   return needs_recommit;
 }
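The bookkeeping in `mi_arena_purge` is subtle: `mi_bitmap_setN` is (ab)used to count the already committed slices, since no counting `clearN` exists, which temporarily marks even uncommitted slices as committed. Afterwards the bits must therefore be cleared not only when the purge decommitted (`needs_recommit`), but also whenever the range was not fully committed to begin with. A toy model of that invariant, with the bitmap reduced to a bool array (the `decommits` flag stands in for what `_mi_os_purge_ex` reports):

    #include <stdbool.h>
    #include <stddef.h>

    #define N 8
    static bool committed[N];  // toy stand-in for arena->slices_committed

    // models mi_bitmap_setN: set all bits, return how many were already set
    static size_t set_all_count_prior(size_t lo, size_t n) {
      size_t prior = 0;
      for (size_t i = lo; i < lo + n; i++) { if (committed[i]) prior++; committed[i] = true; }
      return prior;
    }

    static void clear_all(size_t lo, size_t n) {  // models mi_bitmap_clearN
      for (size_t i = lo; i < lo + n; i++) committed[i] = false;
    }

    static void purge(size_t lo, size_t n, bool decommits) {
      const size_t prior = set_all_count_prior(lo, n);  // pretend all committed (no counting clearN)
      const bool all_committed  = (prior == n);
      const bool needs_recommit = decommits;
      if (needs_recommit) {
        clear_all(lo, n);    // memory was decommitted: nothing is committed now
      }
      else if (!all_committed) {
        clear_all(lo, n);    // a reset keeps memory committed, but setN may have marked
      }                      // never-committed slices, so conservatively clear them all
    }

    int main(void) {
      committed[0] = true;               // only slice 0 was committed
      purge(0, 4, /*decommits=*/false);
      return committed[1] ? 1 : 0;       // slice 1 must not be left marked committed
    }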
@@ -1493,6 +1508,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_
   if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) {
     // expiration was not yet set
     // maybe set the global arenas expire as well (if it wasn't set already)
+    mi_assert_internal(expire0==0);
     mi_atomic_casi64_strong_acq_rel(&arena->subproc->purge_expire, &expire0, expire);
   }
   else {
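The added assertion documents why reusing `expire0` for the second compare-and-swap is sound: a strong CAS leaves the expected value untouched on success, so after the first CAS succeeds `expire0` still holds 0, exactly the "not yet set" value the global expire should be compared against. The same pattern in plain C11 atomics (a sketch, not mimalloc's atomic wrappers):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    typedef int64_t msecs_t;
    static _Atomic msecs_t arena_expire  = 0;  // stand-ins for arena->purge_expire
    static _Atomic msecs_t global_expire = 0;  // and subproc->purge_expire

    static void schedule_purge(msecs_t now, msecs_t delay) {
      msecs_t expire0 = 0;
      const msecs_t expire = now + delay;
      if (atomic_compare_exchange_strong(&arena_expire, &expire0, expire)) {
        assert(expire0 == 0);  // success: the expected value was not overwritten
        // so expire0 can be reused as the "unset" expected value for the global expire
        atomic_compare_exchange_strong(&global_expire, &expire0, expire);
      }
    }

    int main(void) {
      schedule_purge(1000, 25);
      return (atomic_load(&global_expire) == 1025) ? 0 : 1;
    }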
@@ -1554,8 +1570,8 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
   if (!force && (expire == 0 || expire > now)) return false;

-  // reset expire (if not already set concurrently)
-  mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
+  // reset expire
+  mi_atomic_store_release(&arena->purge_expire, (mi_msecs_t)0);
   mi_subproc_stat_counter_increase(arena->subproc, arena_purges, 1);

   // go through all purge info's (with max MI_BFIELD_BITS ranges at a time)
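The reset changes from a conditional CAS to an unconditional release store: the CAS would silently skip the reset whenever another thread re-armed the expiration between the relaxed load and the CAS, and since purging is already serialized by `purge_guard` (below), simply clobbering the field is presumably the intended behavior. The two forms side by side in C11 atomics (a sketch):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int64_t purge_expire = 0;  // stand-in for arena->purge_expire

    // old: resets only if the value still equals `expected`; fails silently otherwise
    static void reset_expire_cas(int64_t expected) {
      atomic_compare_exchange_strong(&purge_expire, &expected, 0);
    }

    // new: always resets
    static void reset_expire_store(void) {
      atomic_store_explicit(&purge_expire, 0, memory_order_release);
    }

    int main(void) {
      atomic_store(&purge_expire, 42);
      reset_expire_cas(7);     // expected 7, actual 42: no reset happens
      reset_expire_store();    // unconditional: purge_expire is now 0
      return (int)atomic_load(&purge_expire);
    }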
@@ -1570,33 +1586,36 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
 static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld)
 {
   // try purge can be called often so try to only run when needed
   const long delay = mi_arena_purge_delay();
   if (_mi_preloading() || delay <= 0) return;  // nothing will be scheduled

   // check if any arena needs purging?
   mi_subproc_t* subproc = tld->subproc;
   const mi_msecs_t now = _mi_clock_now();
-  mi_msecs_t arenas_expire = mi_atomic_load_acquire(&subproc->purge_expire);
-  if (!force && (arenas_expire == 0 || arenas_expire > now)) return;
+  const mi_msecs_t arenas_expire = mi_atomic_load_acquire(&subproc->purge_expire);
+  if (!visit_all && !force && (arenas_expire == 0 || arenas_expire > now)) return;

   const size_t max_arena = mi_arenas_get_count(subproc);
   if (max_arena == 0) return;

-  // allow only one thread to purge at a time
+  // allow only one thread to purge at a time (todo: allow concurrent purging?)
   static mi_atomic_guard_t purge_guard;
   mi_atomic_guard(&purge_guard)
   {
     // increase global expire: at most one purge per delay cycle
-    mi_atomic_store_release(&subproc->purge_expire, now + delay);
+    if (arenas_expire > now) { mi_atomic_store_release(&subproc->purge_expire, now + (delay/10)); }
     const size_t arena_start = tld->thread_seq % max_arena;
-    size_t max_purge_count = (visit_all ? max_arena : 2);
+    size_t max_purge_count = (visit_all ? max_arena : (max_arena/4)+1);
     bool all_visited = true;
+    bool any_purged = false;
     for (size_t _i = 0; _i < max_arena; _i++) {
       size_t i = _i + arena_start;
       if (i >= max_arena) { i -= max_arena; }
       mi_arena_t* arena = mi_arena_from_index(subproc,i);
       if (arena != NULL) {
         if (mi_arena_try_purge(arena, now, force)) {
+          any_purged = true;
           if (max_purge_count <= 1) {
             all_visited = false;
             break;
@@ -1605,8 +1624,8 @@ static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld)
         }
       }
     }
-    if (all_visited) {
-      mi_atomic_store_release(&subproc->purge_expire, (mi_msecs_t)0);
+    if (all_visited && !any_purged) {
+      mi_atomic_store_release(&subproc->purge_expire, 0);
     }
   }
 }
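Putting the loop changes together: each pass starts at a per-thread offset (`thread_seq % max_arena`), spends a purge budget of a quarter of the arenas plus one instead of a fixed 2, and tracks `any_purged` so the global expiration is only cleared after a full pass that found nothing left to purge; if something was purged, the (shortened) expiration set at the top of the guard keeps a follow-up pass scheduled. A condensed, runnable model of that control flow (stubs and constants are mine; the structure follows the patch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_ARENA 16

    static bool try_purge_one(size_t i) { return (i % 5 == 0); }  // stub: some arenas purge

    // returns true when the caller may clear the global purge expiration
    static bool purge_pass(size_t thread_seq, bool visit_all) {
      const size_t start = thread_seq % MAX_ARENA;
      size_t budget      = (visit_all ? MAX_ARENA : (MAX_ARENA / 4) + 1);
      bool all_visited   = true;
      bool any_purged    = false;
      for (size_t k = 0; k < MAX_ARENA; k++) {
        const size_t i = (start + k) % MAX_ARENA;  // round-robin from the per-thread start
        if (try_purge_one(i)) {
          any_purged = true;
          if (budget <= 1) { all_visited = false; break; }  // budget exhausted: stop early
          budget--;
        }
      }
      return (all_visited && !any_purged);
    }

    int main(void) {
      printf("clear expire: %d\n", purge_pass(3, false));  // 0: purged some, keep it armed
      return 0;
    }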