improve page commit on demand

daanx 2024-12-24 11:37:52 -08:00
parent ba68810333
commit d21114b5f2
7 changed files with 101 additions and 56 deletions

View file

@@ -128,7 +128,8 @@ bool _mi_os_decommit(void* addr, size_t size);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_purge(void* p, size_t size);
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset);
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size);
bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
size_t _mi_os_secure_guard_page_size(void);
bool _mi_os_secure_guard_page_set_at(void* addr, bool is_pinned);
@@ -155,7 +156,7 @@ void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit,
void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, mi_memid_t* memid);
void _mi_arenas_free(void* p, size_t size, mi_memid_t memid);
bool _mi_arenas_contain(const void* p);
void _mi_arenas_collect(bool force_purge, mi_tld_t* tld);
void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld);
void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld);
mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
@@ -534,9 +535,12 @@ static inline uint8_t* mi_page_start(const mi_page_t* page) {
return page->page_start;
}
static inline size_t mi_page_size(const mi_page_t* page) {
return mi_page_block_size(page) * page->reserved;
}
static inline uint8_t* mi_page_area(const mi_page_t* page, size_t* size) {
if (size) { *size = mi_page_block_size(page) * page->reserved; }
if (size) { *size = mi_page_size(page); }
return mi_page_start(page);
}
@@ -564,6 +568,21 @@ static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
return mi_page_block_size(page) - MI_PADDING_SIZE;
}
// This may change if we locate page info outside the page data slices
static inline uint8_t* mi_page_slice_start(const mi_page_t* page) {
return (uint8_t*)page;
}
// This gives the offset relative to the start slice of a page. This may change if we ever
// locate page info outside the page-data itself.
static inline size_t mi_page_slice_offset_of(const mi_page_t* page, size_t offset_relative_to_page_start) {
return (page->page_start - mi_page_slice_start(page)) + offset_relative_to_page_start;
}
static inline size_t mi_page_committed(const mi_page_t* page) {
return (page->slice_committed == 0 ? mi_page_size(page) : page->slice_committed - (page->page_start - mi_page_slice_start(page)));
}
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
return page->heap;
}

View file

@@ -139,6 +139,7 @@ terms of the MIT license. A copy of the license can be found in the file
// We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX
// Minimal commit size for a page when committing on demand (should be >= the OS page size, and >= MI_ARENA_SLICE_SIZE for correct statistics)
#define MI_PAGE_MIN_COMMIT_SIZE MI_ARENA_SLICE_SIZE
// ------------------------------------------------------
@@ -303,7 +304,7 @@ typedef struct mi_page_s {
mi_heap_t* heap; // the heap owning this page (or NULL for abandoned pages)
struct mi_page_s* next; // next page owned by the heap with the same `block_size`
struct mi_page_s* prev; // previous page owned by the heap with the same `block_size`
size_t page_committed; // committed size relative to `page_start`.
size_t slice_committed; // committed size relative to the first arena slice of the page data
mi_memid_t memid; // provenance of the page memory
} mi_page_t;

View file

@@ -207,12 +207,12 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
size_t already_committed_count = 0;
mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count);
// adjust the stats so we don't double count the commits
if (already_committed_count > 0) {
mi_subproc_stat_adjust_decrease(arena->subproc, committed, mi_size_of_slices(already_committed_count), true /* on alloc */);
}
//if (already_committed_count > 0) {
// mi_subproc_stat_adjust_decrease(arena->subproc, committed, mi_size_of_slices(already_committed_count), true /* on alloc */);
//}
// now actually commit
bool commit_zero = false;
if (!_mi_os_commit(p, mi_size_of_slices(slice_count), &commit_zero)) {
if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed_count))) {
// failed to commit (todo: give warning?)
if (already_committed_count > 0) {
mi_subproc_stat_increase(arena->subproc, committed, mi_size_of_slices(already_committed_count));
@@ -686,7 +686,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
page->reserved = (uint16_t)reserved;
page->page_start = (uint8_t*)page + block_start;
page->block_size = block_size;
page->page_committed = (commit_size == 0 ? 0 : commit_size - block_start); mi_assert(commit_size == 0 || commit_size >= block_start + block_size);
page->slice_committed = commit_size;
page->memid = memid;
page->free_is_zero = memid.initially_zero;
if (block_size > 0 && _mi_is_power_of_two(block_size)) {
@@ -720,8 +720,10 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
}
// 2. find a free block, potentially allocating a new arena
page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq,
!mi_option_is_enabled(mi_option_page_commit_on_demand));
const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) || // always commit small pages
_mi_os_has_overcommit() || // no need to commit on demand on an OS that already does this for us
!mi_option_is_enabled(mi_option_page_commit_on_demand));
page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq, commit);
if (page != NULL) {
mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
_mi_page_init(heap, page);
@@ -818,13 +820,18 @@ void _mi_arenas_page_free(mi_page_t* page) {
if (page->memid.memkind == MI_MEM_ARENA) {
mi_arena_t* arena = page->memid.mem.arena.arena;
mi_bitmap_clear(arena->pages, page->memid.mem.arena.slice_index);
if (page->page_committed > 0) {
if (page->slice_committed > 0) {
// if committed on-demand, set the commit bits to account for the commit properly
const size_t total_committed = (page->page_start - (uint8_t*)page) + page->page_committed;
mi_assert_internal(mi_memid_size(page->memid) >= total_committed);
const size_t total_slices = _mi_divide_up(total_committed, MI_ARENA_SLICE_SIZE);
mi_assert_internal(mi_memid_size(page->memid) >= page->slice_committed);
const size_t total_slices = page->slice_committed / MI_ARENA_SLICE_SIZE; // conservative
mi_assert_internal(mi_bitmap_is_clearN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices));
mi_assert_internal(page->memid.mem.arena.slice_count >= total_slices);
mi_bitmap_setN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices, NULL);
if (total_slices > 0) {
mi_bitmap_setN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices, NULL);
}
}
else {
mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, page->memid.mem.arena.slice_index, page->memid.mem.arena.slice_count));
}
}
_mi_arenas_free(page, mi_memid_size(page->memid), page->memid);
@@ -1005,8 +1012,8 @@ void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
}
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
void _mi_arenas_collect(bool force_purge, mi_tld_t* tld) {
mi_arenas_try_purge(force_purge, force_purge /* visit all? */, tld);
void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld) {
mi_arenas_try_purge(force_purge, visit_all, tld);
}
@@ -1062,7 +1069,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) {
mi_arenas_unsafe_destroy(_mi_subproc());
_mi_arenas_collect(true /* force purge */, tld); // purge non-owned arenas
_mi_arenas_collect(true /* force purge */, true /* visit all */, tld); // purge non-owned arenas
}
@@ -1462,15 +1469,23 @@ static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_c
void* const p = mi_arena_slice_start(arena, slice_index);
//const bool all_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count);
size_t already_committed;
mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed);
mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed); // pretend all slices are committed (as we lack a clearN call that counts the already-set bits)
const bool all_committed = (already_committed == slice_count);
const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */);
const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */, mi_size_of_slices(already_committed));
// update committed bitmap
if (needs_recommit) {
mi_subproc_stat_adjust_decrease( arena->subproc, committed, mi_size_of_slices(slice_count - already_committed), false /* on freed */);
// no longer committed
mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
// we just counted the purge as decommitting the whole range, but some part was not committed, so adjust for that here
// mi_os_stat_decrease(committed, mi_size_of_slices(slice_count - already_committed));
}
else if (!all_committed) {
// we cannot assume any of these are committed any longer (even with reset since we did setN and may have marked uncommitted slices as committed)
mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
// we adjust the commit count as parts will be re-committed
// mi_os_stat_decrease(committed, mi_size_of_slices(already_committed));
}
return needs_recommit;
}
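To make the statistics reasoning concrete, here is a hedged, self-contained sketch with assumed sizes (not code from this commit): purging 8 slices of which only 5 were committed decommits all 8, but the committed statistic should drop by just the 5 that were truly committed, which is why _mi_os_purge_ex now takes a separate stats_size.

#include <assert.h>
#include <stddef.h>

#define SLICE_SIZE (64 * 1024)  // assumed arena slice size

int main(void) {
  const size_t slice_count       = 8;  // slices being purged
  const size_t already_committed = 5;  // slices that were actually committed
  size_t committed_stat = already_committed * SLICE_SIZE;  // committed statistic for this range

  const size_t purge_size = slice_count * SLICE_SIZE;        // the decommit covers the whole range
  const size_t stats_size = already_committed * SLICE_SIZE;  // only this much leaves the statistic
  committed_stat -= stats_size;

  assert(purge_size == (size_t)(8 * SLICE_SIZE));
  assert(committed_stat == 0);  // no double counting, and the statistic never goes negative
  return 0;
}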
@@ -1493,6 +1508,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_
if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) {
// expiration was not yet set
// maybe set the global arenas expire as well (if it wasn't set already)
mi_assert_internal(expire0==0);
mi_atomic_casi64_strong_acq_rel(&arena->subproc->purge_expire, &expire0, expire);
}
else {
@@ -1554,8 +1570,8 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
if (!force && (expire == 0 || expire > now)) return false;
// reset expire (if not already set concurrently)
mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
// reset expire
mi_atomic_store_release(&arena->purge_expire, (mi_msecs_t)0);
mi_subproc_stat_counter_increase(arena->subproc, arena_purges, 1);
// go through all purge info's (with max MI_BFIELD_BITS ranges at a time)
@@ -1570,33 +1586,36 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld)
{
// try purge can be called often so try to only run when needed
const long delay = mi_arena_purge_delay();
if (_mi_preloading() || delay <= 0) return; // nothing will be scheduled
// check if any arena needs purging?
mi_subproc_t* subproc = tld->subproc;
const mi_msecs_t now = _mi_clock_now();
mi_msecs_t arenas_expire = mi_atomic_load_acquire(&subproc->purge_expire);
if (!force && (arenas_expire == 0 || arenas_expire > now)) return;
const mi_msecs_t arenas_expire = mi_atomic_load_acquire(&subproc->purge_expire);
if (!visit_all && !force && (arenas_expire == 0 || arenas_expire > now)) return;
const size_t max_arena = mi_arenas_get_count(subproc);
if (max_arena == 0) return;
// allow only one thread to purge at a time
// allow only one thread to purge at a time (todo: allow concurrent purging?)
static mi_atomic_guard_t purge_guard;
mi_atomic_guard(&purge_guard)
{
// increase global expire: at most one purge per delay cycle
mi_atomic_store_release(&subproc->purge_expire, now + delay);
if (arenas_expire > now) { mi_atomic_store_release(&subproc->purge_expire, now + (delay/10)); }
const size_t arena_start = tld->thread_seq % max_arena;
size_t max_purge_count = (visit_all ? max_arena : 2);
size_t max_purge_count = (visit_all ? max_arena : (max_arena/4)+1);
bool all_visited = true;
bool any_purged = false;
for (size_t _i = 0; _i < max_arena; _i++) {
size_t i = _i + arena_start;
if (i >= max_arena) { i -= max_arena; }
mi_arena_t* arena = mi_arena_from_index(subproc,i);
if (arena != NULL) {
if (mi_arena_try_purge(arena, now, force)) {
any_purged = true;
if (max_purge_count <= 1) {
all_visited = false;
break;
@@ -1605,8 +1624,8 @@ static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld)
}
}
}
if (all_visited) {
mi_atomic_store_release(&subproc->purge_expire, (mi_msecs_t)0);
if (all_visited && !any_purged) {
mi_atomic_store_release(&subproc->purge_expire, 0);
}
}
}

View file

@@ -120,7 +120,8 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
// collect arenas (this is program wide so don't force purges on abandonment of threads)
_mi_arenas_collect(collect == MI_FORCE /* force purge? */, heap->tld);
//mi_atomic_storei64_release(&heap->tld->subproc->purge_expire, 1);
_mi_arenas_collect(collect == MI_FORCE /* force purge? */, true /* visit all? */, heap->tld);
}
void _mi_heap_collect_abandon(mi_heap_t* heap) {

View file

@@ -144,7 +144,7 @@ static mi_option_desc_t options[_mi_option_last] =
#else
{ 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
#endif
{ 250, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds
{ 0, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 0, UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
{ 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
@@ -175,7 +175,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(max_vabits) },
{ MI_DEFAULT_PAGEMAP_COMMIT,
UNINIT, MI_OPTION(pagemap_commit) }, // commit the full pagemap upfront?
{ 0, UNINIT, MI_OPTION(page_commit_on_demand) },
{ 1, UNINIT, MI_OPTION(page_commit_on_demand) },
};
static void mi_option_init(mi_option_desc_t* desc);
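Since the default flips from 0 to 1 here, a program that prefers the previous eager-commit behavior could disable the option at startup. A usage sketch, assuming the public mi_option_set API and that the option is set before the first allocation (it can normally also be set via the MIMALLOC_PAGE_COMMIT_ON_DEMAND environment variable):

#include <mimalloc.h>

int main(void) {
  // opt out of on-demand page commit before any mimalloc allocation happens
  mi_option_set(mi_option_page_commit_on_demand, 0);
  void* p = mi_malloc(1024);
  mi_free(p);
  return 0;
}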

View file

@@ -429,9 +429,9 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize);
}
bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size) {
if (is_zero != NULL) { *is_zero = false; }
mi_os_stat_increase(committed, size); // use size for precise commit vs. decommit
mi_os_stat_increase(committed, stat_size); // use stat_size for precise commit vs. decommit accounting
mi_os_stat_counter_increase(commit_calls, 1);
// page align range
@@ -458,9 +458,13 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
return true;
}
static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit) {
bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
return _mi_os_commit_ex(addr, size, is_zero, size);
}
static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stats_size) {
mi_assert_internal(needs_recommit!=NULL);
mi_os_stat_decrease(committed, size);
mi_os_stat_decrease(committed, stats_size);
// page align
size_t csize;
@@ -479,7 +483,7 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit) {
bool _mi_os_decommit(void* addr, size_t size) {
bool needs_recommit;
return mi_os_decommit_ex(addr, size, &needs_recommit);
return mi_os_decommit_ex(addr, size, &needs_recommit, size);
}
@@ -509,7 +513,7 @@ bool _mi_os_reset(void* addr, size_t size) {
// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset)
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size)
{
if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
mi_os_stat_counter_increase(purge_calls, 1);
@@ -519,7 +523,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset)
!_mi_preloading()) // don't decommit during preloading (unsafe)
{
bool needs_recommit = true;
mi_os_decommit_ex(p, size, &needs_recommit);
mi_os_decommit_ex(p, size, &needs_recommit, stats_size);
return needs_recommit;
}
else {
@@ -533,7 +537,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset)
// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
bool _mi_os_purge(void* p, size_t size) {
return _mi_os_purge_ex(p, size, true);
return _mi_os_purge_ex(p, size, true, size);
}

View file

@@ -251,8 +251,10 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
}
else {
mi_page_queue_remove(pq, page);
mi_tld_t* tld = page->heap->tld;
mi_page_set_heap(page, NULL);
_mi_arenas_page_abandon(page);
_mi_arenas_collect(false, false, tld); // allow purging
}
}
@@ -359,7 +361,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) {
mi_heap_t* heap = page->heap;
mi_page_set_heap(page,NULL);
_mi_arenas_page_free(page);
_mi_arenas_collect(false, heap->tld); // allow purging
_mi_arenas_collect(false, false, heap->tld); // allow purging
}
#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE
@@ -607,14 +609,13 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
mi_assert_internal(extend < (1UL<<16));
// commit on demand?
if (page->page_committed > 0) {
if (page->slice_committed > 0) {
const size_t needed_size = (page->capacity + extend)*bsize;
if (needed_size > page->page_committed) {
size_t commit_size = _mi_align_up(needed_size, MI_PAGE_MIN_COMMIT_SIZE);
const size_t max_size = page->reserved * bsize;
if (commit_size > max_size) { commit_size = max_size; }
mi_assert(commit_size > page->page_committed);
_mi_os_commit(mi_page_start(page) + page->page_committed, commit_size - page->page_committed, NULL);
const size_t needed_commit = _mi_align_up( mi_page_slice_offset_of(page, needed_size), MI_PAGE_MIN_COMMIT_SIZE );
if (needed_commit > page->slice_committed) {
mi_assert_internal(((needed_commit - page->slice_committed) % _mi_os_page_size()) == 0);
_mi_os_commit(mi_page_slice_start(page) + page->slice_committed, needed_commit - page->slice_committed, NULL);
page->slice_committed = needed_commit;
}
}
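A small worked example of the on-demand growth above, as a self-contained sketch (the 64 KiB minimum commit size, 1 KiB block size, and 128-byte block_start are assumed values, not taken from the commit): extending the capacity from 60 to 68 blocks pushes the needed slice offset just past the first 64 KiB chunk, so exactly one more chunk gets committed.

#include <assert.h>
#include <stddef.h>

#define MIN_COMMIT (64 * 1024)  // assumed MI_PAGE_MIN_COMMIT_SIZE

static size_t align_up(size_t n, size_t alignment) {
  return ((n + alignment - 1) / alignment) * alignment;
}

int main(void) {
  const size_t block_start  = 128;         // page_start - slice_start (assumed)
  const size_t bsize        = 1024;        // block size (assumed)
  const size_t capacity     = 60;          // blocks already available
  const size_t extend       = 8;           // blocks we want to make available now
  size_t slice_committed    = MIN_COMMIT;  // committed so far, counted from the slice start

  const size_t needed_size   = (capacity + extend) * bsize;  // bytes needed past page_start
  const size_t needed_commit = align_up(block_start + needed_size, MIN_COMMIT);  // offset from the slice start
  if (needed_commit > slice_committed) {
    // the real code calls _mi_os_commit(mi_page_slice_start(page) + slice_committed, needed_commit - slice_committed, NULL)
    slice_committed = needed_commit;
  }
  assert(slice_committed == (size_t)(2 * MIN_COMMIT));  // 128 + 68*1024 = 69760 bytes, rounded up to 128 KiB
  return 0;
}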
@@ -647,8 +648,8 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
#endif
#if MI_DEBUG>2
if (page->memid.initially_zero) {
mi_track_mem_defined(page->page_start, (page->page_committed == 0 ? page_size : page->page_committed));
mi_assert_expensive(mi_mem_is_zero(page_start, (page->page_committed == 0 ? page_size : page->page_committed)));
mi_track_mem_defined(page->page_start, mi_page_committed(page));
mi_assert_expensive(mi_mem_is_zero(page_start, mi_page_committed(page)));
}
#endif