wip: purgeable arenas, various fixes

daanx 2023-04-03 17:58:28 -07:00
parent a9f42376b7
commit d22a13c990
5 changed files with 18 additions and 26 deletions

include/mimalloc/types.h

@@ -350,7 +350,7 @@ typedef enum mi_segment_kind_e {
 // is still tracked in fine-grained MI_COMMIT_SIZE chunks)
 // ------------------------------------------------------
-#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)    // most fine-grained
+#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)
 #define MI_COMMIT_SIZE             (MI_SEGMENT_SLICE_SIZE)      // 64KiB
 #define MI_COMMIT_MASK_BITS        (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
 #define MI_COMMIT_MASK_FIELD_BITS  MI_SIZE_BITS
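
The mask constants above fit together as follows; a minimal sketch, assuming a 64-bit size_t and the 32MiB segments of the v2 layout (the names are written out here for illustration and mirror, but are not, mimalloc's own mi_commit_mask_t):

    // Sketch: one bit per MI_COMMIT_SIZE (64KiB) chunk of a segment.
    // With a 32MiB segment: 32MiB / 64KiB = 512 bits = 8 size_t fields.
    #include <stddef.h>

    #define COMMIT_MASK_BITS         512   // MI_SEGMENT_SIZE / MI_COMMIT_SIZE
    #define COMMIT_MASK_FIELD_BITS   64    // MI_SIZE_BITS on a 64-bit platform
    #define COMMIT_MASK_FIELD_COUNT  (COMMIT_MASK_BITS / COMMIT_MASK_FIELD_BITS)

    typedef struct commit_mask_s {
      size_t mask[COMMIT_MASK_FIELD_COUNT];  // 8 words of 64 bits each
    } commit_mask_t;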

src/arena.c

@@ -182,7 +182,7 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
   if (any_uncommitted) {
     bool commit_zero;
     _mi_os_commit(p, needed_bcount * MI_ARENA_BLOCK_SIZE, &commit_zero, tld->stats);
-    if (commit_zero) *is_zero = true;
+    if (commit_zero) { *is_zero = true; }
   }
 }
 else {
@@ -190,7 +190,7 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
     *commit = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
   }
-  mi_track_mem_undefined(p,needed_bcount*MI_ARENA_BLOCK_SIZE);
+  // mi_track_mem_undefined(p,needed_bcount*MI_ARENA_BLOCK_SIZE);
   return p;
 }
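
The two hunks above implement commit-on-demand for arena blocks: after claiming a range in the in-use bitmap, the range is committed only if some block in it was not yet marked committed, and *is_zero is set when the OS hands back zeroed pages. A minimal sketch of that pattern, with hypothetical helper and type names (bitmap_try_claim, os_commit, arena_t) standing in for mimalloc's internal API:

    // Sketch: lazily commit a claimed range of arena blocks (hypothetical names).
    static void* claim_and_commit(arena_t* arena, size_t bcount, bool* is_zero) {
      size_t idx;
      if (!bitmap_try_claim(arena->blocks_inuse, bcount, &idx)) return NULL;
      void* p = (uint8_t*)arena->start + idx*ARENA_BLOCK_SIZE;
      // claim the committed-bits too; false means some block was uncommitted
      bool any_uncommitted = !bitmap_claim(arena->blocks_committed, bcount, idx);
      if (any_uncommitted) {
        bool commit_zero = false;
        os_commit(p, bcount*ARENA_BLOCK_SIZE, &commit_zero);
        if (commit_zero) { *is_zero = true; }  // fresh OS pages are zeroed
      }
      return p;
    }
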
@@ -297,7 +297,11 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
       mi_atomic_load_relaxed(&mi_arena_count) < 3*(MI_MAX_ARENAS/4) )  // not too many arenas already?
   {
     mi_arena_id_t arena_id = 0;
-    const bool arena_commit = _mi_os_has_overcommit() || mi_option_is_enabled(mi_option_eager_arena_commit);
+    bool arena_commit = _mi_os_has_overcommit();
+    if (mi_option_get(mi_option_eager_arena_commit) == 1)      { arena_commit = true; }
+    else if (mi_option_get(mi_option_eager_arena_commit) == 0) { arena_commit = false; }
     if (mi_reserve_os_memory_ex(arena_reserve, arena_commit /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
       p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       if (p != NULL) return p;
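
The rewritten condition turns eager_arena_commit into a tri-state: 1 forces eager commit, 0 forbids it, and any other value (the new default of 2, set in options.c below) follows the OS overcommit setting. Isolated as a sketch:

    // Sketch: tri-state semantics of eager_arena_commit after this change.
    //   0 = never commit eagerly, 1 = always,
    //   anything else (the new default, 2) = follow the OS overcommit setting.
    #include <stdbool.h>

    static bool arena_should_commit(long eager_arena_commit, bool os_overcommits) {
      if (eager_arena_commit == 1) return true;
      if (eager_arena_commit == 0) return false;
      return os_overcommits;  // option value 2: "auto"
    }
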
@@ -513,6 +517,9 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
     return;
   }
+  // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
+  mi_track_mem_undefined(p,size);
   // potentially decommit
   if (!arena->allow_decommit || arena->blocks_committed == NULL) {
     mi_assert_internal(all_committed);
@@ -523,6 +530,7 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
     if (!all_committed) {
       // assume the entire range as no longer committed
       _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+      mi_track_mem_noaccess(p,size);
     }
     // (delay) purge the entire range
     mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
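
mi_arena_schedule_purge only schedules the work; judging by the option names in this commit (purge_decommits, and purge_expire with mi_option_purge_delay in segment.c below), the design records an expiration time and lets a later pass do the actual purge. A sketch of that shape, with hypothetical field and helper names:

    // Sketch: delayed purging of freed arena blocks (hypothetical names).
    static void schedule_purge(arena_t* arena, size_t idx, size_t blocks) {
      long delay = option_get(option_purge_delay);     // in milliseconds
      if (delay < 0) return;                           // negative delay: never purge
      bitmap_claim(arena->blocks_purge, blocks, idx);  // remember the range
      if (arena->purge_expire == 0) {
        arena->purge_expire = clock_now() + delay;     // purge no earlier than this
      }
    }

    static void try_collect_purges(arena_t* arena) {
      long expire = arena->purge_expire;
      if (expire == 0 || clock_now() < expire) return;  // not yet due
      // ... purge every range still set in arena->blocks_purge, then clear it
    }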

src/options.c

@@ -60,7 +60,7 @@ static mi_option_desc_t options[_mi_option_last] =
   // Some of the following options are experimental and not all combinations are valid. Use with care.
   { 1, UNINIT, MI_OPTION(eager_commit) },         // commit per segment directly (8MiB) (but see also `eager_commit_delay`)
-  { 0, UNINIT, MI_OPTION_LEGACY(eager_arena_commit,eager_region_commit) },
+  { 2, UNINIT, MI_OPTION_LEGACY(eager_arena_commit,eager_region_commit) },
   { 0, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) },
   { 0, UNINIT, MI_OPTION(large_os_pages) },       // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },  // per 1GiB huge pages
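
The eager_arena_commit default moves from 0 to 2 (the "auto" state used in arena.c above), and MI_OPTION_LEGACY keeps the option readable under its old name. A hedged sketch of that lookup order; the MIMALLOC_-prefixed variable names follow mimalloc's usual environment convention but are stated here as an assumption:

    // Sketch: prefer the new environment name, fall back to the legacy one.
    #include <stdlib.h>

    static const char* eager_arena_commit_env(void) {
      const char* s = getenv("MIMALLOC_EAGER_ARENA_COMMIT");        // new name
      if (s == NULL) { s = getenv("MIMALLOC_EAGER_REGION_COMMIT"); }  // legacy name
      return s;  // NULL: keep the compiled-in default (now 2, i.e. "auto")
    }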

src/prim/unix/prim.c

@@ -342,22 +342,6 @@ static void unix_mprotect_hint(int err) {
 int _mi_prim_commit(void* start, size_t size, bool commit) {
-  /*
-  #if 0 && defined(MAP_FIXED) && !defined(__APPLE__)
-  // Linux: disabled for now as mmap fixed seems much more expensive than MADV_DONTNEED (and splits VMA's?)
-  if (commit) {
-    // commit: just change the protection
-    err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
-    if (err != 0) { err = errno; }
-  }
-  else {
-    // decommit: use mmap with MAP_FIXED to discard the existing memory (and reduce rss)
-    const int fd = mi_unix_mmap_fd();
-    void* p = mmap(start, csize, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
-    if (p != start) { err = errno; }
-  }
-  #else
-  */
   int err = 0;
   if (commit) {
     // commit: ensure we can access the area
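
The deleted experiment used mmap(MAP_FIXED, PROT_NONE) to throw memory away; its own comment notes this is much more expensive than MADV_DONTNEED and can split VMAs. For contrast, a self-contained sketch of the madvise route on Linux (not the literal replacement code of this commit):

    // Sketch: drop page contents with MADV_DONTNEED instead of remapping.
    // The range stays mapped (no VMA split), RSS shrinks, and the next
    // touch demand-faults fresh zero pages back in.
    #include <sys/mman.h>
    #include <errno.h>
    #include <stddef.h>

    static int decommit_dontneed(void* start, size_t size) {
      int err = 0;
      if (madvise(start, size, MADV_DONTNEED) != 0) { err = errno; }
      // combine with mprotect(start, size, PROT_NONE) if the range should
      // also become inaccessible until it is committed again
      return err;
    }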

src/segment.c

@@ -756,7 +756,7 @@ static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, siz
   mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
   size_t next_index = mi_slice_index(slice) + slice_count;
   size_t next_count = slice->slice_count - slice_count;
-  mi_segment_span_free(segment, next_index, next_count, false /* don't decommit left-over part */, tld);
+  mi_segment_span_free(segment, next_index, next_count, false /* don't purge left-over part */, tld);
   slice->slice_count = (uint32_t)slice_count;
 }
@@ -915,7 +915,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   }
   segment->commit_mask = commit_mask;  // on lazy commit, the initial part is always committed
-  segment->allow_decommit = !segment->mem_is_pinned && !segment->mem_is_large;
+  segment->allow_decommit = !segment->mem_is_pinned && !segment->mem_is_large;
   segment->allow_purge = mi_option_is_enabled(mi_option_allow_purge) && segment->allow_decommit;
   if (segment->allow_purge) {
     segment->purge_expire = 0;  // don't decommit just committed memory // _mi_clock_now() + mi_option_get(mi_option_purge_delay);
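
allow_purge acts as the master switch here: purging requires both the allow_purge option and a decommittable memory source, and the purge_decommits option (renamed from reset_decommits in options.c above) then picks what a purge actually does. As a sketch with hypothetical os_* helpers:

    // Sketch: a "purge" is either a decommit or a reset (hypothetical helpers).
    //   purge_decommits on : decommit; must be re-committed before reuse
    //   purge_decommits off: reset (e.g. MADV_DONTNEED); stays committed
    static void purge_range(void* p, size_t size) {
      if (option_is_enabled(option_purge_decommits)) { os_decommit(p, size); }
      else                                           { os_reset(p, size); }
    }
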
@@ -969,7 +969,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   // initialize initial free pages
   if (segment->kind == MI_SEGMENT_NORMAL) {  // not a huge page
     mi_assert_internal(huge_page==NULL);
-    mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't decommit */, tld);
+    mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld);
   }
   else {
     mi_assert_internal(huge_page!=NULL);
@@ -1585,7 +1585,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
     mi_assert_internal(psize - (aligned_p - start) >= size);
     uint8_t* decommit_start = start + sizeof(mi_block_t);  // for the free list
     ptrdiff_t decommit_size = aligned_p - decommit_start;
-    _mi_os_decommit(decommit_start, decommit_size, &_mi_stats_main);  // note: cannot use segment_decommit on huge segments
+    _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main);  // note: cannot use segment_decommit on huge segments
   }
   return page;
@@ -1630,7 +1630,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
   if (segment->allow_decommit) {
     const size_t csize = mi_usable_size(block) - sizeof(mi_block_t);
     uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
-    _mi_os_decommit(p, csize, &_mi_stats_main);  // note: cannot use segment_decommit on huge segments
+    _mi_os_reset(p, csize, &_mi_stats_main);  // note: cannot use segment_decommit on huge segments
   }
 }
 #endif
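
Both huge-page hunks make the same substitution: _mi_os_reset instead of _mi_os_decommit, since (per the inline notes) segment_decommit cannot be used on huge segments, and a reset keeps the range committed while letting the OS drop its contents. The shape of the free-path variant, as a sketch with hypothetical names:

    // Sketch: reset a freed huge block but keep its header (hypothetical names).
    static void huge_block_reset(segment_t* segment, block_t* block) {
      if (!segment->allow_decommit) return;    // pinned / large-OS-page memory
      size_t csize = usable_size(block) - sizeof(block_t);
      uint8_t* p   = (uint8_t*)block + sizeof(block_t);  // header stays committed
      os_reset(p, csize);                      // drop contents, keep committed
    }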