wip: increase commit mask resolution

daan 2021-11-09 20:19:31 -08:00
parent c17878d1a7
commit 49c75a3157
6 changed files with 255 additions and 127 deletions
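The header-side change (the commit-mask type itself) is in one of the changed files not shown on this page. A minimal sketch of the direction, inferred from the pointer-based helper calls in the diffs below; the field layout and the MI_COMMIT_MASK_FIELD_* names are assumptions, not code from this commit:

// Sketch only: the mask grows from a single machine word (MI_INTPTR_SIZE*8 bits)
// to a fixed array of words, so MI_COMMIT_MASK_BITS can be raised and each bit
// covers a smaller MI_COMMIT_SIZE slice of the 8MiB segment.
#define MI_COMMIT_MASK_FIELD_BITS   (8*MI_INTPTR_SIZE)
#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)

typedef struct mi_commit_mask_s {
  uintptr_t mask[MI_COMMIT_MASK_FIELD_COUNT];
} mi_commit_mask_t;

// A multi-word struct is costly to pass and return by value, and the old
// operator style (a & b, a == b) no longer compiles against it; hence every
// helper below moves to a pointer-based create/set/clear/intersect API.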

src/options.c

@@ -66,7 +66,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(verbose) },
// the following options are experimental and not all combinations make sense.
- { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (8MiB) (but see also `eager_commit_delay`)
+ { 0, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (8MiB) (but see also `eager_commit_delay`)
#if defined(_WIN32) || (MI_INTPTR_SIZE <= 4) // and other OS's without overcommit?
{ 0, UNINIT, MI_OPTION(eager_region_commit) },
{ 0, UNINIT, MI_OPTION(reset_decommits) }, // reset decommits memory
@@ -89,7 +89,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
#endif
{ 1, UNINIT, MI_OPTION(allow_decommit) }, // decommit slices when no longer used (after reset_delay milli-seconds)
- { 500, UNINIT, MI_OPTION(reset_delay) }, // page reset delay in milli-seconds (= decommit)
+ { 10, UNINIT, MI_OPTION(reset_delay) }, // page reset delay in milli-seconds (= decommit)
{ 1000, UNINIT, MI_OPTION(segment_decommit_delay) },// decommit delay in milli-seconds for freed segments
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)

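Net effect of the two option edits above: segments are no longer eagerly committed by default, and delayed decommit now triggers after 10ms instead of 500ms, so the finer-grained commit mask actually gets exercised. The defaults can still be overridden at runtime; a small sketch using mimalloc's public option API (the enum values mirror the table above):

#include <mimalloc.h>

int main(void) {
  // Same effect as the new defaults; the environment variables
  // MIMALLOC_EAGER_COMMIT=0 and MIMALLOC_RESET_DELAY=10 work too.
  mi_option_set(mi_option_eager_commit, 0);
  mi_option_set(mi_option_reset_delay, 10);
  void* p = mi_malloc(1024);
  mi_free(p);
  return 0;
}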
src/segment-cache.c

@@ -76,11 +76,10 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
*memid = slot->memid;
*is_pinned = slot->is_pinned;
*is_zero = false;
- mi_commit_mask_t cmask = slot->commit_mask; // copy
+ *commit_mask = slot->commit_mask;
slot->p = NULL;
mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0);
- *commit_mask = cmask;
// mark the slot as free again
mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx));
_mi_bitmap_unclaim(cache_inuse, MI_CACHE_FIELDS, 1, bitidx);
@@ -90,27 +89,26 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats)
{
- if (mi_commit_mask_is_empty(*cmask)) {
+ if (mi_commit_mask_is_empty(cmask)) {
// nothing
}
- else if (mi_commit_mask_is_full(*cmask)) {
+ else if (mi_commit_mask_is_full(cmask)) {
_mi_os_decommit(p, total, stats);
}
else {
// todo: one call to decommit the whole at once?
mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
size_t part = total/MI_COMMIT_MASK_BITS;
- uintptr_t idx;
- uintptr_t count;
- mi_commit_mask_t mask = *cmask;
- mi_commit_mask_foreach(mask, idx, count) {
+ ptrdiff_t idx;
+ ptrdiff_t count;
+ mi_commit_mask_foreach(cmask, idx, count) {
void* start = (uint8_t*)p + (idx*part);
size_t size = count*part;
_mi_os_decommit(start, size, stats);
}
mi_commit_mask_foreach_end()
}
- *cmask = mi_commit_mask_empty();
+ mi_commit_mask_create_empty(cmask);
}
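mi_commit_mask_foreach / mi_commit_mask_foreach_end read as an open/close macro pair that walks runs of consecutive set bits; note the loop variables switch from uintptr_t to ptrdiff_t and the mask is now passed by pointer. The new macro definitions are not in this page; a plausible sketch, where the helper name _mi_commit_mask_next_run is an assumption:

// Sketch: yield (idx,count) for each run of 1-bits in *cm.
#define mi_commit_mask_foreach(cm,idx,count) \
  idx = 0; \
  while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {

#define mi_commit_mask_foreach_end() \
    idx += count; \
  }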
#define MI_MAX_PURGE_PER_PUSH (4)
@@ -135,7 +133,7 @@ static mi_decl_noinline void mi_segment_cache_purge(mi_os_tld_t* tld)
if (expire != 0 && now >= expire) { // safe read
// still expired, decommit it
mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
- mi_assert_internal(!mi_commit_mask_is_empty(slot->commit_mask) && _mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(!mi_commit_mask_is_empty(&slot->commit_mask) && _mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
_mi_abandoned_await_readers(); // wait until safe to decommit
// decommit committed parts
// TODO: instead of decommit, we could also free to the OS?
@@ -148,7 +146,7 @@ static mi_decl_noinline void mi_segment_cache_purge(mi_os_tld_t* tld)
}
}
- mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
+ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
{
#ifdef MI_CACHE_DISABLE
return false;
@@ -187,7 +185,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
slot->memid = memid;
slot->is_pinned = is_pinned;
mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
- slot->commit_mask = commit_mask;
+ slot->commit_mask = *commit_mask;
if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) {
long delay = mi_option_get(mi_option_segment_decommit_delay);
if (delay == 0) {

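With the mask behind a pointer, the predicates used throughout this file (is_empty, is_full) become word loops rather than single integer compares. A hedged sketch under the array representation assumed earlier:

static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    if (cm->mask[i] != 0) return false;   // any set bit => not empty
  }
  return true;
}

static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    if (cm->mask[i] != ~((uintptr_t)0)) return false;  // any clear bit => not full
  }
  return true;
}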
src/segment.c

@@ -127,7 +127,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(segment->abandoned <= segment->used);
mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id());
- mi_assert_internal(mi_commit_mask_all_set(segment->commit_mask, segment->decommit_mask)); // can only decommit committed blocks
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // can only decommit committed blocks
//mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0);
mi_slice_t* slice = &segment->slices[0];
const mi_slice_t* end = mi_segment_slices_end(segment);
@@ -256,8 +256,8 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
// _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
const size_t size = mi_segment_size(segment);
- if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, segment->commit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
- const size_t csize = mi_commit_mask_committed_size(segment->commit_mask, size);
+ if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
+ const size_t csize = mi_commit_mask_committed_size(&segment->commit_mask, size);
if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
_mi_abandoned_await_readers(); // wait until safe to free
_mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->os);
@@ -331,74 +331,85 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
Span management
----------------------------------------------------------- */
- static mi_commit_mask_t mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size) {
- mi_assert_internal(_mi_ptr_segment(p) == segment);
- if (size == 0 || size > MI_SEGMENT_SIZE) return 0;
- if (p >= (uint8_t*)segment + mi_segment_size(segment)) return 0;
+ static ptrdiff_t _mi_aligni_up(ptrdiff_t sz, size_t alignment) {
+ return (ptrdiff_t)_mi_align_up(sz, alignment);
+ }
- uintptr_t diff = (p - (uint8_t*)segment);
- uintptr_t start;
- uintptr_t end;
+ static ptrdiff_t _mi_aligni_down(ptrdiff_t sz, size_t alignment) {
+ return (ptrdiff_t)_mi_align_down(sz, alignment);
+ }
+ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
+ mi_assert_internal(_mi_ptr_segment(p) == segment);
+ mi_commit_mask_create_empty(cm);
+ if (size == 0 || size > MI_SEGMENT_SIZE) return;
+ if (p >= (uint8_t*)segment + mi_segment_size(segment)) return;
+ ptrdiff_t diff = (p - (uint8_t*)segment);
+ ptrdiff_t start;
+ ptrdiff_t end;
if (conservative) {
- start = _mi_align_up(diff, MI_COMMIT_SIZE);
- end = _mi_align_down(diff + size, MI_COMMIT_SIZE);
+ start = _mi_aligni_up(diff, MI_COMMIT_SIZE);
+ end = _mi_aligni_down(diff + size, MI_COMMIT_SIZE);
}
else {
- start = _mi_align_down(diff, MI_COMMIT_SIZE);
- end = _mi_align_up(diff + size, MI_COMMIT_SIZE);
+ start = _mi_aligni_down(diff, MI_COMMIT_SIZE);
+ end = _mi_aligni_up(diff + size, MI_COMMIT_SIZE);
}
mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0);
*start_p = (uint8_t*)segment + start;
*full_size = (end > start ? end - start : 0);
- if (*full_size == 0) return 0;
+ if (*full_size == 0) return;
- uintptr_t bitidx = start / MI_COMMIT_SIZE;
- mi_assert_internal(bitidx < (MI_INTPTR_SIZE*8));
+ ptrdiff_t bitidx = start / MI_COMMIT_SIZE;
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
- uintptr_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0
- if (bitidx + bitcount > MI_INTPTR_SIZE*8) {
+ ptrdiff_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0
+ if (bitidx + bitcount > MI_COMMIT_MASK_BITS) {
_mi_warning_message("commit mask overflow: %zu %zu %zu %zu 0x%p %zu\n", bitidx, bitcount, start, end, p, size);
}
- mi_assert_internal((bitidx + bitcount) <= (MI_INTPTR_SIZE*8));
- return mi_commit_mask_create(bitidx, bitcount);
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
+ mi_commit_mask_create(bitidx, bitcount, cm);
}
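The conservative/liberal split above is the heart of the correctness argument: commits round the range outward so every touched byte is backed, decommits round inward so only blocks lying entirely inside the freed range are released. A worked example, assuming MI_COMMIT_SIZE = 64KiB purely for illustration:

// range [diff, diff+size) = [100KiB, 300KiB) within the segment:
//   liberal (commit):        start = aligni_down(100KiB) =  64KiB (bit 1)
//                            end   = aligni_up(300KiB)   = 320KiB (bit 5)
//                            => bits [1,5) get committed
//   conservative (decommit): start = aligni_up(100KiB)   = 128KiB (bit 2)
//                            end   = aligni_down(300KiB) = 256KiB (bit 4)
//                            => only bits [2,4) get decommitted
// The partially covered edge blocks (bits 1 and 4) stay committed.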
static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, size_t size, mi_stats_t* stats) {
// commit liberal, but decommit conservative
uint8_t* start;
size_t full_size;
- mi_commit_mask_t mask = mi_segment_commit_mask(segment,!commit/*conservative*/,p,size,&start,&full_size);
- if (mi_commit_mask_is_empty(mask) || full_size==0) return true;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, !commit/*conservative*/, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return true;
- if (commit && !mi_commit_mask_all_set(segment->commit_mask, mask)) {
+ if (commit && !mi_commit_mask_all_set(&segment->commit_mask, &mask)) {
bool is_zero = false;
- mi_commit_mask_t cmask = mi_commit_mask_intersect(segment->commit_mask, mask);
- _mi_stat_decrease(&_mi_stats_main.committed, mi_commit_mask_committed_size(cmask, MI_SEGMENT_SIZE)); // adjust for overlap
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+ _mi_stat_decrease(&_mi_stats_main.committed, mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
if (!_mi_os_commit(start,full_size,&is_zero,stats)) return false;
- mi_commit_mask_set(&segment->commit_mask,mask);
+ mi_commit_mask_set(&segment->commit_mask, &mask);
}
- else if (!commit && mi_commit_mask_any_set(segment->commit_mask,mask)) {
+ else if (!commit && mi_commit_mask_any_set(&segment->commit_mask, &mask)) {
mi_assert_internal((void*)start != (void*)segment);
- mi_commit_mask_t cmask = mi_commit_mask_intersect(segment->commit_mask, mask);
- _mi_stat_increase(&_mi_stats_main.committed, full_size - mi_commit_mask_committed_size(cmask, MI_SEGMENT_SIZE)); // adjust for overlap
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+ _mi_stat_increase(&_mi_stats_main.committed, full_size - mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
if (segment->allow_decommit) { _mi_os_decommit(start, full_size, stats); } // ok if this fails
- mi_commit_mask_clear(&segment->commit_mask, mask);
+ mi_commit_mask_clear(&segment->commit_mask, &mask);
}
// increase expiration of reusing part of the delayed decommit
- if (commit && mi_commit_mask_any_set(segment->decommit_mask, mask)) {
+ if (commit && mi_commit_mask_any_set(&segment->decommit_mask, &mask)) {
segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_reset_delay);
}
// always undo delayed decommits
- mi_commit_mask_clear(&segment->decommit_mask, mask);
- mi_assert_internal((segment->commit_mask & segment->decommit_mask) == segment->decommit_mask);
+ mi_commit_mask_clear(&segment->decommit_mask, &mask);
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
return true;
}
static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
- mi_assert_internal(mi_commit_mask_all_set(segment->commit_mask, segment->decommit_mask));
- if (mi_commit_mask_is_full(segment->commit_mask) && mi_commit_mask_is_empty(segment->decommit_mask)) return true; // fully committed
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+ if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->decommit_mask)) return true; // fully committed
return mi_segment_commitx(segment,true,p,size,stats);
}
@@ -411,27 +422,30 @@ static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_
// register for future decommit in the decommit mask
uint8_t* start;
size_t full_size;
- mi_commit_mask_t mask = mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size);
- if (mi_commit_mask_is_empty(mask) || full_size==0) return;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return;
// update delayed commit
- mi_commit_mask_set(&segment->decommit_mask, mi_commit_mask_intersect(mask,segment->commit_mask)); // only decommit what is committed; span_free may try to decommit more
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only decommit what is committed; span_free may try to decommit more
+ mi_commit_mask_set(&segment->decommit_mask, &cmask);
segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_reset_delay);
}
}
static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats) {
- if (!segment->allow_decommit || mi_commit_mask_is_empty(segment->decommit_mask)) return;
+ if (!segment->allow_decommit || mi_commit_mask_is_empty(&segment->decommit_mask)) return;
mi_msecs_t now = _mi_clock_now();
if (!force && now < segment->decommit_expire) return;
mi_commit_mask_t mask = segment->decommit_mask;
segment->decommit_expire = 0;
- segment->decommit_mask = mi_commit_mask_empty();
+ mi_commit_mask_create_empty(&segment->decommit_mask);
- uintptr_t idx;
- uintptr_t count;
- mi_commit_mask_foreach(mask, idx, count) {
+ ptrdiff_t idx;
+ ptrdiff_t count;
+ mi_commit_mask_foreach(&mask, idx, count) {
// if found, decommit that sequence
if (count > 0) {
uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE);
@@ -439,8 +453,7 @@ static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_st
mi_segment_commitx(segment, false, p, size, stats);
}
}
- mi_commit_mask_foreach_end()
- mi_assert_internal(mi_commit_mask_is_empty(segment->decommit_mask));
+ mi_commit_mask_foreach_end()
}
@@ -649,7 +662,14 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
// Try to get from our cache first
bool is_zero = false;
const bool commit_info_still_good = (segment != NULL);
- mi_commit_mask_t commit_mask = (segment != NULL ? segment->commit_mask : mi_commit_mask_empty());
+ mi_commit_mask_t commit_mask;
+ if (segment != NULL) {
+ commit_mask = segment->commit_mask;
+ }
+ else {
+ mi_commit_mask_create_empty(&commit_mask);
+ }
if (segment==NULL) {
// Allocate the segment from the OS
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
@@ -659,23 +679,30 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
if (segment==NULL) {
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
if (segment == NULL) return NULL; // failed to allocate
- commit_mask = (commit ? mi_commit_mask_full() : mi_commit_mask_empty());
+ if (commit) {
+ mi_commit_mask_create_full(&commit_mask);
+ }
+ else {
+ mi_commit_mask_create_empty(&commit_mask);
+ }
}
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
mi_assert_internal(commit_needed>0);
- if (!mi_commit_mask_all_set(commit_mask,mi_commit_mask_create(0, commit_needed))) {
+ mi_commit_mask_t commit_needed_mask;
+ mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
+ if (!mi_commit_mask_all_set(&commit_mask, &commit_needed_mask)) {
// at least commit the info slices
- mi_assert_internal(commit_needed*MI_COMMIT_SIZE > info_slices*MI_SEGMENT_SLICE_SIZE);
+ mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= info_slices*MI_SEGMENT_SLICE_SIZE);
bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, &is_zero, tld->stats);
if (!ok) return NULL; // failed to commit
- mi_commit_mask_set(&commit_mask,mi_commit_mask_create(0, commit_needed));
+ mi_commit_mask_set(&commit_mask, &commit_needed_mask);
}
segment->memid = memid;
segment->mem_is_pinned = is_pinned;
segment->mem_is_large = mem_large;
- segment->mem_is_committed = mi_commit_mask_is_full(commit_mask);
+ segment->mem_is_committed = mi_commit_mask_is_full(&commit_mask);
mi_segments_track_size((long)(segment_size), tld);
_mi_segment_map_allocated_at(segment);
}
@@ -692,7 +719,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
segment->decommit_expire = 0;
- segment->decommit_mask = mi_commit_mask_empty();
+ mi_commit_mask_create_empty( &segment->decommit_mask );
}
// initialize segment info
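mi_commit_mask_committed_size(&mask, total), used above when freeing segments and adjusting the committed statistic, scales the number of set bits by the per-bit block size. A minimal sketch under the same assumed array representation (a portable popcount loop keeps it self-contained):

static inline size_t mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) {
  mi_assert_internal((total % MI_COMMIT_MASK_BITS) == 0);
  size_t count = 0;
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    uintptr_t m = cm->mask[i];
    while (m != 0) { count += (m & 1); m >>= 1; }  // count set bits per word
  }
  return count * (total / MI_COMMIT_MASK_BITS);     // bits * bytes-per-bit
}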