fix purging with ranges

daanx 2024-12-17 11:54:26 -08:00
parent adfeb1f6f2
commit c585753dce
3 changed files with 22 additions and 15 deletions

View file

@@ -1398,14 +1398,8 @@ static void mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_c
const size_t size = mi_size_of_slices(slice_count);
void* const p = mi_arena_slice_start(arena, slice_index);
- bool needs_recommit = false; // reset needs no recommit, decommit does need it
- if (mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)) {
-   // all slices are committed, we can purge the entire range
-   needs_recommit = _mi_os_purge(p, size);
- }
- else {
-   mi_assert_internal(false); // can this happen?
- }
+ const bool all_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count);
+ const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed);
// update committed bitmap
if (needs_recommit) {
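
Note: the rewrite folds the committed/uncommitted case split into a single call, passing `all_committed` down so the OS layer can choose between a reset (no recommit needed) and a full decommit. A minimal sketch of that contract, using hypothetical `os_reset`/`os_decommit`/`purge_decommits` stand-ins rather than mimalloc's actual `_mi_os_purge_ex` implementation:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical stand-ins for the OS layer; names are illustrative only.
extern void os_reset(void* p, size_t size);     // advise pages away, keep them committed
extern void os_decommit(void* p, size_t size);  // drop the commit entirely
extern bool purge_decommits(void);              // configuration: prefer decommit?

// Purge a range; returns true iff the caller must recommit before reuse.
static bool example_os_purge_ex(void* p, size_t size, bool all_committed) {
  if (all_committed && !purge_decommits()) {
    os_reset(p, size);    // reset keeps the pages committed, so no recommit
    return false;
  }
  os_decommit(p, size);   // a partially committed range is decommitted whole
  return true;
}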
@@ -1450,11 +1444,13 @@ static bool mi_arena_try_purge_range(mi_arena_t* arena, size_t slice_index, size
if (mi_bitmap_try_clearN(arena->slices_free, slice_index, slice_count)) {
// purge
mi_arena_purge(arena, slice_index, slice_count);
+ mi_assert_internal(mi_bitmap_is_clearN(arena->slices_committed, slice_index, slice_count));
+ // and reset the free range
mi_bitmap_setN(arena->slices_free, slice_index, slice_count, NULL);
return true;
}
else {
// was allocated again already
return false;
}
}
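
Note: the purge only runs after `mi_bitmap_try_clearN` has atomically claimed the whole free range, so a concurrent allocation cannot hand out slices that are being purged; the range is re-published with `mi_bitmap_setN` only afterwards, and the new assertion checks the purge indeed left it uncommitted. A reduced sketch of this claim/work/release pattern, using a single C11 atomic flag in place of mimalloc's bitmaps:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool slot_free = true;

// Claim exclusively, do the destructive work, then publish again.
static bool try_purge_slot(void (*purge)(void)) {
  bool expected = true;
  if (!atomic_compare_exchange_strong(&slot_free, &expected, false)) {
    return false;                  // was allocated again already
  }
  purge();                         // safe: nobody else can grab the slot now
  atomic_store(&slot_free, true);  // and reset the free flag
  return true;
}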
@@ -1463,12 +1459,15 @@ static bool mi_arena_try_purge_visitor(size_t slice_index, size_t slice_count, m
mi_purge_visit_info_t* vinfo = (mi_purge_visit_info_t*)arg;
// try to purge: first claim the free blocks
if (mi_arena_try_purge_range(arena, slice_index, slice_count)) {
- vinfo->any_purged = true;
+ vinfo->any_purged = true;
+ vinfo->all_purged = true;
}
else {
// failed to claim the full range, try per slice instead
for (size_t i = 0; i < slice_count; i++) {
- vinfo->any_purged = vinfo->any_purged || mi_arena_try_purge_range(arena, slice_index + i, 1);
+ const bool purged = mi_arena_try_purge_range(arena, slice_index + i, 1);
+ vinfo->any_purged = vinfo->any_purged || purged;
+ vinfo->all_purged = vinfo->all_purged && purged;
}
}
// done: clear the purge bits
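
Note: besides introducing `all_purged`, hoisting the call into a local `purged` fixes a subtle bug in the replaced line: `||` short-circuits, so once `any_purged` was true the remaining slices were never attempted at all. A standalone demonstration of the pitfall and the fix:

#include <stdbool.h>
#include <stdio.h>

static bool do_work(int i) { printf("work %d\n", i); return true; }

int main(void) {
  bool any = false;
  // Pitfall: after the first success, || short-circuits and do_work()
  // is never called again -- this prints "work 0" only.
  for (int i = 0; i < 3; i++) { any = any || do_work(i); }

  any = false;
  // Fix, as in the commit: evaluate first, then fold into the flags.
  for (int i = 0; i < 3; i++) {
    const bool done = do_work(i);  // always runs; prints all three
    any = any || done;
  }
  return 0;
}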

View file

@@ -81,7 +81,7 @@ static inline mi_bfield_t mi_bfield_mask(size_t bit_count, size_t shiftl) {
// Set a bit atomically. Returns `true` if the bit transitioned from 0 to 1
static inline bool mi_bfield_atomic_set(_Atomic(mi_bfield_t)*b, size_t idx) {
mi_assert_internal(idx < MI_BFIELD_BITS);
- const mi_bfield_t mask = mi_bfield_one()<<idx;
+ const mi_bfield_t mask = mi_bfield_mask(1, idx);
const mi_bfield_t old = mi_atomic_or_acq_rel(b, mask);
return ((old&mask) == 0);
}
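
Note: the three changes in this file replace the open-coded `mi_bfield_one()<<idx` with the existing `mi_bfield_mask(1, idx)` helper (visible in the hunk context above). A generic sketch of such a mask helper, written to avoid the undefined behavior of shifting by the full word width; mimalloc's actual definition may differ:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t bfield_t;
#define BFIELD_BITS 64

// bit_count ones shifted left by shiftl; bfield_mask(1, idx) == 1 << idx.
static inline bfield_t bfield_mask(size_t bit_count, size_t shiftl) {
  assert(bit_count >= 1 && bit_count + shiftl <= BFIELD_BITS);
  const bfield_t ones = (bit_count >= BFIELD_BITS)
                          ? ~((bfield_t)0)                   // full-width mask
                          : (((bfield_t)1 << bit_count) - 1);
  return ones << shiftl;
}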
@@ -90,7 +90,7 @@ static inline bool mi_bfield_atomic_set(_Atomic(mi_bfield_t)*b, size_t idx) {
// `all_clear` is set if the new bfield is zero.
static inline bool mi_bfield_atomic_clear(_Atomic(mi_bfield_t)*b, size_t idx, bool* all_clear) {
mi_assert_internal(idx < MI_BFIELD_BITS);
- const mi_bfield_t mask = mi_bfield_one()<<idx;
+ const mi_bfield_t mask = mi_bfield_mask(1, idx);
mi_bfield_t old = mi_atomic_and_acq_rel(b, ~mask);
if (all_clear != NULL) { *all_clear = ((old&~mask)==0); }
return ((old&mask) == mask);
@@ -101,7 +101,7 @@ static inline bool mi_bfield_atomic_clear(_Atomic(mi_bfield_t)*b, size_t idx, bo
// happen almost never (and is accounted for in the stats)
static inline void mi_bfield_atomic_clear_once_set(_Atomic(mi_bfield_t)*b, size_t idx) {
mi_assert_internal(idx < MI_BFIELD_BITS);
- const mi_bfield_t mask = mi_bfield_one()<<idx;
+ const mi_bfield_t mask = mi_bfield_mask(1, idx);
mi_bfield_t old = mi_atomic_load_relaxed(b);
do {
if mi_unlikely((old&mask) == 0) {
@@ -1085,7 +1085,7 @@ bool mi_bitmap_is_xsetN(mi_xset_t set, mi_bitmap_t* bitmap, size_t idx, size_t n
#define mi_bfield_iterate(bfield,start,cycle,name_idx,SUF) { \
mi_assert_internal(start <= cycle); \
mi_assert_internal(start < MI_BFIELD_BITS); \
- mi_assert_internal(cycle < MI_BFIELD_BITS); \
+ mi_assert_internal(cycle <= MI_BFIELD_BITS); \
mi_bfield_t _cycle_mask##SUF = mi_bfield_mask(cycle - start, start); \
size_t _bcount##SUF = mi_bfield_popcount(bfield); \
mi_bfield_t _b##SUF = bfield & _cycle_mask##SUF; /* process [start, cycle> first*/\
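
Note: relaxing the assertion to `cycle <= MI_BFIELD_BITS` lets the iteration cycle span the entire bfield; with `start == 0` and `cycle == MI_BFIELD_BITS`, `mi_bfield_mask(cycle - start, start)` must then produce an all-ones mask (one reason a mask helper that handles the full width, as sketched above, matters). A simple modular sketch of cyclic iteration over set bits, not the popcount-driven macro itself:

#include <stdint.h>
#include <stdio.h>

// Visit set bits starting at `start`, wrapping within [0, cycle).
static void iterate_cyclic(uint16_t b, unsigned start, unsigned cycle) {
  for (unsigned n = 0; n < cycle; n++) {
    const unsigned idx = (start + n) % cycle;
    if (b & (uint16_t)(1u << idx)) { printf("bit %u\n", idx); }
  }
}

int main(void) {
  iterate_cyclic(0x8001, 4, 16);  // cycle == full width: bit 15, then bit 0
  return 0;
}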
@@ -1332,9 +1332,16 @@ bool _mi_bitmap_forall_set_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visi
for (size_t j = 0; j < MI_BCHUNK_FIELDS; j++) {
const size_t base_idx = (chunk_idx*MI_BCHUNK_BITS) + (j*MI_BFIELD_BITS);
mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[j]);
+ #if MI_DEBUG > 1
+ const size_t bpopcount = mi_popcount(b);
+ size_t rngcount = 0;
+ #endif
size_t bidx;
while (mi_bfield_find_least_bit(b, &bidx)) {
const size_t rng = mi_ctz(~(b>>bidx)); // all the set bits from bidx
+ #if MI_DEBUG > 1
+ rngcount += rng;
+ #endif
mi_assert_internal(rng>=1 && rng<=MI_BFIELD_BITS);
const size_t idx = base_idx + bidx;
mi_assert_internal((idx % MI_BFIELD_BITS) + rng <= MI_BFIELD_BITS);
@@ -1343,6 +1350,7 @@ bool _mi_bitmap_forall_set_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visi
// clear rng bits in b
b = b & ~mi_bfield_mask(rng, bidx);
}
+ mi_assert_internal(rngcount == bpopcount);
}
}
}
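
Note: the added `MI_DEBUG > 1` counters assert that the visited ranges exactly cover the set bits: `mi_ctz(~(b>>bidx))` measures the run of consecutive set bits starting at `bidx`, and summing those run lengths must reproduce the field's popcount. A standalone check of that invariant using GCC/Clang builtins (with a test pattern whose runs never reach bit 63, so `ctz` is never called on 0):

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint64_t b = 0x00ff00f0ff00ull;                     // runs of 8, 4 and 8 bits
  const unsigned total = (unsigned)__builtin_popcountll(b);
  unsigned rngcount = 0;
  while (b != 0) {
    const unsigned bidx = (unsigned)__builtin_ctzll(b);            // least set bit
    const unsigned rng  = (unsigned)__builtin_ctzll(~(b >> bidx)); // run length
    rngcount += rng;
    b &= ~((((uint64_t)1 << rng) - 1) << bidx);       // clear the run (rng < 64 here)
  }
  assert(rngcount == total);                          // ranges cover all set bits
  return 0;
}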

View file

@@ -262,7 +262,7 @@ static void test_stress(void) {
#if !defined(NDEBUG) || defined(MI_TSAN)
if ((n + 1) % 10 == 0) {
printf("- iterations left: %3d\n", ITER - (n + 1));
- //mi_debug_show_arenas(true, false, false);
+ mi_debug_show_arenas(true, false, false);
//mi_collect(true);
//mi_debug_show_arenas(true, false, false);
}