mirror of https://github.com/microsoft/mimalloc.git
synced 2025-05-04 14:39:31 +03:00
maintain abandoned_count more robustly
This commit is contained in:
parent
1b3eb8ef28
commit
71bcf1c76b
2 changed files with 16 additions and 18 deletions

src/arena.c
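The gist of the change: `abandoned_count` used to live in src/segment.c and be updated ad hoc at every call site; this commit moves it into src/arena.c and updates it only inside the functions that actually flip a segment's "abandoned" bit in the arena bitmap, conditional on the transition they observed. A minimal standalone sketch of that pattern, using plain C11 atomics rather than mimalloc's wrappers (all names here are illustrative, not mimalloc API):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic(size_t) abandoned_count;   // debug statistic
static _Atomic(unsigned) abandoned_bits;  // stand-in for an arena bitmap field

// Set bit `i`; count only on a real 0 -> 1 transition.
static bool mark_abandoned(unsigned i) {
  unsigned prev = atomic_fetch_or(&abandoned_bits, 1u << i);
  bool was_unmarked = ((prev & (1u << i)) == 0);
  if (was_unmarked) atomic_fetch_add(&abandoned_count, 1);
  return was_unmarked;
}

// Clear bit `i`; count only on a real 1 -> 0 transition.
static bool clear_abandoned(unsigned i) {
  unsigned prev = atomic_fetch_and(&abandoned_bits, ~(1u << i));
  bool was_marked = ((prev & (1u << i)) != 0);
  if (was_marked) atomic_fetch_sub(&abandoned_count, 1);
  return was_marked;
}

Because each update is tied to the transition its own atomic operation observed, racing marks and clears of the same bit adjust the count exactly once each, no matter how the threads interleave.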
@@ -738,6 +738,9 @@ bool _mi_arena_contains(const void* p) {
   the arena bitmaps.
 ----------------------------------------------------------- */
 
+// Maintain these for debug purposes
+static mi_decl_cache_align _Atomic(size_t)abandoned_count;
+
 // reclaim a specific abandoned segment; `true` on success.
 bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
 {
@@ -748,11 +751,12 @@ bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
   mi_assert_internal(arena_idx < MI_MAX_ARENAS);
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
-  bool was_abandoned = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
-  // mi_assert_internal(was_abandoned);
-  mi_assert_internal(!was_abandoned || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
+  bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
+  if (was_marked) { mi_atomic_decrement_relaxed(&abandoned_count); }
+  // mi_assert_internal(was_marked);
+  mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
   //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
-  return was_abandoned;
+  return was_marked;
 }
 
 // mark a specific segment as abandoned
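Why the decrement is guarded by `was_marked`: two threads can race to unabandon the same segment (for example a reclaim racing with an explicit clear), and `_mi_bitmap_unclaim` reports whether this caller actually cleared the bit, so exactly one of them adjusts the count. A standalone illustration of the race, assuming C11 `<threads.h>` and illustrative names:

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static _Atomic(int) bit = 1;              // one abandoned segment
static _Atomic(int) abandoned_count = 1;  // counted when it was marked

static int try_reclaim(void* arg) {
  (void)arg;
  // atomic exchange plays the role of _mi_bitmap_unclaim here:
  // only one thread observes the bit still set and wins the race
  if (atomic_exchange(&bit, 0) == 1) {
    atomic_fetch_sub(&abandoned_count, 1);  // exactly one decrement
  }
  return 0;
}

int main(void) {
  thrd_t a, b;
  thrd_create(&a, try_reclaim, NULL);
  thrd_create(&b, try_reclaim, NULL);
  thrd_join(a, NULL);
  thrd_join(b, NULL);
  printf("abandoned_count = %d\n", atomic_load(&abandoned_count));  // always 0
  return 0;
}

However the threads interleave, the final count is 0 and never -1, because the losing thread sees the bit already cleared and skips the decrement.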
@@ -765,15 +769,17 @@ void _mi_arena_segment_mark_abandoned(mi_memid_t memid)
   mi_assert_internal(arena_idx < MI_MAX_ARENAS);
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
-  const bool was_unset = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
-  MI_UNUSED_RELEASE(was_unset);
-  mi_assert_internal(was_unset);
+  const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
+  if (was_unmarked) { mi_atomic_increment_relaxed(&abandoned_count); }
+  mi_assert_internal(was_unmarked);
   mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
 }
 
 // reclaim abandoned segments
 mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_id_t* previous_id, size_t* previous_idx )
 {
+  if (mi_atomic_load_relaxed(&abandoned_count) == 0) return false;
+
   const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count);
   int arena_idx = *previous_id;
   size_t field_idx = mi_bitmap_index_field(*previous_idx);
@@ -794,6 +800,7 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_id_t* previous_id,
       mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
       // try to reclaim it atomically
       if (_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) {
+        mi_atomic_decrement_relaxed(&abandoned_count);
         *previous_idx = bitmap_idx;
         *previous_id = arena_idx;
         mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
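With the decrement folded into `_mi_arena_segment_clear_abandoned_next`, a caller simply drives the cursor until it returns NULL; this mirrors the (now simpler) loop in `_mi_abandoned_reclaim_all` below. A sketch under the signatures shown in this diff (`reclaim_all_example` is a hypothetical caller, and `mi_segment_reclaim` is internal to src/segment.c):

// Hypothetical caller: drain every abandoned segment via the cursor API.
void reclaim_all_example(mi_heap_t* heap, mi_segments_tld_t* tld) {
  mi_segment_t* segment;
  mi_arena_id_t current_id = 0;  // cursor: which arena is being scanned
  size_t current_idx = 0;        // cursor: bitmap position within that arena
  while ((segment = _mi_arena_segment_clear_abandoned_next(&current_id, &current_idx)) != NULL) {
    // abandoned_count was already decremented inside the call above
    mi_segment_reclaim(segment, heap, 0, NULL, tld);
  }
}

Keeping the cursor in the caller lets the scan resume where it left off instead of rescanning the bitmaps from the start on every call.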
src/segment.c

@@ -753,10 +753,6 @@ by scanning the arena memory
 (segments outside arena memory are only reclaimed by a free).
 ----------------------------------------------------------- */
 
-// Maintain these for debug purposes
-static mi_decl_cache_align _Atomic(size_t)abandoned_count;
-
-
 // legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list
 void _mi_abandoned_await_readers(void) {
   // nothing needed
@@ -782,7 +778,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_segments_track_size(-((long)segment->segment_size), tld);
   segment->thread_id = 0;
   segment->abandoned_visits = 0;
-  _mi_arena_segment_mark_abandoned(segment->memid); mi_atomic_increment_relaxed(&abandoned_count);
+  _mi_arena_segment_mark_abandoned(segment->memid);
 }
 
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
@@ -905,7 +901,6 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
   if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false;  // it is not abandoned
   if (_mi_arena_segment_clear_abandoned(segment->memid)) {  // atomically unabandon
-    mi_atomic_decrement_relaxed(&abandoned_count);
     mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
     mi_assert_internal(res == segment);
     return (res != NULL);
@@ -918,7 +913,6 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   mi_arena_id_t current_id = 0;
   size_t current_idx = 0;
   while ((segment = _mi_arena_segment_clear_abandoned_next(&current_id, &current_idx)) != NULL) {
-    mi_atomic_decrement_relaxed(&abandoned_count);
     mi_segment_reclaim(segment, heap, 0, NULL, tld);
   }
 }
@@ -926,15 +920,13 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
 static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, bool* reclaimed, mi_segments_tld_t* tld)
 {
   *reclaimed = false;
-  if (mi_atomic_load_relaxed(&abandoned_count) == 0) return NULL;
 
   mi_segment_t* segment;
   mi_arena_id_t current_id = 0;
   size_t current_idx = 0;
   long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 1024);  // limit the work to bound allocation times
   while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current_id, &current_idx)) != NULL))
   {
-    mi_atomic_decrement_relaxed(&abandoned_count);
     segment->abandoned_visits++;
     // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
     // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
@@ -962,7 +954,6 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
     else {
       // otherwise, mark it back as abandoned
       // todo: reset delayed pages in the segment?
-      mi_atomic_increment_relaxed(&abandoned_count);
       _mi_arena_segment_mark_abandoned(segment->memid);
     }
   }
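After this commit the intended invariant is that `abandoned_count` matches the number of set bits across all `blocks_abandoned` bitmap fields (exactly so only while the system is quiescent). A hypothetical debug check, not part of mimalloc, assuming each bitmap field is one atomic `size_t` word as the accesses in this diff suggest:

// Hypothetical debug check (not in mimalloc): recount the set bits in every
// arena's abandoned bitmap and compare against abandoned_count. Only
// meaningful while no other thread is abandoning or reclaiming segments.
static void mi_debug_check_abandoned_count(void) {
  size_t expected = 0;
  const size_t arena_count = mi_atomic_load_relaxed(&mi_arena_count);
  for (size_t i = 0; i < arena_count; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
    if (arena == NULL) continue;
    for (size_t f = 0; f < arena->field_count; f++) {
      // assumption: each bitmap field is one atomic size_t worth of bits
      size_t word = mi_atomic_load_relaxed(&arena->blocks_abandoned[f]);
      while (word != 0) { expected += (word & 1); word >>= 1; }  // popcount
    }
  }
  mi_assert_internal(expected == mi_atomic_load_relaxed(&abandoned_count));
}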