mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-08 16:29:31 +03:00)
merge from dev-abandon
This commit is contained in: e5d1cb3092
3 changed files with 51 additions and 40 deletions
include/mimalloc/internal.h

```diff
@@ -125,9 +125,16 @@ bool _mi_arena_contains(const void* p);
 void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
 void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
 
 bool _mi_arena_segment_clear_abandoned(mi_memid_t memid);
 void _mi_arena_segment_mark_abandoned(mi_memid_t memid);
-mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_id_t* current_id, size_t* current_idx);
+
+typedef struct mi_arena_field_cursor_s { // abstract
+  mi_arena_id_t start;
+  int           count;
+  size_t        bitmap_idx;
+} mi_arena_field_cursor_t;
+void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current);
+mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous);
 
 // "segment-map.c"
 void _mi_segment_map_allocated_at(const mi_segment_t* segment);
```
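The header change replaces the two in/out parameters of `_mi_arena_segment_clear_abandoned_next` (an arena id plus a bitmap index) with a single abstract cursor. A minimal sketch of how the cursor API is driven, assuming an in-tree build where the internal header is available (`count_abandoned` is a hypothetical helper; the real callers are the rewritten loops in src/segment.c below):

```c
#include "mimalloc/internal.h"  // in-tree internal header (sketch assumes an internal build)

// Hypothetical helper: walk every currently abandoned segment exactly once,
// starting at a randomized arena so concurrent walkers do not contend on arena 0.
static size_t count_abandoned(mi_heap_t* heap) {
  mi_arena_field_cursor_t current;
  _mi_arena_field_cursor_init(heap, &current);
  size_t n = 0;
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
    // the returned segment was atomically un-abandoned and is now owned by the
    // caller; a real caller reclaims it (see _mi_abandoned_reclaim_all below),
    // but this sketch just counts it and marks it abandoned again
    _mi_arena_segment_mark_abandoned(segment->memid);
    n++;
  }
  return n;
}
```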
src/arena.c (49 changed lines)
```diff
@@ -741,6 +741,9 @@ bool _mi_arena_contains(const void* p) {
   the arena bitmaps.
 ----------------------------------------------------------- */
 
+// Maintain these for debug purposes
+static mi_decl_cache_align _Atomic(size_t) abandoned_count;
+
 // reclaim a specific abandoned segment; `true` on success.
 bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
 {
```
```diff
@@ -751,11 +754,12 @@ bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
   mi_assert_internal(arena_idx < MI_MAX_ARENAS);
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
-  bool was_abandoned = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
-  // mi_assert_internal(was_abandoned);
-  mi_assert_internal(!was_abandoned || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
+  bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
+  if (was_marked) { mi_atomic_decrement_relaxed(&abandoned_count); }
+  // mi_assert_internal(was_marked);
+  mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
   //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
-  return was_abandoned;
+  return was_marked;
 }
 
 // mark a specific segment as abandoned
```
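In the hunk above, the decrement is guarded by the return value of `_mi_bitmap_unclaim`: `abandoned_count` moves only when the abandoned bit really flips from 1 to 0, so two threads racing to clear the same bit cannot double-count. A standalone sketch of that invariant in plain C11 atomics (hypothetical names; mimalloc uses its own `mi_atomic_*` wrappers):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic(size_t)   marked_count;  // plays the role of abandoned_count
static _Atomic(unsigned) field;         // one bitmap field

// Clear `bit`; decrement the counter only on a real 1 -> 0 transition.
static bool clear_mark(unsigned bit) {
  unsigned mask = 1u << bit;
  unsigned prev = atomic_fetch_and(&field, ~mask);  // atomic test-and-clear
  bool was_marked = (prev & mask) != 0;
  if (was_marked) { atomic_fetch_sub(&marked_count, 1); }
  return was_marked;  // exactly one racing caller observes true
}
```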
```diff
@@ -768,21 +772,33 @@ void _mi_arena_segment_mark_abandoned(mi_memid_t memid)
   mi_assert_internal(arena_idx < MI_MAX_ARENAS);
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
-  const bool was_unset = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
-  MI_UNUSED_RELEASE(was_unset);
-  mi_assert_internal(was_unset);
+  const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
+  if (was_unmarked) { mi_atomic_increment_relaxed(&abandoned_count); }
+  mi_assert_internal(was_unmarked);
   mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
 }
 
+// start a cursor at a randomized arena
+void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current) {
+  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+  current->start = (max_arena == 0 ? 0 : (mi_arena_id_t)( _mi_heap_random_next(heap) % max_arena));
+  current->count = 0;
+  current->bitmap_idx = 0;
+}
+
 // reclaim abandoned segments
-mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_id_t* previous_id, size_t* previous_idx )
+mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous )
 {
   const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count);
-  int arena_idx = *previous_id;
-  size_t field_idx = mi_bitmap_index_field(*previous_idx);
-  size_t bit_idx = mi_bitmap_index_bit_in_field(*previous_idx) + 1;
+  if (max_arena <= 0 || mi_atomic_load_relaxed(&abandoned_count) == 0) return NULL;
+
+  int count = previous->count;
+  size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx);
+  size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1;
   // visit arena's (from previous)
-  for( ; arena_idx < max_arena; arena_idx++, field_idx = 0, bit_idx = 0) {
+  for (; count < max_arena; count++, field_idx = 0, bit_idx = 0) {
+    mi_arena_id_t arena_idx = previous->start + count;
+    if (arena_idx >= max_arena) { arena_idx = arena_idx % max_arena; } // wrap around
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
     if (arena != NULL) {
       // visit the abandoned fields (starting at previous_idx)
```
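The rewritten iterator no longer scans arenas from index 0: it starts at the cursor's randomized `start` arena and advances `count`, wrapping the arena index modulo `max_arena`, so a full walk still visits every arena exactly once while concurrent threads begin at different arenas. A tiny standalone illustration of the resulting visit order (hypothetical values, not mimalloc code):

```c
#include <stdio.h>

int main(void) {
  const int max_arena = 4;  // assumed arena count
  const int start = 2;      // randomized start, as produced by _mi_heap_random_next
  // prints: visit arena 2, 3, 0, 1 -- each arena exactly once
  for (int count = 0; count < max_arena; count++) {
    int arena_idx = start + count;
    if (arena_idx >= max_arena) { arena_idx %= max_arena; }  // wrap around
    printf("visit arena %d\n", arena_idx);
  }
  return 0;
}
```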
```diff
@@ -797,8 +813,9 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_id_t* previous_id,
         mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
         // try to reclaim it atomically
         if (_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) {
-          *previous_idx = bitmap_idx;
-          *previous_id = arena_idx;
+          mi_atomic_decrement_relaxed(&abandoned_count);
+          previous->bitmap_idx = bitmap_idx;
+          previous->count = count;
           mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
           //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
           return (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx);
```
```diff
@@ -810,8 +827,8 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_id_t* previous_id,
     }
   }
   // no more found
-  *previous_idx = 0;
-  *previous_id = 0;
+  previous->bitmap_idx = 0;
+  previous->count = 0;
   return NULL;
 }
```

src/segment.c
```diff
@@ -1042,10 +1042,6 @@ When a block is freed in an abandoned segment, the segment
   is reclaimed into that thread.
 ----------------------------------------------------------- */
 
-// Maintain these for debug purposes
-static mi_decl_cache_align _Atomic(size_t) abandoned_count;
-
-
 // legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list
 void _mi_abandoned_await_readers(void) {
   // nothing needed
```
```diff
@@ -1082,7 +1078,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
   segment->thread_id = 0;
   segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
-  _mi_arena_segment_mark_abandoned(segment->memid); mi_atomic_increment_relaxed(&abandoned_count);
+  _mi_arena_segment_mark_abandoned(segment->memid);
 }
 
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
```
```diff
@@ -1226,7 +1222,6 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
   if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
   if (_mi_arena_segment_clear_abandoned(segment->memid)) { // atomically unabandon
-    mi_atomic_decrement_relaxed(&abandoned_count);
     mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
     mi_assert_internal(res == segment);
     return (res != NULL);
```
```diff
@@ -1236,10 +1231,8 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
 
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   mi_segment_t* segment;
-  mi_arena_id_t current_id = 0;
-  size_t current_idx = 0;
-  while ((segment = _mi_arena_segment_clear_abandoned_next(&current_id, &current_idx)) != NULL) {
-    mi_atomic_decrement_relaxed(&abandoned_count);
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
+  while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
     mi_segment_reclaim(segment, heap, 0, NULL, tld);
   }
 }
```
```diff
@@ -1248,12 +1241,10 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
 {
   *reclaimed = false;
   mi_segment_t* segment;
-  mi_arena_id_t current_id = 0;
-  size_t current_idx = 0;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap,&current);
   long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 1024); // limit the work to bound allocation times
-  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current_id, &current_idx)) != NULL))
+  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
-    mi_atomic_decrement_relaxed(&abandoned_count);
     segment->abandoned_visits++;
     // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
     // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
```
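The reclaim loop above stays bounded through `mi_option_max_segment_reclaim`, clamped to [0, 1024], so a single allocation slow path cannot stall on reclaiming arbitrarily many abandoned segments. A sketch of tuning that bound from application code through the public API (standard `mimalloc.h` entry points; the value 8 is only an example):

```c
#include <mimalloc.h>

int main(void) {
  // bound the reclaim work done on an allocation slow path;
  // mi_segment_try_reclaim clamps the value to the range [0, 1024]
  mi_option_set(mi_option_max_segment_reclaim, 8);
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}
```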
```diff
@@ -1280,7 +1271,6 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
       else {
         // otherwise, push on the visited list so it gets not looked at too quickly again
         mi_segment_try_purge(segment, false /* true force? */, tld->stats); // force purge if needed as we may not visit soon again
-        mi_atomic_increment_relaxed(&abandoned_count);
         _mi_arena_segment_mark_abandoned(segment->memid);
       }
     }
```
```diff
@@ -1291,11 +1281,9 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
 void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
 {
   mi_segment_t* segment;
-  mi_arena_id_t current_id = 0;
-  size_t current_idx = 0;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
   int max_tries = (force ? 16*1024 : 1024); // limit latency
-  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current_id,&current_idx)) != NULL)) {
-    mi_atomic_decrement_relaxed(&abandoned_count);
+  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL)) {
     mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
     if (segment->used == 0) {
       // free the segment (by forced reclaim) to make it available to other threads.
```
```diff
@@ -1307,7 +1295,6 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
       // otherwise, purge if needed and push on the visited list
       // note: forced purge can be expensive if many threads are destroyed/created as in mstress.
       mi_segment_try_purge(segment, force, tld->stats);
-      mi_atomic_increment_relaxed(&abandoned_count);
       _mi_arena_segment_mark_abandoned(segment->memid);
     }
   }
```