diff --git a/src/arena.c b/src/arena.c
index fc8a79c6..c971c12e 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -749,6 +749,7 @@ size_t _mi_arena_segment_abandoned_count(void) {
 }
 
 // reclaim a specific abandoned segment; `true` on success.
+// sets the thread_id.
 bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment )
 {
   if (segment->memid.memkind != MI_MEM_ARENA) {
@@ -783,6 +784,7 @@ bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment )
 }
 
 // mark a specific segment as abandoned
+// clears the thread_id.
 void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
 {
   mi_atomic_store_release(&segment->thread_id, 0);
@@ -813,6 +815,7 @@ void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* curre
 }
 
 // reclaim abandoned segments
+// this does not set the thread id (so it appears as still abandoned)
 mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous )
 {
   const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count);
@@ -845,7 +848,6 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* pr
                 mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
                 mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx);
                 mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0);
-                mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
                 //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
                 return segment;
               }
diff --git a/src/segment.c b/src/segment.c
index 3db4e813..a50f0190 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -845,7 +845,9 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size, bool
 // set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
 static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
   if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
-  mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id());
+  // can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free.
+  mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id());
+  mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
   segment->abandoned_visits = 0;
   mi_segments_track_size((long)segment->segment_size, tld);
   mi_assert_internal(segment->next == NULL && segment->prev == NULL);
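
For context, the patch moves the point where ownership of a reclaimed segment is taken: _mi_arena_segment_mark_abandoned clears thread_id, the cursor-based _mi_arena_segment_clear_abandoned_next now leaves it at 0 (so the segment still looks abandoned while it is handed over), and mi_segment_reclaim is the single place that stores the reclaiming thread's id. The sketch below is not mimalloc source; it is a minimal standalone C11 model of that hand-off, with hypothetical names (segment_t, mark_abandoned, clear_abandoned_next, reclaim, current_thread_id) and standard C11 atomics standing in for mimalloc's mi_atomic_* wrappers.

/* Illustrative sketch only -- NOT mimalloc source. Models the thread_id
   hand-off established by this patch, under the assumptions named above. */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct segment_s {
  _Atomic uintptr_t thread_id;   /* 0 means "abandoned": owned by no thread */
} segment_t;

/* stand-in for _mi_thread_id(): any stable, nonzero per-thread value */
static uintptr_t current_thread_id(void) {
  static _Thread_local int marker;
  return (uintptr_t)&marker;
}

/* abandon: clear the owner (what _mi_arena_segment_mark_abandoned does) */
static void mark_abandoned(segment_t* s) {
  atomic_store_explicit(&s->thread_id, 0, memory_order_release);
}

/* cursor step: hand the segment over WITHOUT setting thread_id, so it still
   appears abandoned until it is actually reclaimed (this is the store the
   patch removes from _mi_arena_segment_clear_abandoned_next) */
static segment_t* clear_abandoned_next(segment_t* s) {
  /* bitmap bookkeeping elided; deliberately no store to thread_id here */
  return s;
}

/* reclaim: thread_id may still be 0 (came via the cursor) or already be our
   own id (reclaimed on a free outside an arena); take ownership here, as the
   patched mi_segment_reclaim now does */
static void reclaim(segment_t* s) {
  uintptr_t tid = atomic_load_explicit(&s->thread_id, memory_order_relaxed);
  assert(tid == 0 || tid == current_thread_id());
  atomic_store_explicit(&s->thread_id, current_thread_id(), memory_order_release);
}

int main(void) {
  segment_t seg;
  atomic_init(&seg.thread_id, current_thread_id());
  mark_abandoned(&seg);                           /* owner cleared to 0 */
  segment_t* found = clear_abandoned_next(&seg);  /* still looks abandoned */
  reclaim(found);                                 /* ownership taken exactly once */
  printf("owned by caller: %d\n",
         atomic_load(&found->thread_id) == current_thread_id());
  return 0;
}

The design point the model makes explicit: because the cursor no longer writes thread_id, the assertion in mi_segment_reclaim must accept both 0 and the caller's own id, and the single release-store there becomes the one place ownership is published.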