Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-04 14:39:31 +03:00)

fix potential race on subproc field in the segment

commit b1188ea336, parent 76b0873ce2
5 changed files with 19 additions and 21 deletions
@@ -397,9 +397,10 @@ typedef struct mi_segment_s {
   bool allow_decommit;
   bool allow_purge;
   size_t segment_size;        // for huge pages this may be different from `MI_SEGMENT_SIZE`
+  mi_subproc_t* subproc;      // segment belongs to sub process

   // segment fields
-  struct mi_segment_s* next;  // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
+  struct mi_segment_s* next;  // must be the first (non-constant) segment field -- see `segment.c:segment_init`
   struct mi_segment_s* prev;
   bool was_reclaimed;         // true if it was reclaimed (used to limit on-free reclamation)

@@ -410,7 +411,6 @@ typedef struct mi_segment_s {
   size_t capacity;            // count of available pages (`#free + used`)
   size_t segment_info_size;   // space we are using from the first page for segment meta-data and possible guard pages.
   uintptr_t cookie;           // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
-  mi_subproc_t* subproc;      // segment belongs to sub process

   struct mi_segment_s* abandoned_os_next;  // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
   struct mi_segment_s* abandoned_os_prev;
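Taken together, these two hunks move `subproc` out of the mutable part of `mi_segment_t` (it used to sit after `cookie`) and up into the group of fields that are written once at allocation and never change afterwards, so any thread that can reach the segment can read `subproc` without further synchronization; the comment on `next` is updated to say "(non-constant)" accordingly. A minimal sketch of that set-once vs. mutable split, using stand-in types rather than the real mimalloc definitions:

    // Minimal sketch of the set-once vs. mutable field split -- stand-in
    // types only, not the real mimalloc definitions.
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct subproc_s { long abandoned_count; } subproc_t;

    typedef struct segment_s {
      // set once at allocation, read-only afterwards: safe to read from any
      // thread that can reach the segment, without extra synchronization
      size_t      segment_size;
      subproc_t*  subproc;            // the field this commit relocates

      // mutable fields: owned/updated by whoever currently owns the segment
      struct segment_s* next;         // must stay the first non-constant field
      struct segment_s* prev;
      bool        was_reclaimed;
    } segment_t;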
@@ -162,8 +162,9 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
   mi_arena_t* arena = mi_arena_from_index(arena_idx);
   mi_assert_internal(arena != NULL);
   // set abandonment atomically
+  mi_subproc_t* const subproc = segment->subproc; // don't access the segment after setting it abandoned
   const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
-  if (was_unmarked) { mi_atomic_increment_relaxed(&segment->subproc->abandoned_count); }
+  if (was_unmarked) { mi_atomic_increment_relaxed(&subproc->abandoned_count); }
   mi_assert_internal(was_unmarked);
   mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
 }
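This hunk is the actual race fix: the `_mi_bitmap_claim` call publishes the segment as abandoned, and from that moment another thread may reclaim and start using (or even free) it, so dereferencing `segment->subproc` afterwards races with the new owner. Capturing the pointer in a local before the claim, and using the local for the relaxed increment, removes that window. A minimal self-contained sketch of the same "read what you need before handing the object over" pattern, using C11 atomics and pthreads (the `segment_t`/`subproc_t` stand-ins and the `mark_abandoned` helper are illustrative, not mimalloc code):

    // Sketch: read everything you still need from an object *before* the
    // atomic store that hands it over to other threads; afterwards the
    // object is no longer yours to touch.  Illustrative only.
    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { atomic_long abandoned_count; } subproc_t;
    typedef struct { subproc_t* subproc; atomic_bool abandoned; } segment_t;

    static subproc_t   global_subproc;
    static segment_t*  the_segment;          // shared between the two threads
    static atomic_bool handed_over;

    static void mark_abandoned(segment_t* segment) {
      subproc_t* const subproc = segment->subproc;   // capture BEFORE publishing
      atomic_store_explicit(&segment->abandoned, true, memory_order_release);
      atomic_store_explicit(&handed_over, true, memory_order_release);
      // From here on another thread may own (and even free) `segment`;
      // using the local `subproc` instead of `segment->subproc` avoids the race.
      atomic_fetch_add_explicit(&subproc->abandoned_count, 1, memory_order_relaxed);
    }

    static void* reclaimer(void* arg) {
      (void)arg;
      while (!atomic_load_explicit(&handed_over, memory_order_acquire)) { /* spin */ }
      free(the_segment);                     // the new owner tears the segment down
      return NULL;
    }

    int main(void) {
      the_segment = malloc(sizeof(segment_t));
      the_segment->subproc = &global_subproc;
      atomic_init(&the_segment->abandoned, false);

      pthread_t t;
      pthread_create(&t, NULL, reclaimer, NULL);
      mark_abandoned(the_segment);           // safe: subproc was read up front
      pthread_join(t, NULL);

      printf("abandoned segments: %ld\n",
             atomic_load(&global_subproc.abandoned_count));
      return 0;
    }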
@@ -34,7 +34,7 @@ const mi_page_t _mi_page_empty = {
   MI_ATOMIC_VAR_INIT(0), // xthread_free
   MI_ATOMIC_VAR_INIT(0), // xheap
   NULL, NULL
   #if MI_INTPTR_SIZE==4
   , { NULL }
   #endif
 };
@@ -129,7 +129,7 @@ static mi_decl_cache_align mi_subproc_t mi_subproc_default;

 static mi_decl_cache_align mi_tld_t tld_main = {
   0, false,
   &_mi_heap_main, &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
     0, 0, 0, 0, 0, &mi_subproc_default,
     &tld_main.stats, &tld_main.os
@@ -171,7 +171,7 @@ static void mi_heap_main_init(void) {
   #endif
   _mi_heap_main.cookie  = _mi_heap_random_next(&_mi_heap_main);
   _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
   _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
   mi_lock_init(&mi_subproc_default.abandoned_os_lock);
   mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock);
 }
@@ -341,7 +341,7 @@ static bool _mi_thread_heap_init(void) {
     mi_heap_t* heap = &td->heap;
     _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
     _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */);
     _mi_heap_set_default_direct(heap);
   }
   return false;
 }
@@ -512,7 +512,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   _mi_arena_free(segment, segment_size, committed_size, segment->memid, tld->stats);
 }

 // called from `heap_collect`.
 void _mi_segments_collect(bool force, mi_segments_tld_t* tld) {
   mi_pages_try_purge(force,tld);
   #if MI_DEBUG>=2
@@ -563,6 +563,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
   segment->allow_decommit = !memid.is_pinned;
   segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0);
   segment->segment_size = segment_size;
+  segment->subproc = tld->subproc;
   mi_segments_track_size((long)(segment_size), tld);
   _mi_segment_map_allocated_at(segment);
   return segment;

@@ -628,7 +629,6 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   segment->segment_info_size = pre_size;
   segment->thread_id = _mi_thread_id();
   segment->cookie = _mi_ptr_cookie(segment);
-  segment->subproc = tld->subproc;

   // set protection
   mi_segment_protect(segment, true, tld->os);
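These two hunks move the `subproc` assignment from `mi_segment_alloc` into `mi_segment_os_alloc`, so the field is already set before `_mi_segment_map_allocated_at` registers the segment, i.e. before the segment can become visible to anything else. That is the usual initialize-before-publish discipline; a small stand-alone sketch of it (the `registry`/`make_node` names are invented for the example, not mimalloc APIs):

    // Sketch: finish writing an object's fields, then publish it with a
    // release store; readers that load the pointer with acquire semantics
    // are guaranteed to see the fields as written.
    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct node_s { int owner_id; size_t size; } node_t;

    static _Atomic(node_t*) registry;      // stand-in for a global segment map

    static node_t* make_node(int owner_id, size_t size) {
      node_t* n = malloc(sizeof(node_t));
      if (n == NULL) return NULL;
      n->owner_id = owner_id;              // initialize every field first...
      n->size     = size;
      atomic_store_explicit(&registry, n, memory_order_release);  // ...then publish
      return n;
    }

    static int lookup_owner(void) {
      node_t* n = atomic_load_explicit(&registry, memory_order_acquire);
      return (n != NULL) ? n->owner_id : -1;   // fields are fully initialized here
    }

    int main(void) {
      make_node(42, 4096);
      return (lookup_owner() == 42) ? 0 : 1;
    }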
@@ -896,7 +896,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
     segment->abandoned--;
     mi_assert(page->next == NULL);
     _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
     // get the target heap for this thread which has a matching heap tag (so we reclaim into a matching heap)
     mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects
     if (target_heap == NULL) {
       target_heap = heap;
@@ -961,7 +961,7 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {

 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   mi_segment_t* segment;
   mi_arena_field_cursor_t current;
   _mi_arena_field_cursor_init(heap, tld->subproc, true /* visit all, blocking */, &current);
   while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
     mi_segment_reclaim(segment, heap, 0, NULL, tld);
@@ -989,7 +989,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,

   mi_segment_t* result = NULL;
   mi_segment_t* segment = NULL;
   mi_arena_field_cursor_t current;
   _mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, &current);
   while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
@@ -1264,7 +1264,7 @@ static bool mi_segment_visit_page(mi_page_t* page, bool visit_blocks, mi_block_v
   }
 }

 bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* const page = &segment->pages[i];
     if (page->segment_in_use) {
@@ -25,17 +25,14 @@ terms of the MIT license.
 // > mimalloc-test-stress [THREADS] [SCALE] [ITER]
 //
 // argument defaults
+#if !defined(MI_TSAN)
 static int THREADS = 32;   // more repeatable if THREADS <= #processors
-static int SCALE = 25;     // scaling factor
-#if defined(MI_TSAN)
-static int ITER = 10;      // N full iterations destructing and re-creating all threads (on tsan reduce for azure pipeline limits)
-#else
-static int ITER = 50;      // N full iterations destructing and re-creating all threads
+#else  // with thread-sanitizer reduce the defaults for azure pipeline limits
+static int THREADS = 8;
 #endif

-// static int THREADS = 8;    // more repeatable if THREADS <= #processors
-// static int SCALE = 100;    // scaling factor
+static int SCALE = 25;     // scaling factor
+static int ITER = 50;      // N full iterations destructing and re-creating all threads

 #define STRESS // undefine for leak test
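After this change only the THREADS default depends on MI_TSAN; SCALE and ITER each have a single unconditional default. As the usage comment notes, all three can still be given on the command line; a hedged sketch of the usual positional-argument override (illustrative parsing only, not the exact test-stress.c code):

    // Sketch of overriding the defaults from argv, matching the usage line
    //   > mimalloc-test-stress [THREADS] [SCALE] [ITER]
    #include <stdio.h>
    #include <stdlib.h>

    static int THREADS = 32;
    static int SCALE   = 25;
    static int ITER    = 50;

    int main(int argc, char** argv) {
      if (argc >= 2) THREADS = atoi(argv[1]);
      if (argc >= 3) SCALE   = atoi(argv[2]);
      if (argc >= 4) ITER    = atoi(argv[3]);
      printf("threads: %d, scale: %d, iterations: %d\n", THREADS, SCALE, ITER);
      // ... run the stress workload with these parameters ...
      return 0;
    }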