From 76b0873ce2ebe3bc28543cb2293c0c62e4617243 Mon Sep 17 00:00:00 2001
From: daanx
Date: Mon, 3 Jun 2024 20:28:47 -0700
Subject: [PATCH 1/2] fix asan tracking by explicitly setting memory to
 undefined before a free

---
 src/arena.c        | 6 +++---
 src/page.c         | 2 +-
 test/test-stress.c | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/arena.c b/src/arena.c
index 8a5e8952..0bd58aaa 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -627,6 +627,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
   if (size==0) return;
   const bool all_committed = (committed_size == size);
 
+  // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
+  mi_track_mem_undefined(p,size);
+
   if (mi_memkind_is_os(memid.memkind)) {
     // was a direct OS allocation, pass through
     if (!all_committed && committed_size > 0) {
@@ -656,9 +659,6 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
       return;
     }
 
-    // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
-    mi_track_mem_undefined(p,size);
-
     // potentially decommit
     if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
       mi_assert_internal(all_committed);
diff --git a/src/page.c b/src/page.c
index 5a18b780..96d1b24c 100644
--- a/src/page.c
+++ b/src/page.c
@@ -857,7 +857,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
   // huge allocation?
   const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
   if mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
-    if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) { 
+    if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
      _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
      return NULL;
    }
diff --git a/test/test-stress.c b/test/test-stress.c
index 675c54bc..2c031894 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -133,9 +133,9 @@ static void free_items(void* p) {
   custom_free(p);
 }
 
-#ifdef HEAP_WALK 
+#ifdef HEAP_WALK
 static bool visit_blocks(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
-  (void)(heap); (void)(area); 
+  (void)(heap); (void)(area);
   size_t* total = (size_t*)arg;
   if (block != NULL) { *total += block_size; }
   return true;
@@ -260,7 +260,7 @@ static void test_leak(void) {
 
 int main(int argc, char** argv) {
 #ifdef HEAP_WALK
-  mi_option_enable(mi_option_visit_abandoned); 
+  mi_option_enable(mi_option_visit_abandoned);
 #endif
 #ifndef NDEBUG
   mi_option_set(mi_option_arena_reserve, 32 * 1024 /* in kib = 32MiB */);

From b1188ea33624f407e580cb278cb896a42521273e Mon Sep 17 00:00:00 2001
From: daanx
Date: Mon, 3 Jun 2024 20:57:00 -0700
Subject: [PATCH 2/2] fix potential race on subproc field in the segment

---
 include/mimalloc/types.h |  4 ++--
 src/arena-abandoned.c    |  3 ++-
 src/init.c               |  8 ++++----
 src/segment.c            | 12 ++++++------
 test/test-stress.c       | 13 +++++--------
 5 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index 2fc83a30..31ed35f8 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -397,9 +397,10 @@ typedef struct mi_segment_s {
   bool              allow_decommit;
   bool              allow_purge;
   size_t            segment_size;       // for huge pages this may be different from `MI_SEGMENT_SIZE`
+  mi_subproc_t*     subproc;            // segment belongs to sub process
 
   // segment fields
-  struct mi_segment_s* next;            // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
+  struct mi_segment_s* next;            // must be the first (non-constant) segment field -- see `segment.c:segment_init`
   struct mi_segment_s* prev;
   bool              was_reclaimed;      // true if it was reclaimed (used to limit on-free reclamation)
 
@@ -410,7 +411,6 @@
   size_t            capacity;           // count of available pages (`#free + used`)
   size_t            segment_info_size;  // space we are using from the first page for segment meta-data and possible guard pages.
   uintptr_t         cookie;             // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
-  mi_subproc_t*     subproc;            // segment belongs to sub process
 
   struct mi_segment_s* abandoned_os_next;  // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
   struct mi_segment_s* abandoned_os_prev;
diff --git a/src/arena-abandoned.c b/src/arena-abandoned.c
index 3b1b1823..465a074d 100644
--- a/src/arena-abandoned.c
+++ b/src/arena-abandoned.c
@@ -162,8 +162,9 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
     mi_arena_t* arena = mi_arena_from_index(arena_idx);
     mi_assert_internal(arena != NULL);
     // set abandonment atomically
+    mi_subproc_t* const subproc = segment->subproc; // don't access the segment after setting it abandoned
     const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
-    if (was_unmarked) { mi_atomic_increment_relaxed(&segment->subproc->abandoned_count); }
+    if (was_unmarked) { mi_atomic_increment_relaxed(&subproc->abandoned_count); }
     mi_assert_internal(was_unmarked);
     mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
   }
diff --git a/src/init.c b/src/init.c
index 08208ea7..ead5a147 100644
--- a/src/init.c
+++ b/src/init.c
@@ -34,7 +34,7 @@ const mi_page_t _mi_page_empty = {
   MI_ATOMIC_VAR_INIT(0), // xthread_free
   MI_ATOMIC_VAR_INIT(0), // xheap
   NULL, NULL
-  #if MI_INTPTR_SIZE==4 
+  #if MI_INTPTR_SIZE==4
   , { NULL }
   #endif
 };
@@ -129,7 +129,7 @@ static mi_decl_cache_align mi_subproc_t mi_subproc_default;
 
 static mi_decl_cache_align mi_tld_t tld_main = {
   0, false,
-  &_mi_heap_main, &_mi_heap_main, 
+  &_mi_heap_main, &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0}, 0, 0, 0, 0, 0, &mi_subproc_default,
     &tld_main.stats, &tld_main.os
@@ -171,7 +171,7 @@ static void mi_heap_main_init(void) {
     #endif
     _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main);
     _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
-    _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main); 
+    _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
     mi_lock_init(&mi_subproc_default.abandoned_os_lock);
     mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock);
   }
@@ -341,7 +341,7 @@ static bool _mi_thread_heap_init(void) {
     mi_heap_t* heap = &td->heap;
     _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
     _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */);
-    _mi_heap_set_default_direct(heap); 
+    _mi_heap_set_default_direct(heap);
   }
   return false;
 }
diff --git a/src/segment.c b/src/segment.c
index 7746bd0f..54a917ea 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -512,7 +512,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   _mi_arena_free(segment, segment_size, committed_size, segment->memid, tld->stats);
 }
 
-// called from `heap_collect`. 
+// called from `heap_collect`.
 void _mi_segments_collect(bool force, mi_segments_tld_t* tld) {
   mi_pages_try_purge(force,tld);
 #if MI_DEBUG>=2
@@ -563,6 +563,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
   segment->allow_decommit = !memid.is_pinned;
   segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0);
   segment->segment_size = segment_size;
+  segment->subproc = tld->subproc;
   mi_segments_track_size((long)(segment_size), tld);
   _mi_segment_map_allocated_at(segment);
   return segment;
@@ -628,7 +629,6 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   segment->segment_info_size = pre_size;
   segment->thread_id = _mi_thread_id();
   segment->cookie = _mi_ptr_cookie(segment);
-  segment->subproc = tld->subproc;
 
   // set protection
   mi_segment_protect(segment, true, tld->os);
@@ -896,7 +896,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       segment->abandoned--;
       mi_assert(page->next == NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
-      // get the target heap for this thread which has a matching heap tag (so we reclaim into a matching heap) 
+      // get the target heap for this thread which has a matching heap tag (so we reclaim into a matching heap)
       mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag);  // allow custom heaps to separate objects
       if (target_heap == NULL) {
         target_heap = heap;
@@ -961,7 +961,7 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
 
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   mi_segment_t* segment;
-  mi_arena_field_cursor_t current; 
+  mi_arena_field_cursor_t current;
   _mi_arena_field_cursor_init(heap, tld->subproc, true /* visit all, blocking */, &current);
   while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
     mi_segment_reclaim(segment, heap, 0, NULL, tld);
@@ -989,7 +989,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
 
   mi_segment_t* result = NULL;
   mi_segment_t* segment = NULL;
-  mi_arena_field_cursor_t current; 
+  mi_arena_field_cursor_t current;
   _mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, &current);
   while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
@@ -1264,7 +1264,7 @@ static bool mi_segment_visit_page(mi_page_t* page, bool visit_blocks, mi_block_v
   }
 }
 
-bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { 
+bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* const page = &segment->pages[i];
     if (page->segment_in_use) {
diff --git a/test/test-stress.c b/test/test-stress.c
index 2c031894..2d1927cc 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -25,17 +25,14 @@ terms of the MIT license.
 // > mimalloc-test-stress [THREADS] [SCALE] [ITER]
 //
 // argument defaults
+#if !defined(MI_TSAN)
 static int THREADS = 32;      // more repeatable if THREADS <= #processors
-static int SCALE   = 25;      // scaling factor
-
-#if defined(MI_TSAN)
-static int ITER   = 10;  // N full iterations destructing and re-creating all threads (on tsan reduce for azure pipeline limits)
-#else
-static int ITER   = 50;  // N full iterations destructing and re-creating all threads
+#else  // with thread-sanitizer reduce the defaults for azure pipeline limits
+static int THREADS = 8;
 #endif
-// static int THREADS = 8;   // more repeatable if THREADS <= #processors
-// static int SCALE  = 100;  // scaling factor
+static int SCALE   = 25;      // scaling factor
+static int ITER    = 50;      // N full iterations destructing and re-creating all threads
 
 #define STRESS   // undefine for leak test
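
Notes on the patches
--------------------

Patch 1/2 moves the `mi_track_mem_undefined(p,size)` call from the arena
branch to the top of `_mi_arena_free`, so it runs on every free path
(including direct OS frees). Parts of the block, such as padding, may
still be marked no_access by the sanitizer tracking, so the whole range
is reset to "undefined" before any free path touches the memory. The
following standalone sketch illustrates the same unpoison-before-free
ordering; it is not mimalloc code (mimalloc's track layer wraps these
macros), `block_free` is a hypothetical helper, and it assumes a build
with -fsanitize=address:

    #include <sanitizer/asan_interface.h>
    #include <stddef.h>

    static void block_free(void* p, size_t size) {
      // padding inside the block may still be poisoned (no_access) from
      // allocation time; unpoison the whole range *before* any code below
      // reads or writes it (e.g. an OS decommit path or a free-list push).
      ASAN_UNPOISON_MEMORY_REGION(p, size);
      // ... decommit / return to the OS / push on a free list ...
    }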
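Patch 2/2 fixes an ordering bug: `_mi_bitmap_claim` publishes the
segment as abandoned, after which another thread may reclaim and even
free it, so reading `segment->subproc` after the claim is a potential
use-after-free. The fix caches the subproc pointer in a local before
publishing (the subproc itself outlives the segment) and initializes the
field in `mi_segment_os_alloc`, before the segment becomes visible to
other threads. A self-contained C11 sketch of the same
publish-then-hands-off pattern; `object_t`, `shared_slot`, and `publish`
are illustrative names, not mimalloc API:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct object_s {
      _Atomic(size_t)* counter;  // lives in a longer-lived structure,
                                 // like subproc->abandoned_count
    } object_t;

    static _Atomic(object_t*) shared_slot;  // stands in for the abandoned bitmap

    static void publish(object_t* obj) {
      // read everything we need *before* publishing: once the release store
      // below is visible, another thread may take ownership and free obj.
      _Atomic(size_t)* const counter = obj->counter;
      atomic_store_explicit(&shared_slot, obj, memory_order_release);
      // obj may already be freed here; only the cached local is safe to use.
      atomic_fetch_add_explicit(counter, 1, memory_order_relaxed);
    }

The key invariant is that the counter lives in a structure that outlives
the published object, so the cached pointer stays valid after the hand-off.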