diff --git a/src/arena.c b/src/arena.c
index 7f472606..2fafd0bf 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -237,7 +237,9 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   }

   // set the dirty bits (todo: no need for an atomic op here?)
-  memid->was_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+  if (arena->memid.was_zero && arena->blocks_dirty != NULL) {
+    memid->was_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+  }

   // set commit state
   if (arena->blocks_committed == NULL) {
@@ -373,7 +375,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
     void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
-    if (p != NULL) return p;
+    if (p != NULL) return p;

     // otherwise, try to first eagerly reserve a new arena
     if (req_arena_id == _mi_arena_id_none()) {
@@ -399,7 +401,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
   }
   else {
     return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
-  }
+  }
 }

 void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
diff --git a/src/os.c b/src/os.c
index 88204a9d..48d25446 100644
--- a/src/os.c
+++ b/src/os.c
@@ -144,6 +144,7 @@ static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_st
   mi_assert_internal((size % _mi_os_page_size()) == 0);
   if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
   int err = _mi_prim_free(addr, size);
+  mi_track_mem_noaccess(addr,size);
   if (err != 0) {
     _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
   }
@@ -207,7 +208,12 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
     _mi_stat_increase(&stats->reserved, size);
     if (commit) {
       _mi_stat_increase(&stats->committed, size);
-      mi_track_mem_defined(p,size); // seems needed for asan (or `mimalloc-test-api` fails)
+      // seems needed for asan (or `mimalloc-test-api` fails)
+      if (*is_zero) { mi_track_mem_defined(p,size); }
+      else { mi_track_mem_undefined(p,size); }
+    }
+    else {
+      mi_track_mem_noaccess(p,size);
     }
   }
   return p;
@@ -272,7 +278,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
       if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
       // we can return the aligned pointer on `mmap` (and sbrk) systems
       p = aligned_p;
-      *base = aligned_p; // since we freed the pre part, `*base == p`.
+      *base = aligned_p; // since we freed the pre part, `*base == p`.
     }
   }

@@ -406,7 +412,10 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
   if (os_is_zero && is_zero != NULL) {
     *is_zero = true;
     mi_assert_expensive(mi_mem_is_zero(start, csize));
-  }
+  }
+  // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
+  if (os_is_zero) { mi_track_mem_defined(start,csize); }
+  else { mi_track_mem_undefined(start,csize); }
   return true;
 }

@@ -428,6 +437,7 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
     _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }
   mi_assert_internal(err == 0);
+  mi_track_mem_noaccess(start,csize);
   return (err == 0);
 }

@@ -457,9 +467,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
   if (err != 0) {
     _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }
-  else {
-    mi_track_mem_undefined(start, csize);
-  }
+  mi_track_mem_undefined(start,csize);
   return (err == 0);
 }

@@ -508,6 +516,8 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) {
   if (err != 0) {
     _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize);
   }
+  if (protect) { mi_track_mem_noaccess(start,csize); }
+  else { mi_track_mem_undefined(start,csize); }
   return (err == 0);
 }

@@ -630,6 +640,8 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
     *memid = _mi_memid_create_os(true /* is committed */, all_zero, true /* is_large */);
     memid->memkind = MI_MEM_OS_HUGE;
     mi_assert(memid->is_pinned);
+    if (all_zero) { mi_track_mem_defined(start,size); }
+    else { mi_track_mem_undefined(start,size); }
   }
   return (page == 0 ? NULL : start);
 }
diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c
index 5da5f96b..71f16915 100644
--- a/src/prim/unix/prim.c
+++ b/src/prim/unix/prim.c
@@ -190,7 +190,11 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
     if (hint != NULL) {
       p = mmap(hint, size, protect_flags, flags, fd, 0);
       if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+        #if MI_TRACK_ENABLED  // asan sometimes does not instrument errno correctly?
+        int err = 0;
+        #else
         int err = errno;
+        #endif
         _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
       }
       if (p!=MAP_FAILED) return p;
diff --git a/src/segment.c b/src/segment.c
index a08fd3de..442b187a 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -878,12 +878,17 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   if (segment == NULL) return NULL;

   // zero the segment info? -- not always needed as it may be zero initialized from the OS
+  ptrdiff_t ofs = offsetof(mi_segment_t, next);
+  size_t prefix = offsetof(mi_segment_t, slices) - ofs;
+  size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
   if (!segment->memid.was_zero) {
-    ptrdiff_t ofs = offsetof(mi_segment_t, next);
-    size_t prefix = offsetof(mi_segment_t, slices) - ofs;
-    size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
     _mi_memzero((uint8_t*)segment + ofs, zsize);
   }
+  else {
+    mi_track_mem_defined((uint8_t*)segment + ofs,zsize);
+    mi_assert(mi_mem_is_zero((uint8_t*)segment + ofs, zsize));
+  }
+
   // initialize the rest of the segment info
   const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
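
Note on the tracking calls used throughout this patch: mi_track_mem_defined, mi_track_mem_undefined, and mi_track_mem_noaccess are mimalloc's memory-tracker annotations; they compile to no-ops in normal builds and to tool-specific calls when a tracker (valgrind or address sanitizer) is enabled. As a minimal sketch of the assumed ASAN mapping only (the actual definitions live in mimalloc's track header and may differ):

  // Sketch: assumed ASAN expansion of the tracking macros, not the verbatim mimalloc definitions.
  // ASAN does not distinguish "defined" from "undefined" contents (that is a valgrind/memcheck
  // notion), so both states unpoison the range; "noaccess" poisons it so any later access is reported.
  #include <sanitizer/asan_interface.h>

  #define mi_track_mem_defined(p,size)    ASAN_UNPOISON_MEMORY_REGION(p,size)
  #define mi_track_mem_undefined(p,size)  ASAN_UNPOISON_MEMORY_REGION(p,size)
  #define mi_track_mem_noaccess(p,size)   ASAN_POISON_MEMORY_REGION(p,size)

This is the pattern the hunks above follow: memory that is committed and known to be zeroed is marked defined, memory that is committed with unknown contents is marked undefined, and memory that is freed, decommitted, or protected is marked noaccess so stray accesses are caught.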