diff --git a/ide/vs2022/mimalloc-test-api.vcxproj b/ide/vs2022/mimalloc-test-api.vcxproj
index 6023c251..d9b9cae4 100644
--- a/ide/vs2022/mimalloc-test-api.vcxproj
+++ b/ide/vs2022/mimalloc-test-api.vcxproj
@@ -141,7 +141,14 @@
+
+ true
+ true
+ true
+ true
+
+ false
diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index 2916483d..c298550a 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -243,7 +243,6 @@
-
diff --git a/ide/vs2022/mimalloc.vcxproj.filters b/ide/vs2022/mimalloc.vcxproj.filters
index 28ba20b1..b3cdb3b3 100644
--- a/ide/vs2022/mimalloc.vcxproj.filters
+++ b/ide/vs2022/mimalloc.vcxproj.filters
@@ -69,9 +69,6 @@
Headers
-
- Headers
-
Headers
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 8d0c6f6a..1cd809f1 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -223,19 +223,13 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
return p; // reallocation still fits, is aligned and not more than 50% waste
}
else {
+      // note: we don't zero-allocate upfront, so we only zero-initialize the expanded part
void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
if (newp != NULL) {
if (zero && newsize > size) {
- const mi_page_t* page = _mi_ptr_page(newp);
- if (page->free_is_zero) {
- // already zero initialized
- mi_assert_expensive(mi_mem_is_zero(newp,newsize));
- }
- else {
- // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
- size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
- memset((uint8_t*)newp + start, 0, newsize - start);
- }
+        // also set the last word of the previous allocation to zero so any padding is zero-initialized
+ size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+ _mi_memzero((uint8_t*)newp + start, newsize - start);
}
_mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
mi_free(p); // only free if successful
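
The replaced branch above no longer consults page->free_is_zero; it always zeroes the expanded region before copying. A minimal sketch of the same flow outside mimalloc, using plain malloc/memset/memcpy; grow_zero and its exact-size assumption are mine, not mimalloc API:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the aligned-realloc growth path above:
   allocate, zero from one word before the old size to the new end,
   then copy the old contents and free the old block.
   Assumes p != NULL, as on the mimalloc path. */
static void* grow_zero(void* p, size_t size, size_t newsize) {
  uint8_t* newp = (uint8_t*)malloc(newsize);
  if (newp == NULL) return NULL;   /* keep p valid on failure */
  if (newsize > size) {
    /* start one word early, as in the patch; the copy below restores the
       old bytes, so the net effect is that [size, newsize) reads as zero */
    const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
    memset(newp + start, 0, newsize - start);
  }
  memcpy(newp, p, (newsize > size ? size : newsize));
  free(p);                         /* only free if successful */
  return newp;
}
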
diff --git a/src/alloc.c b/src/alloc.c
index bf0fd5ee..ffc1747d 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -37,6 +37,11 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->used++;
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
+ #if MI_DEBUG>3
+ if (page->free_is_zero) {
+ mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
+ }
+ #endif
// allow use of the block internally
// note: when tracking we need to avoid ever touching the MI_PADDING since
@@ -53,7 +58,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
else {
_mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
- }
+ }
}
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
@@ -116,7 +121,11 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
- if (zero && p != NULL) { mi_assert_internal(mi_mem_is_zero(p, size)); }
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
return p;
}
@@ -146,7 +155,11 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
- if (zero && p != NULL) { mi_assert_internal(mi_mem_is_zero(p, size)); }
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
return p;
}
}
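
Both hunks above demote the zero check from an always-on mi_assert_internal to an mi_assert_expensive compiled only when MI_DEBUG>3, since mi_mem_is_zero scans the whole allocation. A generic sketch of such leveled debug checks; DEBUG_LEVEL, buf_is_zero, and check_zero_alloc are illustrative names, not mimalloc API:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#ifndef DEBUG_LEVEL
#define DEBUG_LEVEL 4   /* assumption: level 4 enables the expensive checks */
#endif

static bool buf_is_zero(const uint8_t* p, size_t n) {
  for (size_t i = 0; i < n; i++) { if (p[i] != 0) return false; }
  return true;
}

static void check_zero_alloc(const uint8_t* p, size_t size) {
  assert(p != NULL);               /* cheap: on in every debug build */
  #if DEBUG_LEVEL > 3
  assert(buf_is_zero(p, size));    /* O(size) scan: high debug levels only */
  #else
  (void)size;
  #endif
}
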
@@ -709,6 +722,9 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
_mi_memzero((uint8_t*)newp + start, newsize - start);
}
+ else if (newsize == 0) {
+    ((uint8_t*)newp)[0] = 0; // workaround for applications that expect a zero-size reallocation to be zero initialized (issue #725)
+ }
if mi_likely(p != NULL) {
const size_t copysize = (newsize > size ? size : newsize);
mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking..
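
The new newsize == 0 branch touches the first byte of the fresh block so that a reallocation to size zero still hands back memory whose first byte reads as zero (issue #725). A small usage check against the public API; whether mi_rezalloc(p, 0) returns NULL or a minimal block is left open here, so the test hedges on both:

#include <mimalloc.h>
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint8_t* p = (uint8_t*)mi_zalloc(16);
  assert(p != NULL);
  p = (uint8_t*)mi_rezalloc(p, 0);   /* shrink to zero size */
  assert(p == NULL || p[0] == 0);    /* the issue #725 expectation */
  mi_free(p);
  return 0;
}
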
diff --git a/src/page.c b/src/page.c
index d076c6cc..8ac0a715 100644
--- a/src/page.c
+++ b/src/page.c
@@ -66,6 +66,14 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
if (p < start || p >= end) return false;
p = mi_block_next(page, p);
}
+#if MI_DEBUG>3 // generally too expensive to check this
+ if (page->free_is_zero) {
+ const size_t ubsize = mi_page_usable_block_size(page);
+ for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
+ mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
+ }
+ }
+#endif
return true;
}
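
The loop above starts checking at block + 1 because the free list is intrusive: the first word of every free block holds the (encoded) pointer to the next block and is never zero. A plain-C sketch of the same walk over an unencoded intrusive list; block_t and free_list_is_zero are illustrative, not mimalloc types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct block_s { struct block_s* next; } block_t;  /* intrusive link */

/* true when every free block's body (past the link word) is zero;
   assumes block_size >= sizeof(block_t) */
static bool free_list_is_zero(const block_t* free_list, size_t block_size) {
  for (const block_t* b = free_list; b != NULL; b = b->next) {
    const uint8_t* body = (const uint8_t*)(b + 1);
    for (size_t i = 0; i < block_size - sizeof(block_t); i++) {
      if (body[i] != 0) return false;
    }
  }
  return true;
}
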
diff --git a/test/test-api.c b/test/test-api.c
index 829d7d35..8dd24e1b 100644
--- a/test/test-api.c
+++ b/test/test-api.c
@@ -46,6 +46,14 @@ bool test_heap2(void);
bool test_stl_allocator1(void);
bool test_stl_allocator2(void);
+bool mem_is_zero(uint8_t* p, size_t size) {
+ if (p==NULL) return false;
+ for (size_t i = 0; i < size; ++i) {
+ if (p[i] != 0) return false;
+ }
+ return true;
+}
+
// ---------------------------------------------------------------------------
// Main testing
// ---------------------------------------------------------------------------
@@ -232,6 +240,21 @@ int main(void) {
}
result = ok;
};
+ CHECK_BODY("zalloc-aligned-small1") {
+ size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
+ uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2);
+ result = mem_is_zero(p, zalloc_size);
+ mi_free(p);
+ };
+ CHECK_BODY("rezalloc_aligned-small1") {
+ size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
+ uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2);
+ result = mem_is_zero(p, zalloc_size);
+ zalloc_size *= 3;
+ p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2);
+ result = result && mem_is_zero(p, zalloc_size);
+ mi_free(p);
+ };
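
The two tests above cover zero initialization; a possible companion test (hypothetical, not part of this patch) could assert the requested alignment as well, reusing the same CHECK_BODY harness and mem_is_zero helper:

  CHECK_BODY("zalloc-aligned-check") {
    uint8_t* p = (uint8_t*)mi_zalloc_aligned(64, MI_MAX_ALIGN_SIZE * 2);
    result = (p != NULL)
          && (((uintptr_t)p % (MI_MAX_ALIGN_SIZE * 2)) == 0)  // alignment honored
          && mem_is_zero(p, 64);                              // contents zeroed
    mi_free(p);
  };
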
// ---------------------------------------------------
// Reallocation