mirror of https://github.com/microsoft/mimalloc.git

merge from dev

commit 063f25ba11

7 changed files with 61 additions and 17 deletions
@@ -223,19 +223,13 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
     return p;  // reallocation still fits, is aligned and not more than 50% waste
   }
   else {
     // note: we don't zero allocate upfront so we only zero initialize the expanded part
     void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
     if (newp != NULL) {
       if (zero && newsize > size) {
-        const mi_page_t* page = _mi_ptr_page(newp);
-        if (page->free_is_zero) {
-          // already zero initialized
-          mi_assert_expensive(mi_mem_is_zero(newp,newsize));
-        }
-        else {
-          // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
-          size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
-          memset((uint8_t*)newp + start, 0, newsize - start);
-        }
+        // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
+        size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+        _mi_memzero((uint8_t*)newp + start, newsize - start);
       }
       _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
       mi_free(p); // only free if successful
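This hunk is from the aligned reallocation path: with the free_is_zero fast path removed, a zeroing reallocation always clears the expanded region (and the last word of the old extent, so any padding is zero) before the old contents are copied in. The guarantee is observable through the public API; a minimal sketch, assuming mimalloc is installed and linked (the sizes and the 0xAB fill pattern are arbitrary):

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>
  #include <mimalloc.h>

  int main(void) {
    // 100 zero-initialized bytes at 64-byte alignment
    uint8_t* p = (uint8_t*)mi_zalloc_aligned(100, 64);
    assert(p != NULL && ((uintptr_t)p % 64) == 0);
    memset(p, 0xAB, 100);                  // dirty the original block
    // grow with the zeroing reallocator: old data is preserved and
    // the expanded region [100,200) must read as zero
    p = (uint8_t*)mi_rezalloc_aligned(p, 200, 64);
    assert(p != NULL);
    for (size_t i = 0;   i < 100; i++) { assert(p[i] == 0xAB); }
    for (size_t i = 100; i < 200; i++) { assert(p[i] == 0); }
    mi_free(p);
    return 0;
  }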
src/alloc.c (22 lines changed)
@@ -37,6 +37,11 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   page->used++;
   page->free = mi_block_next(page, block);
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
+  #if MI_DEBUG>3
+  if (page->free_is_zero) {
+    mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
+  }
+  #endif

   // allow use of the block internally
   // note: when tracking we need to avoid ever touching the MI_PADDING since
@@ -53,7 +58,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
     }
     else {
       _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
     }
   }
 }

 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
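These MI_DEBUG>3 checks lean on mi_mem_is_zero. Note the block+1 in the new assertion: a free block stores its free-list link in its first word, so only the remainder of the block is expected to be zero. As a rough standalone illustration of what such a predicate checks (a hypothetical helper, not mimalloc's internal implementation):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  // Hypothetical stand-in for a check like mi_mem_is_zero(p, size):
  // true iff all `size` bytes starting at `p` are zero.
  static bool mem_is_zero(const void* p, size_t size) {
    const uint8_t* b = (const uint8_t*)p;
    for (size_t i = 0; i < size; i++) {
      if (b[i] != 0) return false;
    }
    return true;
  }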
@@ -116,7 +121,11 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
   #endif
-  if (zero && p != NULL) { mi_assert_internal(mi_mem_is_zero(p, size)); }
+  #if MI_DEBUG>3
+  if (p != NULL && zero) {
+    mi_assert_expensive(mi_mem_is_zero(p, size));
+  }
+  #endif
   return p;
 }
@@ -146,7 +155,11 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
   #endif
-  if (zero && p != NULL) { mi_assert_internal(mi_mem_is_zero(p, size)); }
+  #if MI_DEBUG>3
+  if (p != NULL && zero) {
+    mi_assert_expensive(mi_mem_is_zero(p, size));
+  }
+  #endif
   return p;
 }
 }
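Both hunks demote the same invariant, that a zero-requested allocation really is zero, from mi_assert_internal (checked in every debug build, scanning the whole block on each allocation) to mi_assert_expensive compiled only at MI_DEBUG>3. The invariant itself is just the documented behavior of the zeroing entry points; a minimal check against the public API (hypothetical test program, assuming mimalloc is linked):

  #include <assert.h>
  #include <stdint.h>
  #include <mimalloc.h>

  int main(void) {
    for (size_t size = 1; size <= 4096; size *= 2) {
      uint8_t* p = (uint8_t*)mi_zalloc(size);  // zero-initialized allocation
      assert(p != NULL);
      for (size_t i = 0; i < size; i++) {
        assert(p[i] == 0);                     // the invariant the asserts guard
      }
      mi_free(p);
    }
    return 0;
  }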
@@ -709,6 +722,9 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
     const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
     _mi_memzero((uint8_t*)newp + start, newsize - start);
   }
+  else if (newsize == 0) {
+    ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725)
+  }
   if mi_likely(p != NULL) {
     const size_t copysize = (newsize > size ? size : newsize);
     mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking..
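The added newsize == 0 branch is a compatibility workaround: some applications reallocate to size zero, get back mimalloc's minimal non-NULL block, and still read it expecting zeros (issue #725); writing one zero byte into the fresh block covers that. A small illustration through the public mi_rezalloc (hypothetical snippet; reading a zero-sized block is not portable and relies on exactly this workaround):

  #include <assert.h>
  #include <stdint.h>
  #include <mimalloc.h>

  int main(void) {
    uint8_t* p = (uint8_t*)mi_malloc(16);
    assert(p != NULL);
    p[0] = 0xFF;                      // non-zero content before shrinking
    // shrink to zero bytes: mimalloc returns a minimal non-NULL block,
    // and after this change its first byte reads as zero
    p = (uint8_t*)mi_rezalloc(p, 0);
    assert(p != NULL && p[0] == 0);
    mi_free(p);
    return 0;
  }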
@@ -66,6 +66,14 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
     if (p < start || p >= end) return false;
     p = mi_block_next(page, p);
   }
+  #if MI_DEBUG>3 // generally too expensive to check this
+  if (page->free_is_zero) {
+    const size_t ubsize = mi_page_usable_block_size(page);
+    for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
+      mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
+    }
+  }
+  #endif
   return true;
 }
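The new validation walks the page's free list and, when the page claims free_is_zero, verifies every free block is zero past its embedded link word. The same shape of walk over a plain singly linked free list, with hypothetical types rather than mimalloc's encoded block pointers:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  // Hypothetical free-block node: the first word is the free-list link,
  // mirroring how mimalloc keeps `next` inside the free block itself.
  typedef struct block_s { struct block_s* next; } block_t;

  // True iff every free block's payload (everything after the link word)
  // is zero, like the MI_DEBUG>3 check added to mi_page_list_is_valid.
  static bool free_list_is_zero(const block_t* free_list, size_t block_size) {
    for (const block_t* b = free_list; b != NULL; b = b->next) {
      const uint8_t* payload = (const uint8_t*)(b + 1);
      for (size_t i = 0; i < block_size - sizeof(block_t); i++) {
        if (payload[i] != 0) return false;
      }
    }
    return true;
  }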