merge from dev-track

daan 2022-10-30 14:31:21 -07:00
commit 66525ccae3
15 changed files with 221 additions and 45 deletions

src/alloc.c

@@ -12,6 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"
#include <string.h> // memset, strlen
#include <stdlib.h> // malloc, exit
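
The mi_track_* calls threaded through this file come from mimalloc-track.h. As a rough sketch (an assumption about that header, not a copy of it), the macros plausibly map directly onto valgrind's client requests when MI_TRACK_ENABLED is set, and compile to nothing otherwise:

#include <valgrind/valgrind.h>   // VALGRIND_MALLOCLIKE_BLOCK, VALGRIND_FREELIKE_BLOCK
#include <valgrind/memcheck.h>   // VALGRIND_MAKE_MEM_DEFINED / _UNDEFINED / _NOACCESS

#if MI_TRACK_ENABLED
// using MI_PADDING_SIZE as the red-zone size is an assumption; the real header may differ
#define mi_track_malloc(p,size,zero)    VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE,zero)
#define mi_track_free(p)                VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE)
#define mi_track_mem_defined(p,size)    VALGRIND_MAKE_MEM_DEFINED(p,size)
#define mi_track_mem_undefined(p,size)  VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define mi_track_mem_noaccess(p,size)   VALGRIND_MAKE_MEM_NOACCESS(p,size)
#else
#define mi_track_malloc(p,size,zero)
#define mi_track_free(p)
#define mi_track_mem_defined(p,size)
#define mi_track_mem_undefined(p,size)
#define mi_track_mem_noaccess(p,size)
#endif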
@@ -37,15 +38,20 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
// allow use of the block internally
// note: when tracking we need to avoid ever touching the MI_PADDING since
// that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`)
mi_track_mem_undefined(block, mi_page_usable_block_size(page));
// zero the block? note: we need to zero the full block size (issue #63)
if mi_unlikely(zero) {
mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
_mi_memzero_aligned(block, zsize);
const size_t zsize = (page->is_zero ? sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size);
_mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
}
#if (MI_DEBUG>0)
if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); }
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED
if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); }
#elif (MI_SECURE!=0)
if (!zero) { block->next = 0; } // don't leak internal data
#endif
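
The reworked zeroing deliberately stops MI_PADDING_SIZE short of the end of the block: under tracking, the padding is a red-zone that must never be touched. A worked example with hypothetical sizes:

// Assume page->xblock_size == 64, MI_PADDING_SIZE == 16, sizeof(block->next) == 8.
//
// fresh OS-zeroed page:  zsize = 8 + 16 = 24  ->  _mi_memzero_aligned(block, 24 - 16)
//                        clears only the 8-byte free-list link that was just consumed
// recycled page:         zsize = 64           ->  _mi_memzero_aligned(block, 64 - 16)
//                        clears all 48 usable bytes, never the 16 padding bytes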
@@ -62,10 +68,13 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
#endif
#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
#if (MI_DEBUG>1)
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
#endif
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
uint8_t* fill = (uint8_t*)padding - delta;
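
For orientation, the trailer written here sits directly behind the usable bytes. The struct below matches mimalloc's mi_padding_t of this era, though the exact layout should be treated as an assumption:

typedef struct mi_padding_s {
  uint32_t canary;  // mi_ptr_encode(page, block, page->keys); detects writes into the trailer
  uint32_t delta;   // usable block size minus the size the caller actually requested
} mi_padding_t;

// |<-- requested bytes -->|<-- delta fill bytes -->|[ canary | delta ]
// ^ block                 ^ fill                   ^ padding = block + usable block size
// each fill byte is MI_DEBUG_PADDING; mi_verify_padding later re-checks them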
@@ -94,6 +103,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
mi_track_malloc(p,size,zero);
return p;
}
@@ -122,6 +132,7 @@ extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
mi_track_malloc(p,size,zero);
return p;
}
}
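
mi_track_malloc is placed on both allocation paths right before the pointer escapes, so the tracker sees every live block. Assuming a mimalloc build with valgrind tracking enabled, the payoff is ordinary leak reports, e.g.:

#include <mimalloc.h>

int main(void) {
  (void)mi_malloc(100);  // never freed: with tracking built in, valgrind's leak
                         // checker can now report roughly "100 bytes definitely
                         // lost", since mi_track_malloc registered the block
  return 0;
}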
@@ -176,16 +187,19 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
return false;
}
#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
bool is_double_free = false;
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
{
// Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
// (continue in separate function to improve code generation)
return mi_check_is_double_freex(page, block);
is_double_free = mi_check_is_double_freex(page, block);
}
return false;
return is_double_free;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
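
The quick check decodes the block's first field as if the block were already freed; only when the result looks like a plausible in-page free-list pointer does the slow path confirm. A minimal trigger, assuming a debug build with MI_ENCODE_FREELIST:

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(32);
  mi_free(p);  // links the block into the page's local free list
  mi_free(p);  // the decoded first field now points into the same page,
               // so mi_check_is_double_free reports it (debug builds only)
  return 0;
}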
@@ -199,12 +213,19 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------
#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
*bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
mi_track_mem_defined(padding,sizeof(mi_padding_t));
*delta = padding->delta;
return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
uint32_t canary = padding->canary;
uintptr_t keys[2];
keys[0] = page->keys[0];
keys[1] = page->keys[1];
bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
return ok;
}
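
The rewrite above is the general pattern for reading tracker-protected metadata: open the red-zone, copy everything needed into locals, re-protect, then compute only on the locals. A hypothetical helper (not part of mimalloc) makes the shape explicit:

static uint32_t mi_read_tracked_u32(const uint32_t* field) {
  mi_track_mem_defined(field, sizeof(*field));   // temporarily open the red-zone
  const uint32_t v = *field;                     // copy out while accessible
  mi_track_mem_noaccess(field, sizeof(*field));  // re-protect before returning
  return v;                                      // callers only ever see the local copy
}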
// Return the exact usable size of a block.
@@ -212,7 +233,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
return (ok ? bsize - delta : 0);
}
@@ -226,13 +247,16 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
*size = bsize - delta;
uint8_t* fill = (uint8_t*)block + bsize - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
mi_track_mem_defined(fill,maxpad);
for (size_t i = 0; i < maxpad; i++) {
if (fill[i] != MI_DEBUG_PADDING) {
*wrong = bsize - delta + i;
return false;
ok = false;
break;
}
}
return true;
mi_track_mem_noaccess(fill,maxpad);
return ok;
}
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
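
mi_verify_padding now breaks out of the loop instead of returning early, so the closing mi_track_mem_noaccess runs on the failure path as well. This padding is what turns a one-byte heap overflow into a report at free time; a hypothetical demo for a debug build:

#include <mimalloc.h>
#include <string.h>

int main(void) {
  char* p = (char*)mi_malloc(10);
  memset(p, 'x', 11);  // one byte past the requested size lands in the fill bytes
  mi_free(p);          // mi_verify_padding finds fill[0] != MI_DEBUG_PADDING and
                       // an overflow in the heap block is reported (debug build)
  return 0;
}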
@@ -331,14 +355,14 @@ static void mi_stat_huge_free(const mi_page_t* page) {
// Free
// ------------------------------------------------------
// multi-threaded free
// multi-threaded free (or free in huge block)
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// The padding check may access the non-thread-owned page for the key values.
// That is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
#if (MI_DEBUG!=0)
#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
#endif
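
Why shrink the padding before queueing: a cross-thread free stores its bookkeeping inside the dead block itself, as sketched below.

// mi_block_set_next(page, block, ...) writes sizeof(mi_block_t) bytes into the
// block for the delayed-free link. For a tiny allocation (requested size smaller
// than sizeof(mi_block_t)) that write would spill into the padding's fill bytes,
// so mi_padding_shrink first lowers padding->delta until at least
// sizeof(mi_block_t) bytes count as user space -- otherwise the link write itself
// would be flagged as an overflow by the next padding check.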
@@ -393,11 +417,12 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
// and push it on the free list
//const size_t bsize = mi_page_block_size(page);
if mi_likely(local) {
// owning thread can free a block directly
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_block_set_next(page, block, page->local_free);
@@ -428,8 +453,9 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept {
mi_page_t* const page = _mi_segment_page_of(segment, p);
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
mi_stat_free(page, block);
_mi_free_block(page, local, block);
mi_stat_free(page, block); // stat_free may access the padding
mi_track_free(p);
_mi_free_block(page, local, block);
}
// Get the segment data belonging to a pointer
@@ -481,20 +507,21 @@ void mi_free(void* p) mi_attr_noexcept
if mi_unlikely(mi_check_is_double_free(page,block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
#if (MI_DEBUG!=0)
#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_track_free(p);
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
_mi_page_retire(page);
}
}
}
else {
// non-local, aligned blocks, or a full page; use the more generic path
// note: recalc page in generic to improve code generation
mi_free_generic(segment, tid == segment->thread_id, p);
}
}
}
bool _mi_free_delayed_block(mi_block_t* block) {
@@ -627,10 +654,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
// else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
// (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
#if !MI_TRACK_ENABLED
if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
// todo: adjust potential padding to reflect the new size?
return p; // reallocation still fits and not more than 50% waste
}
#endif
void* newp = mi_heap_malloc(heap,newsize);
if mi_likely(newp != NULL) {
if (zero && newsize > size) {
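
The in-place shortcut is compiled out under tracking because the tracker still believes the block has its originally registered size; returning the same pointer for a different size would leave that record stale. Valgrind does expose a resize request, so a tracker-aware variant could plausibly look like this sketch (an alternative, not what the commit does):

#if MI_TRACK_ENABLED
if mi_unlikely(newsize <= size && newsize >= (size/2) && newsize > 0) {
  VALGRIND_RESIZEINPLACE_BLOCK(p, size, newsize, MI_PADDING_SIZE);
  return p;  // tracker record now matches the new logical size
}
#endif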