merge from dev-trace

This commit is contained in:
Daan Leijen 2021-12-10 11:54:41 -08:00
commit d86fc87fa1
7 changed files with 168 additions and 94 deletions

View file

@ -130,7 +130,8 @@ if(MI_DEBUG_TRACE)
set(MI_DEBUG_TRACE OFF) set(MI_DEBUG_TRACE OFF)
else() else()
message(STATUS "Enable allocation trace in each heap block (MI_DEBUG_TRACE=ON)") message(STATUS "Enable allocation trace in each heap block (MI_DEBUG_TRACE=ON)")
list(APPEND mi_defines MI_DEBUG_TRACE=1) list(APPEND mi_defines MI_DEBUG_TRACE=1)
set(CMAKE_ENABLE_EXPORTS TRUE)
endif() endif()
endif() endif()
@ -296,6 +297,7 @@ else()
endif() endif()
message(STATUS "Compiler flags : ${mi_cflags}") message(STATUS "Compiler flags : ${mi_cflags}")
message(STATUS "Compiler defines : ${mi_defines}") message(STATUS "Compiler defines : ${mi_defines}")
message(STATUS "Link libraries : ${mi_libraries}")
message(STATUS "Build targets : ${mi_build_targets}") message(STATUS "Build targets : ${mi_build_targets}")
message(STATUS "") message(STATUS "")

View file

@ -55,7 +55,7 @@ void _mi_error_message(int err, const char* fmt, ...);
#if MI_DEBUG_TRACE > 0 #if MI_DEBUG_TRACE > 0
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip); void _mi_stack_trace_capture(void** strace, size_t len, size_t skip);
void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail); void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail);
#endif #endif
// random.c // random.c
@ -152,6 +152,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p); mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block); bool _mi_free_delayed_block(mi_block_t* block);
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size); void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
void _mi_error_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg);
#if MI_DEBUG>1 #if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page); bool _mi_page_is_valid(mi_page_t* page);
@ -416,7 +417,7 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
----------------------------------------------------------- */ ----------------------------------------------------------- */
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) { static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + 2*sizeof(void*))); // +2 for the minimal padding (see MI_PAGES_DIRECT) mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE));
const size_t idx = _mi_wsize_from_size(size); const size_t idx = _mi_wsize_from_size(size);
mi_assert_internal(idx < MI_PAGES_DIRECT); mi_assert_internal(idx < MI_PAGES_DIRECT);
return heap->pages_free_direct[idx]; return heap->pages_free_direct[idx];
@ -484,7 +485,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
// Get the block size of a page (special case for huge objects) // Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) { static inline size_t mi_page_block_size(const mi_page_t* page) {
const size_t bsize = page->xblock_size; const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0); mi_assert_internal(bsize > 0);
if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) { if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
return bsize; return bsize;
} }
@ -680,7 +681,8 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
// check for free list corruption: is `next` at least in the same page? // check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned? // TODO: check if `next` is `page->block_size` aligned?
if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) { if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); _mi_error_trace_with_predecessor(page, block, "free block");
_mi_error_message(EFAULT, "corrupted free list entry of size %zu at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
next = NULL; next = NULL;
} }
return next; return next;

View file

@ -60,7 +60,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_PADDING 1 #define MI_PADDING 1
#endif #endif
#if !defined(MI_DEBUG_TRACE) // store stack trace at each allocation #if !defined(MI_DEBUG_TRACE) // store stack trace at each allocation
#define MI_DEBUG_TRACE MI_DEBUG #define MI_DEBUG_TRACE MI_DEBUG
#endif #endif
@ -79,6 +79,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Encoded free lists allow detection of corrupted free lists // Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s. // and can detect buffer overflows, modify after free, and double `free`s.
// (It must be enabled if MI_PADDING is enabled as the same mechanism is used to encode the canary.)
#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0) #if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0)
#define MI_ENCODE_FREELIST 1 #define MI_ENCODE_FREELIST 1
#endif #endif
@ -418,11 +419,20 @@ typedef struct mi_random_cxt_s {
} mi_random_ctx_t; } mi_random_ctx_t;
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows // If MI_PADDING is enabled, there is a padding structure at the end of the blocks to check for buffer overflows
// The full layout is of a block becomes:
//
// |--- data ---------|--- fill ----------|--- struct padding_s -----------------------------------------|
// |.. actual data .. | .. delta bytes .. | canary_lo | .. extra .. | canary | delta | .. stack trace .. |
//
// where the delta bytes are used to align the padding structure and to detect byte precise overflow.
// The `canary` is used to see if `delta` and `strace` are not corrupted, while `canary_lo` can
// detect overflow into the `extra` padding (where the stack trace could remain valid)
#if (MI_PADDING) #if (MI_PADDING)
typedef struct mi_padding_s { typedef struct mi_padding_s {
#if MI_PADDING_EXTRA > 0 #if MI_PADDING_EXTRA > 0
uint32_t canary_lo; uint32_t canary_lo; // extra canary to detect initial overflow
uint8_t extra[MI_PADDING_EXTRA]; uint8_t extra[MI_PADDING_EXTRA];
#endif #endif
uint32_t canary; // encoded block value to check validity of the delta (in case of overflow) uint32_t canary; // encoded block value to check validity of the delta (in case of overflow)
@ -431,7 +441,7 @@ typedef struct mi_padding_s {
void* strace[MI_DEBUG_TRACE_LEN]; // stack trace at allocation time void* strace[MI_DEBUG_TRACE_LEN]; // stack trace at allocation time
#endif #endif
} mi_padding_t; } mi_padding_t;
#define MI_PADDING_MINSIZE (8) // 2*sizeof(uint32_t) #define MI_PADDING_MINSIZE (8) // 2*sizeof(uint32_t)
#define MI_PADDING_SIZE (sizeof(mi_padding_t)) #define MI_PADDING_SIZE (sizeof(mi_padding_t))
#else #else
#define MI_PADDING_MINSIZE (0) #define MI_PADDING_MINSIZE (0)
@ -439,6 +449,8 @@ typedef struct mi_padding_s {
#endif #endif
// add 2 more for minimal padding (MI_PADDING && !MI_DEBUG_TRACE && MI_PADDING_EXTRA==0) // add 2 more for minimal padding (MI_PADDING && !MI_DEBUG_TRACE && MI_PADDING_EXTRA==0)
// since this is used in secure mode, we optimize this case by allowing
// `heap_malloc_small` to also work with `MI_WSMALL_SIZE_MAX + MI_PADDING_MINSIZE` sizes.
// see `init.c` where all are initialized with an empty page and the check at `heap_malloc_small`. // see `init.c` where all are initialized with an empty page and the check at `heap_malloc_small`.
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + 1 + 2) #define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + 1 + 2)

View file

@ -66,7 +66,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
memset(padding->extra, 0, sizeof(padding->extra)); memset(padding->extra, 0, sizeof(padding->extra));
#endif #endif
#if (MI_DEBUG_TRACE) #if (MI_DEBUG_TRACE)
_mi_stack_trace_capture(padding->strace, MI_DEBUG_TRACE_LEN, 2); _mi_stack_trace_capture(padding->strace, MI_DEBUG_TRACE_LEN, 2 /*frames to skip*/);
#endif #endif
uint8_t* fill = (uint8_t*)padding - delta; uint8_t* fill = (uint8_t*)padding - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
@ -88,7 +88,7 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
} }
#endif #endif
#if (MI_PADDING_EXTRA > 0 || MI_DEBUG_TRACE > 0) #if (MI_PADDING_EXTRA > 0 || MI_DEBUG_TRACE > 0)
// with extra padding it is not guaranteed the size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX, so we need to check // with extra padding it is not guaranteed the size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE, so we need an extra check
if (size + MI_PADDING_SIZE > MI_SMALL_SIZE_MAX) { if (size + MI_PADDING_SIZE > MI_SMALL_SIZE_MAX) {
p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
} }
@ -114,7 +114,7 @@ extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexce
// The main allocation function // The main allocation function
extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
if (mi_likely(size + MI_PADDING_SIZE - MI_PADDING_MINSIZE <= MI_SMALL_SIZE_MAX)) { if (mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE)) {
return mi_heap_malloc_small(heap, size); return mi_heap_malloc_small(heap, size);
} }
else else
@ -182,59 +182,13 @@ mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
} }
// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so only enabled for secure mode 4
// ------------------------------------------------------
#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
while (list != NULL) {
if (elem==list) return true;
list = mi_block_next(page, list);
}
return false;
}
static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
// The decoded value is in the same page (or NULL).
// Walk the free lists to verify positively if it is already freed
if (mi_list_contains(page, page->free, block) ||
mi_list_contains(page, page->local_free, block) ||
mi_list_contains(page, mi_page_thread_free(page), block))
{
_mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
return true;
}
return false;
}
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
{
// Suspicious: decoded value in a block is in the same page (or NULL) -- maybe a double free?
// (continue in separate function to improve code generation)
return mi_check_is_double_freex(page, block);
}
return false;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
MI_UNUSED(page);
MI_UNUSED(block);
return false;
}
#endif
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block // Check for heap block overflow by setting up padding at the end of the block
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) #if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
static mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { static mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
if (page->capacity == 0) return NULL; // page may have been freed in double free check
*bsize = mi_page_usable_block_size(page); *bsize = mi_page_usable_block_size(page);
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
*delta = padding->delta; *delta = padding->delta;
@ -247,12 +201,12 @@ static mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_bloc
} }
#if MI_DEBUG_TRACE > 0 #if MI_DEBUG_TRACE > 0
static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block) { static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
size_t bsize; size_t bsize;
size_t delta; size_t delta;
const mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize); mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize);
if (padding != NULL) { if (padding != NULL) {
_mi_stack_trace_print(padding->strace, MI_DEBUG_TRACE_LEN, block, bsize, bsize - delta); _mi_stack_trace_print(msg, &padding->strace[0], MI_DEBUG_TRACE_LEN, block, bsize, bsize - delta);
} }
} }
#else #else
@ -261,7 +215,7 @@ static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block) {
} }
#endif #endif
// Return the exact usable size of a block. // Return the exact usable size of a block. (whereas `mi_page_usable_block_size` returns the total available size without padding)
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
size_t bsize; size_t bsize;
size_t delta; size_t delta;
@ -298,8 +252,8 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
size_t size; size_t size;
size_t wrong; size_t wrong;
if (!mi_verify_padding(page,block,&size,&wrong)) { if (mi_unlikely(!mi_verify_padding(page,block,&size,&wrong))) {
_mi_error_trace(page, block); _mi_error_trace(page, block, NULL);
_mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong ); _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
} }
} }
@ -316,29 +270,99 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
if (padding == NULL) return; if (padding == NULL) return;
if ((bsize - delta) >= min_size) return; // usually already enough space if ((bsize - delta) >= min_size) return; // usually already enough space
mi_assert_internal(bsize >= min_size); mi_assert_internal(bsize >= min_size);
if (bsize < min_size) return; // should never happen if (bsize < min_size) return; // should never happen
size_t new_delta = (bsize - min_size); size_t new_delta = (bsize - min_size);
mi_assert_internal(new_delta < bsize); mi_assert_internal(new_delta < bsize);
padding->delta = (uint32_t)new_delta; padding->delta = (uint32_t)new_delta;
} }
#else #else
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
MI_UNUSED(page); MI_UNUSED(page); MI_UNUSED(block);
MI_UNUSED(block);
} }
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
MI_UNUSED(block); MI_UNUSED(block);
return mi_page_usable_block_size(page); return mi_page_usable_block_size(page);
} }
static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
MI_UNUSED(page); MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(min_size);
MI_UNUSED(block); }
MI_UNUSED(min_size);
static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(msg);
} }
#endif #endif
static const mi_block_t* mi_block_predecessor(const mi_page_t* page, const mi_block_t* block) {
const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0);
if (bsize >= MI_HUGE_BLOCK_SIZE) return NULL;
const mi_block_t* prev = (const mi_block_t*)((uint8_t*)block - bsize);
uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, NULL);
if (pstart > (uint8_t*)prev) return NULL;
return prev;
}
// Used if a free list is corrupted which is usually caused by the previous block(s)
void _mi_error_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg) {
const mi_block_t* prev = mi_block_predecessor(page,block);
if (prev != NULL) {
_mi_error_trace(page, prev, "predecessor block");
}
_mi_error_trace(page, block, msg);
}
// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so only enabled for secure mode 4
// ------------------------------------------------------
#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
while (list != NULL) {
if (elem==list) return true;
list = mi_block_next(page, list);
}
return false;
}
static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
// The decoded value is in the same page (or NULL).
// Walk the free lists to verify positively if it is already freed
if (mi_list_contains(page, page->free, block) ||
mi_list_contains(page, page->local_free, block) ||
mi_list_contains(page, mi_page_thread_free(page), block))
{
_mi_error_trace(page, block, NULL);
_mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_usable_size_of(page,block));
return true;
}
return false;
}
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
{
// Suspicious: decoded value in a block is in the same page (or NULL) -- maybe a double free?
// (continue in separate function to improve code generation)
return mi_check_is_double_freex(page, block);
}
return false;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
MI_UNUSED(page);
MI_UNUSED(block);
return false;
}
#endif
// only maintain stats for smaller objects if requested // only maintain stats for smaller objects if requested
#if (MI_STAT>0) #if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@ -394,7 +418,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_check_padding(page, block); mi_check_padding(page, block);
mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
#if (MI_DEBUG!=0) #if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_usable_size(block)); memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
#endif #endif
// huge page segments are always abandoned and can be freed immediately // huge page segments are always abandoned and can be freed immediately
@ -450,10 +474,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
// and push it on the free list // and push it on the free list
if (mi_likely(local)) { if (mi_likely(local)) {
// owning thread can free a block directly // owning thread can free a block directly
mi_check_padding(page, block);
if (mi_unlikely(mi_check_is_double_free(page, block))) return; if (mi_unlikely(mi_check_is_double_free(page, block))) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0) #if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
#endif #endif
mi_block_set_next(page, block, page->local_free); mi_block_set_next(page, block, page->local_free);
page->local_free = block; page->local_free = block;
@ -532,11 +556,11 @@ void mi_free(void* p) mi_attr_noexcept
if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned // local, and not full or aligned
mi_block_t* block = (mi_block_t*)(p); mi_block_t* block = (mi_block_t*)(p);
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
mi_check_padding(page, block); mi_check_padding(page, block);
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_stat_free(page, block); mi_stat_free(page, block);
#if (MI_DEBUG!=0) #if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
#endif #endif
mi_block_set_next(page, block, page->local_free); mi_block_set_next(page, block, page->local_free);
page->local_free = block; page->local_free = block;
@ -677,7 +701,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start); memset((uint8_t*)newp + start, 0, newsize - start);
} }
_mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize)); if (mi_likely((uintptr_t)p % MI_INTPTR_SIZE == 0)) {
_mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
}
else {
_mi_memcpy(newp, p, (newsize > size ? size : newsize));
}
mi_free(p); // only free if successful mi_free(p); // only free if successful
} }
return newp; return newp;

View file

@ -360,8 +360,9 @@ void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
#include <dbghelp.h> #include <dbghelp.h>
#pragma comment(lib,"dbghelp") #pragma comment(lib,"dbghelp")
void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) { void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
_mi_fprintf(NULL, NULL, "trace for block %p of size %zu (%zu total in block), allocated at:\n", block, avail, bsize); _mi_fprintf(NULL, NULL, "trace %s at %p of size %zu (%zub total available), backtrace:\n",
(msg==NULL ? "block" : msg), block, avail, bsize);
HANDLE current_process = GetCurrentProcess(); HANDLE current_process = GetCurrentProcess();
SymInitialize(current_process, NULL, TRUE); SymInitialize(current_process, NULL, TRUE);
PSYMBOL_INFO info = (PSYMBOL_INFO)_malloca(sizeof(SYMBOL_INFO) + 256 * sizeof(TCHAR)); PSYMBOL_INFO info = (PSYMBOL_INFO)_malloca(sizeof(SYMBOL_INFO) + 256 * sizeof(TCHAR));
@ -371,10 +372,10 @@ void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block
info->SizeOfStruct = sizeof(SYMBOL_INFO); info->SizeOfStruct = sizeof(SYMBOL_INFO);
for (size_t i = 0; i < len && strace[i] != NULL; i++) { for (size_t i = 0; i < len && strace[i] != NULL; i++) {
if (SymFromAddr(current_process, (DWORD64)(strace[i]), 0, info)) { if (SymFromAddr(current_process, (DWORD64)(strace[i]), 0, info)) {
_mi_fprintf(NULL, NULL, " frame %2zu: %8p: %s\n", i, strace[i], info->Name); _mi_fprintf(NULL, NULL, " %2zu: %8p: %s\n", i, strace[i], info->Name);
} }
else { else {
_mi_fprintf(NULL, NULL, " frame %2zu: %8p: <unknown address: error: 0x%04x>\n", i, strace[i], GetLastError()); _mi_fprintf(NULL, NULL, " %2zu: %8p: <unknown address: error: 0x%04x>\n", i, strace[i], GetLastError());
} }
} }
} }
@ -383,30 +384,31 @@ void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block
#define MI_TRACE_LEN (64) #define MI_TRACE_LEN (64)
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) { void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
if (_mi_preloading()) return; if (_mi_preloading()) return;
if (!mi_recurse_enter()) return; if (!mi_recurse_enter()) return; // needed for pthreads
void* trace[MI_TRACE_LEN]; void* trace[MI_TRACE_LEN];
backtrace(trace, MI_TRACE_LEN); backtrace(trace, MI_TRACE_LEN);
skip += 4;
for (size_t i = 0; i < len; i++) { for (size_t i = 0; i < len; i++) {
void* p = (i + skip < MI_TRACE_LEN ? trace[i+skip] : NULL); void* p = (i + skip < MI_TRACE_LEN ? trace[i+skip] : NULL);
strace[i] = p; strace[i] = p;
} }
mi_recurse_exit(); mi_recurse_exit();
} }
void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) { void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
_mi_fprintf(NULL, NULL, "trace for block %p of size %zu (%zu total in block), allocated at:\n", block, avail, bsize); _mi_fprintf(NULL, NULL, "trace %s at %p of size %zu (%zub total available), backtrace:\n",
char** names = backtrace_symbols((void**)strace, len); (msg==NULL ? "block" : msg), block, avail, bsize);
char** names = backtrace_symbols(strace, len);
for (size_t i = 0; i < len && strace[i] != NULL; i++) { for (size_t i = 0; i < len && strace[i] != NULL; i++) {
_mi_fprintf(NULL, NULL, " frame %2zu: %8p: %s\n", i, strace[i], (names[i] == NULL ? "<unknown>" : names[i])); _mi_fprintf(NULL, NULL, " %2zu: %8p: %s\n", i, strace[i], (names == NULL || names[i] == NULL ? "<unknown>" : names[i]));
} }
// free(names); // avoid potential recursion and leak the trace
} }
#else #else
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) { void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(skip); MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(skip);
} }
void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) { void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(block); MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(block);
MI_UNUSED(bsize); MI_UNUSED(avail); MI_UNUSED(bsize); MI_UNUSED(avail); MI_UNUSED(msg);
} }
#endif #endif

View file

@ -15,6 +15,7 @@ if (NOT CMAKE_BUILD_TYPE)
endif() endif()
endif() endif()
# Import mimalloc (if installed) # Import mimalloc (if installed)
find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH)
message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR}") message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR}")

View file

@ -176,7 +176,9 @@ void mi_bins() {
static void double_free1(); static void double_free1();
static void double_free2(); static void double_free2();
static void corrupt_free(); static void double_free3();
static void corrupt_free1();
static void corrupt_free2();
static void block_overflow1(); static void block_overflow1();
static void block_overflow2(); static void block_overflow2();
static void invalid_free(); static void invalid_free();
@ -192,7 +194,9 @@ int main() {
// detect double frees and heap corruption // detect double frees and heap corruption
// double_free1(); // double_free1();
// double_free2(); // double_free2();
// corrupt_free(); // double_free3();
// corrupt_free1();
// corrupt_free2();
// block_overflow1(); // block_overflow1();
// block_overflow2(); // block_overflow2();
// test_aslr(); // test_aslr();
@ -281,13 +285,35 @@ static void double_free2() {
fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432); fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432);
} }
static void double_free3() {
void* p1 = malloc(32);
void* p2 = malloc(32);
void* p3 = malloc(32);
free(p2);
free(p1);
free(p2);
free(p3);
}
static void corrupt_free1() {
void* p1 = malloc(32);
void* p2 = malloc(32);
void* p3 = malloc(32);
free(p2);
memset(p2, 0, 8); // corrupt free list entry
mi_collect(true);
p2 = malloc(32); // should trigger corrupted free list
free(p1);
free(p2);
free(p3);
}
// Try to corrupt the heap through buffer overflow // Try to corrupt the heap through buffer overflow
#define N 256 #define N 256
#define SZ 64 #define SZ 64
#define OVF_SZ 32 #define OVF_SZ 32
static void corrupt_free() { static void corrupt_free2() {
void* p[N]; void* p[N];
// allocate // allocate
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {