diff --git a/CMakeLists.txt b/CMakeLists.txt
index fc250fc0..f67d3323 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -130,7 +130,8 @@ if(MI_DEBUG_TRACE)
     set(MI_DEBUG_TRACE OFF)
   else()
     message(STATUS "Enable allocation trace in each heap block (MI_DEBUG_TRACE=ON)")
-    list(APPEND mi_defines MI_DEBUG_TRACE=1)
+    list(APPEND mi_defines MI_DEBUG_TRACE=1)
+    set(CMAKE_ENABLE_EXPORTS TRUE)
   endif()
 endif()
 
@@ -296,6 +297,7 @@ else()
 endif()
 message(STATUS "Compiler flags      : ${mi_cflags}")
 message(STATUS "Compiler defines    : ${mi_defines}")
+message(STATUS "Link libraries      : ${mi_libraries}")
 message(STATUS "Build targets       : ${mi_build_targets}")
 message(STATUS "")
 
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index c019b9b5..ffe17b56 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -55,7 +55,7 @@ void _mi_error_message(int err, const char* fmt, ...);
 
 #if MI_DEBUG_TRACE > 0
 void _mi_stack_trace_capture(void** strace, size_t len, size_t skip);
-void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail);
+void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail);
 #endif
 
 // random.c
@@ -152,6 +152,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
 void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
+void _mi_error_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg);
 
 #if MI_DEBUG>1
 bool _mi_page_is_valid(mi_page_t* page);
@@ -416,7 +417,7 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
 ----------------------------------------------------------- */
 
 static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
-  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + 2*sizeof(void*)));  // +2 for the minimal padding (see MI_PAGES_DIRECT)
+  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE));
   const size_t idx = _mi_wsize_from_size(size);
   mi_assert_internal(idx < MI_PAGES_DIRECT);
   return heap->pages_free_direct[idx];
@@ -484,7 +485,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
 // Get the block size of a page (special case for huge objects)
 static inline size_t mi_page_block_size(const mi_page_t* page) {
   const size_t bsize = page->xblock_size;
-  mi_assert_internal(bsize > 0); 
+  mi_assert_internal(bsize > 0);
   if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
     return bsize;
   }
@@ -680,7 +681,8 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
     // check for free list corruption: is `next` at least in the same page?
     // TODO: check if `next` is `page->block_size` aligned?
     if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
-      _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
+      _mi_error_trace_with_predecessor(page, block, "free block");
+      _mi_error_message(EFAULT, "corrupted free list entry of size %zu at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
       next = NULL;
     }
     return next;
diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index baabaa96..82ceaac4 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -60,7 +60,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_PADDING  1
 #endif
 
-#if !defined(MI_DEBUG_TRACE)   // store stack trace at each allocation
+#if !defined(MI_DEBUG_TRACE)    // store stack trace at each allocation
 #define MI_DEBUG_TRACE  MI_DEBUG
 #endif
 
@@ -79,6 +79,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Encoded free lists allow detection of corrupted free lists
 // and can detect buffer overflows, modify after free, and double `free`s.
+// (It must be enabled if MI_PADDING is enabled as the same mechanism is used to encode the canary.)
 #if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0)
 #define MI_ENCODE_FREELIST  1
 #endif
@@ -418,11 +419,20 @@ typedef struct mi_random_cxt_s {
 } mi_random_ctx_t;
 
 
-// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
+// If MI_PADDING is enabled, there is a padding structure at the end of the blocks to check for buffer overflows.
+// The full layout of a block becomes:
+//
+// |--- data ---------|--- fill ----------|--- struct padding_s -----------------------------------------|
+// |.. actual data .. | .. delta bytes .. | canary_lo | .. extra .. | canary | delta | .. stack trace .. |
+//
+// where the delta bytes are used to align the padding structure and to detect byte-precise overflow.
+// The `canary` is used to check that `delta` and `strace` are not corrupted, while `canary_lo` can
+// detect overflow into the `extra` padding (where the stack trace could remain valid).
+
 #if (MI_PADDING)
 typedef struct mi_padding_s {
   #if MI_PADDING_EXTRA > 0
-  uint32_t canary_lo;
+  uint32_t canary_lo;   // extra canary to detect initial overflow
   uint8_t  extra[MI_PADDING_EXTRA];
   #endif
   uint32_t canary;      // encoded block value to check validity of the delat (in case of overflow)
@@ -431,7 +441,7 @@ typedef struct mi_padding_s {
   void*    strace[MI_DEBUG_TRACE_LEN];  // stack trace at allocation time
   #endif
 } mi_padding_t;
-#define MI_PADDING_MINSIZE  (8)  // 2*sizeof(uint32_t)
+#define MI_PADDING_MINSIZE  (8)   // 2*sizeof(uint32_t)
 #define MI_PADDING_SIZE     (sizeof(mi_padding_t))
 #else
 #define MI_PADDING_MINSIZE  (0)
@@ -439,6 +449,8 @@ typedef struct mi_padding_s {
 #endif
 
 // add 2 more for minimal padding (MI_PADDING && !MI_DEBUG_TRACE && MI_PADDING_EXTRA==0)
+// since this is used in secure mode, we optimize this case by allowing
+// `heap_malloc_small` to also work with `MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE` sizes.
 // see `init.c` where all are initialized with an empty page and the check at `heap_malloc_small`.
 #define MI_PAGES_DIRECT  (MI_SMALL_WSIZE_MAX + 1 + 2)
 
diff --git a/src/alloc.c b/src/alloc.c
index 79d90a85..9c52f7d1 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -66,7 +66,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   memset(padding->extra, 0, sizeof(padding->extra));
   #endif
   #if (MI_DEBUG_TRACE)
-  _mi_stack_trace_capture(padding->strace, MI_DEBUG_TRACE_LEN, 2);
+  _mi_stack_trace_capture(padding->strace, MI_DEBUG_TRACE_LEN, 2 /*frames to skip*/);
   #endif
   uint8_t* fill = (uint8_t*)padding - delta;
   const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // set at most N initial padding bytes
@@ -88,7 +88,7 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
   }
   #endif
   #if (MI_PADDING_EXTRA > 0 || MI_DEBUG_TRACE > 0)
-  // with extra padding it is not guaranteed the size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX, so we need to check
+  // with extra padding it is not guaranteed that size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE, so we need an extra check
   if (size + MI_PADDING_SIZE > MI_SMALL_SIZE_MAX) {
     p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
   }
@@ -114,7 +114,7 @@ extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexce
 
 // The main allocation function
 extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  if (mi_likely(size + MI_PADDING_SIZE - MI_PADDING_MINSIZE <= MI_SMALL_SIZE_MAX)) {
+  if (mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE)) {
     return mi_heap_malloc_small(heap, size);
   }
   else
@@ -182,59 +182,13 @@ mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
 }
 
 
-// ------------------------------------------------------
-// Check for double free in secure and debug mode
-// This is somewhat expensive so only enabled for secure mode 4
-// ------------------------------------------------------
-
-#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
-// linear check if the free list contains a specific element
-static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
-  while (list != NULL) {
-    if (elem==list) return true;
-    list = mi_block_next(page, list);
-  }
-  return false;
-}
-
-static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
-  // The decoded value is in the same page (or NULL).
-  // Walk the free lists to verify positively if it is already freed
-  if (mi_list_contains(page, page->free, block) ||
-      mi_list_contains(page, page->local_free, block) ||
-      mi_list_contains(page, mi_page_thread_free(page), block))
-  {
-    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
-    return true;
-  }
-  return false;
-}
-
-static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
-  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
-  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&   // quick check: aligned pointer?
-      (n==NULL || mi_is_in_same_page(block, n)))  // quick check: in same page or NULL?
-  {
-    // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
-    // (continue in separate function to improve code generation)
-    return mi_check_is_double_freex(page, block);
-  }
-  return false;
-}
-#else
-static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
-  MI_UNUSED(page);
-  MI_UNUSED(block);
-  return false;
-}
-#endif
-
 
 // ---------------------------------------------------------------------------
 // Check for heap block overflow by setting up padding at the end of the block
 // ---------------------------------------------------------------------------
 
 #if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
 static mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
+  if (page->capacity == 0) return NULL;  // page may have been freed in double free check
   *bsize = mi_page_usable_block_size(page);
   mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
   *delta = padding->delta;
@@ -247,12 +201,12 @@ static mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_bloc
 }
 
 #if MI_DEBUG_TRACE > 0
-static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block) {
+static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
   size_t bsize;
   size_t delta;
-  const mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize);
+  mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize);
   if (padding != NULL) {
-    _mi_stack_trace_print(padding->strace, MI_DEBUG_TRACE_LEN, block, bsize, bsize - delta);
+    _mi_stack_trace_print(msg, &padding->strace[0], MI_DEBUG_TRACE_LEN, block, bsize, bsize - delta);
   }
 }
 #else
@@ -261,7 +215,7 @@ static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block) {
 }
 #endif
 
-// Return the exact usable size of a block.
+// Return the exact usable size of a block. (whereas `mi_page_usable_block_size` returns the total available size without padding)
 static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
   size_t bsize;
   size_t delta;
@@ -298,8 +252,8 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   size_t size;
   size_t wrong;
-  if (!mi_verify_padding(page,block,&size,&wrong)) {
-    _mi_error_trace(page, block);
+  if (mi_unlikely(!mi_verify_padding(page,block,&size,&wrong))) {
+    _mi_error_trace(page, block, NULL);
     _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
   }
 }
@@ -316,29 +270,99 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
   if (padding == NULL) return;
   if ((bsize - delta) >= min_size) return;  // usually already enough space
   mi_assert_internal(bsize >= min_size);
-  if (bsize < min_size) return;  // should never happen
+  if (bsize < min_size) return;   // should never happen
   size_t new_delta = (bsize - min_size);
   mi_assert_internal(new_delta < bsize);
   padding->delta = (uint32_t)new_delta;
 }
 #else
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
-  MI_UNUSED(page);
-  MI_UNUSED(block);
+  MI_UNUSED(page); MI_UNUSED(block);
 }
 static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
-  MI_UNUSED(block);
+  MI_UNUSED(block); 
   return mi_page_usable_block_size(page);
 }
 static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
-  MI_UNUSED(page);
-  MI_UNUSED(block);
-  MI_UNUSED(min_size);
+  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(min_size);
+}
+
+static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
+  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(msg);
 }
 #endif
 
+static const mi_block_t* mi_block_predecessor(const mi_page_t* page, const mi_block_t* block) {
+  const size_t bsize = page->xblock_size;
+  mi_assert_internal(bsize > 0);
+  if (bsize >= MI_HUGE_BLOCK_SIZE) return NULL;
+  const mi_block_t* prev = (const mi_block_t*)((uint8_t*)block - bsize);
+  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, NULL);
+  if (pstart > (uint8_t*)prev) return NULL;
+  return prev;
+}
+
+// Used if a free list is corrupted, which is usually caused by the previous block(s)
+void _mi_error_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg) {
+  const mi_block_t* prev = mi_block_predecessor(page,block);
+  if (prev != NULL) {
+    _mi_error_trace(page, prev, "predecessor block");
+  }
+  _mi_error_trace(page, block, msg);
+}
+
+
+// ------------------------------------------------------
+// Check for double free in secure and debug mode
+// This is somewhat expensive so only enabled for secure mode 4
+// ------------------------------------------------------
+
+#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
+// linear check if the free list contains a specific element
+static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
+  while (list != NULL) {
+    if (elem==list) return true;
+    list = mi_block_next(page, list);
+  }
+  return false;
+}
+
+static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
+  // The decoded value is in the same page (or NULL).
+  // Walk the free lists to verify positively if it is already freed
+  if (mi_list_contains(page, page->free, block) ||
+      mi_list_contains(page, page->local_free, block) ||
+      mi_list_contains(page, mi_page_thread_free(page), block))
+  {
+    _mi_error_trace(page, block, NULL);
+    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_usable_size_of(page,block));
+    return true;
+  }
+  return false;
+}
+
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
+  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&   // quick check: aligned pointer?
+      (n==NULL || mi_is_in_same_page(block, n)))  // quick check: in same page or NULL?
+  {
+    // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
+    // (continue in separate function to improve code generation)
+    return mi_check_is_double_freex(page, block);
+  }
+  return false;
+}
+#else
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+  MI_UNUSED(page);
+  MI_UNUSED(block);
+  return false;
+}
+#endif
+
+
 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@@ -394,7 +418,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_check_padding(page, block);
   mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
   #if (MI_DEBUG!=0)
-  memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+  memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
   #endif
 
   // huge page segments are always abandoned and can be freed immediately
@@ -450,10 +474,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
   // and push it on the free list
   if (mi_likely(local)) {
     // owning thread can free a block directly
-    mi_check_padding(page, block);
     if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    mi_check_padding(page, block);
     #if (MI_DEBUG!=0)
-    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+    memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
@@ -532,11 +556,11 @@ void mi_free(void* p) mi_attr_noexcept
   if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     mi_block_t* block = (mi_block_t*)(p);
+    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
     mi_check_padding(page, block);
-    if (mi_unlikely(mi_check_is_double_free(page,block))) return;
     mi_stat_free(page, block);
     #if (MI_DEBUG!=0)
-    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+    memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
@@ -677,7 +701,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
       size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
       memset((uint8_t*)newp + start, 0, newsize - start);
     }
-    _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
+    if (mi_likely((uintptr_t)p % MI_INTPTR_SIZE == 0)) {
+      _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
+    }
+    else {
+      _mi_memcpy(newp, p, (newsize > size ? size : newsize));
+    }
     mi_free(p); // only free if successful
   }
   return newp;
 }
diff --git a/src/options.c b/src/options.c
index ccfb051b..10953092 100644
--- a/src/options.c
+++ b/src/options.c
@@ -360,8 +360,9 @@ void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
 
 #include <dbghelp.h>
 #pragma comment(lib,"dbghelp")
-void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
-  _mi_fprintf(NULL, NULL, "trace for block %p of size %zu (%zu total in block), allocated at:\n", block, avail, bsize);
+void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
+  _mi_fprintf(NULL, NULL, "trace %s at %p of size %zu (%zub total available), backtrace:\n",
+              (msg==NULL ? "block" : msg), block, avail, bsize);
   HANDLE current_process = GetCurrentProcess();
   SymInitialize(current_process, NULL, TRUE);
   PSYMBOL_INFO info = (PSYMBOL_INFO)_malloca(sizeof(SYMBOL_INFO) + 256 * sizeof(TCHAR));
@@ -371,10 +372,10 @@ void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block
   info->SizeOfStruct = sizeof(SYMBOL_INFO);
   for (size_t i = 0; i < len && strace[i] != NULL; i++) {
     if (SymFromAddr(current_process, (DWORD64)(strace[i]), 0, info)) {
-      _mi_fprintf(NULL, NULL, "  frame %2zu: %8p: %s\n", i, strace[i], info->Name);
+      _mi_fprintf(NULL, NULL, "  %2zu: %8p: %s\n", i, strace[i], info->Name);
     }
     else {
-      _mi_fprintf(NULL, NULL, "  frame %2zu: %8p: <unknown: %x>\n", i, strace[i], GetLastError());
+      _mi_fprintf(NULL, NULL, "  %2zu: %8p: <unknown: %x>\n", i, strace[i], GetLastError());
     }
   }
 }
@@ -383,30 +384,31 @@
 #define MI_TRACE_LEN (64)
 void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
   if (_mi_preloading()) return;
-  if (!mi_recurse_enter()) return;
+  if (!mi_recurse_enter()) return;  // needed for pthreads
   void* trace[MI_TRACE_LEN];
   backtrace(trace, MI_TRACE_LEN);
-  skip += 4;
   for (size_t i = 0; i < len; i++) {
     void* p = (i + skip < MI_TRACE_LEN ? trace[i+skip] : NULL);
     strace[i] = p;
   }
   mi_recurse_exit();
 }
-void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
-  _mi_fprintf(NULL, NULL, "trace for block %p of size %zu (%zu total in block), allocated at:\n", block, avail, bsize);
-  char** names = backtrace_symbols((void**)strace, len);
+void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
+  _mi_fprintf(NULL, NULL, "trace %s at %p of size %zu (%zub total available), backtrace:\n",
+              (msg==NULL ? "block" : msg), block, avail, bsize);
+  char** names = backtrace_symbols(strace, len);
   for (size_t i = 0; i < len && strace[i] != NULL; i++) {
-    _mi_fprintf(NULL, NULL, "  frame %2zu: %8p: %s\n", i, strace[i], (names[i] == NULL ? "" : names[i]));
-  }
+    _mi_fprintf(NULL, NULL, "  %2zu: %8p: %s\n", i, strace[i], (names == NULL || names[i] == NULL ? "" : names[i]));
"" : names[i])); + } + // free(names); // avoid potential recursion and leak the trace } #else void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) { MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(skip); } -void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) { +void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) { MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(block); - MI_UNUSED(bsize); MI_UNUSED(avail); + MI_UNUSED(bsize); MI_UNUSED(avail); MI_UNUSED(msg); } #endif diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index a454669e..5e2c63dd 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -15,6 +15,7 @@ if (NOT CMAKE_BUILD_TYPE) endif() endif() + # Import mimalloc (if installed) find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR}") diff --git a/test/main-override-static.c b/test/main-override-static.c index 2944219e..42e23118 100644 --- a/test/main-override-static.c +++ b/test/main-override-static.c @@ -176,7 +176,9 @@ void mi_bins() { static void double_free1(); static void double_free2(); -static void corrupt_free(); +static void double_free3(); +static void corrupt_free1(); +static void corrupt_free2(); static void block_overflow1(); static void block_overflow2(); static void invalid_free(); @@ -192,7 +194,9 @@ int main() { // detect double frees and heap corruption // double_free1(); // double_free2(); - // corrupt_free(); + // double_free3(); + // corrupt_free1(); + // corrupt_free2(); // block_overflow1(); // block_overflow2(); // test_aslr(); @@ -281,13 +285,35 @@ static void double_free2() { fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432); } +static void double_free3() { + void* p1 = malloc(32); + void* p2 = malloc(32); + void* p3 = malloc(32); + free(p2); + free(p1); + free(p2); + free(p3); +} + +static void corrupt_free1() { + void* p1 = malloc(32); + void* p2 = malloc(32); + void* p3 = malloc(32); + free(p2); + memset(p2, 0, 8); // corrupt free list entry + mi_collect(true); + p2 = malloc(32); // should trigger corrupted free list + free(p1); + free(p2); + free(p3); +} // Try to corrupt the heap through buffer overflow #define N 256 #define SZ 64 #define OVF_SZ 32 -static void corrupt_free() { +static void corrupt_free2() { void* p[N]; // allocate for (int i = 0; i < N; i++) {