Compare commits

...

16 commits

Author SHA1 Message Date
Nathaniel Brough
51bbb18319 Merge f40cd9cd60 into 951538d469 2025-04-12 04:17:41 +02:00
Daan
951538d469 fix prctl.h includes for alpine linux/musl (hopefully fixes #1065, #1066, #1067) 2025-04-08 13:56:31 -07:00
Daan
bc8eca8bf2 typo 2025-04-02 12:09:09 -07:00
daanx
8c99ac1bbd fix typo 2025-04-02 11:16:33 -07:00
daanx
d767dbfbb4 use C++ compilation with clang-cl (as well as msvc) on Windows 2025-04-02 10:50:36 -07:00
daanx
5a58df6534 fix signed compare warning 2025-04-02 10:40:30 -07:00
daanx
3c3600f85f add atomic_cas_ptr_strong_acq_rel 2025-04-02 10:36:01 -07:00
daanx
235a0390ee refactor numa_node_count 2025-04-02 10:34:00 -07:00
Daan
77b622511a fix alpine compilation with prctl.h (issue #1059) 2025-03-31 14:44:46 -07:00
Daan
e1110cdb9f nicer cmake logic for windows override test 2025-03-31 11:02:10 -07:00
Daan
a9e9467429 make dynamic override test verbose 2025-03-31 11:00:05 -07:00
Daan
07743454e5 fix dynamic override test on non-windows platforms 2025-03-31 10:57:16 -07:00
Daan
cbab63f6c9 fix release build warning (unused mi_stat_total_print) 2025-03-30 16:15:27 -07:00
Daan
f40cd9cd60 Fix signedness warning 2024-12-31 11:03:44 -08:00
Daan
9f4bd23085 Fix build with ubsan and tsan 2024-12-31 11:03:18 -08:00
Nathaniel Brough
611bf61d82 test: Add a fuzzing harness for heap allocator (Signed-off-by: Nathaniel Brough <nathaniel.brough@gmail.com>) 2024-08-29 17:41:56 +10:00
9 changed files with 232 additions and 51 deletions

CMakeLists.txt

@@ -173,7 +173,8 @@ if(CMAKE_C_COMPILER_ID MATCHES "Intel")
   list(APPEND mi_cflags -Wall)
 endif()

-if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel")
+# force C++ compilation with msvc or clang-cl to use modern C++ atomics
+if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel" OR MI_CLANG_CL)
   set(MI_USE_CXX "ON")
 endif()
@@ -724,21 +725,19 @@ if (MI_BUILD_TESTS)
   if(MI_BUILD_SHARED AND NOT (MI_TRACK_ASAN OR MI_DEBUG_TSAN OR MI_DEBUG_UBSAN))
     add_executable(mimalloc-test-stress-dynamic test/test-stress.c)
     target_compile_definitions(mimalloc-test-stress-dynamic PRIVATE ${mi_defines} "USE_STD_MALLOC=1")
-    if(WIN32)
-      target_compile_definitions(mimalloc-test-stress-dynamic PRIVATE "MI_LINK_VERSION=1")
-    endif()
     target_compile_options(mimalloc-test-stress-dynamic PRIVATE ${mi_cflags})
     target_include_directories(mimalloc-test-stress-dynamic PRIVATE include)
-    target_link_libraries(mimalloc-test-stress-dynamic PRIVATE mimalloc ${mi_libraries}) # mi_version
     if(WIN32)
-      add_test(NAME test-stress-dynamic COMMAND ${CMAKE_COMMAND} -E env MIMALLOC_SHOW_STATS=1 $<TARGET_FILE:mimalloc-test-stress-dynamic>)
+      target_compile_definitions(mimalloc-test-stress-dynamic PRIVATE "MI_LINK_VERSION=1") # link mi_version
+      target_link_libraries(mimalloc-test-stress-dynamic PRIVATE mimalloc ${mi_libraries}) # link mi_version
+      add_test(NAME test-stress-dynamic COMMAND ${CMAKE_COMMAND} -E env MIMALLOC_VERBOSE=1 $<TARGET_FILE:mimalloc-test-stress-dynamic>)
     else()
       if(APPLE)
         set(LD_PRELOAD "DYLD_INSERT_LIBRARIES")
       else()
         set(LD_PRELOAD "LD_PRELOAD")
       endif()
-      add_test(NAME test-stress-dynamic COMMAND ${CMAKE_COMMAND} -E env MIMALLOC_SHOW_STATS=1 ${LD_PRELOAD}=$<TARGET_FILE:mimalloc> $<TARGET_FILE:mimalloc-test-stress-dynamic>)
+      add_test(NAME test-stress-dynamic COMMAND ${CMAKE_COMMAND} -E env MIMALLOC_VERBOSE=1 ${LD_PRELOAD}=$<TARGET_FILE:mimalloc> $<TARGET_FILE:mimalloc-test-stress-dynamic>)
     endif()
   endif()
 endif()
@@ -760,3 +759,25 @@ if (MI_OVERRIDE)
     endif()
   endif()
 endif()
+
+# -----------------------------------------------------------------------------
+# Build fuzz tests
+# -----------------------------------------------------------------------------
+if(DEFINED ENV{LIB_FUZZING_ENGINE})
+  set(FUZZING_ENGINE $ENV{LIB_FUZZING_ENGINE})
+  set(FUZZING_COMPILE_FLAGS "")
+  set(FUZZING_LINK_FLAGS "${FUZZING_ENGINE}")
+else()
+  set(FUZZING_COMPILE_FLAGS "-fsanitize=fuzzer,address")
+  set(FUZZING_LINK_FLAGS "-fsanitize=fuzzer,address")
+endif()
+
+if(('${CMAKE_CXX_COMPILER_ID}' STREQUAL 'Clang') AND NOT MI_DEBUG_TSAN)
+  ## fuzz testing
+  add_executable(mimalloc-test-fuzz test/test-fuzz.c)
+  target_compile_definitions(mimalloc-test-fuzz PRIVATE ${mi_defines})
+  target_compile_options(mimalloc-test-fuzz PRIVATE ${mi_cflags} "${FUZZING_COMPILE_FLAGS}")
+  target_include_directories(mimalloc-test-fuzz PRIVATE include)
+  target_link_libraries(mimalloc-test-fuzz PRIVATE mimalloc ${mi_libraries} "${FUZZING_LINK_FLAGS}")
+endif()

include/mimalloc/atomic.h

@@ -111,6 +111,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des)
 #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,(tp*)des)
 #define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,(tp*)x)
 #define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x)
@@ -120,6 +121,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des)
 #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des)
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,des)
 #define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,x)
 #define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x)
@@ -303,6 +305,7 @@ static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p,
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
 #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
+#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
 #define mi_atomic_exchange_ptr_relaxed(tp,p,x) (tp*)mi_atomic_exchange_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
 #define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
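Note: the three hunks above add the same macro to each of the pointer-macro variants in atomic.h (the C++ cast path, the C11 path, and the MSVC path), giving callers a strong compare-and-swap on pointers with acquire-release ordering. As a standalone illustration only (plain C11 atomics, not mimalloc's macro layer; node_t and cas_ptr_strong_acq_rel are made-up names), a strong pointer CAS with acq_rel success ordering looks like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct node_s { int value; } node_t;

// Roughly what the new mi_atomic_cas_ptr_strong_acq_rel provides on the C11 path:
// a strong CAS with acquire-release ordering on success and acquire on failure.
static bool cas_ptr_strong_acq_rel(_Atomic(node_t*)* p, node_t** expected, node_t* desired) {
  return atomic_compare_exchange_strong_explicit(p, expected, desired,
                                                 memory_order_acq_rel,
                                                 memory_order_acquire);
}

int main(void) {
  static node_t a = { 1 }, b = { 2 };
  _Atomic(node_t*) slot = &a;
  node_t* expect = &a;
  if (cas_ptr_strong_acq_rel(&slot, &expect, &b)) {
    printf("swapped, slot now holds %d\n", atomic_load(&slot)->value);  // prints 2
  }
  return 0;
}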

include/mimalloc/internal.h

@@ -140,9 +140,11 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t
 void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
 bool _mi_os_use_large_page(size_t size, size_t alignment);
 size_t _mi_os_large_page_size(void);
 void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
+int _mi_os_numa_node_count(void);
+int _mi_os_numa_node(void);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
 void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid);
@@ -813,24 +815,6 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
   return x;
 }

-// -------------------------------------------------------------------
-// Optimize numa node access for the common case (= one node)
-// -------------------------------------------------------------------
-
-int _mi_os_numa_node_get(void);
-size_t _mi_os_numa_node_count_get(void);
-
-extern mi_decl_hidden _Atomic(size_t) _mi_numa_node_count;
-
-static inline int _mi_os_numa_node(void) {
-  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
-  else return _mi_os_numa_node_get();
-}
-
-static inline size_t _mi_os_numa_node_count(void) {
-  const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
-  if mi_likely(count > 0) { return count; }
-  else return _mi_os_numa_node_count_get();
-}

 // -----------------------------------------------------------------------

readme.md

@@ -84,7 +84,7 @@ Enjoy!

 ### Releases

-* 2025-03-28, `v1.9.3`, `v2.2.3`, `v3.0.3` (beta): Various small bug and build fixes, including:
+* 2025-03-28, `v1.9.3`, `v2.2.3`, `v3.0.3` (beta) : Various small bug and build fixes, including:
   fix arm32 pre v7 builds, fix mingw build, get runtime statistics, improve statistic commit counts,
   fix execution on non BMI1 x64 systems.
 * 2025-03-06, `v1.9.2`, `v2.2.2`, `v3.0.2-beta`: Various small bug and build fixes.
@@ -177,7 +177,7 @@ mimalloc is used in various large scale low-latency services and programs, for e
 Open `ide/vs2022/mimalloc.sln` in Visual Studio 2022 and build.
 The `mimalloc-lib` project builds a static library (in `out/msvc-x64`), while the
-`mimalloc-override-dll` project builds DLL for overriding malloc
+`mimalloc-override-dll` project builds a DLL for overriding malloc
 in the entire program.

 ## Linux, macOS, BSD, etc.

src/arena.c

@@ -1007,17 +1007,17 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t
   if (pages == 0) return 0;

   // pages per numa node
-  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
-  if (numa_count <= 0) numa_count = 1;
+  int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count());
+  if (numa_count == 0) numa_count = 1;
   const size_t pages_per = pages / numa_count;
   const size_t pages_mod = pages % numa_count;
   const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);

   // reserve evenly among numa nodes
-  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
+  for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
     size_t node_pages = pages_per; // can be 0
-    if (numa_node < pages_mod) node_pages++;
-    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
+    if ((size_t)numa_node < pages_mod) node_pages++;
+    int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per);
     if (err) return err;
     if (pages < node_pages) {
       pages = 0;
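Note on the surrounding logic (unchanged by this hunk): pages_per and pages_mod split the requested pages evenly over the nodes, with the first pages_mod nodes getting one extra page. A tiny standalone sketch with hypothetical numbers (10 pages over 4 nodes gives 3, 3, 2, 2):

#include <stddef.h>
#include <stdio.h>

int main(void) {
  const size_t pages = 10;                        // hypothetical request
  const int numa_count = 4;                       // hypothetical node count
  const size_t pages_per = pages / numa_count;    // 2
  const size_t pages_mod = pages % numa_count;    // 2
  for (int numa_node = 0; numa_node < numa_count; numa_node++) {
    size_t node_pages = pages_per;
    if ((size_t)numa_node < pages_mod) node_pages++;   // first two nodes get one extra
    printf("node %d: %zu pages\n", numa_node, node_pages);
  }
  return 0;
}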

src/os.c

@@ -696,34 +696,47 @@ static void mi_os_free_huge_os_pages(void* p, size_t size) {
   }
 }

 /* ----------------------------------------------------------------------------
 Support NUMA aware allocation
 -----------------------------------------------------------------------------*/

-_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count
+static _Atomic(size_t) mi_numa_node_count; // = 0 // cache the node count

-size_t _mi_os_numa_node_count_get(void) {
-  size_t count = mi_atomic_load_acquire(&_mi_numa_node_count);
-  if (count <= 0) {
+int _mi_os_numa_node_count(void) {
+  size_t count = mi_atomic_load_acquire(&mi_numa_node_count);
+  if mi_unlikely(count == 0) {
     long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
-    if (ncount > 0) {
+    if (ncount > 0 && ncount < INT_MAX) {
       count = (size_t)ncount;
     }
     else {
-      count = _mi_prim_numa_node_count(); // or detect dynamically
-      if (count == 0) count = 1;
+      const size_t n = _mi_prim_numa_node_count(); // or detect dynamically
+      if (n == 0 || n > INT_MAX) { count = 1; }
+      else { count = n; }
     }
-    mi_atomic_store_release(&_mi_numa_node_count, count); // save it
+    mi_atomic_store_release(&mi_numa_node_count, count); // save it
     _mi_verbose_message("using %zd numa regions\n", count);
   }
-  return count;
+  mi_assert_internal(count > 0 && count <= INT_MAX);
+  return (int)count;
 }

-int _mi_os_numa_node_get(void) {
-  size_t numa_count = _mi_os_numa_node_count();
+static int mi_os_numa_node_get(void) {
+  int numa_count = _mi_os_numa_node_count();
   if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
   // never more than the node count and >= 0
-  size_t numa_node = _mi_prim_numa_node();
+  const size_t n = _mi_prim_numa_node();
+  int numa_node = (n < INT_MAX ? (int)n : 0);
   if (numa_node >= numa_count) { numa_node = numa_node % numa_count; }
-  return (int)numa_node;
+  return numa_node;
+}
+
+int _mi_os_numa_node(void) {
+  if mi_likely(mi_atomic_load_relaxed(&mi_numa_node_count) == 1) {
+    return 0;
+  }
+  else {
+    return mi_os_numa_node_get();
+  }
 }
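Note: this refactor moves the single-node fast path out of internal.h (see the removed inline functions above) and into os.c: _mi_os_numa_node returns 0 immediately when the cached count is exactly one, and otherwise (including the not-yet-detected case) falls through to mi_os_numa_node_get. The underlying detect-once-and-cache pattern, as a standalone sketch with a hypothetical detect_node_count() standing in for the real detection:

#include <stdatomic.h>
#include <stdio.h>

static int detect_node_count(void) { return 2; }   // hypothetical; pretend this is expensive

static _Atomic(size_t) cached_count;               // 0 means "not detected yet"

static int node_count(void) {
  size_t count = atomic_load_explicit(&cached_count, memory_order_acquire);
  if (count == 0) {                                // slow path: detect and publish once
    count = (size_t)detect_node_count();
    if (count == 0) count = 1;
    atomic_store_explicit(&cached_count, count, memory_order_release);
  }
  return (int)count;
}

int main(void) {
  printf("nodes: %d\n", node_count());             // detects and caches
  printf("nodes: %d\n", node_count());             // served from the cache
  return 0;
}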

src/prim/unix/prim.c

@@ -31,10 +31,10 @@ terms of the MIT license. A copy of the license can be found in the file
 #if defined(__linux__)
   #include <features.h>
-  #include <linux/prctl.h>  // PR_SET_VMA
-  //#if defined(MI_NO_THP)
-  #include <sys/prctl.h>    // THP disable
-  //#endif
+  #include <sys/prctl.h>    // THP disable, PR_SET_VMA
+  #if !defined(PR_SET_VMA)
+  #include <linux/prctl.h>
+  #endif
   #if defined(__GLIBC__)
   #include <linux/mman.h>  // linux mmap flags
   #else
@@ -208,7 +208,7 @@ static int unix_madvise(void* addr, size_t size, int advice) {
 static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) {
   void* p = mmap(addr, size, protect_flags, flags, fd, 0 /* offset */);
-  #if (defined(__linux__) && defined(PR_SET_VMA))
+  #if defined(__linux__) && defined(PR_SET_VMA)
   if (p!=MAP_FAILED && p!=NULL) {
     prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "mimalloc");
   }
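Note: this include reordering is the Alpine/musl fix referenced in the commit messages above (#1059, #1065-#1067). <sys/prctl.h> is the header that declares the prctl() function on glibc and musl alike, and on current libcs it already provides PR_SET_VMA, so <linux/prctl.h> is now pulled in only as a fallback when that constant is missing instead of unconditionally alongside it. A standalone, Linux-only sketch of the same guarded usage (illustrative, not part of this diff; the mapping name "example-anon" is made up):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>        // declares prctl(); usually defines PR_SET_VMA too
#if !defined(PR_SET_VMA)
#include <linux/prctl.h>      // fallback for the constants only
#endif

int main(void) {
  const size_t size = 1 << 20;
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
  #if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
  // name the anonymous mapping so it shows up in /proc/self/maps;
  // ignore the result since older kernels reject PR_SET_VMA_ANON_NAME
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "example-anon");
  #endif
  munmap(p, size);
  return 0;
}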

src/stats.c

@@ -218,12 +218,14 @@ static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int
   _mi_fprintf(out, arg, "\n");
 }

+#if MI_STAT>1
 static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) {
   _mi_fprintf(out, arg, "%10s:", msg);
   _mi_fprintf(out, arg, "%12s", " "); // no peak
   mi_print_amount(stat->total, unit, out, arg);
   _mi_fprintf(out, arg, "\n");
 }
+#endif

 static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) {
   _mi_fprintf(out, arg, "%10s:", msg);

test/fuzz-random-alloc.c (new file, 158 lines added)

@@ -0,0 +1,158 @@
#include <mimalloc.h>
#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#define MAX_ALLOC (1024 * 512)
#define MAX_COUNT 32
#define ALLOCATION_POINTERS 1024
#define DEBUG 0
#define debug_print(fmt, ...) \
do { if (DEBUG) fprintf(stderr, fmt, __VA_ARGS__); } while (0)
typedef enum {
CALLOC = 0,
FREE,
MALLOC,
MALLOCN,
REALLOC,
REALLOCF,
REALLOCN,
ZALLOC,
LAST_NOP,
} allocation_op_t;
typedef struct {
uint32_t count;
uint32_t size;
} arg_t;
typedef struct {
// The index of the pointer to apply this operation to.
uint32_t index;
// The arguments to use in the operation.
arg_t any_arg;
// The type of operation to apply.
allocation_op_t type;
} fuzzing_operation_t;
void debug_print_operation(fuzzing_operation_t *operation) {
const char *names[] = {"CALLOC", "FREE", "MALLOC", "MALLOCN", "REALLOC", "REALLOCF", "REALLOCN", "ZALLOC", "LAST_NOP"};
debug_print("index: %d, arg.count: %d, arg.size: %d, type: %s\n", operation->index, operation->any_arg.count, operation->any_arg.size, names[operation->type]);
}
#define FUZZING_OPERATION_DATA_SIZE sizeof(fuzzing_operation_t)
int init_fuzzing_operation(fuzzing_operation_t* out, const uint8_t* fuzzed_data, size_t len) {
fuzzing_operation_t result = {0, {0,0},FREE};
// Return a free operation if we don't have enough data to construct
// a full operation.
if(sizeof(fuzzing_operation_t) > len) {
*out = result;
return 0;
}
// Randomly populate operation using fuzzed data.
memcpy(&result, fuzzed_data, sizeof(fuzzing_operation_t));
// Fix up bounds for args and indices. Randomly copying fuzzed data may result
// in out of bounds indices or the fuzzer trying to allocate way too much data.
result.index %= ALLOCATION_POINTERS;
result.any_arg.count %= MAX_COUNT;
result.any_arg.size %= MAX_ALLOC;
result.type = (uint8_t)result.type % (uint8_t)LAST_NOP;
*out = result;
return sizeof(fuzzing_operation_t);
}
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
mi_heap_t * heap = mi_heap_new();
void* allocation_ptrs[ALLOCATION_POINTERS] = {NULL};
for(size_t i = 0; i < size; i = i + FUZZING_OPERATION_DATA_SIZE) {
fuzzing_operation_t operation = {0, {0,0}, FREE};
init_fuzzing_operation(&operation, data + i, size - i);
debug_print_operation(&operation);
switch(operation.type) {
case CALLOC:
if(allocation_ptrs[operation.index] == NULL) {
debug_print("%s\n","CALLOC");
allocation_ptrs[operation.index] = mi_heap_calloc(heap, operation.any_arg.count, operation.any_arg.size);
} else {
debug_print("%s\n","CALLOC conditions not met");
}
break;
case FREE:
// Can be a valid ptr or NULL, so we don't need to check first.
mi_free(allocation_ptrs[operation.index]);
allocation_ptrs[operation.index] = NULL;
break;
case MALLOC:
if(allocation_ptrs[operation.index] == NULL){
debug_print("%s\n","MALLOC");
allocation_ptrs[operation.index] = mi_heap_malloc(heap, operation.any_arg.size);
} else {
debug_print("%s\n","MALLOC conditions not met");
}
break;
case MALLOCN:
if(allocation_ptrs[operation.index] == NULL){
debug_print("%s\n","MALLOCN");
allocation_ptrs[operation.index] = mi_heap_mallocn(heap, operation.any_arg.count, operation.any_arg.size);
} else {
debug_print("%s\n","MALLOCN conditions not met");
}
break;
case REALLOC:
if(allocation_ptrs[operation.index] != NULL){
debug_print("%s\n","REALLOC");
allocation_ptrs[operation.index] = mi_heap_realloc(heap, allocation_ptrs[operation.index], operation.any_arg.size);
} else {
debug_print("%s\n","REALLOC conditions not met");
}
break;
case REALLOCN:
if(allocation_ptrs[operation.index] != NULL){
debug_print("%s\n","REALLOCN");
allocation_ptrs[operation.index] = mi_heap_reallocn(heap, allocation_ptrs[operation.index], operation.any_arg.count, operation.any_arg.size);
} else {
debug_print("%s\n","REALLOCN conditions not met");
}
break;
case REALLOCF:
if(allocation_ptrs[operation.index] != NULL){
debug_print("%s\n","REALLOCF");
allocation_ptrs[operation.index] = mi_heap_reallocf(heap, allocation_ptrs[operation.index], operation.any_arg.size);
} else {
debug_print("%s\n","REALLOCF conditions not met");
}
break;
case ZALLOC:
if(allocation_ptrs[operation.index] == NULL){
debug_print("%s\n","ZALLOC");
allocation_ptrs[operation.index] = mi_heap_zalloc(heap, operation.any_arg.size);
} else {
debug_print("%s\n","ZALLOC conditions not met");
}
break;
case LAST_NOP:
// No-op
break;
default:
mi_heap_destroy(heap);
exit(1);
break;
}
}
mi_heap_destroy(heap);
return 0;
}
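Note: when reproducing a crash found by this harness it can be handy to replay a single saved input without linking a fuzzing engine. A hypothetical replay driver (not part of this PR), assuming it is compiled together with the harness above and linked against mimalloc:

// repro-main.c (hypothetical): replay one corpus/crash file through the harness
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

extern int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

int main(int argc, char **argv) {
  if (argc < 2) { fprintf(stderr, "usage: %s <input-file>\n", argv[0]); return 1; }
  FILE *f = fopen(argv[1], "rb");
  if (f == NULL) { perror("fopen"); return 1; }
  fseek(f, 0, SEEK_END);
  long len = ftell(f);
  fseek(f, 0, SEEK_SET);
  if (len < 0) { fclose(f); return 1; }
  uint8_t *buf = malloc(len > 0 ? (size_t)len : 1);
  if (buf == NULL) { fclose(f); return 1; }
  size_t n = fread(buf, 1, (size_t)len, f);
  fclose(f);
  LLVMFuzzerTestOneInput(buf, n);   // replay one saved crash/corpus input
  free(buf);
  return 0;
}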