diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index d8cc25b1..3f1280ee 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -116,7 +116,7 @@
true
Default
../../include
- MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
+ MI_DEBUG=3;%(PreprocessorDefinitions);
CompileAsCpp
false
stdcpp20
diff --git a/src/alloc.c b/src/alloc.c
index b0c89e65..25d6f62e 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -628,6 +628,9 @@ static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
return NULL;
}
uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size;
+ // note: the alignment of the guard page relies on blocks being os_page_size aligned which
+ // is ensured in `mi_arena_page_alloc_fresh`.
+ mi_assert_internal(_mi_is_aligned(block, os_page_size));
mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
if (!page->memid.is_pinned && _mi_is_aligned(guard_page, os_page_size)) {
_mi_os_protect(guard_page, os_page_size);
@@ -662,7 +665,7 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size);
mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
if (block==NULL) return NULL;
- void* const p = mi_block_ptr_set_guarded(block, obj_size);
+ void* const p = mi_block_ptr_set_guarded(block, obj_size);
// stats
mi_track_malloc(p, size, zero);
diff --git a/src/arena.c b/src/arena.c
index 24835f42..9923eae1 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -285,7 +285,7 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
}
// check arena bounds
- const size_t min_reserve = MI_ARENA_MIN_SIZE;
+ const size_t min_reserve = MI_ARENA_MIN_SIZE;
const size_t max_reserve = MI_ARENA_MAX_SIZE; // 16 GiB
if (arena_reserve < min_reserve) {
arena_reserve = min_reserve;
@@ -302,7 +302,7 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
// and try to reserve the arena
- int err = mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
+ int err = mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
if (err != 0) {
// failed, try a smaller size?
const size_t small_arena_reserve = (MI_SIZE_BITS == 32 ? 128*MI_MiB : 1*MI_GiB);
@@ -624,7 +624,23 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
if (MI_PAGE_INFO_SIZE < _mi_align_up(sizeof(*page), MI_PAGE_MIN_BLOCK_ALIGN)) {
_mi_error_message(EFAULT, "fatal internal error: MI_PAGE_INFO_SIZE is too small.\n");
};
- const size_t block_start = (os_align ? MI_PAGE_ALIGN : MI_PAGE_INFO_SIZE);
+ size_t block_start;
+ #if MI_GUARDED
// in a guarded build, we align pages whose block size is a multiple of the OS page size to the OS page size
+ // this ensures that all blocks in such pages are OS page size aligned (which is needed for the guard pages)
+ const size_t os_page_size = _mi_os_page_size();
+ mi_assert_internal(MI_PAGE_ALIGN >= os_page_size);
+ if (block_size % os_page_size == 0) {
+ block_start = _mi_align_up(MI_PAGE_INFO_SIZE, os_page_size);
+ }
+ else
+ #endif
+ if (os_align) {
+ block_start = MI_PAGE_ALIGN;
+ }
+ else {
+ block_start = MI_PAGE_INFO_SIZE;
+ }
const size_t reserved = (os_align ? 1 : (mi_size_of_slices(slice_count) - block_start) / block_size);
mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
page->reserved = (uint16_t)reserved;
diff --git a/src/init.c b/src/init.c
index 19e111d3..57be59a8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -180,7 +180,7 @@ mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t samp
if (heap->guarded_sample_rate >= 1) {
heap->guarded_sample_seed = heap->guarded_sample_seed % heap->guarded_sample_rate;
}
- heap->guarded_sample_count = heap->guarded_sample_seed; // count down samples
+ heap->guarded_sample_count = 1 + heap->guarded_sample_seed; // count down samples
}
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
diff --git a/src/libc.c b/src/libc.c
index eed63d87..0ec2164d 100644
--- a/src/libc.c
+++ b/src/libc.c
@@ -84,8 +84,8 @@ bool _mi_getenv(const char* name, char* result, size_t result_size) {
// This is mostly to avoid calling these when libc is not yet
// initialized (and to reduce dependencies)
//
-// format: d i, p x u, s
-// prec: z l ll L
+// format: d i, p, x, u, s
+// type: z l ll L
// width: 10
// align-left: -
// fill: 0
diff --git a/test/main-override-static.c b/test/main-override-static.c
index 2e7f1aca..410764bd 100644
--- a/test/main-override-static.c
+++ b/test/main-override-static.c
@@ -233,8 +233,8 @@ static void test_heap_walk(void) {
}
static void test_canary_leak(void) {
- char* p = mi_mallocn_tp(char, 23);
- for (int i = 0; i < 23; i++) {
+ char* p = mi_mallocn_tp(char, 22);
+ for (int i = 0; i < 22; i++) {
p[i] = '0'+i;
}
puts(p);
diff --git a/test/test-stress.c b/test/test-stress.c
index 915c953f..0488fc2b 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -42,7 +42,7 @@ static int SCALE = 10;
static int ITER = 10;
#elif 0
static int THREADS = 4;
-static int SCALE = 100;
+static int SCALE = 10;
static int ITER = 10;
#define ALLOW_LARGE false
#elif 0