https://github.com/microsoft/mimalloc.git
fix MI_GUARDED build
commit c478ddaab4 (parent 2a1c346281)
7 changed files with 30 additions and 11 deletions
@@ -116,7 +116,7 @@
       <SDLCheck>true</SDLCheck>
       <ConformanceMode>Default</ConformanceMode>
       <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-      <PreprocessorDefinitions>MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+      <PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
       <CompileAs>CompileAsCpp</CompileAs>
       <SupportJustMyCode>false</SupportJustMyCode>
       <LanguageStandard>stdcpp20</LanguageStandard>
@@ -628,6 +628,9 @@ static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
     return NULL;
   }
   uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size;
+  // note: the alignment of the guard page relies on blocks being os_page_size aligned which
+  // is ensured in `mi_arena_page_alloc_fresh`.
+  mi_assert_internal(_mi_is_aligned(block, os_page_size));
   mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
   if (!page->memid.is_pinned && _mi_is_aligned(guard_page, os_page_size)) {
     _mi_os_protect(guard_page, os_page_size);
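For context, the MI_GUARDED scheme places the object at the end of its block and protects the block's final OS page, so an overflow past the object faults immediately. A minimal standalone sketch of that idea with POSIX mmap/mprotect — illustrative only, not mimalloc's implementation (guarded_alloc is a made-up name):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

// Illustrative: allocate `size` bytes that end exactly at an inaccessible
// guard page, so any write past the object faults immediately.
static void* guarded_alloc(size_t size) {
  const size_t psize = (size_t)sysconf(_SC_PAGESIZE);
  const size_t total = ((size + psize - 1) / psize + 1) * psize;  // data + guard
  uint8_t* base = mmap(NULL, total, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;
  uint8_t* guard_page = base + total - psize;  // last page is the guard
  mprotect(guard_page, psize, PROT_NONE);      // analogous to _mi_os_protect
  return guard_page - size;                    // object ends at the guard page
}

int main(void) {
  char* p = guarded_alloc(100);
  memset(p, 'x', 100);  // in bounds: fine
  // p[100] = 'x';      // would hit the guard page and fault
  printf("object at %p\n", (void*)p);
  return 0;
}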
@@ -662,7 +665,7 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
   const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size);
   mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
   if (block==NULL) return NULL;
   void* const p = mi_block_ptr_set_guarded(block, obj_size);

   // stats
   mi_track_malloc(p, size, zero);
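The request size here always includes one extra OS page for the guard, rounded up to a page boundary. A worked example of that arithmetic, assuming 4 KiB pages and a 100-byte block size (both values illustrative):

#include <stdio.h>

static size_t align_up(size_t n, size_t a) { return (n + a - 1) / a * a; }

int main(void) {
  const size_t os_page_size = 4096;  // assumed page size
  const size_t bsize = 100;          // illustrative block size for the object
  const size_t req_size = align_up(bsize + os_page_size, os_page_size);
  printf("req_size = %zu\n", req_size);  // 8192: one data page + one guard page
  return 0;
}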
src/arena.c (22 changes)
@@ -285,7 +285,7 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
   }

   // check arena bounds
   const size_t min_reserve = MI_ARENA_MIN_SIZE;
   const size_t max_reserve = MI_ARENA_MAX_SIZE;  // 16 GiB
   if (arena_reserve < min_reserve) {
     arena_reserve = min_reserve;
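The surrounding logic simply clamps the requested reservation into [MI_ARENA_MIN_SIZE, MI_ARENA_MAX_SIZE]. A hedged sketch of that clamp with stand-in constants (the real bounds come from the mimalloc headers):

#include <stddef.h>

// Stand-in bounds; MI_ARENA_MIN_SIZE / MI_ARENA_MAX_SIZE are the real ones.
#define ARENA_MIN_SIZE (8u * 1024 * 1024)            // e.g. 8 MiB
#define ARENA_MAX_SIZE (16ull * 1024 * 1024 * 1024)  // 16 GiB, per the comment

static size_t clamp_arena_reserve(size_t arena_reserve) {
  if (arena_reserve < ARENA_MIN_SIZE)      arena_reserve = ARENA_MIN_SIZE;
  else if (arena_reserve > ARENA_MAX_SIZE) arena_reserve = ARENA_MAX_SIZE;
  return arena_reserve;
}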
@@ -302,7 +302,7 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
   else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }

   // and try to reserve the arena
   int err = mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
   if (err != 0) {
     // failed, try a smaller size?
     const size_t small_arena_reserve = (MI_SIZE_BITS == 32 ? 128*MI_MiB : 1*MI_GiB);
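The shape of this code is try-then-fall-back: attempt the full reservation, and if the OS refuses, retry once with a much smaller size (128 MiB on 32-bit, 1 GiB on 64-bit). A compilable sketch of that shape; os_reserve is a hypothetical stand-in for mi_reserve_os_memory_ex:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical stand-in for mi_reserve_os_memory_ex (returns 0 on success).
static int os_reserve(size_t size, bool commit, bool allow_large) {
  (void)size; (void)commit; (void)allow_large;
  return 0;  // pretend the reservation succeeded
}

static int reserve_with_fallback(size_t arena_reserve, bool commit, bool allow_large) {
  int err = os_reserve(arena_reserve, commit, allow_large);
  if (err != 0) {
    // failed, try a smaller size (mirrors small_arena_reserve above)
    const size_t small_reserve =
      (sizeof(size_t) == 4 ? 128u * 1024 * 1024 : 1024u * 1024 * 1024);
    if (small_reserve < arena_reserve) {
      err = os_reserve(small_reserve, commit, allow_large);
    }
  }
  return err;
}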
@@ -624,7 +624,23 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
   if (MI_PAGE_INFO_SIZE < _mi_align_up(sizeof(*page), MI_PAGE_MIN_BLOCK_ALIGN)) {
     _mi_error_message(EFAULT, "fatal internal error: MI_PAGE_INFO_SIZE is too small.\n");
   };
-  const size_t block_start = (os_align ? MI_PAGE_ALIGN : MI_PAGE_INFO_SIZE);
+  size_t block_start;
+  #if MI_GUARDED
+  // in a guarded build, pages whose block size is a multiple of the OS page size are aligned
+  // to the OS page size; this ensures all blocks in such pages are OS page aligned (as guard pages require)
+  const size_t os_page_size = _mi_os_page_size();
+  mi_assert_internal(MI_PAGE_ALIGN >= os_page_size);
+  if (block_size % os_page_size == 0) {
+    block_start = _mi_align_up(MI_PAGE_INFO_SIZE, os_page_size);
+  }
+  else
+  #endif
+  if (os_align) {
+    block_start = MI_PAGE_ALIGN;
+  }
+  else {
+    block_start = MI_PAGE_INFO_SIZE;
+  }
   const size_t reserved = (os_align ? 1 : (mi_size_of_slices(slice_count) - block_start) / block_size);
   mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
   page->reserved = (uint16_t)reserved;
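To see why this fixes the guarded build: blocks begin at block_start and are block_size apart, so if the page is OS-page aligned, block_start is rounded up to a page boundary, and block_size is a multiple of the page size, then every block is OS-page aligned — exactly what mi_block_ptr_set_guarded asserts above. A worked example with assumed sizes (the MI_PAGE_INFO_SIZE stand-in of 128 is illustrative):

#include <stdio.h>

static size_t align_up(size_t n, size_t a) { return (n + a - 1) / a * a; }

int main(void) {
  const size_t os_page_size   = 4096;  // assumed
  const size_t page_info_size = 128;   // illustrative stand-in for MI_PAGE_INFO_SIZE
  const size_t block_size     = 8192;  // a multiple of the OS page size

  size_t block_start;
  if (block_size % os_page_size == 0) {
    block_start = align_up(page_info_size, os_page_size);  // 4096: page aligned
  }
  else {
    block_start = page_info_size;  // 128, as before the fix
  }
  // Blocks sit at offsets 4096, 12288, 20480, ... within the page: all page
  // aligned, so the last OS page of any block can be protected as a guard page.
  for (size_t i = 0; i < 3; i++) {
    printf("block %zu at offset %zu\n", i, block_start + i * block_size);
  }
  return 0;
}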
@@ -180,7 +180,7 @@ mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t samp
   if (heap->guarded_sample_rate >= 1) {
     heap->guarded_sample_seed = heap->guarded_sample_seed % heap->guarded_sample_rate;
   }
-  heap->guarded_sample_count = heap->guarded_sample_seed;      // count down samples
+  heap->guarded_sample_count = 1 + heap->guarded_sample_seed;  // count down samples
 }

 mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
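The `1 +` matters because guarded_sample_count is used as a countdown toward the next sampled allocation: with a seed of 0, a count of 0 would only trigger after wraparound, while 1 + seed makes the (seed+1)-th allocation the first sample. A hedged sketch of such a countdown, with illustrative field names:

#include <stdbool.h>
#include <stdio.h>

// Illustrative countdown sampler: the allocation on which `count` reaches
// zero is sampled, then the countdown restarts at `rate`.
typedef struct { size_t rate; size_t count; } sampler_t;

static bool should_sample(sampler_t* s) {
  if (s->rate == 0) return false;  // sampling disabled
  if (--s->count == 0) { s->count = s->rate; return true; }
  return false;
}

int main(void) {
  // count = 1 + seed: with seed == 0 the first allocation is sampled;
  // count = seed would underflow on the first decrement when seed == 0.
  sampler_t s = { .rate = 4, .count = 1 + 0 /* seed */ };
  for (int i = 1; i <= 10; i++) {
    if (should_sample(&s)) printf("allocation %d sampled\n", i);  // 1, 5, 9
  }
  return 0;
}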
@@ -84,8 +84,8 @@ bool _mi_getenv(const char* name, char* result, size_t result_size) {
 // This is mostly to avoid calling these when libc is not yet
 // initialized (and to reduce dependencies)
 //
-// format:      d i, p x u, s
-// prec:        z l ll L
+// format:      d i, p, x, u, s
+// type:        z l ll L
 // width:       10
 // align-left:  -
 // fill:        0
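The corrected comment lists the format subset implemented by mimalloc's minimal formatter: conversions d, i, p, x, u, s; size types z, l, ll, L; a numeric width; '-' for left alignment; '0' for zero fill. Since these are a subset of standard printf, the same directives can be demonstrated with libc's snprintf:

#include <stdio.h>

int main(void) {
  char buf[64];
  // width 10 with z type, left-aligned string, zero-filled hex:
  snprintf(buf, sizeof(buf), "%10zu|%-8s|%08x", (size_t)42, "heap", 0xbeefu);
  puts(buf);  // prints: "        42|heap    |0000beef"
  return 0;
}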
@@ -233,8 +233,8 @@ static void test_heap_walk(void) {
 }

 static void test_canary_leak(void) {
-  char* p = mi_mallocn_tp(char, 23);
-  for (int i = 0; i < 23; i++) {
+  char* p = mi_mallocn_tp(char, 22);
+  for (int i = 0; i < 22; i++) {
     p[i] = '0'+i;
   }
   puts(p);
@@ -42,7 +42,7 @@ static int SCALE = 10;
 static int ITER = 10;
 #elif 0
 static int THREADS = 4;
-static int SCALE = 100;
+static int SCALE = 10;
 static int ITER = 10;
 #define ALLOW_LARGE false
 #elif 0