add _mi_os_guard_page_size

This commit is contained in:
daanx 2024-12-23 16:28:34 -08:00
parent 88d8ee964f
commit b515a0ad4c
5 changed files with 91 additions and 42 deletions

View file

@ -116,6 +116,7 @@ void _mi_os_free(void* p, size_t size, mi_memid_t memid);
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
size_t _mi_os_page_size(void);
size_t _mi_os_guard_page_size(void);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
bool _mi_os_has_virtual_reserve(void);
@ -129,6 +130,13 @@ bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_purge(void* p, size_t size);
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset);
size_t _mi_os_secure_guard_page_size(void);
bool _mi_os_secure_guard_page_set_at(void* addr, bool is_pinned);
bool _mi_os_secure_guard_page_set_before(void* addr, bool is_pinned);
bool _mi_os_secure_guard_page_reset_at(void* addr);
bool _mi_os_secure_guard_page_reset_before(void* addr);
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);

View file

@ -46,8 +46,12 @@ terms of the MIT license. A copy of the license can be found in the file
// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1
// Define MI_SECURE to enable security mitigations. The lowest two have minimal performance impact:
// Define MI_SECURE to enable security mitigations. Level 1 has minimal performance impact,
// but protects most metadata with guard pages:
// #define MI_SECURE 1 // guard page around metadata
//
// Level 2 has more performance impact but protect well against various buffer overflows
// by surrounding all mimalloc pages with guard pages:
// #define MI_SECURE 2 // guard page around each mimalloc page (can fragment VMA's with large heaps..)
//
// The next two levels can have more performance cost:
@ -126,7 +130,6 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_MEDIUM_PAGE_SIZE (8*MI_SMALL_PAGE_SIZE) // 512 KiB (=byte in the bitmap)
#define MI_LARGE_PAGE_SIZE (MI_SIZE_SIZE*MI_MEDIUM_PAGE_SIZE) // 4 MiB (=word in the bitmap)
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE (73U)
#define MI_BIN_FULL (MI_BIN_HUGE+1)

View file

@ -25,12 +25,6 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_META_PAGE_SIZE MI_ARENA_SLICE_SIZE
#define MI_META_PAGE_ALIGN MI_ARENA_SLICE_ALIGN
#if MI_SECURE
#define MI_META_PAGE_GUARD_SIZE (4*MI_KiB)
#else
#define MI_META_PAGE_GUARD_SIZE (0)
#endif
#define MI_META_BLOCK_SIZE (128) // large enough such that META_MAX_SIZE > 4k (even on 32-bit)
#define MI_META_BLOCK_ALIGN MI_META_BLOCK_SIZE
#define MI_META_BLOCKS_PER_PAGE (MI_ARENA_SLICE_SIZE / MI_META_BLOCK_SIZE) // 1024
@ -47,7 +41,7 @@ static mi_decl_cache_align _Atomic(mi_meta_page_t*) mi_meta_pages = MI_ATOMIC_V
#if MI_DEBUG > 1
static mi_meta_page_t* mi_meta_page_of_ptr(void* p, size_t* block_idx) {
mi_meta_page_t* mpage = (mi_meta_page_t*)((uint8_t*)mi_align_down_ptr(p,MI_META_PAGE_ALIGN) + MI_META_PAGE_GUARD_SIZE);
mi_meta_page_t* mpage = (mi_meta_page_t*)((uint8_t*)mi_align_down_ptr(p,MI_META_PAGE_ALIGN) + _mi_os_secure_guard_page_size());
if (block_idx != NULL) {
*block_idx = ((uint8_t*)p - (uint8_t*)mpage) / MI_META_BLOCK_SIZE;
}
@ -60,9 +54,9 @@ static mi_meta_page_t* mi_meta_page_next( mi_meta_page_t* mpage ) {
}
static void* mi_meta_block_start( mi_meta_page_t* mpage, size_t block_idx ) {
mi_assert_internal(_mi_is_aligned((uint8_t*)mpage - MI_META_PAGE_GUARD_SIZE, MI_META_PAGE_ALIGN));
mi_assert_internal(_mi_is_aligned((uint8_t*)mpage - _mi_os_secure_guard_page_size(), MI_META_PAGE_ALIGN));
mi_assert_internal(block_idx < MI_META_BLOCKS_PER_PAGE);
void* p = ((uint8_t*)mpage - MI_META_PAGE_GUARD_SIZE + (block_idx * MI_META_BLOCK_SIZE));
void* p = ((uint8_t*)mpage - _mi_os_secure_guard_page_size() + (block_idx * MI_META_BLOCK_SIZE));
mi_assert_internal(mpage == mi_meta_page_of_ptr(p,NULL));
return p;
}
@ -82,20 +76,18 @@ static mi_meta_page_t* mi_meta_page_zalloc(void) {
}
// guard pages
#if MI_SECURE
if (!memid.is_pinned) {
_mi_os_decommit(base, MI_META_PAGE_GUARD_SIZE);
_mi_os_decommit(base + MI_META_PAGE_SIZE - MI_META_PAGE_GUARD_SIZE, MI_META_PAGE_GUARD_SIZE);
}
#if MI_SECURE >= 1
_mi_os_secure_guard_page_set_at(base, memid.is_pinned);
_mi_os_secure_guard_page_set_before(base + MI_META_PAGE_SIZE, memid.is_pinned);
#endif
// initialize the page and free block bitmap
mi_meta_page_t* mpage = (mi_meta_page_t*)(base + MI_META_PAGE_GUARD_SIZE);
mi_meta_page_t* mpage = (mi_meta_page_t*)(base + _mi_os_secure_guard_page_size());
mpage->memid = memid;
mi_bitmap_init(&mpage->blocks_free, MI_META_BLOCKS_PER_PAGE, true /* already_zero */);
const size_t mpage_size = offsetof(mi_meta_page_t,blocks_free) + mi_bitmap_size(MI_META_BLOCKS_PER_PAGE, NULL);
const size_t info_blocks = _mi_divide_up(mpage_size,MI_META_BLOCK_SIZE);
const size_t guard_blocks = _mi_divide_up(MI_META_PAGE_GUARD_SIZE, MI_META_BLOCK_SIZE);
const size_t guard_blocks = _mi_divide_up(_mi_os_secure_guard_page_size(), MI_META_BLOCK_SIZE);
mi_assert_internal(info_blocks + 2*guard_blocks < MI_META_BLOCKS_PER_PAGE);
mi_bitmap_unsafe_setN(&mpage->blocks_free, info_blocks + guard_blocks, MI_META_BLOCKS_PER_PAGE - info_blocks - 2*guard_blocks);

View file

@ -576,12 +576,6 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_
return NULL;
}
#if MI_SECURE < 2
#define MI_ARENA_GUARD_PAGE_SIZE (0)
#else
#define MI_ARENA_GUARD_PAGE_SIZE (4*MI_KiB)
#endif
// Allocate a fresh page
static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment,
mi_arena_t* req_arena, size_t tseq)
@ -621,11 +615,14 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(!os_align || _mi_is_aligned((uint8_t*)page + page_alignment, block_alignment));
// guard page at the end
const size_t page_noguard_size = mi_size_of_slices(slice_count) - MI_ARENA_GUARD_PAGE_SIZE;
#if MI_SECURE >= 2
if (memid.initially_committed && !memid.is_pinned) {
_mi_os_decommit((uint8_t*)page + page_noguard_size, MI_ARENA_GUARD_PAGE_SIZE);
// guard page at the end of mimalloc page?
#if MI_SECURE < 2
const size_t page_noguard_size = mi_size_of_slices(slice_count);
#else
mi_assert(mi_size_of_slices(slice_count) > _mi_os_secure_guard_page_size());
const size_t page_noguard_size = mi_size_of_slices(slice_count) - _mi_os_secure_guard_page_size();
if (memid.initially_committed) {
_mi_os_secure_guard_page_set_at((uint8_t*)page + page_noguard_size, memid.is_pinned);
}
#endif
@ -795,7 +792,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
// we must do this since we may later allocate large spans over this page and cannot have a guard page in between
#if MI_SECURE >= 2
if (!page->memid.is_pinned) {
_mi_os_commit((uint8_t*)page + mi_memid_size(page->memid) - MI_ARENA_GUARD_PAGE_SIZE, MI_ARENA_GUARD_PAGE_SIZE, NULL);
_mi_os_secure_guard_page_reset_before((uint8_t*)page + mi_memid_size(page->memid));
}
#endif
@ -1089,7 +1086,7 @@ static size_t mi_arena_info_slices_needed(size_t slice_count, size_t* bitmap_bas
const size_t size = base_size + bitmaps_size;
const size_t os_page_size = _mi_os_page_size();
const size_t info_size = _mi_align_up(size, os_page_size) + MI_ARENA_GUARD_PAGE_SIZE;
const size_t info_size = _mi_align_up(size, os_page_size) + _mi_os_secure_guard_page_size();
const size_t info_slices = mi_slice_count_of_size(info_size);
if (bitmap_base != NULL) *bitmap_base = base_size;
@ -1105,7 +1102,6 @@ static mi_bitmap_t* mi_arena_bitmap_init(size_t slice_count, uint8_t** base) {
static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t size, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
{
mi_assert(!is_large || (memid.initially_committed && memid.is_pinned));
mi_assert(_mi_is_aligned(start,MI_ARENA_SLICE_SIZE));
mi_assert(start!=NULL);
if (start==NULL) return false;
@ -1134,17 +1130,15 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
// commit & zero if needed
if (!memid.initially_committed) {
// if MI_SECURE, leave a guard OS page decommitted at the end
_mi_os_commit(arena, mi_size_of_slices(info_slices) - MI_ARENA_GUARD_PAGE_SIZE, NULL);
// leave a guard OS page decommitted at the end
_mi_os_commit(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size(), NULL);
}
else if (!memid.is_pinned) {
#if MI_SECURE > 0
// if MI_SECURE, decommit a guard OS page at the end of the arena info
_mi_os_decommit((uint8_t*)arena + mi_size_of_slices(info_slices) - MI_ARENA_GUARD_PAGE_SIZE, MI_ARENA_GUARD_PAGE_SIZE);
#endif
else {
// if MI_SECURE, set a guard page at the end
_mi_os_secure_guard_page_set_before((uint8_t*)arena + mi_size_of_slices(info_slices), memid.is_pinned);
}
if (!memid.initially_zero) {
_mi_memzero(arena, mi_size_of_slices(info_slices) - MI_ARENA_GUARD_PAGE_SIZE);
_mi_memzero(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size());
}
// init

View file

@ -61,8 +61,16 @@ size_t _mi_os_large_page_size(void) {
return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size());
}
// Size of an OS guard page: one OS page in this implementation.
size_t _mi_os_guard_page_size(void) {
  const size_t sz = _mi_os_page_size();
  mi_assert(sz <= (MI_ARENA_SLICE_SIZE/8));  // a guard page must stay well below an arena slice
  return sz;
}
// Return the number of usable virtual address bits as reported by the OS layer.
size_t _mi_os_virtual_address_bits(void) {
  // (fix) removed a stale unconditional `return mi_os_mem_config.virtual_address_bits;`
  // left over from the pre-change version of this function; it made the
  // assertion below unreachable dead code.
  const size_t vbits = mi_os_mem_config.virtual_address_bits;
  mi_assert(vbits <= MI_MAX_VABITS);
  return vbits;
}
bool _mi_os_use_large_page(size_t size, size_t alignment) {
@ -99,6 +107,50 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
return NULL;
}
// Size of a secure-mode guard page: a full OS guard page when MI_SECURE
// is enabled, and 0 otherwise (so callers can use it unconditionally).
size_t _mi_os_secure_guard_page_size(void) {
  #if MI_SECURE > 0
  const size_t sz = _mi_os_guard_page_size();
  #else
  const size_t sz = 0;
  #endif
  return sz;
}
// In secure mode, establish a guard page at `addr` by decommitting a
// guard-page-sized area there; outputs a warning if this fails.
// Pinned memory cannot be decommitted, so with secure mode enabled this
// always fails (and warns) for pinned memory. A NULL `addr` is a no-op.
bool _mi_os_secure_guard_page_set_at(void* addr, bool is_pinned) {
  if (addr == NULL) return true;
  #if MI_SECURE > 0
  const bool ok = (is_pinned ? false : _mi_os_decommit(addr, _mi_os_secure_guard_page_size()));
  if (!ok) {
    // (fix) message said "failed to commit" but the guard page is created by *decommitting*
    _mi_error_message(EINVAL, "secure level %d, but failed to decommit guard page (at %p of size %zu)\n", MI_SECURE, addr, _mi_os_secure_guard_page_size());
  }
  return ok;
  #else
  MI_UNUSED(is_pinned);
  return true;
  #endif
}
// Like `_mi_os_secure_guard_page_set_at` but places the guard page
// immediately *before* `addr` (i.e. the guard area ends at `addr`).
bool _mi_os_secure_guard_page_set_before(void* addr, bool is_pinned) {
  uint8_t* const guard_start = (uint8_t*)addr - _mi_os_secure_guard_page_size();
  return _mi_os_secure_guard_page_set_at(guard_start, is_pinned);
}
// In secure mode, remove a guard page by recommitting the area at `addr`
// so it becomes usable memory again. A NULL `addr` is a no-op.
bool _mi_os_secure_guard_page_reset_at(void* addr) {
  bool ok = true;
  if (addr != NULL) {
    #if MI_SECURE > 0
    ok = _mi_os_commit(addr, _mi_os_secure_guard_page_size(), NULL);
    #endif
  }
  return ok;
}
// Like `_mi_os_secure_guard_page_reset_at` but removes the guard page
// located immediately *before* `addr`.
bool _mi_os_secure_guard_page_reset_before(void* addr) {
  uint8_t* const guard_start = (uint8_t*)addr - _mi_os_secure_guard_page_size();
  return _mi_os_secure_guard_page_reset_at(guard_start);
}
/* -----------------------------------------------------------
Free memory