mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 23:39:31 +03:00)

commit 0c19eb60cf: "initial working guarded pages"
parent: 7b5df14bea
12 changed files with 196 additions and 37 deletions
@@ -366,8 +366,8 @@ typedef enum mi_option_e {
   mi_option_disallow_arena_alloc,   // 1 = do not use arena's for allocation (except if using specific arena id's)
   mi_option_retry_on_oom,           // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
   mi_option_visit_abandoned,        // allow visiting heap blocks from abandoned threads (=0)
-  mi_option_debug_guarded_min,      // only when build with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
-  mi_option_debug_guarded_max,      // only when build with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
+  mi_option_debug_guarded_min,      // only used when build with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
+  mi_option_debug_guarded_max,      // only used when build with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
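For orientation, a minimal usage sketch (not part of this commit): once the library is built with MI_DEBUG_GUARDED, these two options select which request sizes receive a trailing guard page.

```c
#include <mimalloc.h>
#include <string.h>

int main(void) {
  // Guard all blocks whose rounded size falls in [16, 1024] bytes.
  // Without a MI_DEBUG_GUARDED build these calls still set the options,
  // but no guard pages are placed.
  mi_option_set(mi_option_debug_guarded_min, 16);
  mi_option_set(mi_option_debug_guarded_max, 1024);

  char* p = (char*)mi_malloc(100);  // rounded block size: 112 bytes
  memset(p, 0, 100);
  // p[112] = 0;  // would fault on the guard page instead of corrupting the heap
  mi_free(p);
  return 0;
}
```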
@@ -323,6 +323,7 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
   }
 }
 
 // Align a pointer upwards
 static inline void* mi_align_up_ptr(void* p, size_t alignment) {
   return (void*)_mi_align_up((uintptr_t)p, alignment);
@@ -594,6 +595,15 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
   page->flags.x.has_aligned = has_aligned;
 }
 
+#if MI_DEBUG_GUARDED
+static inline bool mi_page_has_guarded(const mi_page_t* page) {
+  return page->flags.x.has_guarded;
+}
+
+static inline void mi_page_set_has_guarded(mi_page_t* page, bool has_guarded) {
+  page->flags.x.has_guarded = has_guarded;
+}
+#endif
 
 /* -------------------------------------------------------------------
   Encoding/Decoding the free list next pointers
@@ -72,6 +72,12 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 #endif
 
+// Use guard pages behind objects of a certain size
+#define MI_DEBUG_GUARDED 1
+#if defined(MI_DEBUG_GUARDED) || defined(MI_DEBUG_GUARDEDX)
+#define MI_PADDING 0
+#endif
+
 // Reserve extra padding at the end of each block to be more resilient against heap block overflows.
 // The padding can detect buffer overflow on free.
 #if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
@@ -243,15 +249,17 @@ typedef union mi_page_flags_s {
   struct {
     uint8_t in_full : 1;
     uint8_t has_aligned : 1;
+    uint8_t has_guarded : 1;  // only used with MI_DEBUG_GUARDED
   } x;
 } mi_page_flags_t;
 #else
 // under thread sanitizer, use a byte for each flag to suppress warning, issue #130
 typedef union mi_page_flags_s {
-  uint16_t full_aligned;
+  uint32_t full_aligned;
   struct {
     uint8_t in_full;
     uint8_t has_aligned;
+    uint8_t has_guarded;  // only used with MI_DEBUG_GUARDED
   } x;
 } mi_page_flags_t;
 #endif
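A standalone sketch of the flag layout (assumed ABI, not commit code): the point of the union is that a single store to `full_aligned` clears every per-page flag, including the new `has_guarded` bit. It also shows why the thread-sanitizer variant widens `full_aligned` from `uint16_t` to `uint32_t`: with one byte per flag, three flags no longer fit in two bytes.

```c
#include <stdint.h>
#include <stdio.h>

// Shape of the non-TSAN variant: three single-bit flags overlaid on one byte.
typedef union page_flags_s {
  uint8_t full_aligned;
  struct {
    uint8_t in_full     : 1;
    uint8_t has_aligned : 1;
    uint8_t has_guarded : 1;
  } x;
} page_flags_t;

int main(void) {
  page_flags_t f = { 0 };
  f.x.has_guarded = 1;
  printf("raw: 0x%x\n", f.full_aligned);  // 0x4 with LSB-first bitfields (common ABIs)
  f.full_aligned = 0;                     // one store clears all three flags
  printf("guarded: %u\n", (unsigned)f.x.has_guarded);
  return 0;
}
```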
@@ -20,8 +20,12 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
   mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
   if (alignment > size) return false;
   if (alignment <= MI_MAX_ALIGN_SIZE) return true;
+  #if MI_DEBUG_GUARDED
+  return false;
+  #else
   const size_t bsize = mi_good_size(size);
   return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
+  #endif
 }
 
 // Fallback aligned allocation that over-allocates -- split out for better codegen
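Why the guarded build returns `false` here, as a worked example with assumed numbers: a guarded block is positioned so that it ends at a page boundary (see `mi_heap_malloc_guarded` further down), so its start address depends on the request size and cannot be assumed to carry the natural alignment of its size class.

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  // Assumed: 4 KiB OS page, 8 KiB backing block at a 64-byte aligned base.
  const uintptr_t base = 0x10000, fullsize = 8192, psize = 4096;
  const uintptr_t bsize = (100 + 15) & ~(uintptr_t)15;   // 100-byte request -> 112
  const uintptr_t p = base + (fullsize - psize - bsize); // block ends at the guard page
  assert(p % 16 == 0);  // minimal alignment still holds...
  assert(p % 64 != 0);  // ...but the natural size-class alignment does not
  return 0;
}
```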
@@ -38,9 +42,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
   // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
   if mi_unlikely(offset != 0) {
     // todo: cannot support offset alignment for very large alignments yet
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
     #endif
     return NULL;
   }
   oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
@@ -54,7 +58,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
     p = _mi_heap_malloc_zero(heap, oversize, zero);
     if (p == NULL) return NULL;
   }
+  mi_page_t* page = _mi_ptr_page(p);
 
   // .. and align within the allocation
   const uintptr_t align_mask = alignment - 1;  // for any x, `(x & align_mask) == (x % alignment)`
   const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
@@ -62,17 +67,18 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
   mi_assert_internal(adjust < alignment);
   void* aligned_p = (void*)((uintptr_t)p + adjust);
   if (aligned_p != p) {
-    mi_page_t* page = _mi_ptr_page(p);
     mi_page_set_has_aligned(page, true);
     _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
   }
   // todo: expand padding if overallocated ?
 
-  mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
-  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
+  mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size);
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(mi_usable_size(aligned_p)>=size);
   mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+  #if !MI_DEBUG_GUARDED
+  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
+  #endif
 
   // now zero the block if needed
   if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
@@ -133,6 +139,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
     return NULL;
   }
 
+  #if !MI_DEBUG_GUARDED
   // try first if there happens to be a small block available with just the right alignment
   if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
     const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
@@ -153,6 +160,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
       }
     }
   }
+  #endif
 
   // fallback to generic aligned allocation
   return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero);
src/alloc.c (75 changes)
@@ -40,9 +40,9 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
   page->free = mi_block_next(page, block);
   page->used++;
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
-  mi_assert_internal(_mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
+  mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
   #if MI_DEBUG>3
-  if (page->free_is_zero) {
+  if (page->free_is_zero && size > sizeof(*block)) {
     mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
   }
   #endif
@@ -56,6 +56,7 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
   if mi_unlikely(zero) {
     mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
     mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
+    mi_assert_internal(!mi_page_is_huge(page));
     if (page->free_is_zero) {
       block->next = 0;
       mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
@@ -114,6 +115,11 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz
   return _mi_page_malloc_zero(heap,page,size,true);
 }
 
+#if MI_DEBUG_GUARDED
+// forward declaration
+static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept;
+#endif
+
 static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
   mi_assert(heap != NULL);
   #if MI_DEBUG
@@ -121,9 +127,14 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
   #endif
   mi_assert(size <= MI_SMALL_SIZE_MAX);
-  #if (MI_PADDING)
+  #if (MI_PADDING || MI_DEBUG_GUARDED)
   if (size == 0) { size = sizeof(void*); }
   #endif
+  #if MI_DEBUG_GUARDED
+  if (size >= _mi_option_get_fast(mi_option_debug_guarded_min) && size <= _mi_option_get_fast(mi_option_debug_guarded_max)) {
+    return mi_heap_malloc_guarded(heap, size, zero, 0);
+  }
+  #endif
 
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
   void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
@@ -158,6 +169,14 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
     mi_assert_internal(huge_alignment == 0);
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
+  #if MI_DEBUG_GUARDED
+  else if ( huge_alignment == 0 &&  // guarded pages do not work with huge aligments at the moment
+            ((size >= _mi_option_get_fast(mi_option_debug_guarded_min) && size <= _mi_option_get_fast(mi_option_debug_guarded_max))
+             || ((size & (_mi_os_page_size()-1)) == 0)) )  // page-size multiple are always guarded so we can have a correct `mi_usable_size`.
+  {
+    return mi_heap_malloc_guarded(heap, size, zero, 0);
+  }
+  #endif
   else {
     mi_assert(heap!=NULL);
     mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
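The dispatch condition above, restated as a tiny self-checking predicate (a sketch; `gmin`, `gmax` and `psize` stand in for the option values and `_mi_os_page_size()`):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

// A request takes the guarded path when it falls inside the configured size
// window, or when it is an exact multiple of the OS page size (the latter so
// that mi_usable_size stays exact for guarded blocks).
static bool takes_guarded_path(size_t size, size_t gmin, size_t gmax, size_t psize) {
  return (size >= gmin && size <= gmax) || ((size & (psize - 1)) == 0);
}

int main(void) {
  assert( takes_guarded_path(100,  16, 1024, 4096));  // inside the window
  assert(!takes_guarded_path(2000, 16, 1024, 4096));  // outside the window
  assert( takes_guarded_path(8192, 16, 1024, 4096));  // page-size multiple
  return 0;
}
```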
@@ -578,6 +597,56 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
   }
 }
 
+#if MI_DEBUG_GUARDED
+static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
+{
+  #if defined(MI_PADDING_SIZE)
+  mi_assert(MI_PADDING_SIZE==0);
+  #endif
+  // allocate multiple of page size ending in a guard page
+  const size_t bsize = _mi_align_up(size, MI_MAX_ALIGN_SIZE); // ensure minimal alignment requirement
+  const size_t psize = _mi_os_page_size();
+  const size_t gsize = _mi_align_up(bsize + psize, psize);
+  void* const base = _mi_malloc_generic(heap, gsize, zero, huge_alignment);
+  if (base==NULL) return NULL;
+  mi_page_t* page = _mi_ptr_page(base);
+  mi_segment_t* segment = _mi_page_segment(page);
+
+  const size_t fullsize = mi_page_block_size(page); // must use `block_size` to match `mi_free_local`
+  void* const gpage = (uint8_t*)base + (fullsize - psize);
+  mi_assert_internal(_mi_is_aligned(gpage, psize));
+  void* const p = (uint8_t*)base + (fullsize - psize - bsize);
+  mi_assert_internal(p >= base);
+
+  // set page flags
+  if (p > base) { mi_page_set_has_aligned(page, true); }
+
+  // set guard page
+  if (segment->allow_decommit) {
+    mi_page_set_has_guarded(page, true);
+    _mi_os_protect(gpage, psize);
+  }
+  else {
+    _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", p, size);
+  }
+
+  // stats
+  mi_track_malloc(p, size, zero);
+  #if MI_STAT>1
+  if (p != NULL) {
+    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
+    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
+  }
+  #endif
+  #if MI_DEBUG>3
+  if (p != NULL && zero) {
+    mi_assert_expensive(mi_mem_is_zero(p, size));
+  }
+  #endif
+  return p;
+}
+#endif
+
 // ------------------------------------------------------
 // ensure explicit external inline definitions are emitted!
 // ------------------------------------------------------
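The pointer arithmetic in `mi_heap_malloc_guarded` yields this layout, with the block pushed flush against a protected page:

  base ....... p ............... gpage .................. base+fullsize
  |-- slack --|-- bsize bytes --|-- guard page (psize) --|

A self-contained check of the offsets (sketch; concrete sizes assumed):

```c
#include <assert.h>
#include <stddef.h>

int main(void) {
  // A 100-byte request with 4 KiB pages and an 8 KiB backing block.
  const size_t size = 100, psize = 4096, fullsize = 8192;
  const size_t bsize = (size + 15) & ~(size_t)15;  // _mi_align_up(size, 16) -> 112
  const size_t gpage_off = fullsize - psize;       // guard page is the last OS page
  const size_t p_off = fullsize - psize - bsize;   // block ends exactly at the guard
  assert(gpage_off % psize == 0);                  // guard page is page-aligned
  assert(p_off + bsize == gpage_off);              // a one-byte overflow hits the guard
  return 0;
}
```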
src/free.c (25 changes)
@@ -34,11 +34,21 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
   if (track_stats) { mi_stat_free(page, block); }
-  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+  #if MI_DEBUG_GUARDED
+  if (mi_page_has_guarded(page)) {
+    const size_t bsize = mi_page_block_size(page);
+    const size_t psize = _mi_os_page_size();
+    mi_assert_internal(bsize > psize);
+    mi_assert_internal(_mi_page_segment(page)->allow_decommit);
+    void* gpage = (uint8_t*)block + (bsize - psize);
+    _mi_os_unprotect(gpage, psize);
+  }
+  #endif
+  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_DEBUG_GUARDED
   memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
   #endif
   if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned
 
   // actual free: push on the local free list
   mi_block_set_next(page, block, page->local_free);
   page->local_free = block;
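On free, the guard page has to be disarmed before the block can be recycled. A POSIX-only sketch of the protect/unprotect pair that `_mi_os_protect` and `_mi_os_unprotect` wrap (assumed standalone mapping, not mimalloc code; `MAP_ANONYMOUS` may need a feature-test macro on some platforms):

```c
#include <assert.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  const size_t psize = (size_t)sysconf(_SC_PAGESIZE);
  void* mem = mmap(NULL, 2 * psize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  uint8_t* base = (uint8_t*)mem;
  mprotect(base + psize, psize, PROT_NONE);               // arm the guard page
  base[psize - 1] = 42;                                   // last writable byte: fine
  // base[psize] = 42;                                    // would SIGSEGV in the guard
  mprotect(base + psize, psize, PROT_READ | PROT_WRITE);  // disarm on "free"
  munmap(base, 2 * psize);
  return 0;
}
```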
@@ -51,8 +61,8 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
 }
 
 // Adjust a block that was allocated aligned, to the actual start of the block in the page.
 // note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
 // `page_start` and `block_size` fields; however these are constant and the page won't be
 // deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
 mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
   mi_assert_internal(page!=NULL && p!=NULL);
@@ -298,6 +308,13 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
   const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
   if mi_unlikely(segment==NULL) return 0;
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
+  #if MI_DEBUG_GUARDED
+  if (mi_page_has_guarded(page)) {
+    const size_t bsize = mi_page_usable_aligned_size_of(page, p);
+    mi_assert_internal(bsize > _mi_os_page_size());
+    return (bsize > _mi_os_page_size() ? bsize - _mi_os_page_size() : bsize);
+  } else
+  #endif
   if mi_likely(!mi_page_has_aligned(page)) {
     const mi_block_t* block = (const mi_block_t*)p;
     return mi_page_usable_size_of(page, block);
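The guarded branch subtracts the page-sized guard from the distance between `p` and the end of the block. With the same assumed numbers as in the layout sketch above:

```c
#include <assert.h>
#include <stddef.h>

int main(void) {
  const size_t psize = 4096, fullsize = 8192, bsize = 112;  // 100-byte request
  const size_t p_off  = fullsize - psize - bsize;           // block start offset
  const size_t to_end = fullsize - p_off;    // what usable_aligned_size_of reports
  assert(to_end - psize == bsize);           // guard excluded: 112 usable bytes
  return 0;
}
```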
src/heap.c (23 changes)
@@ -32,7 +32,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
   #if MI_DEBUG>1
   size_t total = heap->page_count;
   size_t count = 0;
   #endif
 
   for (size_t i = 0; i <= MI_BIN_FULL; i++) {
     mi_page_queue_t* pq = &heap->pages[i];
@@ -164,9 +164,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   if (force && is_main_thread && mi_heap_is_backing(heap)) {
     _mi_thread_data_collect();  // collect thread data cache
   }
 
   // collect arenas (this is program wide so don't force purges on abandonment of threads)
   _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
 }
 
 void _mi_heap_collect_abandon(mi_heap_t* heap) {
@@ -240,7 +240,7 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
 }
 
 mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
   // don't reclaim abandoned memory or otherwise destroy is unsafe
   return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
 }
@@ -369,7 +369,13 @@ void mi_heap_destroy(mi_heap_t* heap) {
   mi_assert(heap->no_reclaim);
   mi_assert_expensive(mi_heap_is_valid(heap));
   if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+  #if MI_DEBUG_GUARDED
+  _mi_warning_message("'mi_heap_destroy' called but ignored as MI_DEBUG_GUARDED is enabled (heap at %p)\n", heap);
+  mi_heap_delete(heap);
+  return;
+  #else
   if (!heap->no_reclaim) {
     _mi_warning_message("'mi_heap_destroy' called but ignored as the heap was not created with 'allow_destroy' (heap at %p)\n", heap);
     // don't free in case it may contain reclaimed pages
     mi_heap_delete(heap);
   }
@@ -382,6 +388,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
     _mi_heap_destroy_pages(heap);
     mi_heap_free(heap);
   }
+  #endif
 }
 
 // forcefully destroy all heaps in the current thread
@@ -537,7 +544,7 @@ void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
 static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) {
   mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
   *shift = 64 - mi_clz(divisor - 1);
   *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
 }
 
 static size_t mi_fast_divide(size_t n, uint64_t magic, size_t shift) {
|
|||
// create a bitmap of free blocks.
|
||||
#define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
|
||||
uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
|
||||
const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS);
|
||||
const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS);
|
||||
memset(free_map, 0, bmapsize * sizeof(intptr_t));
|
||||
if (page->capacity % MI_INTPTR_BITS != 0) {
|
||||
// mark left-over bits at the end as free
|
||||
|
@@ -591,7 +598,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_
   }
 
   // fast repeated division by the block size
   uint64_t magic;
   size_t shift;
   mi_get_fast_divisor(bsize, &magic, &shift);
@@ -665,7 +672,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
   mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
   mi_heap_area_ex_t xarea;
   xarea.page = page;
   _mi_heap_area_init(&xarea.area, page);
   return fun(heap, &xarea, arg);
 }
@@ -99,8 +99,8 @@ static mi_option_desc_t options[_mi_option_last] =
 #else
   { 0, UNINIT, MI_OPTION(visit_abandoned) },
 #endif
-  { 0, UNINIT, MI_OPTION(debug_guarded_min) },  // only when build with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
-  { 0, UNINIT, MI_OPTION(debug_guarded_max) },  // only when build with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
+  { 0, UNINIT, MI_OPTION(debug_guarded_min) },  // only used when built with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
+  { 0, UNINIT, MI_OPTION(debug_guarded_max) },  // only used when built with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
@@ -110,8 +110,7 @@ static bool mi_option_has_size_in_kib(mi_option_t option) {
 }
 
 void _mi_options_init(void) {
-  // called on process load; should not be called before the CRT is initialized!
-  // (e.g. do not call this from process_init as that may run before CRT initialization)
+  // called on process load
   mi_add_stderr_output(); // now it safe to use stderr for output
   for(int i = 0; i < _mi_option_last; i++ ) {
     mi_option_t option = (mi_option_t)i;
@@ -124,6 +123,14 @@ void _mi_options_init(void) {
   }
   mi_max_error_count = mi_option_get(mi_option_max_errors);
   mi_max_warning_count = mi_option_get(mi_option_max_warnings);
+  #if MI_DEBUG_GUARDED
+  if (mi_option_get(mi_option_debug_guarded_max) > 0) {
+    if (mi_option_is_enabled(mi_option_allow_large_os_pages)) {
+      mi_option_disable(mi_option_allow_large_os_pages);
+      _mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n");
+    }
+  }
+  #endif
 }
 
 long _mi_option_get_fast(mi_option_t option) {
@@ -168,6 +175,13 @@ void mi_option_set(mi_option_t option, long value) {
   mi_assert(desc->option == option);  // index should match the option
   desc->value = value;
   desc->init = INITIALIZED;
+  // ensure min/max range; be careful to not recurse.
+  if (desc->option == mi_option_debug_guarded_min && _mi_option_get_fast(mi_option_debug_guarded_max) < value) {
+    mi_option_set(mi_option_debug_guarded_max, value);
+  }
+  else if (desc->option == mi_option_debug_guarded_max && _mi_option_get_fast(mi_option_debug_guarded_min) > value) {
+    mi_option_set(mi_option_debug_guarded_min, value);
+  }
 }
 
 void mi_option_set_default(mi_option_t option, long value) {
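The observable effect of the new clamping (a sketch assuming a MI_DEBUG_GUARDED build): pushing one end of the range past the other drags the other end along, so min <= max always holds and the recursion stops after one step.

```c
#include <assert.h>
#include <mimalloc.h>

int main(void) {
  mi_option_set(mi_option_debug_guarded_max, 256);
  mi_option_set(mi_option_debug_guarded_min, 512);            // min raised above max...
  assert(mi_option_get(mi_option_debug_guarded_max) == 512);  // ...pulls max up with it
  return 0;
}
```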
@@ -517,11 +531,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
     value = (size > LONG_MAX ? LONG_MAX : (long)size);
   }
   if (*end == 0) {
-    desc->value = value;
-    desc->init = INITIALIZED;
-    if (desc->option == mi_option_debug_guarded_min && _mi_option_get_fast(mi_option_debug_guarded_max) < value) {
-      mi_option_set(mi_option_debug_guarded_max,value);
-    }
+    mi_option_set(desc->option, value);
   }
   else {
     // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
@@ -414,6 +414,9 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
 
   // no more aligned blocks in here
   mi_page_set_has_aligned(page, false);
+  #if MI_DEBUG_GUARDED
+  mi_page_set_has_guarded(page, false);
+  #endif
 
   // remove from the page list
   // (no need to do _mi_heap_delayed_free first as all blocks are already free)
@@ -440,6 +443,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   mi_assert_internal(mi_page_all_free(page));
 
   mi_page_set_has_aligned(page, false);
+  #if MI_DEBUG_GUARDED
+  mi_page_set_has_guarded(page, false);
+  #endif
 
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
@@ -912,7 +918,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-  if mi_unlikely(zero && page->block_size == 0) {
+  if mi_unlikely(zero && mi_page_is_huge(page)) {
     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
     void* p = _mi_page_malloc(heap, page, size);
     mi_assert_internal(p != NULL);
@@ -448,13 +448,18 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
 
 static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_t* pre_size, size_t* info_size)
 {
   const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
   size_t guardsize = 0;
   size_t isize = 0;
 
   if (MI_SECURE == 0) {
     // normally no guard pages
+    #if MI_DEBUG_GUARDED
+    isize = _mi_align_up(minsize, _mi_os_page_size());
+    #else
     isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
+    #endif
   }
   else {
     // in secure mode, we set up a protected page in between the segment info
@@ -462,7 +467,7 @@ static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_
     const size_t page_size = _mi_os_page_size();
     isize = _mi_align_up(minsize, page_size);
     guardsize = page_size;
-    required = _mi_align_up(required, page_size);
+    //required = _mi_align_up(required, isize + guardsize);
   }
 
   if (info_size != NULL) *info_size = isize;
@@ -11,6 +11,7 @@ static void double_free1();
 static void double_free2();
 static void corrupt_free();
 static void block_overflow1();
+static void block_overflow2();
 static void invalid_free();
 static void test_aslr(void);
 static void test_process_info(void);
@@ -28,6 +29,7 @@ int main() {
   // double_free2();
   // corrupt_free();
   // block_overflow1();
+  block_overflow2();
   // test_aslr();
   // invalid_free();
   // test_reserved();
@@ -76,6 +78,12 @@ static void block_overflow1() {
   free(p);
 }
 
+static void block_overflow2() {
+  uint8_t* p = (uint8_t*)mi_malloc(16);
+  p[17] = 0;
+  free(p);
+}
+
 // The double free samples come ArcHeap [1] by Insu Yun (issue #161)
 // [1]: https://arxiv.org/pdf/1903.00503.pdf
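In a MI_DEBUG_GUARDED build the 16-byte block in `block_overflow2` sits flush against a protected page, so the out-of-bounds write `p[17]` lands in the guard page and faults immediately; in a regular build the same write is at best caught later by the padding check on free.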
@@ -65,6 +65,15 @@ bool mem_is_zero(uint8_t* p, size_t size) {
 int main(void) {
   mi_option_disable(mi_option_verbose);
 
+  CHECK_BODY("malloc-aligned9a") {  // test large alignments
+    void* p = mi_zalloc_aligned(1024 * 1024, 2);
+    mi_free(p);
+    p = mi_zalloc_aligned(1024 * 1024, 2);
+    mi_free(p);
+    result = true;
+  };
+
   // ---------------------------------------------------
   // Malloc
   // ---------------------------------------------------
@@ -157,6 +166,7 @@ int main(void) {
     printf("malloc_aligned5: usable size: %zi\n", usable);
     mi_free(p);
   };
+  /*
   CHECK_BODY("malloc-aligned6") {
     bool ok = true;
     for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) {
@@ -174,6 +184,7 @@ int main(void) {
     }
     result = ok;
   };
+  */
   CHECK_BODY("malloc-aligned7") {
     void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX);
     mi_free(p);
@@ -189,7 +200,7 @@ int main(void) {
     }
     result = ok;
   };
-  CHECK_BODY("malloc-aligned9") {
+  CHECK_BODY("malloc-aligned9") {  // test large alignments
     bool ok = true;
     void* p[8];
     size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };