Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-03 22:19:32 +03:00)

update guarded implementation to use block tags

commit 498c92e348 (parent 0e76fe3798)
11 changed files with 161 additions and 108 deletions
@@ -116,7 +116,7 @@
     <SDLCheck>true</SDLCheck>
     <ConformanceMode>Default</ConformanceMode>
     <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-    <PreprocessorDefinitions>MI_DEBUG=1;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+    <PreprocessorDefinitions>MI_DEBUG=4;MI_DEBUG_GUARDED=1;%(PreprocessorDefinitions);</PreprocessorDefinitions>
     <CompileAs>CompileAsCpp</CompileAs>
     <SupportJustMyCode>false</SupportJustMyCode>
     <LanguageStandard>stdcpp20</LanguageStandard>
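The project-file change above enables the feature for the MSVC debug-guarded configuration. For a quick experiment outside MSVC, the same two defines can go on the compiler command line; a minimal sketch (only the MI_DEBUG and MI_DEBUG_GUARDED defines come from this diff, the exact build invocation and object name are assumptions):

    cc -DMI_DEBUG=4 -DMI_DEBUG_GUARDED=1 -Iinclude -c src/static.c -o mimalloc-guarded.o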
@@ -368,6 +368,8 @@ typedef enum mi_option_e {
   mi_option_visit_abandoned,           // allow visiting heap blocks from abandoned threads (=0)
   mi_option_debug_guarded_min,         // only used when building with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
   mi_option_debug_guarded_max,         // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
+  mi_option_debug_guarded_precise,     // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+  mi_option_debug_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
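A minimal sketch of driving the guarded options from code via the public `mi_option_set` API (assumes a library built with MI_DEBUG_GUARDED; the size values are illustrative, and note that the use-guarded predicate in this commit only consults the min/max range):

    #include <mimalloc.h>

    int main(void) {
      // guard allocations whose rounded size falls between 16 and 1024 bytes
      mi_option_set(mi_option_debug_guarded_min, 16);
      mi_option_set(mi_option_debug_guarded_max, 1024);
      mi_option_set(mi_option_debug_guarded_sample_rate, 1); // guard every matching allocation
      void* p = mi_malloc(64);  // should be placed directly in front of a guard page
      mi_free(p);
      return 0;
    }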
@@ -600,16 +600,25 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
   page->flags.x.has_aligned = has_aligned;
 }
 
 /* -------------------------------------------------------------------
   Guarded objects
 ------------------------------------------------------------------- */
 #if MI_DEBUG_GUARDED
-static inline bool mi_page_has_guarded(const mi_page_t* page) {
-  return page->flags.x.has_guarded;
+static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) {
+  const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block;
+  return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED);
 }
 
-static inline void mi_page_set_has_guarded(mi_page_t* page, bool has_guarded) {
-  page->flags.x.has_guarded = has_guarded;
+static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
+  MI_UNUSED(heap);
+  return (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max)
+          && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min));
 }
 
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
 #endif
 
 /* -------------------------------------------------------------------
   Encoding/Decoding the free list next pointers
@@ -232,6 +232,13 @@ typedef struct mi_block_s {
   mi_encoded_t next;
 } mi_block_t;
 
+#if MI_DEBUG_GUARDED
+// we always align guarded pointers in a block at an offset
+// the block `next` field is then used as a tag to distinguish regular offset-aligned blocks from guarded ones
+#define MI_BLOCK_TAG_ALIGNED  ((mi_encoded_t)(0))
+#define MI_BLOCK_TAG_GUARDED  (~MI_BLOCK_TAG_ALIGNED)
+#endif
+
 // The delayed flags are used for efficient multi-threaded free-ing
 typedef enum mi_delayed_e {
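The trick behind these tags: while a block sits on the free list, `next` holds the encoded free-list pointer, but once the block is handed out at an offset of at least `sizeof(mi_block_t)`, that first word is dead space and can carry a tag. A schematic picture of the three states (layout not to scale):

    free block:     [ encoded next | ................................ ]
    aligned alloc:  [ tag = 0  (MI_BLOCK_TAG_ALIGNED) | pad | user data ]
    guarded alloc:  [ tag = ~0 (MI_BLOCK_TAG_GUARDED) | pad | user data | guard page ]

This is also why `mi_block_ptr_is_guarded` (in the internal header above) first checks that `p` lies at an offset of at least `sizeof(mi_block_t)` before trusting the tag word.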
@@ -20,14 +20,24 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
   mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
   if (alignment > size) return false;
   if (alignment <= MI_MAX_ALIGN_SIZE) return true;
-  #if MI_DEBUG_GUARDED
-  return false;
-  #else
   const size_t bsize = mi_good_size(size);
   return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
-  #endif
 }
 
+#if MI_DEBUG_GUARDED
+static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept {
+  // use over-allocation for guarded blocks
+  mi_assert_internal(alignment > 0 && alignment < MI_BLOCK_ALIGNMENT_MAX);
+  const size_t oversize = size + alignment - 1;
+  void* base = _mi_heap_malloc_guarded(heap, oversize, zero);
+  void* p = mi_align_up_ptr(base, alignment);
+  mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size);
+  mi_assert_internal(mi_usable_size(p) >= size);
+  mi_assert_internal(_mi_is_aligned(p, alignment));
+  return p;
+}
+#endif
+
 // Fallback aligned allocation that over-allocates -- split out for better codegen
 static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
 {
@@ -68,6 +78,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
   void* aligned_p = (void*)((uintptr_t)p + adjust);
   if (aligned_p != p) {
     mi_page_set_has_aligned(page, true);
+    #if MI_DEBUG_GUARDED
+    // set the tag to aligned so mi_usable_size works with guard pages
+    if (adjust > sizeof(mi_block_t)) {
+      mi_block_t* const block = (mi_block_t*)p;
+      block->next = MI_BLOCK_TAG_ALIGNED;
+    }
+    #endif
     _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
   }
   // todo: expand padding if overallocated ?
@@ -76,10 +93,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(mi_usable_size(aligned_p)>=size);
   mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
-  #if !MI_DEBUG_GUARDED
   mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
-  #endif
 
   // now zero the block if needed
   if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
     // for the tracker, on huge aligned allocations only the memory from the start of the large block is defined
@@ -128,6 +143,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t*
   return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero);
 }
 
+
 // Primitive aligned allocation
 static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
 {
@@ -138,8 +154,13 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
     #endif
     return NULL;
   }
 
-  #if !MI_DEBUG_GUARDED
+  #if MI_DEBUG_GUARDED
+  if (offset==0 && alignment < MI_BLOCK_ALIGNMENT_MAX && mi_heap_malloc_use_guarded(heap,size)) {
+    return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero);
+  }
+  #endif
+
   // try first if there happens to be a small block available with just the right alignment
   if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
     const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
@@ -160,8 +181,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
       }
     }
   }
-  #endif
 
   // fallback to generic aligned allocation
   return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero);
 }
@@ -313,3 +333,5 @@ mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t
 mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
   return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
 }
+
+
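To see why the `size + alignment - 1` over-allocation in `mi_heap_malloc_guarded_aligned` suffices: `_mi_heap_malloc_guarded` places the oversized object so that it ends at the guard page, and `mi_align_up_ptr` then moves the returned pointer forward by at most `alignment - 1` bytes. With illustrative numbers (non-precise mode assumed):

    size = 100, alignment = 64  ->  oversize = 163
    p = align_up(base, 64) <= base + 63
    so p + 100 <= base + 163, the end of the over-allocated object

The aligned object therefore still fits in front of the guard page; only the slack between `p + size` and the protected page shrinks by up to `alignment - 1` bytes.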
src/alloc.c (116 changed lines)
@@ -31,22 +31,22 @@ terms of the MIT license. A copy of the license can be found in the file
 extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
 {
   mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
 
   // check the free list
   mi_block_t* const block = page->free;
   if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero, 0);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
 
   // pop from the free list
   page->free = mi_block_next(page, block);
   page->used++;
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
   mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
 
   #if MI_DEBUG>3
   if (page->free_is_zero && size > sizeof(*block)) {
     mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
   }
   #endif
@@ -122,9 +122,7 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz
 }
 
 #if MI_DEBUG_GUARDED
-static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
-static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment);
-static inline bool mi_heap_malloc_small_use_guarded(size_t size);
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
 #endif
 
 static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
@@ -138,7 +136,9 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   if (size == 0) { size = sizeof(void*); }
   #endif
   #if MI_DEBUG_GUARDED
-  if (mi_heap_malloc_small_use_guarded(size)) { return mi_heap_malloc_guarded(heap, size, zero); }
+  if (mi_heap_malloc_use_guarded(heap,size)) {
+    return _mi_heap_malloc_guarded(heap, size, zero);
+  }
   #endif
 
   // get page in constant time, and allocate from it
@@ -171,13 +171,15 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si
 
 // The main allocation function
 extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
   // fast path for small objects
   if mi_likely(size <= MI_SMALL_SIZE_MAX) {
     mi_assert_internal(huge_alignment == 0);
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
   #if MI_DEBUG_GUARDED
-  else if (mi_heap_malloc_use_guarded(size,huge_alignment>0)) { return mi_heap_malloc_guarded(heap, size, zero); }
+  else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) {
+    return _mi_heap_malloc_guarded(heap, size, zero);
+  }
   #endif
   else {
     // regular allocation
@@ -185,7 +187,7 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());  // heaps are thread local
   void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
   mi_track_malloc(p,size,zero);
 
   #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
@@ -602,61 +604,65 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
 }
 
 #if MI_DEBUG_GUARDED
-static inline bool mi_heap_malloc_small_use_guarded(size_t size) {
-  return (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max)
-          && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min));
-}
+// We always allocate a guarded allocation at an offset (`mi_page_has_aligned` will be true).
+// We then set the first word of the block to `0` for regular offset-aligned allocations (in `alloc-aligned.c`)
+// and the first word to `~0` for guarded allocations, so that `mi_usable_size` is correct.
 
-static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment) {
-  return (!has_huge_alignment                                             // guarded pages do not work with huge alignments at the moment
-          && _mi_option_get_fast(mi_option_debug_guarded_max) > 0         // guarded must be enabled
-          && (mi_heap_malloc_small_use_guarded(size)
-              || ((mi_good_size(size) & (_mi_os_page_size() - 1)) == 0))  // page-size multiples are always guarded so we can have a correct `mi_usable_size`
-  );
-}
+static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
+  // TODO: we can still make padding work by moving it out of the guard page area
+  mi_page_t* const page = _mi_ptr_page(block);
+  mi_page_set_has_aligned(page, true);
+  block->next = MI_BLOCK_TAG_GUARDED;
+
+  // set the guard page at the end of the block
+  mi_segment_t* const segment = _mi_page_segment(page);
+  const size_t block_size = mi_page_block_size(page);  // must use `block_size` to match `mi_free_local`
+  const size_t os_page_size = _mi_os_page_size();
+  mi_assert_internal(block_size >= obj_size + os_page_size + sizeof(mi_block_t));
+  if (block_size < obj_size + os_page_size + sizeof(mi_block_t)) {
+    // should never happen
+    mi_free(block);
+    return NULL;
+  }
+  uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size;
+  mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
+  if (segment->allow_decommit && _mi_is_aligned(guard_page, os_page_size)) {
+    _mi_os_protect(guard_page, os_page_size);
+  }
+  else {
+    _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", block, block_size);
+  }
+
+  // align the pointer just in front of the guard page
+  size_t offset = block_size - os_page_size - obj_size;
+  mi_assert_internal(offset > sizeof(mi_block_t));
+  if (offset > MI_BLOCK_ALIGNMENT_MAX) {
+    // give up on placing it right in front of the guard page if the offset is too large for unalignment
+    offset = MI_BLOCK_ALIGNMENT_MAX;
+  }
+  void* p = (uint8_t*)block + offset;
+  mi_track_align(block, p, offset, obj_size);
+  return p;
+}
 
-static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
 {
   #if defined(MI_PADDING_SIZE)
   mi_assert(MI_PADDING_SIZE==0);
   #endif
   // allocate a multiple of the page size, ending in a guard page
-  const size_t obj_size = _mi_align_up(size, MI_MAX_ALIGN_SIZE);  // ensure minimal alignment requirement
   const size_t os_page_size = _mi_os_page_size();
-  const size_t req_size = _mi_align_up(obj_size + os_page_size, os_page_size);
-  void* const block = _mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
+  const size_t obj_size = (mi_option_is_enabled(mi_option_debug_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE));  // ensure minimal alignment requirement?
+  const size_t bsize    = _mi_align_up(_mi_align_up(obj_size, MI_MAX_ALIGN_SIZE) + sizeof(mi_block_t), MI_MAX_ALIGN_SIZE);
+  const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size);
+  mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
   if (block==NULL) return NULL;
-  mi_page_t* page = _mi_ptr_page(block);
-  mi_segment_t* segment = _mi_page_segment(page);
-
-  const size_t block_size = mi_page_block_size(page);  // must use `block_size` to match `mi_free_local`
-  void* const guard_page = (uint8_t*)block + (block_size - os_page_size);
-  mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
-
-  // place the block in front of the guard page
-  size_t offset = block_size - os_page_size - obj_size;
-  if (offset > MI_BLOCK_ALIGNMENT_MAX) {
-    // give up on placing it right in front of the guard page if the offset is too large for unalignment
-    offset = MI_BLOCK_ALIGNMENT_MAX;
-  }
-  void* const p = (uint8_t*)block + offset;
-  mi_assert_internal(p>=block);
-
-  // set page flags
-  if (offset > 0) {
-    mi_page_set_has_aligned(page, true);
-  }
-
-  // set the guard page
-  if (segment->allow_decommit) {
-    mi_page_set_has_guarded(page, true);
-    _mi_os_protect(guard_page, os_page_size);
-  }
-  else {
-    _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", p, size);
-  }
+  void* const p = mi_block_ptr_set_guarded(block, obj_size);
 
   // stats
   const size_t usize = mi_usable_size(p);
   mi_assert_internal(usize >= size);
   mi_track_malloc(p, size, zero);
   #if MI_STAT>1
   if (p != NULL) {
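Working through the new size computation with concrete numbers (all constants illustrative: a 4 KiB OS page, MI_MAX_ALIGN_SIZE of 16, an 8-byte mi_block_t, precise option off):

    size       = 100
    obj_size   = _mi_align_up(100, 16)           = 112
    bsize      = _mi_align_up(112 + 8, 16)       = 128
    req_size   = _mi_align_up(128 + 4096, 4096)  = 8192
    block_size = 8192  (assuming the page's block size is an exact page multiple)
    guard_page = block + 8192 - 4096             (the block's last OS page)
    offset     = 8192 - 4096 - 112               = 3984
    p          = block + 3984, so p + obj_size ends exactly at the guard page

The `offset > sizeof(mi_block_t)` invariant is what keeps the tag word at `block->next` outside the returned object.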
src/free.c (63 changed lines)
@@ -70,20 +70,29 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
 }
 
-// forward declaration for a MI_DEBUG_GUARDED build
-static void mi_block_unguard(mi_page_t* page, mi_block_t* block);
+#if MI_DEBUG_GUARDED
+static void mi_block_unguard_prim(mi_page_t* page, mi_block_t* block, void* p);  // forward declaration
+static inline void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+  if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard_prim(page, block, p); }
+}
+#else
+static inline void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p);
+}
+#endif
 
 // free a local pointer (page parameter comes first for better codegen)
 static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
   MI_UNUSED(segment);
   mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
-  mi_block_unguard(page,block);
+  mi_block_unguard(page, block, p);
   mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
 }
 
 // free a pointer owned by another thread (page parameter comes first for better codegen)
 static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
   mi_block_t* const block = _mi_page_ptr_unalign(page, p);  // don't check `has_aligned` flag to avoid a race (issue #865)
-  mi_block_unguard(page, block);
+  mi_block_unguard(page, block, p);
   mi_free_block_mt(page, segment, block);
 }
@@ -297,20 +306,19 @@ static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* p
   const size_t size = mi_page_usable_size_of(page, block);
   const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
   mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
-  return (size - adjust);
+  const size_t aligned_size = (size - adjust);
+  #if MI_DEBUG_GUARDED
+  if (mi_block_ptr_is_guarded(block, p)) {
+    return aligned_size - _mi_os_page_size();
+  }
+  #endif
+  return aligned_size;
 }
 
 static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
   const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
   if mi_unlikely(segment==NULL) return 0;
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
-  #if MI_DEBUG_GUARDED
-  if (mi_page_has_guarded(page)) {
-    const size_t bsize = mi_page_usable_aligned_size_of(page, p);
-    mi_assert_internal(bsize > _mi_os_page_size());
-    return (bsize > _mi_os_page_size() ? bsize - _mi_os_page_size() : bsize);
-  } else
-  #endif
   if mi_likely(!mi_page_has_aligned(page)) {
     const mi_block_t* block = (const mi_block_t*)p;
     return mi_page_usable_size_of(page, block);
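Continuing the worked numbers above through the new usable-size path (padding assumed disabled, as `_mi_heap_malloc_guarded` asserts):

    mi_page_usable_size_of(page, block)          = 8192
    adjust       = (uint8_t*)p - (uint8_t*)block = 3984
    aligned_size = 8192 - 3984                   = 4208
    guarded path -> 4208 - 4096                  = 112  (= obj_size)

so `mi_usable_size` never reports bytes that lie inside the protected page.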
@@ -534,22 +542,19 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
 
 // Remove the guard page when building with MI_DEBUG_GUARDED
-#if !MI_DEBUG_GUARDED
-static void mi_block_unguard(mi_page_t* page, mi_block_t* block) {
-  MI_UNUSED(page);
-  MI_UNUSED(block);
-  // do nothing
-}
-#else
-static void mi_block_unguard(mi_page_t* page, mi_block_t* block) {
-  if (mi_page_has_guarded(page)) {
-    const size_t bsize = mi_page_block_size(page);
-    const size_t psize = _mi_os_page_size();
-    mi_assert_internal(bsize > psize);
-    mi_assert_internal(_mi_page_segment(page)->allow_decommit);
-    void* gpage = (uint8_t*)block + (bsize - psize);
-    mi_assert_internal(_mi_is_aligned(gpage, psize));
-    _mi_os_unprotect(gpage, psize);
-  }
-}
+#if MI_DEBUG_GUARDED
+static void mi_block_unguard_prim(mi_page_t* page, mi_block_t* block, void* p) {
+  mi_assert_internal(mi_block_ptr_is_guarded(block, p));
+  mi_assert_internal(mi_page_has_aligned(page));
+  mi_assert_internal((uint8_t*)p - (uint8_t*)block >= sizeof(mi_block_t));
+  mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED);
+
+  const size_t bsize = mi_page_block_size(page);
+  const size_t psize = _mi_os_page_size();
+  mi_assert_internal(bsize > psize);
+  mi_assert_internal(_mi_page_segment(page)->allow_decommit);
+  void* gpage = (uint8_t*)block + bsize - psize;
+  mi_assert_internal(_mi_is_aligned(gpage, psize));
+  _mi_os_unprotect(gpage, psize);
+}
+#endif
@@ -370,7 +370,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
   mi_assert_expensive(mi_heap_is_valid(heap));
   if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   #if MI_DEBUG_GUARDED
-  _mi_warning_message("'mi_heap_destroy' called but ignored as MI_DEBUG_GUARDED is enabled (heap at %p)\n", heap);
+  // _mi_warning_message("'mi_heap_destroy' called but MI_DEBUG_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap);
   mi_heap_delete(heap);
   return;
   #else
@@ -143,7 +143,13 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(visit_abandoned) },
 #endif
   { 0, UNINIT, MI_OPTION(debug_guarded_min) },       // only used when building with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects
-  { 0, UNINIT, MI_OPTION(debug_guarded_max) },       // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects
+  { MI_GiB, UNINIT, MI_OPTION(debug_guarded_max) },  // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects
+  { 0, UNINIT, MI_OPTION(debug_guarded_precise) },   // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+#if MI_DEBUG_GUARDED
+  { 1000, UNINIT, MI_OPTION(debug_guarded_sample_rate)},  // 1 out of N allocations in the min/max range will be guarded (=1000)
+#else
+  { 0, UNINIT, MI_OPTION(debug_guarded_sample_rate)},
+#endif
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
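As with the other entries in this table, the new options should also be reachable through mimalloc's usual environment-variable mechanism (the option name upper-cased with a `MIMALLOC_` prefix); an illustrative invocation (`./my-program` is a placeholder):

    MIMALLOC_DEBUG_GUARDED_MIN=16 MIMALLOC_DEBUG_GUARDED_MAX=1024 \
    MIMALLOC_DEBUG_GUARDED_SAMPLE_RATE=1 ./my-program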
@@ -414,9 +414,6 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
 
   // no more aligned blocks in here
   mi_page_set_has_aligned(page, false);
-  #if MI_DEBUG_GUARDED
-  mi_page_set_has_guarded(page, false);
-  #endif
 
   // remove from the page list
   // (no need to do _mi_heap_delayed_free first as all blocks are already free)
@@ -443,9 +440,6 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   mi_assert_internal(mi_page_all_free(page));
 
   mi_page_set_has_aligned(page, false);
-  #if MI_DEBUG_GUARDED
-  mi_page_set_has_guarded(page, false);
-  #endif
 
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
@@ -22,6 +22,8 @@ terms of the MIT license.
 #include <string.h>
 #include <assert.h>
 
+#define MI_DEBUG_GUARDED
+
 // > mimalloc-test-stress [THREADS] [SCALE] [ITER]
 //
 // argument defaults
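With that define in place (and the library itself compiled with MI_DEBUG_GUARDED, e.g. via the project-file change at the top of this commit), the stress test runs as before; arguments follow the usage line above, for example:

    ./mimalloc-test-stress 8 10 10    # THREADS=8, SCALE=10, ITER=10 (illustrative values)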