diff --git a/CMakeLists.txt b/CMakeLists.txt
index 87837026..5fc1808e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,7 +26,7 @@ option(MI_BUILD_OBJECT "Build object library" ON)
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
-option(MI_DEBUG_GUARDED "Build with guard pages behind certain object allocations (implies MI_NO_PADDING=ON)" OFF)
+option(MI_GUARDED "Build with guard pages behind certain object allocations (implies MI_NO_PADDING=ON)" OFF)
option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" OFF)
option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." OFF)
option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version" OFF)
@@ -207,9 +207,9 @@ if(MI_TRACK_ETW)
endif()
endif()
-if(MI_DEBUG_GUARDED)
- message(STATUS "Compile guard pages behind certain object allocations (MI_DEBUG_GUARDED=ON)")
- list(APPEND mi_defines MI_DEBUG_GUARDED=1)
+if(MI_GUARDED)
+ message(STATUS "Compile guard pages behind certain object allocations (MI_GUARDED=ON)")
+ list(APPEND mi_defines MI_GUARDED=1)
if(NOT MI_NO_PADDING)
message(STATUS " Disabling padding due to guard pages (MI_NO_PADDING=ON)")
set(MI_NO_PADDING ON)
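
A minimal sketch of the build-time effect (not part of this patch): configuring with -DMI_GUARDED=ON now adds the compile definition MI_GUARDED=1 (and forces MI_NO_PADDING=ON per the hunk above), so sources test the renamed macro:

    /* sketch: the renamed build-time define replaces MI_DEBUG_GUARDED */
    #if MI_GUARDED
      /* guard pages are placed behind sampled object allocations; padding is compiled out */
    #endif
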
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 4455dfeb..e4361f98 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -117,8 +117,8 @@ jobs:
CC: clang
CXX: clang
BuildType: debug-guarded-clang
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=RelWithDebInfo -DMI_DEBUG_FULL=ON -DMI_DEBUG_GUARDED=ON
-
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=RelWithDebInfo -DMI_DEBUG_FULL=ON -DMI_GUARDED=ON
+
steps:
- task: CMake@1
inputs:
@@ -129,7 +129,7 @@ jobs:
- script: ctest --verbose --timeout 180
workingDirectory: $(BuildType)
displayName: CTest
- env:
+ env:
MIMALLOC_DEBUG_GUARDED_MAX: 1024
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-ubuntu-$(BuildType)
diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index 5a614289..160f1436 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -116,7 +116,7 @@
true
Default
../../include
- MI_DEBUG=4;MI_DEBUG_GUARDED=1;%(PreprocessorDefinitions);
+ MI_DEBUG=4;MI_GUARDED=1;%(PreprocessorDefinitions);
CompileAsCpp
false
stdcpp20
diff --git a/include/mimalloc.h b/include/mimalloc.h
index 940284b6..4ecb8be0 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -290,7 +290,7 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t a
#endif
-// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
+// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
// Used for example for separate interpreters in one process.
typedef void* mi_subproc_id_t;
mi_decl_export mi_subproc_id_t mi_subproc_main(void);
@@ -349,7 +349,7 @@ typedef enum mi_option_e {
mi_option_deprecated_segment_cache,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
- mi_option_deprecated_segment_reset,
+ mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10)
mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
@@ -366,10 +366,10 @@ typedef enum mi_option_e {
mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's)
mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0)
- mi_option_debug_guarded_min, // only used when building with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
- mi_option_debug_guarded_max, // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
- mi_option_debug_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
- mi_option_debug_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
+ mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0)
+ mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0)
+ mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+ mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
_mi_option_last,
// legacy option names
mi_option_large_os_pages = mi_option_allow_large_os_pages,
@@ -539,7 +539,7 @@ template struct _mi_heap_stl_allocator_common : publi
protected:
std::shared_ptr heap;
template friend struct _mi_heap_stl_allocator_common;
-
+
_mi_heap_stl_allocator_common() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
@@ -556,7 +556,7 @@ private:
template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common {
using typename _mi_heap_stl_allocator_common::size_type;
mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called
- mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
+ mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { }
mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
@@ -573,7 +573,7 @@ template bool operator!=(const mi_heap_stl_allocator& x,
template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common {
using typename _mi_heap_stl_allocator_common::size_type;
mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called
- mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
+ mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { }
mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
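
Under the new names, the guarded-object options can be set like any other mimalloc option. A minimal sketch (not part of this patch; the values are illustrative and assume a library built with MI_GUARDED=ON):

    #include <mimalloc.h>

    int main(void) {
      mi_option_set(mi_option_guarded_min, 16);         // smallest rounded object size to guard
      mi_option_set(mi_option_guarded_max, 1024);       // largest rounded object size to guard
      mi_option_set(mi_option_guarded_sample_rate, 1);  // guard every allocation in that range
      void* p = mi_malloc(256);                         // candidate for placement in front of a guard page
      mi_free(p);
      return 0;
    }
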
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index d73532e0..ae3a3358 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -603,7 +603,7 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
/* -------------------------------------------------------------------
Guarded objects
------------------------------------------------------------------- */
-#if MI_DEBUG_GUARDED
+#if MI_GUARDED
static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) {
const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block;
return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED);
@@ -611,8 +611,8 @@ static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void*
static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
MI_UNUSED(heap);
- return (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max)
- && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min));
+ return (size <= (size_t)_mi_option_get_fast(mi_option_guarded_max)
+ && size >= (size_t)_mi_option_get_fast(mi_option_guarded_min));
}
mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index e01754e2..29ba8564 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -74,8 +74,8 @@ terms of the MIT license. A copy of the license can be found in the file
// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
// Padding should be disabled when using guard pages
-// #define MI_DEBUG_GUARDED 1
-#if defined(MI_DEBUG_GUARDED)
+// #define MI_GUARDED 1
+#if defined(MI_GUARDED)
#define MI_PADDING 0
#endif
@@ -232,7 +232,7 @@ typedef struct mi_block_s {
mi_encoded_t next;
} mi_block_t;
-#if MI_DEBUG_GUARDED
+#if MI_GUARDED
// we always align guarded pointers in a block at an offset
// the block `next` field is then used as a tag to distinguish regular offset aligned blocks from guarded ones
#define MI_BLOCK_TAG_ALIGNED ((mi_encoded_t)(0))
@@ -257,7 +257,6 @@ typedef union mi_page_flags_s {
struct {
uint8_t in_full : 1;
uint8_t has_aligned : 1;
- uint8_t has_guarded : 1; // only used with MI_DEBUG_GUARDED
} x;
} mi_page_flags_t;
#else
@@ -267,7 +266,6 @@ typedef union mi_page_flags_s {
struct {
uint8_t in_full;
uint8_t has_aligned;
- uint8_t has_guarded; // only used with MI_DEBUG_GUARDED
} x;
} mi_page_flags_t;
#endif
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 12815689..86b13dea 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -21,10 +21,10 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
if (alignment > size) return false;
if (alignment <= MI_MAX_ALIGN_SIZE) return true;
const size_t bsize = mi_good_size(size);
- return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
+ return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
}
-#if MI_DEBUG_GUARDED
+#if MI_GUARDED
static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept {
// use over-allocation for guarded blocks
mi_assert_internal(alignment > 0 && alignment < MI_BLOCK_ALIGNMENT_MAX);
@@ -69,7 +69,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
if (p == NULL) return NULL;
}
mi_page_t* page = _mi_ptr_page(p);
-
+
// .. and align within the allocation
const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)`
const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
@@ -78,7 +78,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
void* aligned_p = (void*)((uintptr_t)p + adjust);
if (aligned_p != p) {
mi_page_set_has_aligned(page, true);
- #if MI_DEBUG_GUARDED
+ #if MI_GUARDED
// set tag to aligned so mi_usable_size works with guard pages
if (adjust > sizeof(mi_block_t)) {
mi_block_t* const block = (mi_block_t*)p;
@@ -94,7 +94,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
mi_assert_internal(mi_usable_size(aligned_p)>=size);
mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
-
+
// now zero the block if needed
if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
// for the tracker, on huge aligned allocations only from the start of the large block is defined
@@ -115,27 +115,27 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t*
{
mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
// we don't allocate more than MI_MAX_ALLOC_SIZE (see )
- if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) {
+ if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) {
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
return NULL;
}
-
+
// use regular allocation if it is guaranteed to fit the alignment constraints.
// this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exists
// a page with the right block size, and if we always use the over-alloc fallback that would never happen.
if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
- const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0;
+ const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0;
if mi_likely(is_aligned_or_null) {
return p;
}
else {
// this should never happen if the `mi_malloc_is_naturally_aligned` check is correct..
mi_assert(false);
- mi_free(p);
+ mi_free(p);
}
}
@@ -155,16 +155,16 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
return NULL;
}
- #if MI_DEBUG_GUARDED
+ #if MI_GUARDED
if (offset==0 && alignment < MI_BLOCK_ALIGNMENT_MAX && mi_heap_malloc_use_guarded(heap,size)) {
return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero);
}
#endif
-
+
// try first if there happens to be a small block available with just the right alignment
if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
- const size_t padsize = size + MI_PADDING_SIZE;
+ const size_t padsize = size + MI_PADDING_SIZE;
mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
if mi_likely(page->free != NULL) {
const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
@@ -181,7 +181,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
}
}
}
-
+
// fallback to generic aligned allocation
return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero);
}
diff --git a/src/alloc.c b/src/alloc.c
index b4713ff1..561b0026 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -121,7 +121,7 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz
return _mi_page_malloc_zero(heap,page,size,true);
}
-#if MI_DEBUG_GUARDED
+#if MI_GUARDED
mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
#endif
@@ -132,12 +132,12 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
const uintptr_t tid = _mi_thread_id();
mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
#endif
- #if (MI_PADDING || MI_DEBUG_GUARDED)
+ #if (MI_PADDING || MI_GUARDED)
if (size == 0) { size = sizeof(void*); }
#endif
- #if MI_DEBUG_GUARDED
- if (mi_heap_malloc_use_guarded(heap,size)) {
- return _mi_heap_malloc_guarded(heap, size, zero);
+ #if MI_GUARDED
+ if (mi_heap_malloc_use_guarded(heap,size)) {
+ return _mi_heap_malloc_guarded(heap, size, zero);
}
#endif
@@ -176,9 +176,9 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
mi_assert_internal(huge_alignment == 0);
return mi_heap_malloc_small_zero(heap, size, zero);
}
- #if MI_DEBUG_GUARDED
- else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) {
- return _mi_heap_malloc_guarded(heap, size, zero);
+ #if MI_GUARDED
+ else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) {
+ return _mi_heap_malloc_guarded(heap, size, zero);
}
#endif
else {
@@ -603,7 +603,7 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
}
}
-#if MI_DEBUG_GUARDED
+#if MI_GUARDED
// We always allocate a guarded allocation at an offset (`mi_page_has_aligned` will be true).
// We then set the first word of the block to `0` for regular offset aligned allocations (in `alloc-aligned.c`)
// and the first word to `~0` for guarded allocations to have a correct `mi_usable_size`
@@ -653,7 +653,7 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
// allocate multiple of page size ending in a guard page
// ensure minimal alignment requirement?
const size_t os_page_size = _mi_os_page_size();
- const size_t obj_size = (mi_option_is_enabled(mi_option_debug_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE));
+ const size_t obj_size = (mi_option_is_enabled(mi_option_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE));
const size_t bsize = _mi_align_up(_mi_align_up(obj_size, MI_MAX_ALIGN_SIZE) + sizeof(mi_block_t), MI_MAX_ALIGN_SIZE);
const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size);
mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
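
A worked example (not part of this patch) of the size computation above in _mi_heap_malloc_guarded, assuming 4 KiB OS pages, MI_MAX_ALIGN_SIZE = 16, sizeof(mi_block_t) = 8, and guarded_precise = 0:

    #include <stddef.h>
    #include <stdio.h>

    static size_t align_up(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }  // a is a power of two

    int main(void) {
      const size_t os_page_size = 4096, max_align = 16, block_header = 8;   // assumed values
      const size_t size     = 100;                                          // requested size
      const size_t obj_size = align_up(size, max_align);                    // 112 (guarded_precise=0 rounds up)
      const size_t bsize    = align_up(align_up(obj_size, max_align) + block_header, max_align);  // 128
      const size_t req_size = align_up(bsize + os_page_size, os_page_size); // 8192: block plus trailing guard page
      printf("obj=%zu bsize=%zu req=%zu\n", obj_size, bsize, req_size);
      return 0;
    }
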
diff --git a/src/free.c b/src/free.c
index 73c05c87..afbafae6 100644
--- a/src/free.c
+++ b/src/free.c
@@ -33,8 +33,8 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
// checks
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
- if (track_stats) { mi_stat_free(page, block); }
- #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_DEBUG_GUARDED
+ if (track_stats) { mi_stat_free(page, block); }
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster than mi_usable_size as we already know the page and that p is unaligned
@@ -69,14 +69,14 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
return (mi_block_t*)((uintptr_t)p - adjust);
}
-// forward declaration for a MI_DEBUG_GUARDED build
-#if MI_DEBUG_GUARDED
-static void mi_block_unguard_prim(mi_page_t* page, mi_block_t* block, void* p); // forward declaration
-static inline void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
- if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard_prim(page, block, p); }
+// forward declaration for a MI_GUARDED build
+#if MI_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p); // forward declaration
+static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard(page, block, p); }
}
#else
-static inline void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p);
}
#endif
@@ -85,14 +85,14 @@ static inline void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
MI_UNUSED(segment);
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
- mi_block_unguard(page, block, p);
+ mi_block_check_unguard(page, block, p);
mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
}
// free a pointer owned by another thread (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
- mi_block_unguard(page, block, p);
+ mi_block_check_unguard(page, block, p);
mi_free_block_mt(page, segment, block);
}
@@ -109,17 +109,17 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
{
MI_UNUSED(msg);
-#if (MI_DEBUG>0)
- if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
+ #if (MI_DEBUG>0)
+ if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0 && !mi_option_is_enabled(mi_option_guarded_precise)) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
-#endif
+ #endif
mi_segment_t* const segment = _mi_ptr_segment(p);
if mi_unlikely(segment==NULL) return segment;
-#if (MI_DEBUG>0)
+ #if (MI_DEBUG>0)
if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
@@ -127,13 +127,13 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
-#endif
-#if (MI_DEBUG>0 || MI_SECURE>=4)
+ #endif
+ #if (MI_DEBUG>0 || MI_SECURE>=4)
if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
return NULL;
}
-#endif
+ #endif
return segment;
}
@@ -307,7 +307,7 @@ static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* p
const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
const size_t aligned_size = (size - adjust);
- #if MI_DEBUG_GUARDED
+ #if MI_GUARDED
if (mi_block_ptr_is_guarded(block, p)) {
return aligned_size - _mi_os_page_size();
}
@@ -318,7 +318,7 @@ static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* p
static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
if mi_unlikely(segment==NULL) return 0;
- const mi_page_t* const page = _mi_segment_page_of(segment, p);
+ const mi_page_t* const page = _mi_segment_page_of(segment, p);
if mi_likely(!mi_page_has_aligned(page)) {
const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
@@ -541,12 +541,12 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#endif
-// Remove guard page when building with MI_DEBUG_GUARDED
-#if MI_DEBUG_GUARDED
-static void mi_block_unguard_prim(mi_page_t* page, mi_block_t* block, void* p) {
+// Remove guard page when building with MI_GUARDED
+#if MI_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
mi_assert_internal(mi_block_ptr_is_guarded(block, p));
mi_assert_internal(mi_page_has_aligned(page));
- mi_assert_internal((uint8_t*)p - (uint8_t*)block >= sizeof(mi_block_t));
+ mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t));
mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED);
const size_t bsize = mi_page_block_size(page);
@@ -555,6 +555,6 @@ static void mi_block_unguard_prim(mi_page_t* page, mi_block_t* block, void* p) {
mi_assert_internal(_mi_page_segment(page)->allow_decommit);
void* gpage = (uint8_t*)block + bsize - psize;
mi_assert_internal(_mi_is_aligned(gpage, psize));
- _mi_os_unprotect(gpage, psize);
+ _mi_os_unprotect(gpage, psize);
}
#endif
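
Since mi_page_usable_aligned_size_of subtracts an OS page for guarded blocks (see the hunk above), the usable size reported for a guarded pointer excludes the trailing guard page, and freeing it re-opens that page via mi_block_check_unguard/mi_block_unguard. A minimal sketch (not part of this patch), assuming a build with MI_GUARDED=ON and a sample rate that guards this allocation:

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      void* p = mi_malloc(100);
      printf("usable size: %zu\n", mi_usable_size(p));  // >= 100; the guard page itself is not counted
      mi_free(p);                                       // unprotects the guard page before the block is freed
      return 0;
    }
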
diff --git a/src/heap.c b/src/heap.c
index eb0ab991..78ebcd1e 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -369,8 +369,8 @@ void mi_heap_destroy(mi_heap_t* heap) {
mi_assert(heap->no_reclaim);
mi_assert_expensive(mi_heap_is_valid(heap));
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
- #if MI_DEBUG_GUARDED
- // _mi_warning_message("'mi_heap_destroy' called but MI_DEBUG_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap);
+ #if MI_GUARDED
+ // _mi_warning_message("'mi_heap_destroy' called but MI_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap);
mi_heap_delete(heap);
return;
#else
diff --git a/src/options.c b/src/options.c
index 3d9017f1..c5f1e2a1 100644
--- a/src/options.c
+++ b/src/options.c
@@ -47,9 +47,9 @@ typedef struct mi_option_desc_s {
#define MI_OPTION(opt) mi_option_##opt, #opt, NULL
#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy
-// Some options can be set at build time for statically linked libraries
+// Some options can be set at build time for statically linked libraries
// (use `-DMI_EXTRA_CPPDEFS="opt1=val1;opt2=val2"`)
-//
+//
// This is useful if we cannot pass them as environment variables
// (and setting them programmatically would be too late)
@@ -102,17 +102,17 @@ static mi_option_desc_t options[_mi_option_last] =
{ MI_DEFAULT_VERBOSE, UNINIT, MI_OPTION(verbose) },
// some of the following options are experimental and not all combinations are allowed.
- { MI_DEFAULT_EAGER_COMMIT,
+ { MI_DEFAULT_EAGER_COMMIT,
UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
- { MI_DEFAULT_ARENA_EAGER_COMMIT,
+ { MI_DEFAULT_ARENA_EAGER_COMMIT,
UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux)
{ 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit)
- { MI_DEFAULT_ALLOW_LARGE_OS_PAGES,
+ { MI_DEFAULT_ALLOW_LARGE_OS_PAGES,
UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
- { MI_DEFAULT_RESERVE_HUGE_OS_PAGES,
+ { MI_DEFAULT_RESERVE_HUGE_OS_PAGES,
UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
{-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
- { MI_DEFAULT_RESERVE_OS_MEMORY,
+ { MI_DEFAULT_RESERVE_OS_MEMORY,
UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`)
{ 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
@@ -137,18 +137,18 @@ static mi_option_desc_t options[_mi_option_last] =
{ 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
{ MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
{ 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
-#if defined(MI_VISIT_ABANDONED)
+#if defined(MI_VISIT_ABANDONED)
{ 1, INITIALIZED, MI_OPTION(visit_abandoned) }, // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim.
#else
- { 0, UNINIT, MI_OPTION(visit_abandoned) },
+ { 0, UNINIT, MI_OPTION(visit_abandoned) },
#endif
- { 0, UNINIT, MI_OPTION(debug_guarded_min) }, // only used when building with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects
- { MI_GiB, UNINIT, MI_OPTION(debug_guarded_max) }, // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects
- { 0, UNINIT, MI_OPTION(debug_guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
-#if MI_DEBUG_GUARDED
- { 1000,UNINIT, MI_OPTION(debug_guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded(= 1000)
+ { 0, UNINIT, MI_OPTION(guarded_min) }, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects
+ { MI_GiB, UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects
+ { 0, UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+#if MI_GUARDED
+ { 1000,UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded(= 1000)
#else
- { 0, UNINIT, MI_OPTION(debug_guarded_sample_rate)},
+ { 0, UNINIT, MI_OPTION(guarded_sample_rate)},
#endif
};
@@ -172,25 +172,25 @@ void _mi_options_init(void) {
}
mi_max_error_count = mi_option_get(mi_option_max_errors);
mi_max_warning_count = mi_option_get(mi_option_max_warnings);
- #if MI_DEBUG_GUARDED
- if (mi_option_get(mi_option_debug_guarded_max) > 0) {
+ #if MI_GUARDED
+ if (mi_option_get(mi_option_guarded_max) > 0) {
if (mi_option_is_enabled(mi_option_allow_large_os_pages)) {
mi_option_disable(mi_option_allow_large_os_pages);
_mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n");
}
}
- _mi_verbose_message("guarded build: %s\n", mi_option_get(mi_option_debug_guarded_max) > 0 ? "enabled" : "disabled");
+ _mi_verbose_message("guarded build: %s\n", mi_option_get(mi_option_guarded_max) > 0 ? "enabled" : "disabled");
#endif
}
long _mi_option_get_fast(mi_option_t option) {
mi_assert(option >= 0 && option < _mi_option_last);
- mi_option_desc_t* desc = &options[option];
+ mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
//mi_assert(desc->init != UNINIT);
return desc->value;
}
-
+
mi_decl_nodiscard long mi_option_get(mi_option_t option) {
mi_assert(option >= 0 && option < _mi_option_last);
@@ -225,11 +225,11 @@ void mi_option_set(mi_option_t option, long value) {
desc->value = value;
desc->init = INITIALIZED;
// ensure min/max range; be careful to not recurse.
- if (desc->option == mi_option_debug_guarded_min && _mi_option_get_fast(mi_option_debug_guarded_max) < value) {
- mi_option_set(mi_option_debug_guarded_max, value);
+ if (desc->option == mi_option_guarded_min && _mi_option_get_fast(mi_option_guarded_max) < value) {
+ mi_option_set(mi_option_guarded_max, value);
}
- else if (desc->option == mi_option_debug_guarded_max && _mi_option_get_fast(mi_option_debug_guarded_min) > value) {
- mi_option_set(mi_option_debug_guarded_min, value);
+ else if (desc->option == mi_option_guarded_max && _mi_option_get_fast(mi_option_guarded_min) > value) {
+ mi_option_set(mi_option_guarded_min, value);
}
}
@@ -565,7 +565,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
char* end = buf;
long value = strtol(buf, &end, 10);
if (mi_option_has_size_in_kib(desc->option)) {
- // this option is interpreted in KiB to prevent overflow of `long` for large allocations
+ // this option is interpreted in KiB to prevent overflow of `long` for large allocations
// (long is 32-bit on 64-bit windows, which allows for 4TiB max.)
size_t size = (value < 0 ? 0 : (size_t)value);
bool overflow = false;
@@ -580,7 +580,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
value = (size > LONG_MAX ? LONG_MAX : (long)size);
}
if (*end == 0) {
- mi_option_set(desc->option, value);
+ mi_option_set(desc->option, value);
}
else {
// set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
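
A small usage sketch (not part of this patch) of the min/max clamping in mi_option_set shown above, using the renamed options; the values are illustrative:

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      mi_option_set(mi_option_guarded_max, 256);
      mi_option_set(mi_option_guarded_min, 1024);             // min raised above max ...
      printf("%ld\n", mi_option_get(mi_option_guarded_max));  // ... so max is bumped to 1024 as well
      return 0;
    }
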
diff --git a/src/segment.c b/src/segment.c
index 837a65e9..18736818 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -455,7 +455,7 @@ static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_
if (MI_SECURE == 0) {
// normally no guard pages
- #if MI_DEBUG_GUARDED
+ #if MI_GUARDED
isize = _mi_align_up(minsize, _mi_os_page_size());
#else
isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
diff --git a/test/test-api-fill.c b/test/test-api-fill.c
index 3baee83d..eebbd394 100644
--- a/test/test-api-fill.c
+++ b/test/test-api-fill.c
@@ -271,7 +271,7 @@ int main(void) {
mi_free(p);
};
- #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_DEBUG_GUARDED)
+ #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_GUARDED)
CHECK_BODY("fill-freed-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
diff --git a/test/test-stress.c b/test/test-stress.c
index b062f2ce..cb769dbf 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -22,21 +22,22 @@ terms of the MIT license.
#include
#include
-#define MI_DEBUG_GUARDED
+// #define MI_GUARDED
+// #define USE_STD_MALLOC
// > mimalloc-test-stress [THREADS] [SCALE] [ITER]
//
// argument defaults
#if defined(MI_TSAN) // with thread-sanitizer reduce the threads to test within the azure pipeline limits
-static int THREADS = 8;
+static int THREADS = 8;
static int SCALE = 25;
static int ITER = 400;
#elif defined(MI_UBSAN) // with undefined behaviour sanitizer reduce parameters to stay within the azure pipeline limits
-static int THREADS = 8;
+static int THREADS = 8;
static int SCALE = 25;
static int ITER = 20;
-#elif defined(MI_DEBUG_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
-static int THREADS = 8;
+#elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
+static int THREADS = 8;
static int SCALE = 10;
static int ITER = 10;
#else
@@ -58,7 +59,6 @@ static size_t use_one_size = 0; // use single object size of `N *
static bool main_participates = false; // main thread participates as a worker too
-// #define USE_STD_MALLOC
#ifdef USE_STD_MALLOC
#define custom_calloc(n,s) calloc(n,s)
#define custom_realloc(p,s) realloc(p,s)