Add guarded objects that are sampled (and fit a size range). The guarded sample rate, etc., can be set per heap as well as defaulted with options.

This commit is contained in:
daanx 2024-11-17 22:45:09 -08:00
parent 8ba1879073
commit d57cb0765d
9 changed files with 61 additions and 26 deletions

View file

@ -309,6 +309,12 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool al
// deprecated // deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
// Experimental: objects followed by a guard page.
// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages.
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
// ------------------------------------------------------ // ------------------------------------------------------
// Convenience // Convenience

View file

@ -611,17 +611,23 @@ static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void*
} }
// Should this allocation of `size` bytes get a guard page?
// Implements sampled guarding: roughly 1 out of `guarded_sample_rate` eligible
// allocations is guarded, driven by a per-heap countdown in `guarded_sample_count`.
static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
  // this code is written to result in fast assembly as it is on the hot path for allocation
  const size_t count = heap->guarded_sample_count - 1; // if the rate was 0, this will underflow and count for a long time..
  if mi_likely(count != 0) {
    // no sample: just store the decremented countdown and take the fast exit
    heap->guarded_sample_count = count;
    return false;
  }
  else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) {
    // use guarded allocation
    heap->guarded_sample_count = heap->guarded_sample_rate; // reset
    // a rate of 0 means guarding is disabled (we only got here via the underflow)
    return (heap->guarded_sample_rate != 0);
  }
  else {
    // failed size criteria, rewind count (but don't write to an empty heap)
    // NOTE: the static empty heap is initialized with count==1 so it never takes
    // a write path when the rate is 0 (see the `_mi_heap_empty` initializer)
    if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
    return false;
  }
}
mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;

View file

@ -507,7 +507,7 @@ struct mi_heap_s {
size_t guarded_size_max; // maximal size for guarded objects size_t guarded_size_max; // maximal size for guarded objects
size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages) size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages)
size_t guarded_sample_seed; // starting sample count size_t guarded_sample_seed; // starting sample count
size_t guarded_sample_count; // current sample count (wraps at `sample_rate`) size_t guarded_sample_count; // current sample count (counting down to 0)
#endif #endif
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size. mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin") mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")

View file

@ -661,16 +661,14 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
void* const p = mi_block_ptr_set_guarded(block, obj_size); void* const p = mi_block_ptr_set_guarded(block, obj_size);
// stats // stats
const size_t usize = mi_usable_size(p);
mi_assert_internal(usize >= size);
mi_track_malloc(p, size, zero); mi_track_malloc(p, size, zero);
#if MI_STAT>1
if (p != NULL) { if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
mi_heap_stat_counter_increase(heap, guarded_alloc_count, 1); #endif
_mi_stat_counter_increase(&heap->tld->stats.guarded_alloc_count, 1);
} }
#endif
#if MI_DEBUG>3 #if MI_DEBUG>3
if (p != NULL && zero) { if (p != NULL && zero) {
mi_assert_expensive(mi_mem_is_zero(p, size)); mi_assert_expensive(mi_mem_is_zero(p, size));

View file

@ -544,6 +544,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
// Remove guard page when building with MI_GUARDED // Remove guard page when building with MI_GUARDED
#if MI_GUARDED #if MI_GUARDED
static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) { static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
MI_UNUSED(p);
mi_assert_internal(mi_block_ptr_is_guarded(block, p)); mi_assert_internal(mi_block_ptr_is_guarded(block, p));
mi_assert_internal(mi_page_has_aligned(page)); mi_assert_internal(mi_page_has_aligned(page));
mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t)); mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t));

View file

@ -113,7 +113,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
false, // can reclaim false, // can reclaim
0, // tag 0, // tag
#if MI_GUARDED #if MI_GUARDED
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`)
#endif #endif
MI_SMALL_PAGES_EMPTY, MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY MI_PAGE_QUEUES_EMPTY
@ -167,16 +167,39 @@ bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
mi_stats_t _mi_stats_main = { MI_STATS_NULL }; mi_stats_t _mi_stats_main = { MI_STATS_NULL };
#if MI_GUARDED #if MI_GUARDED
// Set the guarded-object sample rate for `heap`: roughly 1 in `sample_rate`
// eligible allocations gets a guard page; a rate of 0 disables guarding.
// A `seed` of 0 picks a random starting offset (so heaps don't sample in lock-step).
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
  heap->guarded_sample_seed = seed;
  if (heap->guarded_sample_seed == 0) {
    heap->guarded_sample_seed = _mi_heap_random_next(heap);
  }
  heap->guarded_sample_rate = sample_rate;
  if (heap->guarded_sample_rate >= 1) {
    heap->guarded_sample_seed = heap->guarded_sample_seed % heap->guarded_sample_rate;
  }
  // Count down `seed+1` allocations until the first sample. Starting the count at
  // `seed` itself is wrong when `seed % rate == 0`: `mi_heap_malloc_use_guarded`
  // decrements BEFORE testing, so a starting count of 0 underflows size_t and
  // effectively never samples.
  heap->guarded_sample_count = heap->guarded_sample_seed + 1; // count down samples
}
// Restrict guard-page sampling on `heap` to objects whose size lies in [min, max].
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
  heap->guarded_size_min = min;
  // clamp so the upper bound never falls below the lower bound
  heap->guarded_size_max = (max < min ? min : max);
}
// Initialize the guarded-allocation settings of `heap` from the global options,
// by delegating to the per-heap setters (which also handle seeding and clamping).
void _mi_heap_guarded_init(mi_heap_t* heap) {
  mi_heap_guarded_set_sample_rate(heap,
    (size_t)mi_option_get_clamp(mi_option_guarded_sample_rate, 0, LONG_MAX),
    (size_t)mi_option_get(mi_option_guarded_sample_seed));
  mi_heap_guarded_set_size_bound(heap,
    (size_t)mi_option_get_clamp(mi_option_guarded_min, 0, LONG_MAX),
    (size_t)mi_option_get_clamp(mi_option_guarded_max, 0, LONG_MAX) );
}
#else #else
// MI_GUARDED disabled: no-op stub so callers need no #ifdef's.
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
  MI_UNUSED(heap); MI_UNUSED(sample_rate); MI_UNUSED(seed);
}
// MI_GUARDED disabled: no-op stub so callers need no #ifdef's.
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
  MI_UNUSED(heap); MI_UNUSED(min); MI_UNUSED(max);
}
// MI_GUARDED disabled: nothing to initialize.
void _mi_heap_guarded_init(mi_heap_t* heap) {
  MI_UNUSED(heap);
}
@ -576,7 +599,7 @@ static void mi_detect_cpu_features(void) {
} }
#else #else
static void mi_detect_cpu_features(void) {
  // nothing — fallback for configurations without CPU feature detection (see the #if branch above)
}
#endif #endif

View file

@ -146,7 +146,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ MI_GiB, UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects { MI_GiB, UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects
{ 0, UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) { 0, UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
#if MI_GUARDED #if MI_GUARDED
{ 1000,UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded(= 1000) { 4000,UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded(=4000)
#else #else
{ 0, UNINIT, MI_OPTION(guarded_sample_rate)}, { 0, UNINIT, MI_OPTION(guarded_sample_rate)},
#endif #endif

View file

@ -62,7 +62,7 @@ int main() {
test_mt_shutdown(); test_mt_shutdown();
*/ */
//fail_aslr(); //fail_aslr();
// mi_stats_print(NULL); mi_stats_print(NULL);
return 0; return 0;
} }

View file

@ -22,6 +22,7 @@ terms of the MIT license.
#include <string.h> #include <string.h>
#include <assert.h> #include <assert.h>
// #define MI_GUARDED
// #define USE_STD_MALLOC // #define USE_STD_MALLOC
// > mimalloc-test-stress [THREADS] [SCALE] [ITER] // > mimalloc-test-stress [THREADS] [SCALE] [ITER]
@ -35,7 +36,7 @@ static int ITER = 400;
static int THREADS = 8; static int THREADS = 8;
static int SCALE = 25; static int SCALE = 25;
static int ITER = 20; static int ITER = 20;
#elif defined(MI_XGUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits #elif defined(xMI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
static int THREADS = 8; static int THREADS = 8;
static int SCALE = 10; static int SCALE = 10;
static int ITER = 10; static int ITER = 10;