From ead1f349305d59b81c3b11076e97f760c3c72a02 Mon Sep 17 00:00:00 2001
From: daan
Date: Sun, 11 Oct 2020 10:50:09 -0700
Subject: [PATCH 1/4] add extra NULL checks for heap parameters in the heap API (issue #311)

---
 src/heap.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/heap.c b/src/heap.c
index b1079e14..4a91786b 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -114,7 +114,7 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
 
 static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
-  if (!mi_heap_is_initialized(heap)) return;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   _mi_deferred_free(heap, collect >= MI_FORCE);
 
   // note: never reclaim on collect but leave it to threads that need storage to reclaim
@@ -213,6 +213,7 @@ uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
 
 // zero out the page queues
 static void mi_heap_reset_pages(mi_heap_t* heap) {
+  mi_assert_internal(heap != NULL);
   mi_assert_internal(mi_heap_is_initialized(heap));
   // TODO: copy full empty heap instead?
   memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
@@ -228,6 +229,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
 static void mi_heap_free(mi_heap_t* heap) {
   mi_assert(heap != NULL);
   mi_assert_internal(mi_heap_is_initialized(heap));
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   if (mi_heap_is_backing(heap)) return; // dont free the backing heap
 
   // reset default
@@ -310,7 +312,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
   mi_assert(mi_heap_is_initialized(heap));
   mi_assert(heap->no_reclaim);
   mi_assert_expensive(mi_heap_is_valid(heap));
-  if (!mi_heap_is_initialized(heap)) return;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   if (!heap->no_reclaim) {
     // don't free in case it may contain reclaimed pages
     mi_heap_delete(heap);
@@ -366,7 +368,7 @@ void mi_heap_delete(mi_heap_t* heap)
   mi_assert(heap != NULL);
   mi_assert(mi_heap_is_initialized(heap));
   mi_assert_expensive(mi_heap_is_valid(heap));
-  if (!mi_heap_is_initialized(heap)) return;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
 
   if (!mi_heap_is_backing(heap)) {
     // tranfer still used pages to the backing heap
@@ -381,8 +383,9 @@ void mi_heap_delete(mi_heap_t* heap)
 }
 
 mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
   mi_assert(mi_heap_is_initialized(heap));
-  if (!mi_heap_is_initialized(heap)) return NULL;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
   mi_assert_expensive(mi_heap_is_valid(heap));
   mi_heap_t* old = mi_get_default_heap();
   _mi_heap_set_default_direct(heap);
@@ -408,7 +411,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
 
 bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
   mi_assert(heap != NULL);
-  if (!mi_heap_is_initialized(heap)) return false;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
   return (heap == mi_heap_of_block(p));
 }
 
@@ -426,7 +429,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
 
 bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
   mi_assert(heap != NULL);
-  if (!mi_heap_is_initialized(heap)) return false;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
   if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers
   bool found = false;
   mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);

From 5d2b925f3e2f4927a909efb4b97d35e9cad2a440 Mon Sep 17 00:00:00 2001
From: daan
Date: Sun, 11 Oct 2020 10:56:57 -0700
Subject: [PATCH 2/4] wrap MI_SECURE conditional in #ifdef to avoid warnings (issue #311)

---
 src/page.c    | 9 ++++++---
 src/segment.c | 8 +++++---
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/src/page.c b/src/page.c
index cd96bb90..4b7e9ffb 100644
--- a/src/page.c
+++ b/src/page.c
@@ -705,14 +705,17 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
   mi_page_queue_t* pq = mi_page_queue(heap,size);
   mi_page_t* page = pq->first;
   if (page != NULL) {
-    if ((MI_SECURE >= 3) && page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
-      // in secure mode, we extend half the time to increase randomness
+    #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
+    if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
       mi_page_extend_free(heap, page, heap->tld);
       mi_assert_internal(mi_page_immediate_available(page));
     }
-    else {
+    else
+    #endif
+    {
       _mi_page_free_collect(page,false);
     }
+
     if (mi_page_immediate_available(page)) {
       page->retire_expire = 0;
       return page; // fast path
diff --git a/src/segment.c b/src/segment.c
index fb47fb0f..4dc7dbf6 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -385,11 +385,13 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_
     psize -= segment->segment_info_size;
   }
 
-  if (MI_SECURE > 1 || (MI_SECURE == 1 && page->segment_idx == segment->capacity - 1)) {
-    // secure == 1: the last page has an os guard page at the end
-    // secure > 1: every page has an os guard page
+#if (MI_SECURE > 1)  // every page has an os guard page
+  psize -= _mi_os_page_size();
+#elif (MI_SECURE==1) // the last page has an os guard page at the end
+  if (page->segment_idx == segment->capacity - 1) {
     psize -= _mi_os_page_size();
   }
+#endif
 
   if (page_size != NULL) *page_size = psize;
   mi_assert_internal(page->xblock_size == 0 || _mi_ptr_page(p) == page);

From 7114d5424acbe38411fd4c7938878ddcb2763479 Mon Sep 17 00:00:00 2001
From: daan
Date: Sun, 11 Oct 2020 13:14:43 -0700
Subject: [PATCH 3/4] fix statistics to include padding correctly (issue #301)

---
 src/alloc.c                 | 42 ++++++++++++++++++++++++++++---------
 src/segment.c               |  7 -------
 test/main-override-static.c | 27 +++++++++++++++++-------
 test/test-stress.c          |  2 +-
 4 files changed, 53 insertions(+), 25 deletions(-)

diff --git a/src/alloc.c b/src/alloc.c
index 8e863a67..cceb35c7 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -282,6 +282,35 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
 }
 #endif
 
+// only maintain stats for smaller objects if requested
+#if (MI_STAT>1)
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+  mi_heap_t* const heap = mi_heap_get_default();
+  const size_t usize = mi_page_usable_size_of(page, block);
+  const size_t bsize = mi_page_usable_block_size(page);
+  mi_heap_stat_decrease(heap, malloc, usize);
+  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
+  }
+}
+#else
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+  UNUSED(page); UNUSED(block);
+}
+#endif
+
+// always maintain stats for huge objects
+static void mi_stat_huge_free(const mi_page_t* page) {
+  mi_heap_t* const heap = mi_heap_get_default();
+  const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
+  if (bsize <= MI_HUGE_OBJ_SIZE_MAX) {
+    mi_heap_stat_decrease(heap, huge, bsize);
+  }
+  else {
+    mi_heap_stat_decrease(heap, giant, bsize);
+  }
+}
+
 // ------------------------------------------------------
 // Free
 // ------------------------------------------------------
@@ -300,6 +329,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* const segment = _mi_page_segment(page);
   if (segment->page_kind==MI_PAGE_HUGE) {
+    mi_stat_huge_free(page);
     _mi_segment_huge_page_free(segment, page, block);
     return;
   }
@@ -343,7 +373,6 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   }
 }
 
-
 // regular free
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
@@ -383,6 +412,7 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
 
 static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) {
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
+  mi_stat_free(page, block);
   _mi_free_block(page, local, block);
 }
@@ -430,19 +460,11 @@ void mi_free(void* p) mi_attr_noexcept
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_block_t*)p;
 
-#if (MI_STAT>1)
-  mi_heap_t* const heap = mi_heap_get_default();
-  const size_t bsize = mi_page_usable_block_size(page);
-  mi_heap_stat_decrease(heap, malloc, bsize);
-  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
-    mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
-  }
-#endif
-
   if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     if (mi_unlikely(mi_check_is_double_free(page,block))) return;
     mi_check_padding(page, block);
+    mi_stat_free(page, block);
     #if (MI_DEBUG!=0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
diff --git a/src/segment.c b/src/segment.c
index 4dc7dbf6..fb8e0fe1 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -1334,13 +1334,6 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
   page->is_zero = false;
   mi_assert(page->used == 0);
   mi_tld_t* tld = heap->tld;
-  const size_t bsize = mi_page_usable_block_size(page);
-  if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
-    _mi_stat_decrease(&tld->stats.giant, bsize);
-  }
-  else {
-    _mi_stat_decrease(&tld->stats.huge, bsize);
-  }
   mi_segments_track_size((long)segment->segment_size, &tld->segments);
   _mi_segment_page_free(page, true, &tld->segments);
 }
diff --git a/test/main-override-static.c b/test/main-override-static.c
index 8eb2249c..221db7e8 100644
--- a/test/main-override-static.c
+++ b/test/main-override-static.c
@@ -15,10 +15,11 @@ static void invalid_free();
 static void test_aslr(void);
 static void test_process_info(void);
 static void test_reserved(void);
+static void negative_stat(void);
 
 int main() {
   mi_version();
-
+  mi_stats_reset();
   // detect double frees and heap corruption
   // double_free1();
   // double_free2();
@@ -27,27 +28,29 @@ int main() {
   // test_aslr();
   // invalid_free();
   // test_reserved();
-
+  // negative_stat();
+
   void* p1 = malloc(78);
   void* p2 = malloc(24);
   free(p1);
   p1 = mi_malloc(8);
-  //char* s = strdup("hello\n");
+  char* s = strdup("hello\n");
   free(p2);
+  p2 = malloc(16);
   p1 = realloc(p1, 32);
   free(p1);
   free(p2);
-  //free(s);
-  //mi_collect(true);
-
+  free(s);
+
   /* now test if override worked by allocating/freeing across the api's*/
   //p1 = mi_malloc(32);
   //free(p1);
   //p2 = malloc(32);
   //mi_free(p2);
 
+  mi_collect(true);
   mi_stats_print(NULL);
-  test_process_info();
+  // test_process_info();
   return 0;
 }
 
@@ -167,3 +170,13 @@ static void test_reserved(void) {
   p3 = malloc(1*GiB);
   free(p4);
 }
+
+
+
+static void negative_stat(void) {
+  int* p = mi_malloc(60000);
+  mi_stats_print_out(NULL, NULL);
+  *p = 100;
+  mi_free(p);
+  mi_stats_print_out(NULL, NULL);
+}
\ No newline at end of file
diff --git a/test/test-stress.c b/test/test-stress.c
index d969284e..cba56310 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -123,7 +123,7 @@ static void free_items(void* p) {
 
 static void stress(intptr_t tid) {
   //bench_start_thread();
-  uintptr_t r = (tid * 43); // rand();
+  uintptr_t r = ((tid + 1) * 43); // rand();
   const size_t max_item_shift = 5; // 128
   const size_t max_item_retained_shift = max_item_shift + 2;
   size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more

From 6279835976c4f122a3ccd26bef68976971434bcd Mon Sep 17 00:00:00 2001
From: daan
Date: Sun, 11 Oct 2020 13:22:14 -0700
Subject: [PATCH 4/4] fix unused parameter warning

---
 src/alloc.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/alloc.c b/src/alloc.c
index cceb35c7..595cb86f 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -299,7 +299,8 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
 }
 #endif
 
-// always maintain stats for huge objects
+#if (MI_STAT>0)
+// maintain stats for huge objects
 static void mi_stat_huge_free(const mi_page_t* page) {
   mi_heap_t* const heap = mi_heap_get_default();
   const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
@@ -310,6 +311,11 @@ static void mi_stat_huge_free(const mi_page_t* page) {
     mi_heap_stat_decrease(heap, giant, bsize);
   }
 }
+#else
+static void mi_stat_huge_free(const mi_page_t* page) {
+  UNUSED(page);
+}
+#endif
 
 // ------------------------------------------------------
 // Free