From d1bd1644d567ea48976caa151a935c1ad1cc8866 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 2 Sep 2019 13:16:52 -0700 Subject: [PATCH] support zero-initialized memory detection --- include/mimalloc-internal.h | 7 +++-- include/mimalloc-types.h | 9 ++++-- src/alloc-aligned.c | 2 +- src/alloc.c | 30 +++++++++++++++++-- src/init.c | 6 ++-- src/memory.c | 57 ++++++++++++++++++++++++------------- src/os.c | 20 +++++++------ src/page.c | 15 +++++++++- src/segment.c | 25 +++++++++++----- 9 files changed, 123 insertions(+), 48 deletions(-) diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 3921de3a..2ef0839e 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -46,12 +46,12 @@ void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocat void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data // memory.c -void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, size_t* id, mi_os_tld_t* tld); +void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* id, mi_os_tld_t* tld); void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats); bool _mi_mem_reset(void* p, size_t size, mi_stats_t* stats); -bool _mi_mem_unreset(void* p, size_t size, mi_stats_t* stats); -bool _mi_mem_commit(void* p, size_t size, mi_stats_t* stats); +bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats); +bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); bool _mi_mem_protect(void* addr, size_t size); bool _mi_mem_unprotect(void* addr, size_t size); @@ -101,6 +101,7 @@ void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero); void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero); mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p); bool _mi_free_delayed_block(mi_block_t* block); +void _mi_block_zero_init(void* p, size_t size); #if MI_DEBUG>1 bool _mi_page_is_valid(mi_page_t* page); diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 4bf51d1d..9928f12d 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -131,9 +131,11 @@ typedef enum mi_delayed_e { // test if both are false (`value == 0`) in the `mi_free` routine. 
typedef union mi_page_flags_u { uint16_t value; + uint8_t full_aligned; struct { - bool in_full; - bool has_aligned; + bool in_full:1; + bool has_aligned:1; + bool is_zero; // `true` if the blocks in the free list are zero initialized }; } mi_page_flags_t; @@ -165,7 +167,8 @@ typedef struct mi_page_s { bool segment_in_use:1; // `true` if the segment allocated this page bool is_reset:1; // `true` if the page memory was reset bool is_committed:1; // `true` if the page virtual memory is committed - + bool is_zero_init:1; // `true` if the page was zero initialized + // layout like this to optimize access in `mi_malloc` and `mi_free` uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` uint16_t reserved; // number of blocks reserved in memory diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index 97f4319f..a18ae7a5 100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -33,7 +33,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* heap, size_t size, size_t void* p = _mi_page_malloc(heap,page,size); mi_assert_internal(p != NULL); mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); - if (zero) memset(p,0,size); + if (zero) _mi_block_zero_init(p,size); return p; } } diff --git a/src/alloc.c b/src/alloc.c index afc181dd..f9bbf66c 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -33,7 +33,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz page->used++; mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); #if (MI_DEBUG) - memset(block, MI_DEBUG_UNINIT, size); + if (!page->flags.is_zero) { memset(block, MI_DEBUG_UNINIT, size); } #elif (MI_SECURE) block->next = 0; #endif @@ -89,9 +89,32 @@ extern inline void* mi_malloc(size_t size) mi_attr_noexcept { return mi_heap_malloc(mi_get_default_heap(), size); } +void _mi_block_zero_init(void* p, size_t size) { + mi_assert_internal(p != NULL); + // already zero initialized memory? 
+ if (size > 4*sizeof(void*)) { // don't bother for small sizes + mi_page_t* page = _mi_ptr_page(p); + if (page->flags.is_zero) { + ((mi_block_t*)p)->next = 0; + #if MI_DEBUG>0 + for (size_t i = 0; i < size; i++) { + if (((uint8_t*)p)[i] != 0) { + _mi_assert_fail("page not zero", __FILE__, __LINE__, "_mi_block_zero_init"); + } + } + #endif + return; // and done + } + } + // otherwise memset + memset(p, 0, size); +} + void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) { void* p = mi_heap_malloc(heap,size); - if (zero && p != NULL) memset(p,0,size); + if (zero && p != NULL) { + _mi_block_zero_init(p,size); + } return p; } @@ -127,6 +150,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc mi_block_set_next(page, block, page->free); page->free = block; page->used--; + page->flags.is_zero = false; _mi_segment_page_free(page,true,&heap->tld->segments); } return; @@ -254,7 +278,7 @@ void mi_free(void* p) mi_attr_noexcept // huge page stat is accounted for in `_mi_page_retire` #endif - if (mi_likely(tid == segment->thread_id && page->flags.value == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks + if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks // local, and not full or aligned mi_block_t* block = (mi_block_t*)p; mi_block_set_next(page, block, page->local_free); diff --git a/src/init.c b/src/init.c index bd594378..cb5901aa 100644 --- a/src/init.c +++ b/src/init.c @@ -12,7 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file // Empty page used to initialize the small free pages array const mi_page_t _mi_page_empty = { - 0, false, false, false, 0, 0, + 0, false, false, false, false, 0, 0, { 0 }, NULL, // free #if MI_SECURE @@ -352,7 +352,7 @@ void mi_thread_init(void) mi_attr_noexcept pthread_setspecific(mi_pthread_key, (void*)(_mi_thread_id()|1)); // set to a dummy value so that `mi_pthread_done` is called #endif - #if (MI_DEBUG>0) && !defined(NDEBUG) // not in release mode as that leads to crashes on Windows dynamic override + #if (MI_DEBUG>0 && !defined(_WIN32)) _mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id()); #endif } @@ -367,7 +367,7 @@ void mi_thread_done(void) mi_attr_noexcept { // abandon the thread local heap if (_mi_heap_done()) return; // returns true if already ran - #if (MI_DEBUG>0) + #if (MI_DEBUG>0 && !defined(_WIN32)) if (!_mi_is_main_thread()) { _mi_verbose_message("thread done: 0x%zx\n", _mi_thread_id()); } diff --git a/src/memory.c b/src/memory.c index d6689c7a..c8b3b138 100644 --- a/src/memory.c +++ b/src/memory.c @@ -41,10 +41,10 @@ Possible issues: size_t _mi_os_large_page_size(); bool _mi_os_protect(void* addr, size_t size); bool _mi_os_unprotect(void* addr, size_t size); -bool _mi_os_commit(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats); bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats); -bool _mi_os_unreset(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats); void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld); void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats); void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment); @@ -86,6 +86,7 @@ static 
inline void* mi_region_info_read(mi_region_info_t info, bool* is_large, b typedef struct mem_region_s { volatile _Atomic(uintptr_t) map; // in-use bit per MI_SEGMENT_SIZE block volatile _Atomic(mi_region_info_t) info; // start of virtual memory area, and flags + volatile _Atomic(uintptr_t) dirty_mask; // bit per block if the contents are not zero'd } mem_region_t; @@ -138,7 +139,8 @@ Commit from a region // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call. // (not being able to claim is not considered an error so check for `p != NULL` afterwards). -static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, bool* commit, bool* allow_large, void** p, size_t* id, mi_os_tld_t* tld) +static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, + size_t size, bool* commit, bool* allow_large, bool* is_zero, void** p, size_t* id, mi_os_tld_t* tld) { size_t mask = mi_region_block_mask(blocks,bitidx); mi_assert_internal(mask != 0); @@ -203,10 +205,19 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit void* start = mi_region_info_read(info,®ion_is_large,®ion_is_committed); mi_assert_internal(!(region_is_large && !*allow_large)); + // set dirty bits + uintptr_t m; + do { + m = mi_atomic_read(®ion->dirty_mask); + } while (!mi_atomic_cas_weak(®ion->dirty_mask, m | mask, m)); + *is_zero = ((m & mask) == 0); // no dirty bit set in our claimed range? + void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE); if (*commit && !region_is_committed) { - // ensure commit - _mi_os_commit(blocks_start, mi_good_commit_size(size), tld->stats); // only commit needed size (unless using large OS pages) + // ensure commit + bool commit_zero = false; + _mi_os_commit(blocks_start, mi_good_commit_size(size), &commit_zero, tld->stats); // only commit needed size (unless using large OS pages) + if (commit_zero) *is_zero = true; } else if (!*commit && region_is_committed) { // but even when no commit is requested, we might have committed anyway (in a huge OS page for example) @@ -258,7 +269,8 @@ static inline size_t mi_bsr(uintptr_t x) { // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call. // (not being able to claim is not considered an error so check for `p != NULL` afterwards). 
-static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, bool* commit, bool* allow_large, void** p, size_t* id, mi_os_tld_t* tld) +static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, + bool* commit, bool* allow_large, bool* is_zero, void** p, size_t* id, mi_os_tld_t* tld) { mi_assert_internal(p != NULL && id != NULL); mi_assert_internal(blocks < MI_REGION_MAP_BITS); @@ -288,7 +300,8 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc else { // success, we claimed the bits // now commit the block memory -- this can still fail - return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, allow_large, p, id, tld); + return mi_region_commit_blocks(region, idx, bitidx, blocks, + size, commit, allow_large, is_zero, p, id, tld); } } else { @@ -311,7 +324,9 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written // if the blocks were successfully claimed so ensure they are initialized to NULL/0 before the call. // (not being able to claim is not considered an error so check for `p != NULL` afterwards). -static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, bool* commit, bool* allow_large, void** p, size_t* id, mi_os_tld_t* tld) +static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, + bool* commit, bool* allow_large, bool* is_zero, + void** p, size_t* id, mi_os_tld_t* tld) { // check if there are available blocks in the region.. mi_assert_internal(idx < MI_REGION_MAX); @@ -331,7 +346,7 @@ static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, b ok = (start == NULL || (*commit || !is_committed) || (*allow_large || !is_large)); // Todo: test with one bitmap operation? } if (ok) { - return mi_region_alloc_blocks(region, idx, blocks, size, commit, allow_large, p, id, tld); + return mi_region_alloc_blocks(region, idx, blocks, size, commit, allow_large, is_zero, p, id, tld); } } return true; // no error, but no success either @@ -343,16 +358,19 @@ static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, b // Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`. // (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`) -void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, size_t* id, mi_os_tld_t* tld) +void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, + size_t* id, mi_os_tld_t* tld) { mi_assert_internal(id != NULL && tld != NULL); mi_assert_internal(size > 0); *id = SIZE_MAX; + *is_zero = false; bool default_large = false; - if (large==NULL) large = &default_large; // ensure `large != NULL` + if (large==NULL) large = &default_large; // ensure `large != NULL` // use direct OS allocation for huge blocks or alignment (with `id = SIZE_MAX`) if (size > MI_REGION_MAX_ALLOC_SIZE || alignment > MI_SEGMENT_ALIGN) { + *is_zero = true; return _mi_os_alloc_aligned(mi_good_commit_size(size), alignment, *commit, large, tld); // round up size } @@ -370,20 +388,21 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l size_t idx = 0; // tld->region_idx; // start at 0 to reuse low addresses? Or, use tld->region_idx to reduce contention? 
for (size_t visited = 0; visited < count; visited++, idx++) { if (idx >= count) idx = 0; // wrap around - if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, large, &p, id, tld)) return NULL; // error + if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, large, is_zero, &p, id, tld)) return NULL; // error if (p != NULL) break; } if (p == NULL) { // no free range in existing regions -- try to extend beyond the count.. but at most 4 regions for (idx = count; idx < count + 4 && idx < MI_REGION_MAX; idx++) { - if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, large, &p, id, tld)) return NULL; // error + if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, large, is_zero, &p, id, tld)) return NULL; // error if (p != NULL) break; } } if (p == NULL) { // we could not find a place to allocate, fall back to the os directly + *is_zero = true; p = _mi_os_alloc_aligned(size, alignment, commit, large, tld); } else { @@ -439,8 +458,8 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) { // reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large if (!is_large) { if (mi_option_is_enabled(mi_option_segment_reset)) { - _mi_os_reset(p, size, stats); - // _mi_os_decommit(p,size,stats); // if !is_eager_committed + _mi_os_reset(p, size, stats); // + // _mi_os_decommit(p,size,stats); // if !is_eager_committed (and clear dirty bits) } // else { _mi_os_reset(p,size,stats); } } @@ -495,8 +514,8 @@ void _mi_mem_collect(mi_stats_t* stats) { Other -----------------------------------------------------------------------------*/ -bool _mi_mem_commit(void* p, size_t size, mi_stats_t* stats) { - return _mi_os_commit(p, size, stats); +bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats) { + return _mi_os_commit(p, size, is_zero, stats); } bool _mi_mem_decommit(void* p, size_t size, mi_stats_t* stats) { @@ -507,8 +526,8 @@ bool _mi_mem_reset(void* p, size_t size, mi_stats_t* stats) { return _mi_os_reset(p, size, stats); } -bool _mi_mem_unreset(void* p, size_t size, mi_stats_t* stats) { - return _mi_os_unreset(p, size, stats); +bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats) { + return _mi_os_unreset(p, size, is_zero, stats); } bool _mi_mem_protect(void* p, size_t size) { diff --git a/src/os.c b/src/os.c index 2b1ed4f4..93d84365 100644 --- a/src/os.c +++ b/src/os.c @@ -585,8 +585,9 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* // Commit/Decommit memory. // Usuelly commit is aligned liberal, while decommit is aligned conservative. // (but not for the reset version where we want commit to be conservative as well) -static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, mi_stats_t* stats) { +static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) { // page align in the range, commit liberally, decommit conservative + *is_zero = false; size_t csize; void* start = mi_os_page_align_areax(conservative, addr, size, &csize); if (csize == 0 || _mi_os_is_huge_reserved(addr)) return true; @@ -600,6 +601,7 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ } #if defined(_WIN32) + *is_zero = true; if (commit) { void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE); err = (p == start ? 
0 : GetLastError()); @@ -620,16 +622,17 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ return (err == 0); } -bool _mi_os_commit(void* addr, size_t size, mi_stats_t* stats) { - return mi_os_commitx(addr, size, true, false /* conservative? */, stats); +bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) { + return mi_os_commitx(addr, size, true, false /* conservative? */, is_zero, stats); } bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) { - return mi_os_commitx(addr, size, false, true /* conservative? */, stats); + bool is_zero; + return mi_os_commitx(addr, size, false, true /* conservative? */, &is_zero, stats); } -bool _mi_os_commit_unreset(void* addr, size_t size, mi_stats_t* stats) { - return mi_os_commitx(addr, size, true, true /* conservative? */, stats); +bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) { + return mi_os_commitx(addr, size, true, true /* conservative? */, is_zero, stats); } @@ -698,11 +701,12 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { } } -bool _mi_os_unreset(void* addr, size_t size, mi_stats_t* stats) { +bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) { if (mi_option_is_enabled(mi_option_reset_decommits)) { - return _mi_os_commit_unreset(addr, size, stats); // re-commit it (conservatively!) + return _mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!) } else { + *is_zero = false; return mi_os_resetx(addr, size, false, stats); } } diff --git a/src/page.c b/src/page.c index 74c3d88e..f0f43960 100644 --- a/src/page.c +++ b/src/page.c @@ -81,6 +81,14 @@ static bool mi_page_is_valid_init(mi_page_t* page) { mi_assert_internal(mi_page_list_is_valid(page,page->free)); mi_assert_internal(mi_page_list_is_valid(page,page->local_free)); + if (page->flags.is_zero) { + for(mi_block_t* block = page->free; block != NULL; mi_block_next(page,block)) { + for (size_t i = sizeof(mi_block_t); i < page->block_size; i++) { + mi_assert_internal(0 == *((uint8_t*)block + i)); + } + } + } + mi_block_t* tfree = mi_tf_block(page->thread_free); mi_assert_internal(mi_page_list_is_valid(page, tfree)); size_t tfree_count = mi_page_list_count(page, tfree); @@ -184,6 +192,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { // usual case page->free = page->local_free; page->local_free = NULL; + page->flags.is_zero = false; } else if (force) { // append -- only on shutdown (force) as this is a linear operation @@ -195,7 +204,8 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { mi_block_set_next(page, tail, page->free); page->free = page->local_free; page->local_free = NULL; - } + page->flags.is_zero = false; + } } mi_assert_internal(!force || page->local_free == NULL); @@ -547,6 +557,8 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st page->capacity += (uint16_t)extend; _mi_stat_increase(&stats->page_committed, extend * page->block_size); + // extension into zero initialized memory preserves the zero'd free list + if (!page->is_zero_init) page->flags.is_zero = false; mi_assert_expensive(mi_page_is_valid_init(page)); } @@ -565,6 +577,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi #if MI_SECURE page->cookie = _mi_heap_random(heap) | 1; #endif + page->flags.is_zero = page->is_zero_init; mi_assert_internal(page->capacity == 0); mi_assert_internal(page->free == NULL); diff --git a/src/segment.c b/src/segment.c index 
b03547b3..68f3fb05 100644 --- a/src/segment.c +++ b/src/segment.c @@ -330,6 +330,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); bool commit = eager || (page_kind > MI_PAGE_MEDIUM); bool protection_still_good = false; + bool is_zero = false; mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld); if (segment != NULL) { if (mi_option_is_enabled(mi_option_secure)) { @@ -343,23 +344,27 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, } if (!segment->mem_is_committed && page_kind > MI_PAGE_MEDIUM) { mi_assert_internal(!segment->mem_is_fixed); - _mi_mem_commit(segment, segment->segment_size, tld->stats); + _mi_mem_commit(segment, segment->segment_size, &is_zero, tld->stats); segment->mem_is_committed = true; } if (!segment->mem_is_fixed && (mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset))) { - _mi_mem_unreset(segment, segment->segment_size, tld->stats); + bool reset_zero = false; + _mi_mem_unreset(segment, segment->segment_size, &reset_zero, tld->stats); + if (reset_zero) is_zero = true; } } else { // Allocate the segment from the OS size_t memid; bool mem_large = (!eager_delay && !mi_option_is_enabled(mi_option_secure)); // only allow large OS pages once we are no longer lazy - segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &memid, os_tld); + segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_zero, &memid, os_tld); if (segment == NULL) return NULL; // failed to allocate if (!commit) { // ensure the initial info is committed - _mi_mem_commit(segment, info_size, tld->stats); + bool commit_zero = false; + _mi_mem_commit(segment, info_size, &commit_zero, tld->stats); + if (commit_zero) is_zero = true; } segment->memid = memid; segment->mem_is_fixed = mem_large; @@ -403,6 +408,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, segment->pages[i].segment_idx = i; segment->pages[i].is_reset = false; segment->pages[i].is_committed = commit; + segment->pages[i].is_zero_init = is_zero; } _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size); //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment); @@ -463,12 +469,16 @@ static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_stats_t* stats) if (!page->is_committed) { mi_assert_internal(!segment->mem_is_fixed); page->is_committed = true; - _mi_mem_commit(start,psize,stats); + bool is_zero = false; + _mi_mem_commit(start,psize,&is_zero,stats); + if (is_zero) page->is_zero_init = true; } if (page->is_reset) { mi_assert_internal(!segment->mem_is_fixed); page->is_reset = false; - _mi_mem_unreset(start, psize, stats); + bool is_zero = false; + _mi_mem_unreset(start, psize, &is_zero, stats); + if (is_zero) page->is_zero_init = true; } } return page; @@ -493,7 +503,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta size_t inuse = page->capacity * page->block_size; _mi_stat_decrease(&stats->page_committed, inuse); _mi_stat_decrease(&stats->pages, 1); - + // reset the page memory to reduce memory pressure? 
if (!segment->mem_is_fixed && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) { size_t psize; @@ -503,6 +513,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta } // zero the page data, but not the segment fields + page->is_zero_init = false; ptrdiff_t ofs = offsetof(mi_page_t,capacity); memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs); page->segment_in_use = false;
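The flags change in include/mimalloc-types.h is what keeps the free fast path cheap while adding the new zero flag: `in_full` and `has_aligned` stay packed as bit-fields that alias the single byte `full_aligned`, so `mi_free` can still test both with one compare against zero, while `is_zero` lands in the following byte and never trips that test. A minimal sketch of that layout, reusing the names from the patch but relying on the usual (implementation-defined) bit-field packing, so it is illustrative rather than normative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef union page_flags_u {
  uint16_t value;
  uint8_t  full_aligned;      // aliases the two bit-fields in the first byte
  struct {
    bool in_full : 1;
    bool has_aligned : 1;
    bool is_zero;             // typically placed in the second byte
  };
} page_flags_t;

int main(void) {
  page_flags_t flags;
  flags.value = 0;
  flags.is_zero = true;                 // marking the free list as zero'd...
  // ...must not push mi_free onto the slow path:
  printf("fast path: %s\n", flags.full_aligned == 0 ? "taken" : "not taken");
  flags.has_aligned = true;             // aligned blocks do force the slow path
  printf("fast path: %s\n", flags.full_aligned == 0 ? "taken" : "not taken");
  return 0;
}

This is also why the fast-path condition in mi_free changes from `page->flags.value == 0` to `page->flags.full_aligned == 0`: with `is_zero` sharing the 16-bit `value`, a zero-initialized page would otherwise always be routed to the slow path.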
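`_mi_block_zero_init` (added in src/alloc.c) is what lets calloc-style requests skip the memset when the block comes from a page whose free list is known to be zero-initialized: only the word that held the free-list link can be non-zero. The sketch below captures that idea under simplifying assumptions; `block_zero_init` and its `page_is_zero` parameter are stand-ins, since the real function derives the page (and its `flags.is_zero`) from the pointer itself:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static void block_zero_init(void* p, size_t size, bool page_is_zero) {
  // For small blocks the check is not worth it; just clear everything.
  if (size > 4 * sizeof(void*) && page_is_zero) {
    *(void**)p = NULL;   // only the free-list link word can be non-zero
    return;
  }
  memset(p, 0, size);
}

int main(void) {
  void* block[8];
  memset(block, 0, sizeof(block));  // pretend the page handed out zeroed memory...
  block[0] = block;                 // ...except for the free-list link word
  block_zero_init(block, sizeof(block), /*page_is_zero=*/true);
  for (size_t i = 0; i < sizeof(block); i++) {
    assert(((unsigned char*)block)[i] == 0);
  }
  return 0;
}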
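The new `dirty_mask` in src/memory.c is how a region knows whether a claimed range of blocks has ever been handed out before: claiming ORs the range's bits in with a CAS loop, and if none of the bits were previously set the memory is still exactly as the OS delivered it. A simplified, self-contained version of that bookkeeping using standard C11 atomics in place of the mi_atomic_* wrappers (the surrounding region machinery is elided):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct region_s {
  _Atomic uintptr_t dirty_mask;   // one bit per block: contents may be non-zero
} region_t;

// Claim `blocks` blocks starting at `bitidx` as dirty; report whether the
// whole range was still clean (never handed out) before this claim.
static bool region_claim_is_zero(region_t* region, size_t bitidx, size_t blocks) {
  uintptr_t mask = (((uintptr_t)1 << blocks) - 1) << bitidx;
  uintptr_t prev = atomic_load(&region->dirty_mask);
  // same retry shape as the mi_atomic_cas_weak loop in the patch
  while (!atomic_compare_exchange_weak(&region->dirty_mask, &prev, prev | mask)) {
    // `prev` was reloaded by the failed CAS; just retry
  }
  return (prev & mask) == 0;      // no dirty bit set in our claimed range?
}

int main(void) {
  region_t r;
  atomic_init(&r.dirty_mask, 0);
  printf("first claim still zero: %d\n", region_claim_is_zero(&r, 2, 3));  // 1
  printf("overlapping claim zero: %d\n", region_claim_is_zero(&r, 3, 2));  // 0
  return 0;
}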
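Taken together, the zero information flows in one direction: the OS layer reports whether a commit or unreset produced zeroed pages, the segment records that per page as `is_zero_init`, `mi_page_init` turns it into `flags.is_zero`, and the flag is dropped as soon as recycled blocks reach the free list (`_mi_page_free_collect`) or the free area is extended into non-fresh memory (`mi_page_extend_free`). The stand-in types below condense that state machine; they are not the mimalloc structures:

#include <stdbool.h>
#include <stdio.h>

typedef struct page_s {
  bool is_zero_init;  // the page's memory was zero when it was (re)committed
  bool is_zero;       // blocks currently on the free list are all zero
} page_t;

static void page_commit(page_t* page, bool os_says_zero) {
  if (os_says_zero) page->is_zero_init = true;       // segment layer
}
static void page_init(page_t* page) {
  page->is_zero = page->is_zero_init;                // mi_page_init
}
static void page_extend_free(page_t* page) {
  if (!page->is_zero_init) page->is_zero = false;    // mi_page_extend_free
}
static void page_free_collect(page_t* page) {
  page->is_zero = false;                             // _mi_page_free_collect
}

int main(void) {
  page_t page = { false, false };
  page_commit(&page, /*os_says_zero=*/true);
  page_init(&page);
  page_extend_free(&page);
  printf("after init+extend, is_zero = %d\n", page.is_zero);      // 1: memset skipped
  page_free_collect(&page);
  printf("after collecting frees, is_zero = %d\n", page.is_zero); // 0: must memset
  return 0;
}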