diff --git a/src/arena.c b/src/arena.c
index abc8b7ec..57ef5f60 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -60,11 +60,11 @@ typedef struct mi_arena_s {
   size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
   int numa_node; // associated NUMA node
   bool is_zero_init; // is the arena zero initialized?
-  bool is_committed; // is the memory fully committed? (if so, block_committed == NULL)
+  bool allow_decommit; // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   bool is_large; // large- or huge OS pages (always committed)
   _Atomic(uintptr_t) search_idx; // optimization to start the search for free blocks
   mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
-  mi_bitmap_field_t* blocks_committed; // if `!is_committed`, are the blocks committed?
+  mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
 
@@ -127,8 +127,8 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
   *memid = mi_arena_id_create(arena_index, bitmap_index);
   *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large = arena->is_large;
-  *is_pinned = (arena->is_large || arena->is_committed);
-  if (arena->is_committed) {
+  *is_pinned = (arena->is_large || !arena->allow_decommit);
+  if (arena->blocks_committed == NULL) {
     // always committed
     *commit = true;
   }
@@ -260,7 +260,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_o
     return;
   }
   // potentially decommit
-  if (arena->is_committed) {
+  if (!arena->allow_decommit || arena->blocks_committed == NULL) {
     mi_assert_internal(all_committed); // note: may be not true as we may "pretend" to be not committed (in segment.c)
   }
   else {
@@ -315,12 +315,16 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
   arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
   arena->is_large = is_large;
   arena->is_zero_init = is_zero;
-  arena->is_committed = is_committed;
+  arena->allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory
   arena->search_idx = 0;
   arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
-  arena->blocks_committed = (is_committed ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
+  arena->blocks_committed = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
   // the bitmaps are already zero initialized due to os_alloc
-  // just claim leftover blocks if needed
+  // initialize committed bitmap?
+  if (arena->blocks_committed != NULL && is_committed) {
+    memset(arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t));
+  }
+  // and claim leftover blocks if needed (so we never allocate there)
   ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
   mi_assert_internal(post >= 0);
   if (post > 0) {
diff --git a/src/init.c b/src/init.c
index 76c20842..e957559e 100644
--- a/src/init.c
+++ b/src/init.c
@@ -526,7 +526,7 @@ void mi_process_init(void) mi_attr_noexcept {
   }
   if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
     long ksize = mi_option_get(mi_option_reserve_os_memory);
-    if (ksize > 0) mi_reserve_os_memory((size_t)ksize*KiB, true, true);
+    if (ksize > 0) mi_reserve_os_memory((size_t)ksize*KiB, true /* commit? */, true /* allow large pages? */);
   }
 }
 
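Reviewer note on the arena.c changes: the single `is_committed` flag is replaced by `allow_decommit` plus an optional per-block `blocks_committed` bitmap. A minimal sketch of the invariant the new field comments describe, assuming the struct layout above (`mi_arena_ok` is a hypothetical helper added here for illustration only, not part of the patch):

    // Hypothetical debug check (illustration only, not in the patch).
    // If decommit is allowed, the arena must not use large/huge OS pages
    // (those are always committed) and must carry a per-block commit bitmap;
    // otherwise blocks_committed may be NULL and every block is treated as
    // committed, which is why such allocations are reported as pinned.
    static bool mi_arena_ok(const mi_arena_t* arena) {
      if (arena->allow_decommit) {
        return (!arena->is_large && arena->blocks_committed != NULL);
      }
      return true; // blocks_committed may be NULL; blocks stay committed
    }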
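On the init.c hunk: the change only documents the two boolean arguments in place. For reference, a hedged usage sketch of the same public API (the 1024 value is illustrative; `mi_option_reserve_os_memory` is read in KiB, hence the `ksize*KiB` conversion above):

    #include <mimalloc.h>

    int main(void) {
      // Roughly the programmatic equivalent of MIMALLOC_RESERVE_OS_MEMORY=1024:
      // reserve 1 MiB of OS memory up front, committed, allowing large OS
      // pages; returns 0 on success.
      int err = mi_reserve_os_memory(1024 * 1024, true /* commit? */, true /* allow large pages? */);
      return (err == 0 ? 0 : 1);
    }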