commit 505ea78cae
Author: daan
Date:   2021-10-19 15:07:19 -07:00

15 changed files with 104 additions and 77 deletions

@@ -62,11 +62,11 @@ typedef struct mi_arena_s {
   size_t field_count;                   // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
   int numa_node;                        // associated NUMA node
   bool is_zero_init;                    // is the arena zero initialized?
-  bool is_committed;                    // is the memory fully committed? (if so, block_committed == NULL)
+  bool allow_decommit;                  // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   bool is_large;                        // large- or huge OS pages (always committed)
   _Atomic(uintptr_t) search_idx;        // optimization to start the search for free blocks
   mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
-  mi_bitmap_field_t* blocks_committed;  // if `!is_committed`, are the blocks committed?
+  mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t blocks_inuse[1];    // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
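
Note (sketch, not part of the commit): the new field comments encode an invariant, `allow_decommit` may only be set for non-large memory that carries a committed-blocks bitmap, while `blocks_committed == NULL` marks memory that is always committed. A minimal standalone illustration of that invariant, using hypothetical stand-in names:

    #include <stdbool.h>
    #include <stddef.h>

    // Hypothetical stand-in for the three mi_arena_t fields involved here.
    typedef struct arena_flags_s {
      bool  allow_decommit;    // replaces the old `is_committed` flag
      bool  is_large;          // large/huge OS pages are always committed
      void* blocks_committed;  // NULL when the memory cannot be decommitted
    } arena_flags_t;

    // Mirrors the comment on `allow_decommit`: if decommit is allowed, the
    // arena cannot use large pages and must carry a committed-blocks bitmap.
    bool arena_flags_valid(const arena_flags_t* a) {
      return !a->allow_decommit || (!a->is_large && a->blocks_committed != NULL);
    }
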
@@ -129,8 +129,8 @@ static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t n
   *memid = mi_arena_id_create(arena_index, bitmap_index);
   *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large = arena->is_large;
-  *is_pinned = (arena->is_large || arena->is_committed);
-  if (arena->is_committed) {
+  *is_pinned = (arena->is_large || !arena->allow_decommit);
+  if (arena->blocks_committed == NULL) {
     // always committed
     *commit = true;
   }
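
Note (sketch, not from the commit): with the new flags, the allocation path reports memory as pinned whenever it can never be decommitted, and as already committed whenever there is no committed-blocks bitmap to consult. A hedged standalone illustration with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    // Hypothetical helper mirroring the updated decision in mi_arena_alloc_from:
    // memory that can never be decommitted is reported as pinned and committed.
    void classify_arena_alloc(bool is_large, bool allow_decommit,
                              const void* blocks_committed,
                              bool* is_pinned, bool* commit) {
      *is_pinned = (is_large || !allow_decommit);
      if (blocks_committed == NULL) {
        *commit = true;   // always committed, nothing to track per block
      }
      // otherwise the (elided) branch consults the per-block committed bitmap
    }
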
@@ -245,12 +245,13 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
     return;
   }
   // potentially decommit
-  if (arena->is_committed) {
-    mi_assert_internal(all_committed);
+  if (!arena->allow_decommit || arena->blocks_committed == NULL) {
+    mi_assert_internal(all_committed); // note: may be not true as we may "pretend" to be not committed (in segment.c)
   }
   else {
     mi_assert_internal(arena->blocks_committed != NULL);
     _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, stats); // ok if this fails
+    // todo: use reset instead of decommit on windows?
     _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
   }
   // and make it available to others again
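
Note (sketch, not from the commit): the free path now branches on `allow_decommit` and `blocks_committed` rather than the removed `is_committed` flag: arenas without a committed-blocks bitmap are left alone, all others are decommitted and their committed bits cleared. A self-contained sketch with the OS and bitmap calls stubbed out and hypothetical names throughout:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    // Hypothetical stubs standing in for _mi_os_decommit and _mi_bitmap_unclaim_across.
    static void os_decommit_stub(void* p, size_t size)            { (void)p; (void)size; }
    static void bitmap_unclaim_stub(void* bitmap, size_t bit_idx) { (void)bitmap; (void)bit_idx; }

    // Sketch of the new free-path branch: only decommit when the arena allows it
    // and carries a committed-blocks bitmap.
    void arena_free_sketch(void* p, size_t size, bool all_committed,
                           bool allow_decommit, void* blocks_committed,
                           size_t bitmap_idx) {
      if (!allow_decommit || blocks_committed == NULL) {
        (void)all_committed;  // the commit notes this may be "pretend"-false (see segment.c)
      }
      else {
        assert(blocks_committed != NULL);
        os_decommit_stub(p, size);                        // ok if this fails
        bitmap_unclaim_stub(blocks_committed, bitmap_idx);
      }
    }
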
@@ -302,12 +303,16 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
   arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
   arena->is_large = is_large;
   arena->is_zero_init = is_zero;
-  arena->is_committed = is_committed;
+  arena->allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory
   arena->search_idx = 0;
   arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
-  arena->blocks_committed = (is_committed ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
+  arena->blocks_committed = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
   // the bitmaps are already zero initialized due to os_alloc
-  // just claim leftover blocks if needed
+  // initialize committed bitmap?
+  if (arena->blocks_committed != NULL && is_committed) {
+    memset(arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t));
+  }
+  // and claim leftover blocks if needed (so we never allocate there)
   ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
   mi_assert_internal(post >= 0);
   if (post > 0) {
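
Note (sketch, not from the commit): the initialization above lays the three bitmaps out back to back after the arena header, `blocks_inuse` first, then `blocks_dirty` at offset `fields`, then `blocks_committed` at offset `2*fields`, and fills the committed bitmap with 0xFF when the memory is handed in already committed. A standalone sketch of that layout, using a plain heap allocation instead of the arena's in-place `blocks_inuse[1]` tail:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef uintptr_t bitmap_field_t;   // stand-in for mi_bitmap_field_t

    int main(void) {
      size_t fields = 4;  // example value for `fields`
      // One flat allocation holding the inuse, dirty and committed bitmaps back to back.
      bitmap_field_t* blocks_inuse = calloc(3 * fields, sizeof(bitmap_field_t));
      if (blocks_inuse == NULL) return 1;
      bitmap_field_t* blocks_dirty     = &blocks_inuse[fields];      // just after inuse bitmap
      bitmap_field_t* blocks_committed = &blocks_inuse[2 * fields];  // just after dirty bitmap
      // Memory handed in already committed: mark every block as committed up front.
      memset(blocks_committed, 0xFF, fields * sizeof(bitmap_field_t));
      printf("dirty at field %zu, committed at field %zu\n",
             (size_t)(blocks_dirty - blocks_inuse),
             (size_t)(blocks_committed - blocks_inuse));
      free(blocks_inuse);
      return 0;
    }
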
@@ -332,7 +337,7 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
   }
-  _mi_verbose_message("reserved %zu kb memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
+  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
   return 0;
 }
@@ -349,10 +354,10 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
   size_t pages_reserved = 0;
   void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize);
   if (p==NULL || pages_reserved==0) {
-    _mi_warning_message("failed to reserve %zu gb huge pages\n", pages);
+    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
     return ENOMEM;
   }
-  _mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages);
+  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
   if (!mi_manage_os_memory(p, hsize, true, true, true, numa_node)) {
     _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);