track pinned memory separately from large os pages

commit 14b8d27386 (parent c86459afef)
Author: daan
Date:   2020-09-08 16:46:03 -07:00

8 changed files with 83 additions and 57 deletions


@@ -50,8 +50,8 @@ bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
// arena.c
void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
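
Both arena entry points gain an `is_pinned` out-parameter next to the existing `commit`, `large`, and `is_zero` flags. A minimal call-site sketch of the extended contract, inferred from the call sites changed below (the exact flag semantics are an assumption, not spelled out in this commit):

  bool commit    = true;    // in: request committed memory; out: actually committed?
  bool large     = false;   // in: allow large/huge OS pages; out: were they used?
  bool is_pinned = false;   // out: memory that can never be decommitted or reset
  bool is_zero   = false;   // out: memory known to be zero-initialized
  size_t memid   = 0;
  void* p = _mi_arena_alloc_aligned(MI_SEGMENT_SIZE, MI_SEGMENT_ALIGN,
                                    &commit, &large, &is_pinned, &is_zero, &memid, tld);
  if (p != NULL && is_pinned) {
    // skip any later decommit/reset for this memory
  }
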
@@ -77,7 +77,8 @@ typedef union mi_region_info_u {
uintptr_t value;
struct {
bool valid; // initialized?
-bool is_large; // allocated in fixed large/huge OS pages
+bool is_large:1; // allocated in fixed large/huge OS pages
+bool is_pinned:1; // pinned memory cannot be decommitted
short numa_node; // the associated NUMA node (where -1 means no associated node)
} x;
} mi_region_info_t;
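
The new flag is a one-bit field so the whole descriptor still fits in a single `uintptr_t` and can be read and written as one atomic word. A standalone restatement of the layout with a hypothetical size check (the `static_assert` is illustrative and not part of this commit):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  typedef union region_info_u {
    uintptr_t value;          // the whole descriptor viewed as one machine word
    struct {
      bool  valid;            // initialized?
      bool  is_large  : 1;    // backed by fixed large/huge OS pages
      bool  is_pinned : 1;    // may never be decommitted or reset
      short numa_node;        // -1 means no associated NUMA node
    } x;
  } region_info_t;

  // the flag/NUMA struct must not outgrow the word that is stored atomically
  static_assert(sizeof(((region_info_t*)0)->x) <= sizeof(uintptr_t),
                "region info must fit in one word");
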
@@ -177,8 +178,9 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
bool region_commit = (commit && mi_option_is_enabled(mi_option_eager_region_commit));
bool region_large = (commit && allow_large);
bool is_zero = false;
+bool is_pinned = false;
size_t arena_memid = 0;
-void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_zero, &arena_memid, tld);
+void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, &arena_memid, tld);
if (start == NULL) return false;
mi_assert_internal(!(region_large && !allow_large));
mi_assert_internal(!region_large || region_commit);
@@ -208,6 +210,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
info.value = 0; // initialize the full union to zero
info.x.valid = true;
info.x.is_large = region_large;
+info.x.is_pinned = is_pinned;
info.x.numa_node = (short)_mi_os_numa_node(tld);
mi_atomic_store_release(&r->info, info.value); // now make it available to others
*region = r;
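
A fresh region becomes visible to other threads only through this release store of `info.value`; readers pair it with an acquire load (see the hunk at -282 below), so every earlier write to the region, including the new `is_pinned` bit, is visible before the region can be claimed. A minimal sketch of that handshake in plain C11 atomics, assuming mimalloc's `mi_atomic_store_release`/`mi_atomic_load_acquire` wrappers map onto these orderings:

  #include <stdatomic.h>
  #include <stdint.h>

  static _Atomic(uintptr_t) info_word;   // stands in for region->info

  static void publish(uintptr_t value) {
    // release: all preceding writes to the region happen-before any
    // acquire load that observes this value
    atomic_store_explicit(&info_word, value, memory_order_release);
  }

  static uintptr_t observe(void) {
    // acquire: pairs with the release store above; zero means "not yet initialized"
    return atomic_load_explicit(&info_word, memory_order_acquire);
  }
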
@@ -259,16 +262,16 @@ static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large,
}
-static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(blocks <= MI_BITMAP_FIELD_BITS);
mem_region_t* region;
mi_bitmap_index_t bit_idx;
const int numa_node = (_mi_os_numa_node_count() <= 1 ? -1 : _mi_os_numa_node(tld));
// try to claim in existing regions
-if (!mi_region_try_claim(numa_node, blocks, *is_large, &region, &bit_idx, tld)) {
+if (!mi_region_try_claim(numa_node, blocks, *large, &region, &bit_idx, tld)) {
// otherwise try to allocate a fresh region and claim in there
-if (!mi_region_try_alloc_os(blocks, *commit, *is_large, &region, &bit_idx, tld)) {
+if (!mi_region_try_alloc_os(blocks, *commit, *large, &region, &bit_idx, tld)) {
// out of regions or memory
return NULL;
}
@@ -282,12 +285,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
mi_region_info_t info;
info.value = mi_atomic_load_acquire(&region->info);
uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t,&region->start);
-mi_assert_internal(!(info.x.is_large && !*is_large));
+mi_assert_internal(!(info.x.is_large && !*large));
mi_assert_internal(start != NULL);
-*is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
-*is_large = info.x.is_large;
-*memid = mi_memid_create(region, bit_idx);
+*is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
+*large = info.x.is_large;
+*is_pinned = info.x.is_pinned;
+*memid = mi_memid_create(region, bit_idx);
void* p = start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);
// commit
@@ -296,7 +300,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
bool any_uncommitted;
_mi_bitmap_claim(&region->commit, 1, blocks, bit_idx, &any_uncommitted);
if (any_uncommitted) {
-mi_assert_internal(!info.x.is_large);
+mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
bool commit_zero;
_mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld);
if (commit_zero) *is_zero = true;
@@ -311,7 +315,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
// unreset reset blocks
if (_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
// some blocks are still reset
-mi_assert_internal(!info.x.is_large);
+mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
@@ -338,12 +342,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
// Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
// (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(memid != NULL && tld != NULL);
mi_assert_internal(size > 0);
*memid = 0;
*is_zero = false;
+*is_pinned = false;
bool default_large = false;
if (large==NULL) large = &default_large; // ensure `large != NULL`
if (size == 0) return NULL;
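
`_mi_mem_alloc_aligned` now clears `*is_pinned` up front so the flag is well-defined on every return path, alongside the existing idiom that lets callers pass `large == NULL`: pointing it at a local default means the rest of the function can dereference it unconditionally. A hypothetical illustration of that idiom in isolation (not code from this commit):

  #include <stdbool.h>
  #include <stddef.h>

  // optional out-parameter: callers that do not care about large pages pass NULL
  static void* alloc_example(size_t size, bool* large) {
    bool default_large = false;
    if (large == NULL) large = &default_large;  // ensure `large != NULL`
    (void)size;  // allocation elided; the body may now read and write *large freely
    return NULL;
  }
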
@@ -354,14 +359,14 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
size_t arena_memid;
const size_t blocks = mi_region_block_count(size);
if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN) {
-p = mi_region_try_alloc(blocks, commit, large, is_zero, memid, tld);
+p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
if (p == NULL) {
_mi_warning_message("unable to allocate from region: size %zu\n", size);
}
}
if (p == NULL) {
// and otherwise fall back to the OS
-p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_zero, &arena_memid, tld);
+p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, &arena_memid, tld);
*memid = mi_memid_create_from_arena(arena_memid);
}
@@ -418,7 +423,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
}
// reset the blocks to reduce the working set.
-if (!info.x.is_large && mi_option_is_enabled(mi_option_segment_reset)
+if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
&& (mi_option_is_enabled(mi_option_eager_commit) ||
mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
{
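
The free path now also refuses to reset pinned memory, not only memory in large OS pages. A hypothetical helper that spells out the full gating condition above (not part of the commit; assumes mimalloc's internal headers for `mi_region_info_t` and `mi_option_is_enabled`):

  // may a freed segment in this region be reset to shrink the working set?
  static bool mi_region_can_reset(mi_region_info_t info) {
    if (info.x.is_large)  return false;  // fixed large/huge OS pages cannot be reset
    if (info.x.is_pinned) return false;  // pinned memory must stay committed as-is
    if (!mi_option_is_enabled(mi_option_segment_reset)) return false;
    // cannot reset halfway-committed segments: require eager commit or
    // decommitting resets; otherwise only `option_page_reset` applies
    return mi_option_is_enabled(mi_option_eager_commit)
        || mi_option_is_enabled(mi_option_reset_decommits);
  }
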