track commit and is_large status more precisely

commit 9af51506a6
parent a551f3abc4
Author: daan
Date: 2019-08-28 11:58:30 -07:00

6 changed files with 57 additions and 38 deletions

include/mimalloc-internal.h

@@ -45,8 +45,7 @@ void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocat
 void  _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data

 // memory.c
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, size_t* id, mi_os_tld_t* tld);
-void* _mi_mem_alloc(size_t size, bool commit, bool* large, size_t* id, mi_os_tld_t* tld);
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, size_t* id, mi_os_tld_t* tld);
 void  _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats);
 bool  _mi_mem_reset(void* p, size_t size, mi_stats_t* stats);
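
The signature change makes `commit` an in/out parameter: the caller passes its commit request by pointer and reads back whether the returned memory actually ended up committed. A minimal caller sketch (variable names illustrative; the real call site is in the src/segment.c hunk below):

  size_t memid;
  bool commit = false;  // request uncommitted memory if possible
  bool large  = true;   // but large OS pages are acceptable
  void* p = _mi_mem_alloc_aligned(MI_SEGMENT_SIZE, MI_SEGMENT_SIZE, &commit, &large, &memid, tld);
  if (p != NULL && commit) {
    // the memory is committed even though we did not ask for it,
    // e.g. because it came from an (always committed) huge OS page area
  }

The non-aligned wrapper `_mi_mem_alloc` is removed altogether in this commit (see the src/memory.c hunk at -378 below).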

include/mimalloc.h

@@ -225,11 +225,13 @@ typedef enum mi_option_e {
   mi_option_verbose,
   // the following options are experimental
   mi_option_secure,
+  mi_option_lazy_commit,
   mi_option_eager_commit,
   mi_option_eager_region_commit,
   mi_option_large_os_pages, // implies eager commit
   mi_option_reserve_huge_os_pages,
   mi_option_segment_cache,
+  mi_option_segment_reset,
   mi_option_page_reset,
   mi_option_cache_reset,
   mi_option_reset_decommits,

src/memory.c

@@ -46,8 +46,9 @@ bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_os_unreset(void* p, size_t size, mi_stats_t* stats);
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld);
-bool  _mi_os_is_huge_reserved(void* p);
 void  _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
+void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment);
+bool  _mi_os_is_huge_reserved(void* p);

 // Constants
 #if (MI_INTPTR_SIZE==8)
@@ -137,7 +138,7 @@ Commit from a region
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
 // (not being able to claim is not considered an error so check for `p != NULL` afterwards).
-static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, bool commit, bool* large, void** p, size_t* id, mi_os_tld_t* tld)
+static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, bool* commit, bool* allow_large, void** p, size_t* id, mi_os_tld_t* tld)
 {
   size_t mask = mi_region_block_mask(blocks,bitidx);
   mi_assert_internal(mask != 0);
@@ -149,9 +150,16 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   if (info == 0)
   {
     bool region_commit = mi_option_is_enabled(mi_option_eager_region_commit);
-    bool region_large = region_commit && *large;
-    void* start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, region_commit, &region_large, tld);
-    *large = region_large;
+    bool region_large = *allow_large;
+    void* start = NULL;
+    if (region_large) {
+      start = _mi_os_try_alloc_from_huge_reserved(MI_REGION_SIZE, MI_SEGMENT_ALIGN);
+      if (start != NULL) { region_commit = true; }
+    }
+    if (start == NULL) {
+      start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, region_commit, &region_large, tld);
+    }
+    mi_assert_internal(!(region_large && !*allow_large));
     if (start == NULL) {
       // failure to allocate from the OS! unclaim the blocks and fail
@@ -191,13 +199,22 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   // Commit the blocks to memory
   bool region_is_committed = false;
-  void* start = mi_region_info_read(info,large,&region_is_committed);
+  bool region_is_large = false;
+  void* start = mi_region_info_read(info,&region_is_large,&region_is_committed);
+  mi_assert_internal(!(region_is_large && !*allow_large));
   void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
-  if (commit && !region_is_committed) {
+  if (*commit && !region_is_committed) {
     // ensure commit
     _mi_os_commit(blocks_start, mi_good_commit_size(size), tld->stats); // only commit needed size (unless using large OS pages)
   }
+  else if (!*commit && region_is_committed) {
+    // but even when no commit is requested, we might have committed anyway (in a huge OS page for example)
+    *commit = true;
+  }

   // and return the allocation
+  *allow_large = region_is_large;
   *p  = blocks_start;
   *id = (idx*MI_REGION_MAP_BITS) + bitidx;
   return true;
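
In other words, commit status now flows both ways through `*commit`. Distilled (a sketch, not the verbatim source):

  // on entry: *commit == caller needs committed memory
  // on exit:  *commit == returned memory is actually committed
  if (*commit && !region_is_committed) {
    _mi_os_commit(blocks_start, mi_good_commit_size(size), tld->stats); // commit on demand
  }
  else if (!*commit && region_is_committed) {
    *commit = true; // committed anyway, e.g. the region sits in a huge OS page
  }

The same two-way contract applies to `*allow_large`: on entry it states whether large OS pages are acceptable, on exit it reports whether the blocks actually live in a large-page region.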
@@ -241,7 +258,7 @@ static inline size_t mi_bsr(uintptr_t x) {
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
 // (not being able to claim is not considered an error so check for `p != NULL` afterwards).
-static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, bool commit, bool* large, void** p, size_t* id, mi_os_tld_t* tld)
+static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, bool* commit, bool* allow_large, void** p, size_t* id, mi_os_tld_t* tld)
 {
   mi_assert_internal(p != NULL && id != NULL);
   mi_assert_internal(blocks < MI_REGION_MAP_BITS);
@@ -271,7 +288,7 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
     else {
       // success, we claimed the bits
       // now commit the block memory -- this can still fail
-      return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, large, p, id, tld);
+      return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, allow_large, p, id, tld);
     }
   }
   else {
@@ -294,27 +311,27 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/0 before the call.
 // (not being able to claim is not considered an error so check for `p != NULL` afterwards).
-static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, bool commit, bool* large, void** p, size_t* id, mi_os_tld_t* tld)
+static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, bool* commit, bool* allow_large, void** p, size_t* id, mi_os_tld_t* tld)
 {
   // check if there are available blocks in the region..
   mi_assert_internal(idx < MI_REGION_MAX);
   mem_region_t* region = &regions[idx];
   uintptr_t m = mi_atomic_read_relaxed(&region->map);
   if (m != MI_REGION_MAP_FULL) { // some bits are zero
-    bool ok = (commit || *large); // committing or allow-large is always ok
+    bool ok = (*commit || *allow_large); // committing or allow-large is always ok
     if (!ok) {
       // otherwise skip incompatible regions if possible.
       // this is not guaranteed due to multiple threads allocating at the same time but
-      // that's ok. In secure mode, large is never allowed so that works out; otherwise
-      // we might just not be able to reset/decommit individual pages sometimes.
+      // that's ok. In secure mode, large is never allowed for any thread, so that works out;
+      // otherwise we might just not be able to reset/decommit individual pages sometimes.
       mi_region_info_t info = mi_atomic_read_relaxed(&region->info);
       bool is_large;
       bool is_committed;
       void* start = mi_region_info_read(info,&is_large,&is_committed);
-      ok = (start == NULL || (commit || !is_committed) || (*large || !is_large)); // Todo: test with one bitmap operation?
+      ok = (start == NULL || (*commit || !is_committed) || (*allow_large || !is_large)); // Todo: test with one bitmap operation?
     }
     if (ok) {
-      return mi_region_alloc_blocks(region, idx, blocks, size, commit, large, p, id, tld);
+      return mi_region_alloc_blocks(region, idx, blocks, size, commit, allow_large, p, id, tld);
     }
   }
   return true; // no error, but no success either
@@ -326,7 +343,7 @@ static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, b
 // Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
 // (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, size_t* id, mi_os_tld_t* tld)
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, size_t* id, mi_os_tld_t* tld)
 {
   mi_assert_internal(id != NULL && tld != NULL);
   mi_assert_internal(size > 0);
@@ -336,7 +353,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool* la
   // use direct OS allocation for huge blocks or alignment (with `id = SIZE_MAX`)
   if (size > MI_REGION_MAX_ALLOC_SIZE || alignment > MI_SEGMENT_ALIGN) {
-    return _mi_os_alloc_aligned(mi_good_commit_size(size), alignment, commit, large, tld); // round up size
+    return _mi_os_alloc_aligned(mi_good_commit_size(size), alignment, *commit, large, tld); // round up size
   }

   // always round size to OS page size multiple (so commit/decommit go over the entire range)
@@ -371,6 +388,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool* la
   }
   else {
     tld->region_idx = idx; // next start of search?
   }
   mi_assert_internal( p == NULL || (uintptr_t)p % alignment == 0);
@@ -378,10 +396,6 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool* la
 }

-// Allocate `size` memory. Return non NULL on success, with a given memory `id`.
-void* _mi_mem_alloc(size_t size, bool commit, bool* large, size_t* id, mi_os_tld_t* tld) {
-  return _mi_mem_alloc_aligned(size,0,commit,large,id,tld);
-}

 /* ----------------------------------------------------------------------------
   Free
@@ -424,8 +438,11 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
   // if the memory is reused soon.
   // reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large
   if (!is_large) {
-    // _mi_os_reset(p,size,stats);
-    // _mi_os_decommit(p,size,stats); // if !is_committed
+    if (mi_option_is_enabled(mi_option_segment_reset)) {
+      _mi_os_reset(p, size, stats);
+      // _mi_os_decommit(p,size,stats); // if !is_eager_committed
+    }
+    // else { _mi_os_reset(p,size,stats); }
   }
   if (!is_eager_committed) {
     // adjust commit statistics as we commit again when re-using the same slot
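
For reference, a "reset" keeps the address range committed but tells the OS that it may reclaim the physical pages, which is why it is cheaper than a decommit/recommit cycle (10x vs. 17x slowdown per the comment above). A rough POSIX sketch of what a reset amounts to; the real _mi_os_reset also handles MEM_RESET on Windows and prefers MADV_FREE where available:

  #include <stdbool.h>
  #include <stddef.h>
  #include <sys/mman.h>

  static bool os_reset_sketch(void* p, size_t size) {
    // the range stays valid and committed; the OS may discard the physical
    // pages and will zero-fill them again on the next access
    return (madvise(p, size, MADV_DONTNEED) == 0);
  }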

src/options.c

@@ -58,6 +58,7 @@ static mi_option_desc_t options[_mi_option_last] =
 #endif
   // the following options are experimental and not all combinations make sense.
+  { 0, UNINIT, MI_OPTION(lazy_commit) }, // the first N segments per thread are lazily committed
   { 1, UNINIT, MI_OPTION(eager_commit) }, // note: needs to be on when eager_region_commit is enabled
 #ifdef _WIN32 // and BSD?
   { 0, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
@@ -67,6 +68,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
   { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
+  { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free
   { 0, UNINIT, MI_OPTION(page_reset) },
   { 0, UNINIT, MI_OPTION(cache_reset) },
   { 0, UNINIT, MI_OPTION(reset_decommits) }, // note: cannot enable this if secure is on
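
Both new options default to off (0). A hedged configuration sketch using the public option accessors from mimalloc.h; the same options can also be set from the environment (e.g. MIMALLOC_LAZY_COMMIT=4, MIMALLOC_SEGMENT_RESET=1):

  #include <mimalloc.h>

  int main(void) {
    // set options before the first allocation
    mi_option_set(mi_option_lazy_commit, 4);   // first 4 segments per thread are lazily committed
    mi_option_set(mi_option_segment_reset, 1); // reset segment memory on free
    void* p = mi_malloc(1024);
    mi_free(p);
    return 0;
  }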

src/os.c

@@ -35,9 +35,9 @@ terms of the MIT license. A copy of the license can be found in the file
 On windows initializes support for aligned allocation and
 large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
 ----------------------------------------------------------- */
-bool  _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-bool  _mi_os_is_huge_reserved(void* p);
-static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit);
+bool  _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool  _mi_os_is_huge_reserved(void* p);
+void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment);

 static void* mi_align_up_ptr(void* p, size_t alignment) {
   return (void*)_mi_align_up((uintptr_t)p, alignment);
@@ -418,8 +418,8 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, boo
   if (!commit) allow_large = false;
   void* p = NULL;
-  if (allow_large) {
-    p = mi_os_alloc_from_huge_reserved(size, try_alignment, commit);
+  if (commit && allow_large) {
+    p = _mi_os_try_alloc_from_huge_reserved(size, try_alignment);
     if (p != NULL) {
       *is_large = true;
       return p;
@@ -781,12 +781,11 @@ bool _mi_os_is_huge_reserved(void* p) {
           (uint8_t*)p < (uint8_t*)mi_atomic_read_ptr(&os_huge_reserved.start) + mi_atomic_read(&os_huge_reserved.reserved));
 }

-static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit)
+void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment)
 {
   // only allow large aligned allocations
   if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
   if (try_alignment > MI_SEGMENT_SIZE) return NULL;
-  if (!commit) return NULL;
   if (mi_atomic_read_ptr(&os_huge_reserved.start)==NULL) return NULL;
   if (mi_atomic_read(&os_huge_reserved.used) >= mi_atomic_read(&os_huge_reserved.reserved)) return NULL; // already full
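
The `commit` parameter moves from the helper to its caller: memory in the reserved huge-page area is always committed (and always large), so the old internal `if (!commit) return NULL;` check becomes the `commit && allow_large` gate in mi_os_mem_alloc above. Combining the caller-side gate with the remaining preconditions, an allocation can be served from the reserved pool roughly when (a sketch):

  bool can_use_huge_reserved =
    commit && allow_large                                       // caller-side gate
    && size >= MI_SEGMENT_SIZE && (size % MI_SEGMENT_SIZE) == 0 // large, segment-multiple sizes only
    && try_alignment <= MI_SEGMENT_SIZE;                        // stronger alignment is not guaranteed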

src/segment.c

@@ -326,8 +326,8 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);

   // Try to get it from our thread local cache first
-  bool eager  = mi_option_is_enabled(mi_option_eager_commit);
-  bool commit = eager || (page_kind > MI_PAGE_MEDIUM);
+  bool lazy   = (tld->count < mi_option_get(mi_option_lazy_commit));
+  bool commit = (!lazy && mi_option_is_enabled(mi_option_eager_commit)) || (page_kind > MI_PAGE_MEDIUM);
   bool protection_still_good = false;
   mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
   if (segment != NULL) {
@@ -353,8 +353,8 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   else {
     // Allocate the segment from the OS
     size_t memid;
-    bool  mem_large = (eager && !mi_option_is_enabled(mi_option_secure)); // only allow large OS pages once we are no longer lazy
-    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, commit, &mem_large, &memid, os_tld);
+    bool  mem_large = (!lazy && !mi_option_is_enabled(mi_option_secure)); // only allow large OS pages once we are no longer lazy
+    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &memid, os_tld);
     if (segment == NULL) return NULL; // failed to allocate
     if (!commit) {
       // ensure the initial info is committed
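
Worked through, assuming MIMALLOC_LAZY_COMMIT=4 (an illustrative value): a thread's first four segments have lazy == true, so segments for small and medium pages are neither committed eagerly nor eligible for large OS pages; from the fifth segment on, eager_commit (on by default) and, outside secure mode, large OS pages apply. Segments for large and huge pages (page_kind > MI_PAGE_MEDIUM) are always committed up front:

  // tld->count | lazy  | commit (small/medium pages) | mem_large (non-secure)
  //   0..3     | true  | false (commit on demand)    | false
  //   4..      | false | eager_commit option (on)    | true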