rename lazy to eager_commit_delay

This commit is contained in:
daan 2019-08-28 12:09:23 -07:00
parent 9af51506a6
commit d381fcd9fa
3 changed files with 7 additions and 7 deletions

View file

@@ -225,16 +225,16 @@ typedef enum mi_option_e {
   mi_option_verbose,
   // the following options are experimental
   mi_option_secure,
-  mi_option_lazy_commit,
   mi_option_eager_commit,
   mi_option_eager_region_commit,
   mi_option_large_os_pages, // implies eager commit
   mi_option_reserve_huge_os_pages,
   mi_option_segment_cache,
-  mi_option_segment_reset,
   mi_option_page_reset,
   mi_option_cache_reset,
   mi_option_reset_decommits,
+  mi_option_eager_commit_delay,
+  mi_option_segment_reset,
   mi_option_os_tag,
   _mi_option_last
 } mi_option_t;

View file

@@ -58,7 +58,6 @@ static mi_option_desc_t options[_mi_option_last] =
   #endif
   // the following options are experimental and not all combinations make sense.
-  { 0, UNINIT, MI_OPTION(lazy_commit) }, // the first N segments per thread are lazily committed
   { 1, UNINIT, MI_OPTION(eager_commit) }, // note: needs to be on when eager_region_commit is enabled
   #ifdef _WIN32 // and BSD?
   { 0, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
@@ -68,10 +67,11 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
   { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
-  { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free
   { 0, UNINIT, MI_OPTION(page_reset) },
   { 0, UNINIT, MI_OPTION(cache_reset) },
   { 0, UNINIT, MI_OPTION(reset_decommits) }, // note: cannot enable this if secure is on
+  { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
+  { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free
   { 100, UNINIT, MI_OPTION(os_tag) } // only apple specific for now but might serve more or less related purpose
 };

View file

@@ -326,8 +326,8 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);
   // Try to get it from our thread local cache first
-  bool lazy = (tld->count < mi_option_get(mi_option_lazy_commit));
-  bool commit = (!lazy && mi_option_is_enabled(mi_option_eager_commit)) || (page_kind > MI_PAGE_MEDIUM);
+  bool eager = mi_option_is_enabled(mi_option_eager_commit) && (tld->count < mi_option_get(mi_option_eager_commit_delay));
+  bool commit = eager || (page_kind > MI_PAGE_MEDIUM);
   bool protection_still_good = false;
   mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
   if (segment != NULL) {
@@ -353,7 +353,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   else {
     // Allocate the segment from the OS
     size_t memid;
-    bool mem_large = (!lazy && !mi_option_is_enabled(mi_option_secure)); // only allow large OS pages once we are no longer lazy
+    bool mem_large = (eager && !mi_option_is_enabled(mi_option_secure)); // only allow large OS pages once we are no longer lazy
     segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &memid, os_tld);
     if (segment == NULL) return NULL; // failed to allocate
     if (!commit) {