clean up options; make secure work with eager_page_commit

Author: daan
Date:   2019-11-25 10:47:17 -08:00
Commit: 1d998af854
Parent: 1643273357

3 changed files with 33 additions and 23 deletions

include/mimalloc.h

@@ -267,19 +267,19 @@ typedef enum mi_option_e {
   // the following options are experimental
   mi_option_eager_commit,
   mi_option_eager_region_commit,
+  mi_option_eager_page_commit,
+  mi_option_reset_decommits,
   mi_option_large_os_pages,        // implies eager commit
   mi_option_reserve_huge_os_pages,
   mi_option_segment_cache,
   mi_option_page_reset,
   mi_option_segment_reset,
-  mi_option_reset_decommits,
   mi_option_eager_commit_delay,
   mi_option_reset_delay,
   mi_option_use_numa_nodes,
   mi_option_os_tag,
   mi_option_max_errors,
-  _mi_option_last,
-  mi_option_eager_page_commit = mi_option_eager_commit
+  _mi_option_last
 } mi_option_t;
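
Note: mi_option_eager_page_commit is promoted here from an alias of
mi_option_eager_commit to a first-class option, so the two can now be
toggled independently through the public option API. A minimal sketch
(mi_option_set and mi_option_is_enabled are the standard mimalloc calls;
the program itself is only illustrative):

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // eager_page_commit no longer aliases eager_commit, so changing one
  // leaves the other untouched.
  mi_option_set(mi_option_eager_page_commit, 0);
  printf("eager_commit=%d eager_page_commit=%d\n",
         mi_option_is_enabled(mi_option_eager_commit),
         mi_option_is_enabled(mi_option_eager_page_commit));
  return 0;
}

The option can also be set from the environment before startup, e.g.
MIMALLOC_EAGER_PAGE_COMMIT=0.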

src/options.c

@@ -56,21 +56,21 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(verbose) },
   // the following options are experimental and not all combinations make sense.
-  { 0, UNINIT, MI_OPTION(eager_commit) },        // note: needs to be on when eager_region_commit is enabled
-#ifdef _WIN32   // and BSD?
-  { 0, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
+  { 1, UNINIT, MI_OPTION(eager_commit) },        // commit on demand
+#if defined(_WIN32) || (MI_INTPTR_SIZE <= 4)     // and other OS's without overcommit?
+  { 0, UNINIT, MI_OPTION(eager_region_commit) },
+  { 1, UNINIT, MI_OPTION(reset_decommits) },     // reset decommits memory
 #else
   { 1, UNINIT, MI_OPTION(eager_region_commit) },
+  { 0, UNINIT, MI_OPTION(reset_decommits) },     // reset uses MADV_FREE/MADV_DONTNEED
 #endif
+  { 1, UNINIT, MI_OPTION(eager_page_commit) },
   { 0, UNINIT, MI_OPTION(large_os_pages) },      // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
   { 0, UNINIT, MI_OPTION(segment_cache) },       // cache N segments per thread
   { 0, UNINIT, MI_OPTION(page_reset) },          // reset pages on free
   { 0, UNINIT, MI_OPTION(segment_reset) },       // reset segment memory on free (needs eager commit)
-  { 0, UNINIT, MI_OPTION(reset_decommits) },     // reset decommits memory
   { 0, UNINIT, MI_OPTION(eager_commit_delay) },  // the first N segments per thread are not eagerly committed
   { 500, UNINIT, MI_OPTION(reset_delay) },       // reset delay in milli-seconds
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },      // 0 = use available numa nodes, otherwise use at most N nodes.
   { 100, UNINIT, MI_OPTION(os_tag) },            // only apple specific for now but might serve more or less related purpose
   { 16, UNINIT, MI_OPTION(max_errors) }          // maximum errors that are output
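
Note: the defaults above are platform-dependent: on Windows and on 32-bit
targets (MI_INTPTR_SIZE <= 4), regions are not committed eagerly and reset
decommits memory, whereas 64-bit systems with overcommit keep regions
committed and let reset use MADV_FREE/MADV_DONTNEED. A program can still
adjust a default before first use; a minimal sketch using the standard
mi_option_set_default call (the value 250 is just an example):

#include <mimalloc.h>

// Lower the built-in 500ms reset delay before any allocation happens.
// Unlike mi_option_set, set_default does not mark the option as
// user-initialized, so a MIMALLOC_RESET_DELAY environment setting read
// at first use can still take precedence.
void tune_reset_delay(void) {
  mi_option_set_default(mi_option_reset_delay, 250);
}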

src/segment.c

@@ -165,7 +165,7 @@ static void mi_segment_protect_range(void* p, size_t size, bool protect) {
   }
 }
 
-static void mi_segment_protect(mi_segment_t* segment, bool protect) {
+static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t* tld) {
   // add/remove guard pages
   if (MI_SECURE != 0) {
     // in secure mode, we set up a protected page in between the segment info and the page data
@@ -175,7 +175,13 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect) {
     mi_segment_protect_range((uint8_t*)segment + segment->segment_info_size - os_page_size, os_page_size, protect);
     if (MI_SECURE <= 1 || segment->capacity == 1) {
       // and protect the last (or only) page too
-      mi_segment_protect_range((uint8_t*)segment + segment->segment_size - os_page_size, os_page_size, protect);
+      mi_assert_internal(segment->page_kind >= MI_PAGE_LARGE);
+      uint8_t* start = (uint8_t*)segment + segment->segment_size - os_page_size;
+      if (protect && !mi_option_is_enabled(mi_option_eager_page_commit)) {
+        // ensure secure page is committed
+        _mi_mem_commit(start, os_page_size, NULL, tld);
+      }
+      mi_segment_protect_range(start, os_page_size, protect);
     }
     else {
       // or protect every page
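
Note: the new _mi_mem_commit call covers the case where eager_page_commit
is off and the segment's last OS page may not be committed yet; protecting
uncommitted memory fails on some systems (notably, VirtualProtect on
Windows requires committed pages), so the guard page is committed first.
A self-contained POSIX analog of the same guard-page idea (illustrative
only, not mimalloc code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  // Reserve a small region, touch (commit) its last page, then make that
  // page inaccessible so a stray write past the end faults immediately.
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t size = 4 * page;
  uint8_t* seg = (uint8_t*)mmap(NULL, size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (seg == MAP_FAILED) return 1;

  uint8_t* guard = seg + size - page;
  memset(guard, 0, page);  // commit by touching; mimalloc uses _mi_mem_commit
  if (mprotect(guard, page, PROT_NONE) != 0) return 1;  // install guard page

  printf("guard page at %p\n", (void*)guard);
  munmap(seg, size);
  return 0;
}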
@@ -323,19 +329,23 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   mi_segments_track_size(-((long)segment_size),tld);
   if (MI_SECURE != 0) {
     mi_assert_internal(!segment->mem_is_fixed);
-    mi_segment_protect(segment, false); // ensure no more guard pages are set
+    mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
   }
-  bool fully_committed = true;
   bool any_reset = false;
+  bool fully_committed = true;
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
     if (!page->is_committed) { fully_committed = false; }
-    if (page->is_reset) {
-      any_reset = true;
-      if (mi_option_is_enabled(mi_option_reset_decommits)) { fully_committed = false; }
-    }
+    if (page->is_reset) { any_reset = true; }
   }
+  if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
+    fully_committed = false;
+  }
+  if (segment->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) {
+    fully_committed = false;
+  }
   _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
 }
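
Note: _mi_mem_free needs to know whether the segment is still fully
committed; with the new checks, a segment no longer counts as fully
committed if any reset page will be decommitted on reset, or if it holds
large/huge pages whose commit was deferred. The loop distills to the
following predicate (a simplified sketch; the struct and parameters are
invented for illustration):

#include <stdbool.h>
#include <stddef.h>

typedef struct { bool is_committed; bool is_reset; } page_info_t;

// Mirrors the fully_committed/any_reset computation in mi_segment_os_free.
bool segment_fully_committed(const page_info_t* pages, size_t count,
                             bool reset_decommits, bool large_kind,
                             bool eager_page_commit) {
  bool fully_committed = true;
  bool any_reset = false;
  for (size_t i = 0; i < count; i++) {
    if (!pages[i].is_committed) fully_committed = false;
    if (pages[i].is_reset) any_reset = true;
  }
  if (any_reset && reset_decommits) fully_committed = false;     // reset will decommit
  if (large_kind && !eager_page_commit) fully_committed = false; // commit was deferred
  return fully_committed;
}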
@@ -442,7 +452,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
 {
   if (MI_SECURE!=0) {
     mi_assert_internal(!segment->mem_is_fixed);
-    mi_segment_protect(segment, false); // reset protection if the page kind differs
+    mi_segment_protect(segment, false, tld->os); // reset protection if the page kind differs
   }
   // different page kinds; unreset any reset pages, and unprotect
   // TODO: optimize cache pop to return fitting pages if possible?
@@ -514,7 +524,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   // _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
 
   // set protection
-  mi_segment_protect(segment, true);
+  mi_segment_protect(segment, true, tld->os);
 
   //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
   return segment;
@@ -563,8 +573,8 @@ static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t*
     if (!page->is_committed) {
       mi_assert_internal(!segment->mem_is_fixed);
       mi_assert_internal(!page->is_reset);
+      page->is_committed = true;
       if (segment->page_kind < MI_PAGE_LARGE || mi_option_is_enabled(mi_option_eager_page_commit)) {
-        page->is_committed = true;
         size_t psize;
         uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
         bool is_zero = false;
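
Note: the page is now always marked is_committed when claimed, while the
actual OS commit is skipped for large and huge page kinds unless
eager_page_commit is enabled (the memory is committed later, when the page
is really used). The decision reduces to (simplified sketch, enum invented
for illustration):

#include <stdbool.h>

typedef enum { PAGE_SMALL, PAGE_MEDIUM, PAGE_LARGE, PAGE_HUGE } page_kind_t;

// Mirrors the condition in mi_segment_find_free above.
bool commit_on_claim(page_kind_t kind, bool eager_page_commit) {
  return (kind < PAGE_LARGE) || eager_page_commit;
}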
@@ -594,7 +604,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
 static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
   mi_assert_internal(page->segment_in_use);
   mi_assert_internal(mi_page_all_free(page));
-  mi_assert_internal(segment->page_kind >= MI_PAGE_LARGE || page->is_committed);
+  mi_assert_internal(page->is_committed);
   size_t inuse = page->capacity * page->block_size;
   _mi_stat_decrease(&tld->stats->page_committed, inuse);
   _mi_stat_decrease(&tld->stats->pages, 1);
@@ -725,7 +735,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
     mi_page_t* page = &segment->pages[i];
     if (page->segment_in_use) {
       mi_assert_internal(!page->is_reset);
-      mi_assert_internal(segment->page_kind >= MI_PAGE_LARGE || page->is_committed);
+      mi_assert_internal(page->is_committed);
       segment->abandoned--;
       mi_assert(page->next == NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);