merge from dev-exp

daan 2019-11-11 14:51:16 -08:00
commit 741a80256d
5 changed files with 28 additions and 55 deletions

include/mimalloc.h

@@ -271,10 +271,8 @@ typedef enum mi_option_e {
   mi_option_reserve_huge_os_pages,
   mi_option_segment_cache,
   mi_option_page_reset,
-  mi_option_cache_reset,
-  mi_option_reset_decommits,
-  mi_option_eager_commit_delay,
   mi_option_segment_reset,
+  mi_option_eager_commit_delay,
   mi_option_reset_delay,
   mi_option_os_tag,
   mi_option_max_numa_node,
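
These options can be changed programmatically before the allocator is first used, or through environment variables named MIMALLOC_ plus the upper-cased option name (e.g. MIMALLOC_EAGER_COMMIT_DELAY). A minimal usage sketch, assuming the public mi_option_set API from mimalloc.h:

#include <mimalloc.h>

int main(void) {
  // set before the first allocation; equivalent to the environment
  // variables MIMALLOC_SEGMENT_RESET=1 and MIMALLOC_EAGER_COMMIT_DELAY=2
  mi_option_set(mi_option_segment_reset, 1);
  mi_option_set(mi_option_eager_commit_delay, 2);

  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}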

src/memory.c

@@ -213,14 +213,12 @@ static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, boo
     if (rnode >= 0 && rnode != numa_node) return false;
   }
-  // note: we also skip if commit is false and the region is committed,
-  // that is a bit strong but prevents allocation of eager-delayed segments in an eagerly committed region
+  // check allow-large
   bool is_large;
   bool is_committed;
   mi_region_info_read(info, &is_large, &is_committed);
-  if (!commit && is_committed) return false;
   if (!allow_large && is_large) return false;
   return true;
 }
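
mi_region_info_read above unpacks a start pointer and two flags from a single word: because region starts are MI_SEGMENT_SIZE-aligned, the low pointer bits are free to carry the flags, which in turn lets the whole info word be read with one atomic load. A minimal sketch of such a packing (the exact bit layout here is an assumption for illustration, inferred from the calls in the diff):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t mi_region_info_t;

// pack an aligned start pointer together with two flag bits in the low bits
static inline mi_region_info_t mi_region_info_create(void* start, bool is_large, bool is_committed) {
  return ((uintptr_t)start | ((uintptr_t)(is_large ? 1 : 0) << 1) | (uintptr_t)(is_committed ? 1 : 0));
}

static inline void* mi_region_info_read(mi_region_info_t info, bool* is_large, bool* is_committed) {
  if (is_large != NULL)     *is_large     = ((info & 0x02) != 0);
  if (is_committed != NULL) *is_committed = ((info & 0x01) != 0);
  return (void*)(info & ~((uintptr_t)0x03));  // mask out the flag bits
}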
@@ -360,8 +358,8 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_os_tld_t* tld) {
   const size_t blocks = mi_region_block_count(size);
   mi_region_info_t info = mi_atomic_read(&region->info);
   bool is_large;
-  bool is_eager_committed;
-  void* start = mi_region_info_read(info,&is_large,&is_eager_committed);
+  bool is_committed;
+  void* start = mi_region_info_read(info, &is_large, &is_committed);
   mi_assert_internal(start != NULL);
   void* blocks_start = (uint8_t*)start + (bit_idx * MI_SEGMENT_SIZE);
   mi_assert_internal(blocks_start == p); // not a pointer in our area?
@@ -372,18 +370,13 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_os_tld_t* tld) {
   // TODO: implement delayed decommit/reset as these calls are too expensive
   // if the memory is reused soon.
   // reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large
-  if (!is_large) {
-    if (mi_option_is_enabled(mi_option_segment_reset)) {
-      if (!is_eager_committed &&  // cannot reset large pages
-          (mi_option_is_enabled(mi_option_eager_commit) ||  // cannot reset halfway committed segments, use `option_page_reset` instead
-           mi_option_is_enabled(mi_option_reset_decommits)))  // but we can decommit halfway committed segments
-      {
-        _mi_os_reset(p, size, tld->stats);  // cannot use delay reset! (due to concurrent allocation in the same region)
-        //_mi_os_decommit(p, size, stats);  // todo: and clear dirty bits?
-      }
-    }
-  }
-  if (!is_eager_committed) {
+  if (!is_large &&
+      mi_option_is_enabled(mi_option_segment_reset) &&
+      mi_option_is_enabled(mi_option_eager_commit))  // cannot reset halfway committed segments, use `option_page_reset` instead
+  {
+    _mi_os_reset(p, size, tld->stats);
+  }
+  if (!is_committed) {
     // adjust commit statistics as we commit again when re-using the same slot
     _mi_stat_decrease(&tld->stats->committed, mi_good_commit_size(size));
   }

src/options.c

@@ -65,11 +65,9 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(large_os_pages) },        // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
   { 0, UNINIT, MI_OPTION(segment_cache) },         // cache N segments per thread
-  { 0, UNINIT, MI_OPTION(page_reset) },
-  { 0, UNINIT, MI_OPTION(cache_reset) },
-  { 0, UNINIT, MI_OPTION(reset_decommits) },       // note: cannot enable this if secure is on
-  { 0, UNINIT, MI_OPTION(eager_commit_delay) },    // the first N segments per thread are not eagerly committed
+  { 0, UNINIT, MI_OPTION(page_reset) },            // reset pages on free
   { 0, UNINIT, MI_OPTION(segment_reset) },         // reset segment memory on free (needs eager commit)
+  { 0, UNINIT, MI_OPTION(eager_commit_delay) },    // the first N segments per thread are not eagerly committed
   { 500, UNINIT, MI_OPTION(reset_delay) },         // reset delay in milli-seconds
   { 100, UNINIT, MI_OPTION(os_tag) },              // only apple specific for now but might serve more or less related purpose
   { 256, UNINIT, MI_OPTION(max_numa_node) },       // maximum allowed numa node

src/os.c

@@ -646,10 +646,6 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
   return mi_os_commitx(addr, size, false, true /* conservative? */, &is_zero, stats);
 }

-bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
-  return mi_os_commitx(addr, size, true, true /* conservative? */, is_zero, stats);
-}
-
 // Signal to the OS that the address range is no longer in use
 // but may be used later again. This will release physical memory
@@ -708,23 +704,13 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
 bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
-  if (mi_option_is_enabled(mi_option_reset_decommits)) {
-    return _mi_os_decommit(addr,size,stats);
-  }
-  else {
-    return mi_os_resetx(addr, size, true, stats);
-  }
+  return mi_os_resetx(addr, size, true, stats);
 }

 bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
-  if (mi_option_is_enabled(mi_option_reset_decommits)) {
-    return _mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!)
-  }
-  else {
-    *is_zero = false;
-    return mi_os_resetx(addr, size, false, stats);
-  }
+  *is_zero = false;
+  return mi_os_resetx(addr, size, false, stats);
 }

 // Protect a region in memory to be not accessible.
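
With reset_decommits gone, _mi_os_reset always goes through mi_os_resetx, which tells the OS it may reclaim the physical pages while the range stays committed. A minimal sketch of the underlying POSIX mechanism (the real mi_os_resetx also handles Windows MEM_RESET, page alignment, and statistics; os_reset_sketch is a hypothetical name):

#include <stdbool.h>
#include <sys/mman.h>

// advise the kernel that [addr, addr+size) may be reclaimed; the mapping
// stays valid and committed, so the memory can be reused without re-commit
static bool os_reset_sketch(void* addr, size_t size) {
#if defined(MADV_FREE)
  return (madvise(addr, size, MADV_FREE) == 0);      // lazy: reclaimed under memory pressure
#else
  return (madvise(addr, size, MADV_DONTNEED) == 0);  // eager: pages dropped immediately
#endif
}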

src/segment.c

@@ -280,9 +280,6 @@ static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld)
     return false;
   }
   mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
-  if (!segment->mem_is_fixed && mi_option_is_enabled(mi_option_cache_reset)) {
-    _mi_mem_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->os);
-  }
   segment->next = tld->cache;
   tld->cache = segment;
   tld->cache_count++;
@@ -327,12 +324,14 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   mi_assert_internal(segment_size >= required);
   size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);

-  // Try to get it from our thread local cache first
-  bool eager_delay = (tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
-  bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
-  bool commit = eager || (page_kind > MI_PAGE_MEDIUM);
+  // Initialize parameters
+  bool eager_delayed = (page_kind <= MI_PAGE_MEDIUM && tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
+  bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
+  bool commit = eager || (page_kind >= MI_PAGE_LARGE);
   bool protection_still_good = false;
   bool is_zero = false;
+
+  // Try to get it from our thread local cache first
   mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
   if (segment != NULL) {
     if (MI_SECURE!=0) {
@@ -349,8 +348,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
       _mi_mem_commit(segment, segment->segment_size, &is_zero, tld->os);
       segment->mem_is_committed = true;
     }
-    if (!segment->mem_is_fixed &&
-        (mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset))) {
+    if (!segment->mem_is_fixed && mi_option_is_enabled(mi_option_page_reset)) {
       bool reset_zero = false;
       _mi_mem_unreset(segment, segment->segment_size, &reset_zero, tld->os);
       if (reset_zero) is_zero = true;
@@ -359,7 +357,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   else {
     // Allocate the segment from the OS
     size_t memid;
-    bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
+    bool mem_large = (!eager_delayed && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
     segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_zero, &memid, os_tld);
     if (segment == NULL) return NULL; // failed to allocate
     if (!commit) {
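
The eager_delayed logic relies on the usual split between reserving address space and committing memory: a delayed segment only reserves its range up front and commits page by page on demand. A minimal sketch of that split on POSIX (an assumption about what _mi_mem_commit ultimately does; the sketch names are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

// reserve address space only: nothing is accessible (or charged) yet
static void* os_reserve_sketch(size_t size) {
  void* p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED ? NULL : p);
}

// commit: make the range accessible so it can back heap pages
static bool os_commit_sketch(void* p, size_t size) {
  return (mprotect(p, size, PROT_READ | PROT_WRITE) == 0);
}

// decommit: drop access again while keeping the range reserved
static bool os_decommit_sketch(void* p, size_t size) {
  return (mprotect(p, size, PROT_NONE) == 0);
}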