merge from dev

Daan Leijen 2022-04-09 15:07:07 -07:00
commit 7e492f4420
6 changed files with 32 additions and 97 deletions


@@ -394,69 +394,13 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
 }
 }
 
-// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
-#define MI_SEGMENT_CACHE_FRACTION (8)
-
-// note: returned segment may be partially reset
-static mi_segment_t* mi_segment_cache_pop(size_t segment_slices, mi_segments_tld_t* tld) {
-  if (segment_slices != 0 && segment_slices != MI_SLICES_PER_SEGMENT) return NULL;
-  mi_segment_t* segment = tld->cache;
-  if (segment == NULL) return NULL;
-  tld->cache_count--;
-  tld->cache = segment->next;
-  segment->next = NULL;
-  mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
-  _mi_stat_decrease(&tld->stats->segments_cache, 1);
-  return segment;
-}
-
-static bool mi_segment_cache_full(mi_segments_tld_t* tld)
-{
-  // if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
-  size_t max_cache = mi_option_get(mi_option_segment_cache);
-  if (tld->cache_count < max_cache
-       && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
-     ) {
-    return false;
-  }
-  // take the opportunity to reduce the segment cache if it is too large (now)
-  // TODO: this never happens as we check against peak usage, should we use current usage instead?
-  while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
-    mi_segment_t* segment = mi_segment_cache_pop(0,tld);
-    mi_assert_internal(segment != NULL);
-    if (segment != NULL) mi_segment_os_free(segment, tld);
-  }
-  return true;
-}
-
-static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
-  mi_assert_internal(segment->next == NULL);
-  if (segment->segment_slices != MI_SLICES_PER_SEGMENT || mi_segment_cache_full(tld)) {
-    return false;
-  }
-  // mi_segment_delayed_decommit(segment, true, tld->stats);
-  mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
-  mi_assert_internal(segment->next == NULL);
-  segment->next = tld->cache;
-  tld->cache = segment;
-  tld->cache_count++;
-  _mi_stat_increase(&tld->stats->segments_cache,1);
-  return true;
-}
-
-// called by threads that are terminating to free cached segments
+// called by threads that are terminating
 void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
-  mi_segment_t* segment;
-  while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
-    mi_segment_os_free(segment, tld);
-  }
-  mi_assert_internal(tld->cache_count == 0);
-  mi_assert_internal(tld->cache == NULL);
+  MI_UNUSED(tld);
+  // nothing to do
 }
 
 /* -----------------------------------------------------------
    Span management
 ----------------------------------------------------------- */
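For readers unfamiliar with the mechanism being deleted above: the cache was a thread-local LIFO free list of fully-sized segments, bounded both by the mi_option_segment_cache option and by a fraction (1/8) of the thread's peak segment usage. A minimal standalone sketch of that pattern, with hypothetical names rather than mimalloc's internal API:

#include <stddef.h>
#include <stdbool.h>

// Hypothetical stand-in for the removed per-thread segment cache:
// a LIFO list bounded by a fixed option and by 1/8 of peak usage.
typedef struct seg_s { struct seg_s* next; } seg_t;

typedef struct seg_cache_s {
  seg_t* head;
  size_t count;       // segments currently cached
  size_t peak_count;  // peak number of live segments in this thread
  size_t max_option;  // user-configured upper bound
} seg_cache_t;

static seg_t* seg_cache_pop(seg_cache_t* c) {
  seg_t* s = c->head;
  if (s == NULL) return NULL;
  c->head = s->next;
  s->next = NULL;
  c->count--;
  return s;
}

static bool seg_cache_push(seg_cache_t* c, seg_t* s) {
  // keep at most 1/8 of the peak count, but always allow one element
  size_t limit = 1 + (c->peak_count / 8);
  if (c->count >= c->max_option || c->count >= limit) {
    return false;  // caller returns the segment to the OS instead
  }
  s->next = c->head;
  c->head = s;
  c->count++;
  return true;
}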
@@ -927,7 +871,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
   size_t guard_slices = 0;
   if (MI_SECURE>0) {
     // in secure mode, we set up a protected page in between the segment info
-    // and the page data
+    // and the page data, and at the end of the segment.
     size_t os_pagesize = _mi_os_page_size();
     mi_assert_internal(mi_segment_info_size(segment) - os_pagesize >= pre_size);
     _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
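The guard-page technique referenced in the updated comment can be illustrated in isolation. A minimal POSIX sketch, assuming mmap/mprotect (mimalloc itself goes through its own _mi_os_protect wrapper; the helper name here is hypothetical):

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

// Map a region with one extra page at the end and revoke all access
// to that page, so overruns past the end fault immediately.
static void* alloc_with_tail_guard(size_t size) {
  size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
  size_t total = ((size + pagesize - 1) / pagesize + 1) * pagesize;
  void* p = mmap(NULL, total, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return NULL;
  mprotect((char*)p + (total - pagesize), pagesize, PROT_NONE);
  return p;
}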
@@ -969,6 +913,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
 static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
+  MI_UNUSED(force);
   mi_assert_internal(segment != NULL);
   mi_assert_internal(segment->next == NULL);
   mi_assert_internal(segment->used == 0);
@@ -992,13 +937,8 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
   // stats
   _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
 
-  if (!force && mi_segment_cache_push(segment, tld)) {
-    // it is put in our cache
-  }
-  else {
-    // otherwise return it to the OS
-    mi_segment_os_free(segment, tld);
-  }
+  // return it to the OS
+  mi_segment_os_free(segment, tld);
 }
@@ -1488,15 +1428,10 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
 
-  // 1. try to get a segment from our cache
-  mi_segment_t* segment = mi_segment_cache_pop(MI_SEGMENT_SIZE, tld);
-  if (segment != NULL) {
-    mi_segment_init(segment, 0, tld, os_tld, NULL);
-    return segment;
-  }
-
-  // 2. try to reclaim an abandoned segment
+  // 1. try to reclaim an abandoned segment
   bool reclaimed;
-  segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
+  mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
   if (reclaimed) {
     // reclaimed the right page right into the heap
     mi_assert_internal(segment != NULL);
@@ -1506,7 +1441,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
     // reclaimed a segment with a large enough empty span in it
     return segment;
   }
-  // 3. otherwise allocate a fresh segment
+  // 2. otherwise allocate a fresh segment
   return mi_segment_alloc(0, tld, os_tld, NULL);
 }
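With the cache step gone, the allocation path reduces to reclaim-then-alloc. A compilable sketch of that resulting two-step flow, using hypothetical stub names in place of the mimalloc internals (the real functions take the tld/os_tld plumbing shown in the diff):

#include <stdbool.h>
#include <stddef.h>

// Stub types and helpers so the flow compiles standalone; every name
// here is a hypothetical stand-in, not mimalloc's API.
typedef struct heap_s { int unused; } heap_t;
typedef struct segment_s { int unused; } segment_t;

static segment_t* try_reclaim(heap_t* heap, size_t slices,
                              size_t block_size, bool* reclaimed) {
  (void)heap; (void)slices; (void)block_size;
  *reclaimed = false;
  return NULL;  // pretend no abandoned segment was found
}

static segment_t* alloc_fresh(size_t slices) {
  (void)slices;
  return NULL;  // stand-in for a fresh OS allocation
}

// The two-step flow left after this commit: reclaim, else allocate fresh.
segment_t* reclaim_or_alloc(heap_t* heap, size_t slices, size_t block_size) {
  bool reclaimed;
  segment_t* s = try_reclaim(heap, slices, block_size, &reclaimed);
  if (s != NULL) return s;     // reused an abandoned segment
  return alloc_fresh(slices);  // otherwise a fresh segment from the OS
}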