ensure huge pages are part of the segment cache to avoid mmap calls

This commit is contained in:
daan 2019-06-23 21:37:43 -07:00
parent e5e2b789ba
commit 43568fa422
3 changed files with 75 additions and 50 deletions

View file

@ -373,7 +373,7 @@ typedef struct mi_segments_tld_s {
size_t count; // current number of segments size_t count; // current number of segments
size_t peak; // peak number of segments size_t peak; // peak number of segments
size_t cache_count; // number of segments in the cache size_t cache_count; // number of segments in the cache
mi_segment_t* cache; // small cache of segments (to avoid repeated mmap calls) mi_segment_queue_t cache; // (small) cache of segments for small and large pages (to avoid repeated mmap calls)
mi_stats_t* stats; // points to tld stats mi_stats_t* stats; // points to tld stats
} mi_segments_tld_t; } mi_segments_tld_t;

View file

@ -90,7 +90,7 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
static mi_tld_t tld_main = { static mi_tld_t tld_main = {
0, 0,
&_mi_heap_main, &_mi_heap_main,
{ { NULL, NULL }, 0, 0, 0, NULL, tld_main_stats }, // segments { { NULL, NULL }, 0, 0, 0, {NULL,NULL}, tld_main_stats }, // segments
{ 0, NULL, NULL, 0, tld_main_stats }, // os { 0, NULL, NULL, 0, tld_main_stats }, // os
{ MI_STATS_NULL } // stats { MI_STATS_NULL } // stats
}; };

View file

@ -112,6 +112,18 @@ static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment)
} }
} }
// Insert `segment` into `queue` immediately before `elem`.
// If `elem` is NULL, `segment` is appended at the end of the queue.
// Precondition: `elem` (when non-NULL) is already in the queue and
// `segment` is not; both are checked with expensive assertions only.
static void mi_segment_queue_insert_before(mi_segment_queue_t* queue, mi_segment_t* elem, mi_segment_t* segment) {
mi_assert_expensive(elem==NULL || mi_segment_queue_contains(queue, elem));
mi_assert_expensive(segment != NULL && !mi_segment_queue_contains(queue, segment));
segment->prev = (elem == NULL ? queue->last : elem->prev);  // link backwards: predecessor is elem's prev, or the old tail when appending
if (segment->prev != NULL) segment->prev->next = segment;
else queue->first = segment;  // no predecessor: segment becomes the new head
segment->next = elem;
if (segment->next != NULL) segment->next->prev = segment;
else queue->last = segment;   // no successor: segment becomes the new tail
}
// Start of the page available memory // Start of the page available memory
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
@ -194,19 +206,34 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
_mi_os_free(segment, segment_size,tld->stats); _mi_os_free(segment, segment_size,tld->stats);
} }
// The segment cache is limited to be at most 1/6 (MI_SEGMENT_CACHE_FRACTION)
// of the peak number of segments in use, and no more than 32 in total.
#define MI_SEGMENT_CACHE_MAX      (32)
#define MI_SEGMENT_CACHE_FRACTION (6)
// Get a segment of at least `required` size from the cache.
// Scans the cache queue from the front (or from the back when `reverse`),
// removes the first segment whose `segment_size` is large enough, and
// returns it; returns NULL when no cached segment qualifies.
static mi_segment_t* _mi_segment_cache_findx(mi_segments_tld_t* tld, size_t required, bool reverse) {
  mi_segment_t* segment = (reverse ? tld->cache.last : tld->cache.first);
  while (segment != NULL) {
    if (segment->segment_size >= required) {
      tld->cache_count--;
      mi_segment_queue_remove(&tld->cache, segment);
      // TODO: unmap excess memory if larger than N%
      return segment;
    }
    segment = (reverse ? segment->prev : segment->next);
  }
  return NULL;
}

// Best-fit lookup: the cache is kept ordered by increasing segment size
// (see mi_segment_cache_insert), so a forward scan yields the smallest
// segment that satisfies `required`.
static mi_segment_t* mi_segment_cache_find(mi_segments_tld_t* tld, size_t required) {
  return _mi_segment_cache_findx(tld, required, false);
}

// Evict a segment to shrink the cache; taking from the back removes the
// largest cached segment (the queue is size-ordered).
static mi_segment_t* mi_segment_cache_evict(mi_segments_tld_t* tld) {
  // TODO: random eviction instead?
  return _mi_segment_cache_findx(tld, 0, true /* from the end */);
}
// Return true when the segment cache is at capacity, trimming it down first
// if it grew beyond its bound. The cache is bounded both by an absolute count
// (MI_SEGMENT_CACHE_MAX) and by a fraction (1/MI_SEGMENT_CACHE_FRACTION) of
// the peak number of segments in use.
// NOTE(review): the opening `if` condition falls outside the visible diff
// hunk; it is reconstructed from the visible continuation line — verify
// against the upstream source.
static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
  if (tld->cache_count < MI_SEGMENT_CACHE_MAX &&
      tld->cache_count*MI_SEGMENT_CACHE_FRACTION < mi_segments_peak(tld)) return false;
  // take the opportunity to reduce the segment cache if it is too large (now)
  while (tld->cache_count*MI_SEGMENT_CACHE_FRACTION >= mi_segments_peak(tld) + 1) {
    mi_segment_t* segment = mi_segment_cache_evict(tld);
    mi_assert_internal(segment != NULL);
    if (segment != NULL) mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
  return true;
}
// Try to insert `segment` into the thread-local segment cache.
// Returns false when the cache is full (the caller then frees to the OS).
// The segment must be fully unlinked (not in any queue) on entry.
// Unlike the former `mi_segment_cache_push`, segments of any size are
// accepted and kept ordered by size so lookups can best-fit.
static bool mi_segment_cache_insert(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(segment->next==NULL && segment->prev==NULL);
  mi_assert_internal(!mi_segment_is_in_free_queue(segment,tld));
  mi_assert_expensive(!mi_segment_queue_contains(&tld->cache, segment));
  if (mi_segment_cache_full(tld)) return false;
  // optionally reset the page memory (past the segment info) while cached
  if (mi_option_is_enabled(mi_option_cache_reset) && !mi_option_is_enabled(mi_option_page_reset)) {
    _mi_os_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size);
  }
  // insert ordered by increasing segment size
  mi_segment_t* seg = tld->cache.first;
  while (seg != NULL && seg->segment_size < segment->segment_size) {
    seg = seg->next;
  }
  mi_segment_queue_insert_before( &tld->cache, seg, segment );
  tld->cache_count++;
  return true;
}
@ -239,11 +269,11 @@ static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld)
// called by ending threads to free cached segments
// Drains the whole cache (find with required==0 matches any segment) and
// returns each segment to the OS; asserts the cache is empty afterwards.
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
  mi_segment_t* segment;
  while ((segment = mi_segment_cache_find(tld,0)) != NULL) {
    mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
  mi_assert_internal(tld->cache_count == 0);
  mi_assert_internal(mi_segment_queue_is_empty(&tld->cache));
}
/* ----------------------------------------------------------- /* -----------------------------------------------------------
@ -278,11 +308,9 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
mi_segment_t* segment = NULL; mi_segment_t* segment = NULL;
// try to get it from our caches // try to get it from our caches
if (segment_size == MI_SEGMENT_SIZE) { segment = mi_segment_cache_find(tld,segment_size);
segment = mi_segment_cache_pop(tld); if (segment != NULL && mi_option_is_enabled(mi_option_secure) && (segment->page_kind != page_kind || segment->segment_size != segment_size)) {
if (segment != NULL && mi_option_is_enabled(mi_option_secure) && segment->page_kind != page_kind) { _mi_os_unprotect(segment,segment->segment_size);
_mi_os_unprotect(segment,segment->segment_size);
}
} }
// and otherwise allocate it from the OS // and otherwise allocate it from the OS
@ -366,15 +394,12 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
} }
} }
if (segment->page_kind == MI_PAGE_HUGE) { if (!force && mi_segment_cache_insert(segment, tld)) {
mi_segment_os_free(segment, segment->segment_size, tld);
}
else if (!force && mi_segment_cache_push(segment, tld)) {
// it is put in our cache // it is put in our cache
} }
else { else {
// otherwise return it to the OS // otherwise return it to the OS
mi_segment_os_free(segment, MI_SEGMENT_SIZE,tld); mi_segment_os_free(segment, segment->segment_size, tld);
} }
} }