mirror of https://github.com/microsoft/mimalloc.git
synced 2025-07-07 11:58:41 +03:00
Initial commit of separate memory region layer and improved large OS pages support, see 'memory.c'
This commit is contained in:
parent d6901558cd
commit 06bcea1761
18 changed files with 693 additions and 297 deletions
src/segment.c (218 lines changed)
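This diff replaces the direct `_mi_os_*` calls in `segment.c` with calls into the new region layer in `memory.c`. A sketch of that interface as it is used by the call sites below; the signatures are reconstructed from usage in this file rather than copied from the actual header, and `memid` identifies the region a segment was allocated from:

```c
// Reconstructed (hypothetical) declarations of the new memory region layer,
// inferred from the call sites in this diff; see 'memory.c' for the real ones.
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t* memid, mi_os_tld_t* os_tld);
void  _mi_mem_free(void* p, size_t size, size_t memid, mi_stats_t* stats);
void  _mi_mem_protect(void* addr, size_t size);    // install guard pages (secure mode)
void  _mi_mem_unprotect(void* addr, size_t size);  // remove guard pages again
void  _mi_mem_reset(void* addr, size_t size, mi_stats_t* stats); // hint: contents no longer needed
```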
@@ -108,19 +108,6 @@ static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment)
   }
 }
 
-static void mi_segment_queue_insert_before(mi_segment_queue_t* queue, mi_segment_t* elem, mi_segment_t* segment) {
-  mi_assert_expensive(elem==NULL || mi_segment_queue_contains(queue, elem));
-  mi_assert_expensive(segment != NULL && !mi_segment_queue_contains(queue, segment));
-
-  segment->prev = (elem == NULL ? queue->last : elem->prev);
-  if (segment->prev != NULL) segment->prev->next = segment;
-                        else queue->first = segment;
-  segment->next = elem;
-  if (segment->next != NULL) segment->next->prev = segment;
-                        else queue->last = segment;
-}
-
-
 // Start of the page available memory
 uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
 {
@@ -176,17 +163,17 @@ static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size
 }
 
 
-/* -----------------------------------------------------------
+/* ----------------------------------------------------------------------------
   Segment caches
-  We keep a small segment cache per thread to avoid repeated allocation
-  and free in the OS if a program allocates memory and then frees
-  all again repeatedly. (We tried a one-element cache but that
-  proves to be too small for certain workloads).
------------------------------------------------------------ */
+  We keep a small segment cache per thread to increase local
+  reuse and avoid setting/clearing guard pages in secure mode.
+------------------------------------------------------------------------------- */
 
 static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
   if (segment_size>=0) mi_stat_increase(tld->stats->segments,1);
                   else mi_stat_decrease(tld->stats->segments,1);
   tld->count += (segment_size >= 0 ? 1 : -1);
   if (tld->count > tld->peak_count) tld->peak_count = tld->count;
   tld->current_size += segment_size;
   if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
 }
 
@@ -194,123 +181,87 @@ static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
 
 static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
   mi_segments_track_size(-((long)segment_size),tld);
-  _mi_os_free(segment, segment_size,tld->stats);
+  if (mi_option_is_enabled(mi_option_secure)) {
+    _mi_mem_unprotect(segment, segment->segment_size); // ensure no more guard pages are set
+  }
+  _mi_mem_free(segment, segment_size, segment->memid, tld->stats);
 }
 
-// The segment cache is limited to be at most 1/8 of the peak size
-// in use (and no more than 32)
-#define MI_SEGMENT_CACHE_MAX (32)
+// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
+// and no more than 4.
+#define MI_SEGMENT_CACHE_MAX (4)
 #define MI_SEGMENT_CACHE_FRACTION (8)
 
 
-// Get a segment of at least `required` size.
-// If `required == MI_SEGMENT_SIZE` the `segment_size` will match exactly
-static mi_segment_t* _mi_segment_cache_findx(mi_segments_tld_t* tld, size_t required, bool reverse) {
-  mi_assert_internal(required % _mi_os_page_size() == 0);
-  mi_segment_t* segment = (reverse ? tld->cache.last : tld->cache.first);
-  while (segment != NULL) {
-    if (segment->segment_size >= required) {
-      tld->cache_count--;
-      tld->cache_size -= segment->segment_size;
-      mi_segment_queue_remove(&tld->cache, segment);
-      // exact size match?
-      if (required==0 || segment->segment_size == required) {
-        return segment;
-      }
-      // not more than 25% waste and on a huge page segment? (in that case the segment size does not need to match required)
-      else if (required != MI_SEGMENT_SIZE && segment->segment_size - (segment->segment_size/4) <= required) {
-        return segment;
-      }
-      // try to shrink the memory to match exactly
-      else {
-        if (mi_option_is_enabled(mi_option_secure)) {
-          _mi_os_unprotect(segment, segment->segment_size);
-        }
-        if (_mi_os_shrink(segment, segment->segment_size, required)) {
-          tld->current_size -= segment->segment_size;
-          tld->current_size += required;
-          segment->segment_size = required;
-          return segment;
-        }
-        else {
-          // if that all fails, we give up
-          mi_segment_os_free(segment,segment->segment_size,tld);
-          return NULL;
-        }
-      }
-    }
-    segment = (reverse ? segment->prev : segment->next);
-  }
-  return NULL;
-}
-
-static mi_segment_t* mi_segment_cache_find(mi_segments_tld_t* tld, size_t required) {
-  return _mi_segment_cache_findx(tld,required,false);
-}
-
-static mi_segment_t* mi_segment_cache_evict(mi_segments_tld_t* tld) {
-  // TODO: random eviction instead?
-  return _mi_segment_cache_findx(tld, 0, true /* from the end */);
+static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t* tld) {
+  if (segment_size != 0 && segment_size != MI_SEGMENT_SIZE) return NULL;
+  mi_segment_t* segment = tld->cache;
+  if (segment == NULL) return NULL;
+  tld->cache_count--;
+  tld->cache = segment->next;
+  segment->next = NULL;
+  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
+  return segment;
 }
 
 static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
   if (tld->cache_count < MI_SEGMENT_CACHE_MAX &&
-      tld->cache_size*MI_SEGMENT_CACHE_FRACTION < tld->peak_size) return false;
+      tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) { // always allow 1 element cache
+    return false;
+  }
   // take the opportunity to reduce the segment cache if it is too large (now)
-  while (tld->cache_size*MI_SEGMENT_CACHE_FRACTION >= tld->peak_size + 1) {
-    mi_segment_t* segment = mi_segment_cache_evict(tld);
+  // TODO: this never happens as we check against peak usage, should we use current usage instead?
+  while (tld->cache_count > (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
+    mi_segment_t* segment = mi_segment_cache_pop(0,tld);
     mi_assert_internal(segment != NULL);
     if (segment != NULL) mi_segment_os_free(segment, segment->segment_size, tld);
   }
   return true;
 }
 
-static bool mi_segment_cache_insert(mi_segment_t* segment, mi_segments_tld_t* tld) {
-  mi_assert_internal(segment->next==NULL && segment->prev==NULL);
-  mi_assert_internal(!mi_segment_is_in_free_queue(segment,tld));
-  mi_assert_expensive(!mi_segment_queue_contains(&tld->cache, segment));
-  if (mi_segment_cache_full(tld)) return false;
+static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
+  mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
+  mi_assert_internal(segment->next == NULL);
+  if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) {
+    return false;
+  }
+
+  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
   if (mi_option_is_enabled(mi_option_cache_reset) && !mi_option_is_enabled(mi_option_page_reset)) {
-    _mi_os_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size);
+    _mi_mem_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
   }
-  // insert ordered
-  mi_segment_t* seg = tld->cache.first;
-  while (seg != NULL && seg->segment_size < segment->segment_size) {
-    seg = seg->next;
-  }
-  mi_segment_queue_insert_before( &tld->cache, seg, segment );
+  segment->next = tld->cache;
+  tld->cache = segment;
   tld->cache_count++;
-  tld->cache_size += segment->segment_size;
   return true;
 }
 
-// called by ending threads to free cached segments
+// called by threads that are terminating to free cached segments
 void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
   mi_segment_t* segment;
-  while ((segment = mi_segment_cache_find(tld,0)) != NULL) {
-    mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
+  while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
+    mi_segment_os_free(segment, segment->segment_size, tld);
   }
-  mi_assert_internal(tld->cache_count == 0 && tld->cache_size == 0);
-  mi_assert_internal(mi_segment_queue_is_empty(&tld->cache));
+  mi_assert_internal(tld->cache_count == 0);
+  mi_assert_internal(tld->cache == NULL);
 }
 
 
 /* -----------------------------------------------------------
   Segment allocation
 ----------------------------------------------------------- */
 
 
 // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
-static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   // calculate needed sizes first
 
   size_t capacity;
   if (page_kind == MI_PAGE_HUGE) {
-    mi_assert_internal(page_shift==MI_SEGMENT_SHIFT && required > 0);
+    mi_assert_internal(page_shift == MI_SEGMENT_SHIFT && required > 0);
     capacity = 1;
   }
   else {
-    mi_assert_internal(required==0);
+    mi_assert_internal(required == 0);
     size_t page_size = (size_t)1 << page_shift;
     capacity = MI_SEGMENT_SIZE / page_size;
     mi_assert_internal(MI_SEGMENT_SIZE % page_size == 0);
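With this change the thread local cache is a plain LIFO list of `MI_SEGMENT_SIZE` segments threaded through `segment->next`, replacing the ordered queue with its `cache_size` bookkeeping and shrink-to-fit path. For example, with `MI_SEGMENT_CACHE_FRACTION` at 8, a thread whose peak segment count is 16 caches at most `min(4, 1 + 16/8) = 3` segments. A minimal standalone model of the push/pop discipline under those two bounds (types and fields are simplified stand-ins; the real `mi_segment_cache_full` additionally evicts down to the bound):

```c
#include <stddef.h>
#include <stdbool.h>

#define CACHE_MAX      4   // MI_SEGMENT_CACHE_MAX after this commit
#define CACHE_FRACTION 8   // MI_SEGMENT_CACHE_FRACTION

typedef struct segment_s { struct segment_s* next; } segment_t;

typedef struct tld_s {
  segment_t* cache;       // LIFO list head (no longer an ordered queue)
  size_t     cache_count;
  size_t     peak_count;  // peak number of live segments on this thread
} tld_t;

static segment_t* cache_pop(tld_t* tld) {
  segment_t* s = tld->cache;
  if (s == NULL) return NULL;
  tld->cache = s->next;
  s->next = NULL;
  tld->cache_count--;
  return s;
}

static bool cache_push(tld_t* tld, segment_t* s) {
  // "full" when either bound is reached; 1 + peak/8 always allows one element
  if (tld->cache_count >= CACHE_MAX ||
      tld->cache_count >= 1 + tld->peak_count / CACHE_FRACTION) {
    return false;  // caller then frees the segment through the region layer
  }
  s->next = tld->cache;
  tld->cache = s;
  tld->cache_count++;
  return true;
}
```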
@@ -318,46 +269,52 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
   }
   size_t info_size;
   size_t pre_size;
-  size_t segment_size = mi_segment_size( capacity, required, &pre_size, &info_size);
+  size_t segment_size = mi_segment_size(capacity, required, &pre_size, &info_size);
   mi_assert_internal(segment_size >= required);
   size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);
 
-  // Allocate the segment
-  mi_segment_t* segment = NULL;
-
-  // try to get it from our caches
-  segment = mi_segment_cache_find(tld,segment_size);
-  mi_assert_internal(segment == NULL ||
-                     (segment_size==MI_SEGMENT_SIZE && segment_size == segment->segment_size) ||
-                     (segment_size!=MI_SEGMENT_SIZE && segment_size <= segment->segment_size));
-  if (segment != NULL && mi_option_is_enabled(mi_option_secure) && (segment->page_kind != page_kind || segment->segment_size != segment_size)) {
-    _mi_os_unprotect(segment,segment->segment_size);
+  // Try to get it from our thread local cache first
+  bool protection_still_good = false;
+  mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
+  if (segment != NULL) {
+    if (mi_option_is_enabled(mi_option_secure)) {
+      if (segment->page_kind != page_kind) {
+        _mi_mem_unprotect(segment, segment->segment_size); // reset protection if the page kind differs
+      }
+      else {
+        protection_still_good = true; // otherwise, the guard pages are still in place
+      }
+    }
   }
-
-  // and otherwise allocate it from the OS
-  if (segment == NULL) {
-    segment = (mi_segment_t*)_mi_os_alloc_aligned(segment_size, MI_SEGMENT_SIZE, os_tld);
-    if (segment == NULL) return NULL;
-    mi_segments_track_size((long)segment_size,tld);
+  else {
+    // Allocate the segment from the OS
+    size_t memid;
+    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &memid, os_tld);
+    if (segment == NULL) return NULL; // failed to allocate
+    segment->memid = memid;
+    mi_segments_track_size((long)segment_size, tld);
   }
-  mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
+  mi_assert_internal((uintptr_t)segment % MI_SEGMENT_SIZE == 0);
 
-  memset(segment, 0, info_size);
-  if (mi_option_is_enabled(mi_option_secure)) {
-    // in secure mode, we set up a protected page in between the segment info
-    // and the page data
+  // zero the segment info
+  { size_t memid = segment->memid;
+    memset(segment, 0, info_size);
+    segment->memid = memid;
+  }
+
+  if (mi_option_is_enabled(mi_option_secure) && !protection_still_good) {
+    // in secure mode, we set up a protected page in between the segment info and the page data
     mi_assert_internal( info_size == pre_size - _mi_os_page_size() && info_size % _mi_os_page_size() == 0);
-    _mi_os_protect( (uint8_t*)segment + info_size, (pre_size - info_size) );
+    _mi_mem_protect( (uint8_t*)segment + info_size, (pre_size - info_size) );
     size_t os_page_size = _mi_os_page_size();
     if (mi_option_get(mi_option_secure) <= 1) {
       // and protect the last page too
-      _mi_os_protect( (uint8_t*)segment + segment_size - os_page_size, os_page_size );
+      _mi_mem_protect( (uint8_t*)segment + segment_size - os_page_size, os_page_size );
     }
     else {
      // protect every page
      for (size_t i = 0; i < capacity; i++) {
-        _mi_os_protect( (uint8_t*)segment + (i+1)*page_size - os_page_size, os_page_size );
+        _mi_mem_protect( (uint8_t*)segment + (i+1)*page_size - os_page_size, os_page_size );
      }
    }
  }
 
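In secure mode the code above places a guard page between the segment metadata and the page data, and for `mi_option_secure <= 1` one more guard page at the very end of the segment (for higher levels, the last OS page of every mimalloc page is guarded instead). A toy calculation of where those guard ranges land, using assumed sizes; the real `info_size`/`pre_size` come from `mi_segment_size()`, with `info_size == pre_size - _mi_os_page_size()` as asserted above:

```c
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void) {
  // Assumed illustrative sizes; mimalloc computes these at run time.
  const uintptr_t os_page  = 4096;              // assumed OS page size
  const uintptr_t seg_size = 4u * 1024 * 1024;  // assumed MI_SEGMENT_SIZE (4 MiB)
  const uintptr_t info     = 2 * os_page;       // assumed segment info size
  const uintptr_t pre      = info + os_page;    // info_size == pre_size - os_page
  const uintptr_t seg      = 0x20000000;        // MI_SEGMENT_SIZE-aligned start

  // guard page between the segment info and the page data:
  printf("info guard : [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n", seg + info, seg + pre);
  // for secure level <= 1, the last OS page of the segment is guarded too:
  printf("final guard: [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n",
         seg + seg_size - os_page, seg + seg_size);
  return 0;
}
```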
@@ -372,7 +329,7 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
   for (uint8_t i = 0; i < segment->capacity; i++) {
     segment->pages[i].segment_idx = i;
   }
-  mi_stat_increase(tld->stats->committed, segment->segment_info_size);
+  mi_stat_increase(tld->stats->page_committed, segment->segment_info_size);
   //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
   return segment;
 }
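Here (and at the matching `mi_stat_decrease` sites below) the `committed` statistic becomes `page_committed`. To see these counters in a running program, the statistics can be printed at exit; a small example, where the `FILE*` signature of `mi_stats_print` and the `MIMALLOC_SHOW_STATS` environment alternative are assumptions about the public API of this era rather than facts taken from this diff:

```c
// Inspect the allocator statistics affected by the rename; alternatively,
// run any mimalloc-linked program with MIMALLOC_SHOW_STATS=1 (assumption).
#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(1 << 20);  // force a segment allocation
  mi_free(p);
  mi_stats_print(stderr);        // prints the page commit statistics, among others
  return 0;
}
```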
@@ -387,6 +344,7 @@ static size_t mi_page_size(const mi_page_t* page) {
 #endif
+
 static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
   UNUSED(force);
   //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment);
   mi_assert(segment != NULL);
   if (mi_segment_is_in_free_queue(segment,tld)) {
@@ -403,7 +361,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
   mi_assert(segment->next == NULL);
   mi_assert(segment->prev == NULL);
-  mi_stat_decrease( tld->stats->committed, segment->segment_info_size);
+  mi_stat_decrease( tld->stats->page_committed, segment->segment_info_size);
   segment->thread_id = 0;
 
   // update reset memory statistics
@@ -415,7 +373,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
     }
   }
 
-  if (!force && mi_segment_cache_insert(segment, tld)) {
+  if (!force && mi_segment_cache_push(segment, tld)) {
     // it is put in our cache
   }
   else {
@@ -424,9 +382,6 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   }
 }
 
-
-
-
 /* -----------------------------------------------------------
   Free page management inside a segment
 ----------------------------------------------------------- */
@@ -461,17 +416,16 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
   mi_assert_internal(page->segment_in_use);
   mi_assert_internal(mi_page_all_free(page));
   size_t inuse = page->capacity * page->block_size;
-  mi_stat_decrease( stats->committed, inuse);
+  mi_stat_decrease( stats->page_committed, inuse);
   mi_stat_decrease( stats->pages, 1);
 
   // reset the page memory to reduce memory pressure?
   if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
     size_t psize;
     uint8_t* start = _mi_segment_page_start(segment, page, &psize);
     mi_stat_increase( stats->reset, psize); // for stats we assume resetting the full page
     page->is_reset = true;
     if (inuse > 0) {
-      _mi_os_reset(start, inuse);
+      _mi_mem_reset(start, psize, stats); // TODO: just `inuse`?
     }
   }
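`mi_segment_page_clear` now resets the whole page span (`psize`) through the region layer rather than just the used prefix, as the inline TODO notes. On Unix-like systems a reset of this kind conventionally maps to an `madvise` hint that keeps the address range reserved while letting the OS reclaim the physical pages; a sketch of that idea only, since mimalloc's real reset logic lives in `os.c`/`memory.c` and also handles Windows and large-page regions:

```c
#include <sys/mman.h>

// Sketch: hint that [p, p+size) is no longer needed while keeping the range
// mapped. MADV_DONTNEED is the conventional Linux choice; the actual flags
// and fallbacks used by mimalloc differ per platform.
static int memory_reset_hint(void* p, size_t size) {
  return madvise(p, size, MADV_DONTNEED);
}
```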