mirror of
https://github.com/microsoft/mimalloc.git
synced 2025-07-07 11:58:41 +03:00
initial checkin

parent 23b4e65faa
commit 26a874eb3f

41 changed files with 11897 additions and 0 deletions
647
src/segment.c
Normal file
@@ -0,0 +1,647 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"license.txt" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset
#include <stdio.h>

/* -----------------------------------------------------------
  Segment allocation
  We allocate pages inside big OS allocated "segments"
  (2mb on 64-bit). This is to avoid splitting VMA's on Linux
  and reduce fragmentation on other OS's. Each thread
  owns its own segments.

  Currently we have:
  - small pages (64kb), 32 in one segment
  - large pages (2mb), 1 in one segment
  - huge blocks > MI_LARGE_SIZE_MAX (256kb) are directly allocated by the OS

  It might be good to have "medium" pages too (of, say, 256kb)
  to reduce pressure on the virtual address space on 32-bit systems,
  but for now we choose the simpler implementation since this
  will only be a problem if multiple threads allocate many
  differently sized objects between 8kb and 2mb, which is not common.

  In any case the memory for a segment is virtual and only
  committed on demand (i.e. we are careful to not touch the memory
  until we actually allocate a block there).

  If a thread ends, it "abandons" pages with used blocks
  and there is an abandoned segment list whose segments can
  be reclaimed by still running threads, much like work-stealing.
----------------------------------------------------------- */
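
/* Worked example of the geometry above (assuming the constants in the
   mimalloc internal headers match the sizes mentioned in this comment):
     MI_SEGMENT_SIZE    = 2mb  = 1 << MI_SEGMENT_SHIFT
     MI_SMALL_PAGE_SIZE = 64kb = 1 << MI_SMALL_PAGE_SHIFT
       => 2mb / 64kb = 32 = MI_SMALL_PAGES_PER_SEGMENT small pages per segment
     MI_LARGE_PAGE_SIZE = 2mb  => exactly 1 large page per segment
   Blocks larger than MI_LARGE_SIZE_MAX (256kb) get a dedicated "huge"
   segment sized to the request (see mi_segment_huge_page_alloc below). */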


#if (MI_DEBUG > 1)
static bool mi_segment_is_valid(mi_segment_t* segment) {
  mi_assert_internal(segment != NULL);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(segment->used <= segment->capacity);
  mi_assert_internal(segment->abandoned <= segment->used);
  size_t nfree = 0;
  for (size_t i = 0; i < segment->capacity; i++) {
    if (!segment->pages[i].segment_in_use) nfree++;
  }
  mi_assert_internal(nfree + segment->used == segment->capacity);
  mi_assert_internal(segment->thread_id == _mi_thread_id()); // or 0
  return true;
}
#endif

/* -----------------------------------------------------------
  Queue of segments containing free pages
----------------------------------------------------------- */

#if (MI_DEBUG > 1)
static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, mi_segment_t* segment) {
  mi_assert_internal(segment != NULL);
  mi_segment_t* list = queue->first;
  while (list != NULL) {
    if (list == segment) break;
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    list = list->next;
  }
  return (list == segment);
}
#endif

// quick test to see if a segment is in the free pages queue
static bool mi_segment_is_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  bool in_queue = (segment->next != NULL || segment->prev != NULL || tld->small_free.first == segment);
  if (in_queue) {
    mi_assert(segment->page_kind == MI_PAGE_SMALL); // for now we only support small pages
    mi_assert_expensive(mi_segment_queue_contains(&tld->small_free, segment));
  }
  return in_queue;
}

static bool mi_segment_queue_is_empty(const mi_segment_queue_t* queue) {
  return (queue->first == NULL);
}

static void mi_segment_queue_remove(mi_segment_queue_t* queue, mi_segment_t* segment) {
  mi_assert_expensive(mi_segment_queue_contains(queue, segment));
  if (segment->prev != NULL) segment->prev->next = segment->next;
  if (segment->next != NULL) segment->next->prev = segment->prev;
  if (segment == queue->first) queue->first = segment->next;
  if (segment == queue->last)  queue->last = segment->prev;
  segment->next = NULL;
  segment->prev = NULL;
}

static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment) {
  mi_assert_expensive(!mi_segment_queue_contains(queue, segment));
  segment->next = NULL;
  segment->prev = queue->last;
  if (queue->last != NULL) {
    mi_assert_internal(queue->last->next == NULL);
    queue->last->next = segment;
    queue->last = segment;
  }
  else {
    queue->last = queue->first = segment;
  }
}


// Start of the page available memory
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
{
  size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
  uint8_t* p = (uint8_t*)segment + page->segment_idx*psize;

  if (page->segment_idx == 0) {
    // the first page starts after the segment info (and possible guard page)
    p += segment->segment_info_size;
    psize -= segment->segment_info_size;
  }
  long secure = mi_option_get(mi_option_secure);
  if (secure > 1 || (secure == 1 && page->segment_idx == segment->capacity - 1)) {
    // secure == 1: the last page has an os guard page at the end
    // secure >  1: every page has an os guard page
    psize -= _mi_os_page_size();
  }

  if (page_size != NULL) *page_size = psize;
  mi_assert_internal(_mi_ptr_page(p) == page);
  mi_assert_internal(_mi_ptr_segment(p) == segment);
  return p;
}
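
/* Example of the offset computation above (non-secure mode, a small
   segment at address S with 64kb pages, so psize = 64kb):
     page->segment_idx == 0 :  p = S + segment_info_size,
                               *page_size = 64kb - segment_info_size
     page->segment_idx == 3 :  p = S + 3*64kb,  *page_size = 64kb
   In secure mode one OS page is additionally subtracted from *page_size
   for every page that is followed by a guard page. */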

static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size, size_t* info_size) {
  /*
  if (mi_option_is_enabled(mi_option_secure)) {
    // always reserve maximally so the protection falls on
    // the same address area, as we need to reuse them from the caches interchangeably.
    capacity = MI_SMALL_PAGES_PER_SEGMENT;
  }
  */
  size_t minsize   = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
  size_t guardsize = 0;
  size_t isize     = 0;

  if (!mi_option_is_enabled(mi_option_secure)) {
    // normally no guard pages
    isize = _mi_align_up(minsize, (16 > MI_MAX_ALIGN_SIZE ? 16 : MI_MAX_ALIGN_SIZE));
  }
  else {
    // in secure mode, we set up a protected page in between the segment info
    // and the page data (and one at the end of the segment)
    size_t page_size = _mi_os_page_size();
    isize     = _mi_align_up(minsize, page_size);
    guardsize = page_size;
    required  = _mi_align_up(required, page_size);
  }

  if (info_size != NULL) *info_size = isize;
  if (pre_size  != NULL) *pre_size  = isize + guardsize;
  return (required == 0 ? MI_SEGMENT_SIZE : required + isize + 2*guardsize);
}
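
/* Example outcome of mi_segment_size (non-secure mode, small pages, so
   capacity == 32 and required == 0; the exact byte counts depend on the
   platform sizes of mi_segment_t and mi_page_t):
     minsize    = sizeof(mi_segment_t) + 31*sizeof(mi_page_t) + 16
     isize      = minsize rounded up to max(16, MI_MAX_ALIGN_SIZE)
     *info_size = isize,  *pre_size = isize   (guardsize == 0)
     result     = MI_SEGMENT_SIZE (2mb), since required == 0.
   In secure mode isize is rounded up to the OS page size instead and one
   guard page follows the info area plus one more at the segment end. */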


/* -----------------------------------------------------------
  Segment caches
  We keep a small segment cache per thread to avoid repeated allocation
  and free in the OS if a program allocates memory and then frees
  all again repeatedly. (We tried a one-element cache but that
  proved to be too small for certain workloads.)
----------------------------------------------------------- */

static void mi_segments_count_add(long inc, mi_segments_tld_t* tld) {
  if (inc >= 0) mi_stat_increase(tld->stats->segments, inc);
  else          mi_stat_decrease(tld->stats->segments, -inc);
  mi_assert_internal(inc < 0 ? tld->count >= (size_t)(-inc) : tld->count < (SIZE_MAX - inc));
  mi_assert_internal(tld->peak >= tld->count);
  tld->count += inc;
  if (tld->count > tld->peak) tld->peak = tld->count;
}

static size_t mi_segments_peak(mi_segments_tld_t* tld) {
  return tld->peak;
}

static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
  mi_segments_count_add(-1, tld);
  _mi_os_free(segment, segment_size, tld->stats);
}

// The segment cache is limited to at most MI_SEGMENT_CACHE_MAX segments,
// and to at most 1/MI_SEGMENT_CACHE_FRACTION of the peak number of segments in use
#define MI_SEGMENT_CACHE_MAX      (16)
#define MI_SEGMENT_CACHE_FRACTION (6)
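
/* Example of the resulting cache bound (see mi_segment_cache_full below):
   a segment can only be cached while
     cache_count < MI_SEGMENT_CACHE_MAX  and
     cache_count * MI_SEGMENT_CACHE_FRACTION < peak segment count,
   so with a peak of 60 segments at most min(16, 60/6) = 10 segments stay
   in the per-thread cache; any surplus is returned to the OS. */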


static mi_segment_t* mi_segment_cache_pop(mi_segments_tld_t* tld) {
  mi_segment_t* segment = tld->cache;
  if (segment == NULL) return NULL;
  tld->cache_count--;
  tld->cache = segment->next;
  segment->next = NULL;
  return segment;
}

static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
  if (tld->cache_count < MI_SEGMENT_CACHE_MAX &&
      tld->cache_count*MI_SEGMENT_CACHE_FRACTION < mi_segments_peak(tld)) return false;
  // take the opportunity to reduce the segment cache if it is too large (now)
  while (tld->cache_count*MI_SEGMENT_CACHE_FRACTION >= mi_segments_peak(tld) + 1) {
    mi_segment_t* segment = mi_segment_cache_pop(tld);
    mi_assert_internal(segment != NULL);
    if (segment != NULL) mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
  return true;
}

static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
  mi_assert_internal(segment->next == NULL);
  if (mi_segment_cache_full(tld)) return false;

  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
  if (mi_option_is_enabled(mi_option_cache_reset) && !mi_option_is_enabled(mi_option_page_reset)) {
    _mi_os_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size);
  }
  segment->next = tld->cache;
  tld->cache = segment;
  tld->cache_count++;
  return true;
}

// called by ending threads to free cached segments
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
  mi_segment_t* segment;
  while ((segment = mi_segment_cache_pop(tld)) != NULL) {
    mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
  mi_assert_internal(tld->cache_count == 0);
  mi_assert_internal(tld->cache == NULL);
}


/* -----------------------------------------------------------
  Segment allocation
----------------------------------------------------------- */

// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  // calculate needed sizes first
  size_t capacity;
  if (page_kind == MI_PAGE_HUGE) {
    mi_assert_internal(page_shift == MI_SEGMENT_SHIFT && required > 0);
    capacity = 1;
  }
  else {
    mi_assert_internal(required == 0);
    size_t page_size = (size_t)1 << page_shift;
    capacity = MI_SEGMENT_SIZE / page_size;
    mi_assert_internal(MI_SEGMENT_SIZE % page_size == 0);
    mi_assert_internal(capacity >= 1 && capacity <= MI_SMALL_PAGES_PER_SEGMENT);
  }
  size_t info_size;
  size_t pre_size;
  size_t segment_size = mi_segment_size(capacity, required, &pre_size, &info_size);

  size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);

  // Allocate the segment
  mi_segment_t* segment = NULL;

  // try to get it from our caches
  if (segment_size == MI_SEGMENT_SIZE) {
    segment = mi_segment_cache_pop(tld);
    if (segment != NULL && mi_option_is_enabled(mi_option_secure) && segment->page_kind != page_kind) {
      _mi_os_unprotect(segment, segment->segment_size);
    }
  }

  // and otherwise allocate it from the OS
  if (segment == NULL) {
    segment = (mi_segment_t*)_mi_os_alloc_aligned(segment_size, MI_SEGMENT_SIZE, os_tld);
    if (segment == NULL) return NULL;
    mi_segments_count_add(1, tld);
  }

  mi_assert_internal((uintptr_t)segment % MI_SEGMENT_SIZE == 0);

  memset(segment, 0, info_size);
  if (mi_option_is_enabled(mi_option_secure)) {
    // in secure mode, we set up a protected page in between the segment info
    // and the page data
    mi_assert_internal(info_size == pre_size - _mi_os_page_size() && info_size % _mi_os_page_size() == 0);
    _mi_os_protect((uint8_t*)segment + info_size, (pre_size - info_size));
    size_t os_page_size = _mi_os_page_size();
    if (mi_option_get(mi_option_secure) <= 1) {
      // and protect the last page too
      _mi_os_protect((uint8_t*)segment + segment_size - os_page_size, os_page_size);
    }
    else {
      // protect every page
      for (size_t i = 0; i < capacity; i++) {
        _mi_os_protect((uint8_t*)segment + (i+1)*page_size - os_page_size, os_page_size);
      }
    }
  }

  segment->page_kind  = page_kind;
  segment->capacity   = capacity;
  segment->page_shift = page_shift;
  segment->segment_size = segment_size;
  segment->segment_info_size = pre_size;
  segment->thread_id  = _mi_thread_id();
  segment->cookie     = _mi_ptr_cookie(segment);
  for (uint8_t i = 0; i < segment->capacity; i++) {
    segment->pages[i].segment_idx = i;
  }
  mi_stat_increase(tld->stats->committed, segment->segment_info_size);
  //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
  return segment;
}
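
/* Sketch of the resulting layout for a small segment (secure mode off),
   following the assignments above: the segment occupies segment_size
   bytes aligned to MI_SEGMENT_SIZE; the first segment_info_size bytes
   hold the zero-initialized mi_segment_t with its page meta-data, and the
   rest of page 0 plus pages 1..capacity-1 (each (size_t)1 << page_shift
   bytes) hold block data that is only committed once used. In secure
   mode the guard pages are inserted as protected above. */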


#if MI_STAT
// Available memory in a page
static size_t mi_page_size(const mi_page_t* page) {
  size_t psize;
  _mi_segment_page_start(_mi_page_segment(page), page, &psize);
  return psize;
}
#endif

static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
  //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment);
  mi_assert(segment != NULL);
  if (mi_segment_is_in_free_queue(segment, tld)) {
    if (segment->page_kind != MI_PAGE_SMALL) {
      fprintf(stderr, "mimalloc: expecting small segment: %i, %p, %p, %p\n", segment->page_kind, segment->prev, segment->next, tld->small_free.first);
      fflush(stderr);
    }
    else {
      mi_assert_internal(segment->page_kind == MI_PAGE_SMALL); // for now we only support small pages
      mi_assert_expensive(mi_segment_queue_contains(&tld->small_free, segment));
      mi_segment_queue_remove(&tld->small_free, segment);
    }
  }
  mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
  mi_assert(segment->next == NULL);
  mi_assert(segment->prev == NULL);
  mi_stat_decrease(tld->stats->committed, segment->segment_info_size);
  segment->thread_id = 0;

  // update reset memory statistics
  for (uint8_t i = 0; i < segment->capacity; i++) {
    mi_page_t* page = &segment->pages[i];
    if (page->is_reset) {
      page->is_reset = false;
      mi_stat_decrease(tld->stats->reset, mi_page_size(page));
    }
  }

  if (segment->page_kind == MI_PAGE_HUGE) {
    mi_segment_os_free(segment, segment->segment_size, tld);
  }
  else if (!force && mi_segment_cache_push(segment, tld)) {
    // it is put in our cache
  }
  else {
    // otherwise return it to the OS
    mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
}


/* -----------------------------------------------------------
  Free page management inside a segment
----------------------------------------------------------- */

static bool mi_segment_has_free(const mi_segment_t* segment) {
  return (segment->used < segment->capacity);
}

static mi_page_t* mi_segment_find_free(mi_segment_t* segment) {
  mi_assert_internal(mi_segment_has_free(segment));
  mi_assert_expensive(mi_segment_is_valid(segment));
  for (size_t i = 0; i < segment->capacity; i++) {
    mi_page_t* page = &segment->pages[i];
    if (!page->segment_in_use) {
      return page;
    }
  }
  mi_assert(false);
  return NULL;
}


/* -----------------------------------------------------------
  Free
----------------------------------------------------------- */

static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);

static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_stats_t* stats) {
  UNUSED(stats);
  mi_assert_internal(page->segment_in_use);
  mi_assert_internal(mi_page_all_free(page));
  size_t inuse = page->capacity * page->block_size;
  mi_stat_decrease(stats->committed, inuse);
  mi_stat_decrease(stats->pages, 1);

  // reset the page memory to reduce memory pressure?
  if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
    size_t psize;
    uint8_t* start = _mi_segment_page_start(segment, page, &psize);
    mi_stat_increase(stats->reset, psize);  // for stats we assume resetting the full page
    page->is_reset = true;
    if (inuse > 0) {
      _mi_os_reset(start, inuse);
    }
  }

  // zero the page data
  uint8_t idx   = page->segment_idx;  // don't clear the index
  bool is_reset = page->is_reset;     // don't clear the reset flag
  memset(page, 0, sizeof(*page));
  page->segment_idx = idx;
  page->segment_in_use = false;
  page->is_reset = is_reset;
  segment->used--;
}

void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
{
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_expensive(mi_segment_is_valid(segment));

  // mark it as free now
  mi_segment_page_clear(segment, page, tld->stats);

  if (segment->used == 0) {
    // no more used pages; remove from the free list and free the segment
    mi_segment_free(segment, force, tld);
  }
  else {
    if (segment->used == segment->abandoned) {
      // only abandoned pages; remove from free list and abandon
      mi_segment_abandon(segment, tld);
    }
    else if (segment->used + 1 == segment->capacity) {
      mi_assert_internal(segment->page_kind == MI_PAGE_SMALL); // for now we only support small pages
      // move it back to the small page free queue as it has a free page again
      mi_segment_enqueue(&tld->small_free, segment);
    }
  }
}


/* -----------------------------------------------------------
  Abandonment
----------------------------------------------------------- */

// When threads terminate, they can leave segments with
// live blocks (reached through other threads). Such segments
// are "abandoned" and will be reclaimed by other threads to
// reuse their pages and/or free them eventually.
static volatile mi_segment_t* abandoned = NULL;
static volatile uintptr_t abandoned_count = 0;

static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(segment->used == segment->abandoned);
  mi_assert_internal(segment->used > 0);
  mi_assert_internal(segment->abandoned_next == NULL);
  mi_assert_expensive(mi_segment_is_valid(segment));
  // remove the segment from the free page queue if needed
  if (mi_segment_is_in_free_queue(segment, tld)) {
    mi_assert(segment->page_kind == MI_PAGE_SMALL); // for now we only support small pages
    mi_assert_expensive(mi_segment_queue_contains(&tld->small_free, segment));
    mi_segment_queue_remove(&tld->small_free, segment);
  }
  mi_assert_internal(segment->next == NULL && segment->prev == NULL);
  // all pages in the segment are abandoned; add it to the abandoned list
  segment->thread_id = 0;
  do {
    segment->abandoned_next = (mi_segment_t*)abandoned;
  } while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, segment->abandoned_next));
  mi_atomic_increment(&abandoned_count);
  mi_stat_increase(tld->stats->segments_abandoned, 1);
}
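
/* The abandoned list is a lock-free singly linked (Treiber-style) stack
   threaded through `abandoned_next`: the push above retries the
   compare-and-exchange on `abandoned` until the head it read is still
   current, and _mi_segment_try_reclaim_abandoned below pops the head the
   same way. `abandoned_count` is only an estimate of the list length,
   used to bound how many segments a single reclaim pass will attempt. */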

void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_expensive(mi_segment_is_valid(segment));
  segment->abandoned++;
  mi_stat_increase(tld->stats->pages_abandoned, 1);
  mi_assert_internal(segment->abandoned <= segment->used);
  if (segment->used == segment->abandoned) {
    // all pages are abandoned, abandon the entire segment
    mi_segment_abandon(segment, tld);
  }
}

bool _mi_segment_try_reclaim_abandoned(mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld) {
  uintptr_t reclaimed = 0;
  uintptr_t atmost;
  if (try_all) {
    atmost = abandoned_count + 16;  // close enough
  }
  else {
    atmost = abandoned_count/8;     // at most 1/8th of all outstanding (estimated)
    if (atmost < 8) atmost = 8;     // but at least 8
  }

  // reclaim at most `atmost` abandoned segments...
  while (atmost > reclaimed) {
    // try to claim the head of the abandoned segments
    mi_segment_t* segment;
    do {
      segment = (mi_segment_t*)abandoned;
    } while (segment != NULL && !mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment->abandoned_next, segment));
    if (segment == NULL) break;  // stop early if no more segments available

    // got it.
    mi_atomic_decrement(&abandoned_count);
    segment->thread_id = _mi_thread_id();
    segment->abandoned_next = NULL;
    mi_segments_count_add(1, tld);
    mi_assert_internal(segment->next == NULL && segment->prev == NULL);
    mi_assert_expensive(mi_segment_is_valid(segment));
    mi_stat_decrease(tld->stats->segments_abandoned, 1);
    // add its free pages to the current thread
    if (segment->page_kind == MI_PAGE_SMALL && mi_segment_has_free(segment)) {
      mi_segment_enqueue(&tld->small_free, segment);
    }
    // add its abandoned pages to the current thread
    mi_assert(segment->abandoned == segment->used);
    for (size_t i = 0; i < segment->capacity; i++) {
      mi_page_t* page = &segment->pages[i];
      if (page->segment_in_use) {
        segment->abandoned--;
        mi_assert(page->next == NULL);
        mi_stat_decrease(tld->stats->pages_abandoned, 1);
        if (mi_page_all_free(page)) {
          // if everything is free by now, free the page
          mi_segment_page_clear(segment, page, tld->stats);
        }
        else {
          // otherwise reclaim it
          _mi_page_reclaim(heap, page);
        }
      }
    }
    mi_assert(segment->abandoned == 0);
    if (segment->used == 0) {  // due to page_clear
      mi_segment_free(segment, false, tld);
    }
    else {
      reclaimed++;
    }
  }
  return (reclaimed > 0);
}


/* -----------------------------------------------------------
  Small page allocation
----------------------------------------------------------- */

// Allocate a small page inside a segment.
// Requires that the segment has a free page available.
static mi_page_t* mi_segment_small_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(mi_segment_has_free(segment));
  mi_page_t* page = mi_segment_find_free(segment);
  page->segment_in_use = true;
  segment->used++;
  mi_assert_internal(segment->used <= segment->capacity);
  if (segment->used == segment->capacity) {
    // if no more free pages, remove from the queue
    mi_assert_internal(!mi_segment_has_free(segment));
    mi_assert_expensive(mi_segment_queue_contains(&tld->small_free, segment));
    mi_segment_queue_remove(&tld->small_free, segment);
  }
  return page;
}

static mi_page_t* mi_segment_small_page_alloc(mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  if (mi_segment_queue_is_empty(&tld->small_free)) {
    mi_segment_t* segment = mi_segment_alloc(0, MI_PAGE_SMALL, MI_SMALL_PAGE_SHIFT, tld, os_tld);
    if (segment == NULL) return NULL;
    mi_segment_enqueue(&tld->small_free, segment);
  }
  mi_assert_internal(tld->small_free.first != NULL);
  return mi_segment_small_page_alloc_in(tld->small_free.first, tld);
}


/* -----------------------------------------------------------
  Large page allocation
----------------------------------------------------------- */

static mi_page_t* mi_segment_large_page_alloc(mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_segment_t* segment = mi_segment_alloc(0, MI_PAGE_LARGE, MI_LARGE_PAGE_SHIFT, tld, os_tld);
  if (segment == NULL) return NULL;
  segment->used = 1;
  mi_page_t* page = &segment->pages[0];
  page->segment_in_use = true;
  return page;
}

static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT, tld, os_tld);
  if (segment == NULL) return NULL;
  mi_assert_internal(segment->segment_size - segment->segment_info_size >= size);
  segment->used = 1;
  mi_page_t* page = &segment->pages[0];
  page->segment_in_use = true;
  return page;
}

/* -----------------------------------------------------------
  Page allocation and free
----------------------------------------------------------- */

mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_page_t* page;
  if (block_size < MI_SMALL_PAGE_SIZE / 8)
    // blocks smaller than 8kb (assuming MI_SMALL_PAGE_SIZE == 64kb)
    page = mi_segment_small_page_alloc(tld, os_tld);
  else if (block_size < (MI_LARGE_SIZE_MAX - sizeof(mi_segment_t)))
    page = mi_segment_large_page_alloc(tld, os_tld);
  else
    page = mi_segment_huge_page_alloc(block_size, tld, os_tld);
  mi_assert_expensive(mi_segment_is_valid(_mi_page_segment(page)));
  return page;
}
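
/* Example of how block sizes route through _mi_segment_page_alloc
   (assuming MI_SMALL_PAGE_SIZE == 64kb and MI_LARGE_SIZE_MAX == 256kb):
     block_size =   1kb  (<  8kb)            -> small page in a shared 2mb segment
     block_size =  32kb  (>= 8kb, < ~256kb)  -> large page, one per 2mb segment
     block_size = 512kb  (>= ~256kb)         -> huge page in its own OS segment
   The "~" is because sizeof(mi_segment_t) is subtracted from the large
   threshold so a large block still fits next to the segment info. */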