Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-06 19:38:41 +03:00)

Commit 23c35f4aba: merge from dev

13 changed files with 1127 additions and 50 deletions
src/alloc.c (15 lines changed)
@@ -115,6 +115,21 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
   mi_thread_free_t tfreex;
   bool use_delayed;
 
+  mi_segment_t* segment = _mi_page_segment(page);
+  if (segment->page_kind==MI_PAGE_HUGE) {
+    // huge page segments are always abandoned and can be freed immediately
+    mi_assert_internal(segment->thread_id==0);
+    mi_assert_internal(segment->abandoned_next==NULL);
+    // claim it and free
+    mi_block_set_next(page, block, page->free);
+    page->free = block;
+    page->used--;
+    mi_heap_t* heap = mi_get_default_heap();
+    segment->thread_id = heap->thread_id;
+    _mi_segment_page_free(page,true,&heap->tld->segments);
+    return;
+  }
+
   do {
     tfree = page->thread_free;
     use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE ||
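The added fast path frees a huge block without going through the cross-thread `thread_free` list: the segment holds exactly one page with one block, is always abandoned (`thread_id == 0`), so the freeing thread can claim and release it directly. Below is a minimal, self-contained sketch of that control flow; `segment_t` and `page_t` here are hypothetical simplified types, not mimalloc's real structs, and the real code must additionally guard against concurrent claimers.

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct segment_s {
  unsigned long thread_id;  // 0 means "abandoned"
  int is_huge;              // huge segments hold exactly one page/block
} segment_t;

typedef struct page_s {
  segment_t* segment;
  void* block;              // the single huge block
} page_t;

static void free_block_mt(page_t* page, unsigned long my_thread_id) {
  segment_t* seg = page->segment;
  if (seg->is_huge) {
    assert(seg->thread_id == 0);    // huge segments are always abandoned
    seg->thread_id = my_thread_id;  // claim it ...
    free(page->block);              // ... and free page + segment directly
    free(page);
    free(seg);
    return;
  }
  // otherwise: push the block on the owning thread's free list (elided)
}

int main(void) {
  segment_t* seg = malloc(sizeof(segment_t));
  page_t* page = malloc(sizeof(page_t));
  seg->thread_id = 0; seg->is_huge = 1;
  page->segment = seg; page->block = malloc(1 << 20);
  free_block_mt(page, 42);
  puts("huge block freed directly");
  return 0;
}
```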
src/options.c

@@ -24,6 +24,9 @@ int mi_version(void) mi_attr_noexcept {
 
 // --------------------------------------------------------
 // Options
+// These can be accessed by multiple threads and may be
+// concurrently initialized, but an initializing data race
+// is ok since they resolve to the same value.
 // --------------------------------------------------------
 typedef enum mi_init_e {
   UNINIT, // not yet initialized

@@ -63,6 +66,7 @@ static mi_option_desc_t options[_mi_option_last] =
 #endif
   { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
+  { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
   { 0, UNINIT, MI_OPTION(page_reset) },
   { 0, UNINIT, MI_OPTION(cache_reset) },
   { 0, UNINIT, MI_OPTION(reset_decommits) } // note: cannot enable this if secure is on
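The new comment relies on idempotent initialization: every racing thread derives the same value, so it does not matter whose store lands last. A sketch of that pattern (my own illustration, not mimalloc code), made race-free in the C11 sense by using atomics:

```c
#include <stdatomic.h>
#include <stdio.h>

typedef enum { UNINIT, INITIALIZED } init_t;

static _Atomic init_t g_init = UNINIT;
static _Atomic long   g_value;

static long option_get(void) {
  if (atomic_load_explicit(&g_init, memory_order_acquire) == UNINIT) {
    // deterministic computation: e.g. parsing a fixed environment variable,
    // so concurrent initializers all store the same value
    long v = 4;
    atomic_store_explicit(&g_value, v, memory_order_relaxed);
    atomic_store_explicit(&g_init, INITIALIZED, memory_order_release);
  }
  return atomic_load_explicit(&g_value, memory_order_relaxed);
}

int main(void) {
  printf("segment_cache = %ld\n", option_get());
  return 0;
}
```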
@@ -227,27 +231,18 @@ static void mi_strlcat(char* dest, const char* src, size_t dest_size) {
 // On Windows use GetEnvironmentVariable instead of getenv to work
 // reliably even when this is invoked before the C runtime is initialized.
 // i.e. when `_mi_preloading() == true`.
+// Note: on windows, environment names are not case sensitive.
 #include <windows.h>
 static bool mi_getenv(const char* name, char* result, size_t result_size) {
-  result[0] = 0;
-  bool ok = (GetEnvironmentVariableA(name, result, (DWORD)result_size) > 0);
-  if (!ok) {
-    char buf[64+1];
-    size_t len = strlen(name);
-    if (len >= sizeof(buf)) len = sizeof(buf) - 1;
-    for (size_t i = 0; i < len; i++) {
-      buf[i] = toupper(name[i]);
-    }
-    buf[len] = 0;
-    ok = (GetEnvironmentVariableA(name, result, (DWORD)result_size) > 0);
-  }
-  return ok;
+  size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size);
+  return (len > 0 && len < result_size);
 }
 #else
 static bool mi_getenv(const char* name, char* result, size_t result_size) {
 #pragma warning(suppress:4996)
   const char* s = getenv(name);
   if (s == NULL) {
     // in unix environments we check the upper case name too.
     char buf[64+1];
     size_t len = strlen(name);
     if (len >= sizeof(buf)) len = sizeof(buf) - 1;

@@ -255,7 +250,6 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
       buf[i] = toupper(name[i]);
     }
     buf[len] = 0;
-#pragma warning(suppress:4996)
     s = getenv(buf);
   }
   if (s != NULL && strlen(s) < result_size) {
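The simplified Windows branch leans on the Win32 contract of GetEnvironmentVariableA: it returns the number of characters copied (excluding the terminating NUL) on success, and the required buffer size when the buffer is too small, so `len > 0 && len < result_size` accepts only complete reads. A portable stand-in with the same contract, using a hypothetical `env_read` helper built on getenv:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Same return-value contract as GetEnvironmentVariableA, built on getenv():
// 0 = not found; >= result_size = buffer too small (required size returned);
// otherwise = number of characters copied, excluding the NUL.
static size_t env_read(const char* name, char* result, size_t result_size) {
  const char* s = getenv(name);
  if (s == NULL) return 0;
  size_t needed = strlen(s) + 1;
  if (needed > result_size) return needed;  // too small: report required size
  memcpy(result, s, needed);
  return needed - 1;
}

int main(void) {
  char buf[8];
  size_t len = env_read("PATH", buf, sizeof(buf));
  bool ok = (len > 0 && len < sizeof(buf));
  printf("ok=%d len=%zu\n", ok, len);  // PATH is usually longer than 7 chars
  return 0;
}
```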
@@ -267,8 +261,7 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
   }
 }
 #endif
 static void mi_option_init(mi_option_desc_t* desc) {
-  desc->init = DEFAULTED;
   // Read option value from the environment
   char buf[64+1];
   mi_strlcpy(buf, "mimalloc_", sizeof(buf));

@@ -298,7 +291,12 @@ static void mi_option_init(mi_option_desc_t* desc) {
     }
     else {
      _mi_warning_message("environment option mimalloc_%s has an invalid value: %s\n", desc->name, buf);
+      desc->init = DEFAULTED;
     }
   }
+  else {
+    desc->init = DEFAULTED;
+  }
+  mi_assert_internal(desc->init != UNINIT);
 }
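Moving the DEFAULTED assignments into the individual branches guarantees that every exit path leaves the option initialized, which the new final assert checks. A compact sketch of the same parse-or-default pattern (the names here are mine, not mimalloc's):

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { UNINIT, DEFAULTED, INITIALIZED } init_t;
typedef struct { long value; init_t init; const char* name; } option_desc_t;

static void option_init(option_desc_t* desc, const char* s) {
  if (s != NULL) {
    char* end = NULL;
    long v = strtol(s, &end, 10);
    if (end != s && *end == '\0') {
      desc->value = v;
      desc->init = INITIALIZED;
    }
    else {
      fprintf(stderr, "option %s has an invalid value: %s\n", desc->name, s);
      desc->init = DEFAULTED;   // keep the built-in default
    }
  }
  else {
    desc->init = DEFAULTED;     // not set in the environment
  }
  assert(desc->init != UNINIT); // postcondition on every path
}

int main(void) {
  option_desc_t opt = { 8, UNINIT, "segment_cache" };
  option_init(&opt, getenv("MIMALLOC_SEGMENT_CACHE"));
  printf("%s = %ld (%s)\n", opt.name, opt.value,
         opt.init == INITIALIZED ? "from env" : "default");
  return 0;
}
```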
src/page-queue.c

@@ -268,6 +268,7 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
 static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page->heap == NULL);
   mi_assert_internal(!mi_page_queue_contains(queue, page));
+  mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   mi_assert_internal(page->block_size == queue->block_size ||
                      (page->block_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
src/page.c (37 lines changed)
@@ -98,11 +98,13 @@ bool _mi_page_is_valid(mi_page_t* page) {
 #endif
   if (page->heap!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);
-    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id);
-    mi_page_queue_t* pq = mi_page_queue_of(page);
-    mi_assert_internal(mi_page_queue_contains(pq, page));
-    mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
-    mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
+    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id || segment->thread_id==0);
+    if (segment->page_kind != MI_PAGE_HUGE) {
+      mi_page_queue_t* pq = mi_page_queue_of(page);
+      mi_assert_internal(mi_page_queue_contains(pq, page));
+      mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+      mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
+    }
   }
   return true;
 }
@@ -208,6 +210,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
   mi_assert_internal(page->heap == NULL);
+  mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   _mi_page_free_collect(page,false);
   mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
   mi_page_queue_push(heap, pq, page);
@@ -216,12 +219,13 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
 
 // allocate a fresh page from a segment
 static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
-  mi_assert_internal(mi_heap_contains_queue(heap, pq));
+  mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
   mi_page_t* page = _mi_segment_page_alloc(block_size, &heap->tld->segments, &heap->tld->os);
   if (page == NULL) return NULL;
+  mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   mi_page_init(heap, page, block_size, &heap->tld->stats);
   _mi_stat_increase( &heap->tld->stats.pages, 1);
-  mi_page_queue_push(heap, pq, page);
+  if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
   mi_assert_expensive(_mi_page_is_valid(page));
   return page;
 }
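mi_page_fresh_alloc now treats the queue as optional: huge pages pass pq==NULL and are never linked into any heap queue. A toy sketch of that convention, with placeholder types of my own:

```c
#include <stddef.h>
#include <stdio.h>

typedef struct page_s { struct page_s* next; size_t block_size; } page_t;
typedef struct { page_t* first; } page_queue_t;

static void queue_push(page_queue_t* q, page_t* p) { p->next = q->first; q->first = p; }

// stand-in for mi_page_fresh_alloc: enqueue only when a queue is given
static page_t* fresh_alloc(page_queue_t* pq, size_t block_size, page_t* storage) {
  page_t* page = storage;   // stand-in for the segment page allocation
  page->block_size = block_size;
  page->next = NULL;
  if (pq != NULL) queue_push(pq, page);  // huge pages use pq==NULL
  return page;
}

int main(void) {
  page_queue_t small_q = { NULL };
  page_t a, b;
  fresh_alloc(&small_q, 64, &a);                   // normal page: enqueued
  page_t* huge = fresh_alloc(NULL, 1u << 24, &b);  // huge page: no queue
  printf("queued block_size=%zu, huge in queue: %s\n",
         small_q.first->block_size,
         (huge == small_q.first) ? "yes" : "no");
  return 0;
}
```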
@@ -393,7 +397,7 @@ void _mi_page_retire(mi_page_t* page) {
   // is the only page left with free blocks. It is not clear
   // how to check this efficiently though... for now we just check
   // if its neighbours are almost fully used.
-  if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) {
+  if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
     if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
       _mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1);
       return; // don't retire after all
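Retirement is now deferred only for blocks up to a quarter of MI_SMALL_SIZE_MAX, and only when both queue neighbours are nearly full, since then this page is likely the last one with free blocks for its size class. A hedged sketch of that neighbour test; the list layout and the 7/8 "mostly used" threshold are my own choices, not mimalloc's:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct page_s {
  struct page_s* prev;
  struct page_s* next;
  int used;      // blocks in use
  int capacity;  // total blocks
} page_t;

static bool mostly_used(const page_t* p) {
  if (p == NULL) return true;               // treat a missing neighbour as "full"
  return p->used >= (p->capacity * 7) / 8;  // e.g. at least 7/8 full
}

// defer retiring when both neighbours are nearly full
static bool should_defer_retire(const page_t* page) {
  return mostly_used(page->prev) && mostly_used(page->next);
}

int main(void) {
  page_t left  = { NULL, NULL, 60, 64 };
  page_t mid   = { &left, NULL, 0, 64 };  // fully free: candidate to retire
  page_t right = { &mid, NULL, 64, 64 };
  left.next = &mid;
  mid.next = &right;
  printf("defer retire: %s\n", should_defer_retire(&mid) ? "yes" : "no");
  return 0;
}
```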
@@ -700,16 +704,21 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
   General allocation
 ----------------------------------------------------------- */
 
-// A huge page is allocated directly without being in a queue
+// A huge page is allocated directly without being in a queue.
+// Because huge pages contain just one block, and the segment contains
+// just that page, we always treat them as abandoned and any thread
+// that frees the block can free the whole page and segment directly.
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
   size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
   mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
-  mi_page_queue_t* pq = mi_page_queue(heap,block_size);
-  mi_assert_internal(mi_page_queue_is_huge(pq));
-  mi_page_t* page = mi_page_fresh_alloc(heap,pq,block_size);
+  mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
   if (page != NULL) {
     mi_assert_internal(mi_page_immediate_available(page));
     mi_assert_internal(page->block_size == block_size);
+    mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
+    mi_assert_internal(_mi_page_segment(page)->used==1);
+    mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
+    page->heap = NULL;
     if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
       _mi_stat_increase(&heap->tld->stats.giant, block_size);
       _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);
@@ -718,7 +727,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
       _mi_stat_increase(&heap->tld->stats.huge, block_size);
       _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
     }
   }
   return page;
 }
src/segment.c

@@ -134,7 +134,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment) {
     if (!segment->pages[i].segment_in_use) nfree++;
   }
   mi_assert_internal(nfree + segment->used == segment->capacity);
-  mi_assert_internal(segment->thread_id == _mi_thread_id()); // or 0
+  mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
   mi_assert_internal(segment->page_kind == MI_PAGE_HUGE ||
                      (mi_segment_pagesize(segment) * segment->capacity == segment->segment_size));
   return true;
@@ -236,8 +236,6 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
 
 
 // The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
-// and no more than 4.
-#define MI_SEGMENT_CACHE_MAX (4)
 #define MI_SEGMENT_CACHE_FRACTION (8)
 
 // note: returned segment may be partially reset
@@ -253,15 +251,18 @@ static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t* tld) {
   return segment;
 }
 
-static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
-  if (tld->cache_count < MI_SEGMENT_CACHE_MAX
-      && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))
-     ) { // always allow 1 element cache
+static bool mi_segment_cache_full(mi_segments_tld_t* tld)
+{
+  if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
+  size_t max_cache = mi_option_get(mi_option_segment_cache);
+  if (tld->cache_count < max_cache
+      && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
+     ) {
     return false;
   }
   // take the opportunity to reduce the segment cache if it is too large (now)
   // TODO: this never happens as we check against peak usage, should we use current usage instead?
-  while (tld->cache_count > MI_SEGMENT_CACHE_MAX ) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
+  while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
     mi_segment_t* segment = mi_segment_cache_pop(0,tld);
     mi_assert_internal(segment != NULL);
     if (segment != NULL) mi_segment_os_free(segment, segment->segment_size, tld);
@@ -700,6 +701,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld) {
   if (segment == NULL) return NULL;
   mi_assert_internal(segment->segment_size - segment->segment_info_size >= size);
   segment->used = 1;
+  segment->thread_id = 0; // huge pages are immediately abandoned
   mi_page_t* page = &segment->pages[0];
   page->segment_in_use = true;
   return page;
@@ -708,20 +710,16 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld) {
 /* -----------------------------------------------------------
    Page allocation and free
 ----------------------------------------------------------- */
-static bool mi_is_good_fit(size_t bsize, size_t size) {
-  // good fit if no more than 25% wasted
-  return (bsize > 0 && size > 0 && bsize < size && (size - (size % bsize)) < (size/4));
-}
 
 mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
   mi_page_t* page;
-  if (block_size <= MI_SMALL_OBJ_SIZE_MAX || mi_is_good_fit(block_size,MI_SMALL_PAGE_SIZE)) {
+  if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
     page = mi_segment_small_page_alloc(tld,os_tld);
   }
-  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX || mi_is_good_fit(block_size, MI_MEDIUM_PAGE_SIZE)) {
+  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
     page = mi_segment_medium_page_alloc(tld, os_tld);
   }
-  else if (block_size < MI_LARGE_OBJ_SIZE_MAX || mi_is_good_fit(block_size, MI_LARGE_PAGE_SIZE - sizeof(mi_segment_t))) {
+  else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
     page = mi_segment_large_page_alloc(tld, os_tld);
   }
   else {
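After this change the page kind is chosen purely by size class; the mi_is_good_fit heuristic is gone. A sketch of the resulting dispatch, with made-up placeholder thresholds standing in for the MI_*_OBJ_SIZE_MAX constants:

```c
#include <stdio.h>

#define SMALL_OBJ_MAX   (8 * 1024)         // placeholder for MI_SMALL_OBJ_SIZE_MAX
#define MEDIUM_OBJ_MAX  (128 * 1024)       // placeholder for MI_MEDIUM_OBJ_SIZE_MAX
#define LARGE_OBJ_MAX   (4 * 1024 * 1024)  // placeholder for MI_LARGE_OBJ_SIZE_MAX

typedef enum { PAGE_SMALL, PAGE_MEDIUM, PAGE_LARGE, PAGE_HUGE } page_kind_t;

// dispatch purely on the object-size class
static page_kind_t page_kind_for(size_t block_size) {
  if (block_size <= SMALL_OBJ_MAX)  return PAGE_SMALL;
  if (block_size <= MEDIUM_OBJ_MAX) return PAGE_MEDIUM;
  if (block_size <= LARGE_OBJ_MAX)  return PAGE_LARGE;
  return PAGE_HUGE;  // one block per page, segment immediately abandoned
}

int main(void) {
  const char* names[] = { "small", "medium", "large", "huge" };
  size_t sizes[] = { 64, 64 * 1024, 1024 * 1024, 16u * 1024 * 1024 };
  for (int i = 0; i < 4; i++) {
    printf("%zu -> %s\n", sizes[i], names[page_kind_for(sizes[i])]);
  }
  return 0;
}
```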