merge from dev-win

daan 2019-08-25 13:12:57 -07:00
commit b0e38d5697
10 changed files with 1084 additions and 34 deletions

src/options.c

@@ -24,6 +24,9 @@ int mi_version(void) mi_attr_noexcept {
// --------------------------------------------------------
// Options
// These can be accessed by multiple threads and may be
// concurrently initialized, but an initializing data race
// is ok since they resolve to the same value.
// --------------------------------------------------------
typedef enum mi_init_e {
UNINIT, // not yet initialized
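The benign race deserves a concrete picture. A minimal sketch of the pattern (illustrative, not the verbatim mimalloc code; mi_option_value is a hypothetical name):

static long mi_option_value(mi_option_desc_t* desc) {
  if (desc->init == UNINIT) {   // racy read, tolerated by design
    mi_option_init(desc);       // idempotent: every thread derives the same
  }                             // value from the same environment
  return desc->value;
}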
@@ -63,6 +66,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(decommit) },
{ 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
{ 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
{ 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(page_reset) },
{ 0, UNINIT, MI_OPTION(cache_reset) },
{ 0, UNINIT, MI_OPTION(reset_decommits) } // note: cannot enable this if secure is on
@@ -227,27 +231,18 @@ static void mi_strlcat(char* dest, const char* src, size_t dest_size) {
// On Windows use GetEnvironmentVariable instead of getenv to work
// reliably even when this is invoked before the C runtime is initialized.
// i.e. when `_mi_preloading() == true`.
// Note: on windows, environment names are not case sensitive.
#include <windows.h>
static bool mi_getenv(const char* name, char* result, size_t result_size) {
result[0] = 0;
bool ok = (GetEnvironmentVariableA(name, result, (DWORD)result_size) > 0);
if (!ok) {
char buf[64+1];
size_t len = strlen(name);
if (len >= sizeof(buf)) len = sizeof(buf) - 1;
for (size_t i = 0; i < len; i++) {
buf[i] = toupper(name[i]);
}
buf[len] = 0;
ok = (GetEnvironmentVariableA(name, result, (DWORD)result_size) > 0);
}
return ok;
size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size);
return (len > 0 && len < result_size);
}
#else
static bool mi_getenv(const char* name, char* result, size_t result_size) {
#pragma warning(suppress:4996)
const char* s = getenv(name);
if (s == NULL) {
// in unix environments we check the upper case name too.
char buf[64+1];
size_t len = strlen(name);
if (len >= sizeof(buf)) len = sizeof(buf) - 1;
@@ -255,7 +250,6 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
buf[i] = toupper(name[i]);
}
buf[len] = 0;
#pragma warning(suppress:4996)
s = getenv(buf);
}
if (s != NULL && strlen(s) < result_size) {
@@ -267,8 +261,7 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
}
}
#endif
static void mi_option_init(mi_option_desc_t* desc) {
desc->init = DEFAULTED;
static void mi_option_init(mi_option_desc_t* desc) {
// Read option value from the environment
char buf[64+1];
mi_strlcpy(buf, "mimalloc_", sizeof(buf));
@@ -298,7 +291,12 @@ static void mi_option_init(mi_option_desc_t* desc) {
}
else {
_mi_warning_message("environment option mimalloc_%s has an invalid value: %s\n", desc->name, buf);
desc->init = DEFAULTED;
}
}
}
else {
desc->init = DEFAULTED;
}
mi_assert_internal(desc->init != UNINIT);
}

src/page.c

@@ -396,7 +396,7 @@ void _mi_page_retire(mi_page_t* page) {
// is the only page left with free blocks. It is not clear
// how to check this efficiently though... for now we just check
// if its neighbours are almost fully used.
if (mi_likely(page->block_size <= 32*MI_INTPTR_SIZE)) {
if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
_mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1);
return; // don't retire after all
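For reference, assuming the mimalloc.h definition MI_SMALL_SIZE_MAX = 128*sizeof(void*), the two bounds coincide: on 64-bit, 32*MI_INTPTR_SIZE = 32*8 = 256 bytes and MI_SMALL_SIZE_MAX/4 = 1024/4 = 256 bytes, and on 32-bit both work out to 128 bytes. The rewrite changes no behavior; it ties the retire heuristic to the small-object limit instead of a hard-coded word count.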
@@ -705,7 +705,11 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
General allocation
----------------------------------------------------------- */
// Large and huge pages are allocated directly
// Large and huge page allocation.
// Huge pages are allocated directly without being in a queue.
// Because huge pages contain just one block, and the segment contains
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
static mi_page_t* mi_large_page_alloc(mi_heap_t* heap, size_t size) {
size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
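Because a huge page is treated as abandoned from the start, whichever thread frees the single block can tear down the page and segment directly. A usage sketch with the public API (the 64 MiB size is an arbitrary choice that comfortably exceeds the large-object limit):

#include <mimalloc.h>
#include <pthread.h>

static void* free_it(void* arg) {
  mi_free(arg);   // frees the block, and with it the whole page and segment
  return NULL;
}

int main(void) {
  void* p = mi_malloc(64 * 1024 * 1024);  // huge: one block, its own segment
  pthread_t t;
  pthread_create(&t, NULL, free_it, p);   // freed by a different thread
  pthread_join(t, NULL);
  return 0;
}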

src/segment.c

@@ -289,8 +289,6 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
// and no more than 4.
#define MI_SEGMENT_CACHE_MAX (4)
#define MI_SEGMENT_CACHE_FRACTION (8)
// note: returned segment may be partially reset
@@ -306,15 +304,18 @@ static mi_segment_t* mi_segment_cache_pop(size_t segment_slices, mi_segments_tld
return segment;
}
static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
if (tld->cache_count < MI_SEGMENT_CACHE_MAX
&& tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))
) { // always allow 1 element cache
static bool mi_segment_cache_full(mi_segments_tld_t* tld)
{
if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
size_t max_cache = mi_option_get(mi_option_segment_cache);
if (tld->cache_count < max_cache
&& tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
) {
return false;
}
// take the opportunity to reduce the segment cache if it is too large (now)
// TODO: this never happens as we check against peak usage, should we use current usage instead?
while (tld->cache_count > MI_SEGMENT_CACHE_MAX ) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
mi_segment_t* segment = mi_segment_cache_pop(0,tld);
mi_assert_internal(segment != NULL);
if (segment != NULL) mi_segment_os_free(segment, tld);
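The per-thread cache bound is therefore min(segment_cache option, 1 + peak_count/MI_SEGMENT_CACHE_FRACTION). The MI_OPTION(segment_cache) entry above implies a public mi_option_segment_cache enum value, so the cap can be tuned at runtime as well as via MIMALLOC_SEGMENT_CACHE; a sketch:

#include <mimalloc.h>

int main(void) {
  // Equivalent to running with MIMALLOC_SEGMENT_CACHE=8: keep up to
  // min(8, 1 + peak/8) freed segments per thread for quick reuse.
  mi_option_set(mi_option_segment_cache, 8);
  void* p = mi_malloc(1024);
  mi_free(p);
  return 0;
}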
@@ -922,19 +923,12 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
/* -----------------------------------------------------------
Page allocation and free
----------------------------------------------------------- */
/*
static bool mi_is_good_fit(size_t bsize, size_t size) {
// good fit if no more than 25% wasted
return (bsize > 0 && size > 0 && bsize < size && (size - (size % bsize)) < (size/4));
}
*/
mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
mi_page_t* page;
if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {// || mi_is_good_fit(block_size,MI_SMALL_PAGE_SIZE)) {
if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
page = mi_segments_page_alloc(MI_PAGE_SMALL,block_size,tld,os_tld);
}
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {// || mi_is_good_fit(block_size, MI_MEDIUM_PAGE_SIZE)) {
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
page = mi_segments_page_alloc(MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,tld, os_tld);
}
else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
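For orientation, the dispatch above picks a page kind purely by block size. A comment sketch with the classic 64-bit defaults (small pages 64 KiB, medium 512 KiB, an object class capped at a fraction of its page size; treat the exact limits as assumptions of this sketch):

// mi_malloc(1024)              -> small page  (<= MI_SMALL_OBJ_SIZE_MAX)
// mi_malloc(100 * 1024)        -> medium page (<= MI_MEDIUM_OBJ_SIZE_MAX)
// mi_malloc(1024 * 1024)       -> large page  (<= MI_LARGE_OBJ_SIZE_MAX)
// mi_malloc(64 * 1024 * 1024)  -> huge page   (one block, its own segment)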