Merge branch 'dev' into dev-slice

Daan Leijen 2022-04-02 11:42:02 -07:00
commit 1f089e99f6
4 changed files with 70 additions and 56 deletions

Binary file not shown.

Binary file not shown.


@@ -19,8 +19,8 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
-static size_t mi_max_error_count = 16;   // stop outputting errors after this
-static size_t mi_max_warning_count = 16; // stop outputting warnings after this
+static long mi_max_error_count = 16;     // stop outputting errors after this (use < 0 for no limit)
+static long mi_max_warning_count = 16;   // stop outputting warnings after this (use < 0 for no limit)
 static void mi_add_stderr_output(void);
@@ -322,11 +322,22 @@ void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {
   va_end(args);
 }
 
+static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) {
+  if (prefix != NULL && strlen(prefix) <= 32 && !_mi_is_main_thread()) {
+    char tprefix[64];
+    snprintf(tprefix, sizeof(tprefix), "%sthread 0x%zx: ", prefix, _mi_thread_id());
+    mi_vfprintf(out, arg, tprefix, fmt, args);
+  }
+  else {
+    mi_vfprintf(out, arg, prefix, fmt, args);
+  }
+}
+
 void _mi_trace_message(const char* fmt, ...) {
   if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher
   va_list args;
   va_start(args, fmt);
-  mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args);
+  mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args);
   va_end(args);
 }
@@ -339,17 +350,21 @@ void _mi_verbose_message(const char* fmt, ...) {
 }
 
 static void mi_show_error_message(const char* fmt, va_list args) {
-  if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
-  if (mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
-  mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
+  if (!mi_option_is_enabled(mi_option_verbose)) {
+    if (!mi_option_is_enabled(mi_option_show_errors)) return;
+    if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
+  }
+  mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args);
 }
 
 void _mi_warning_message(const char* fmt, ...) {
-  if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
-  if (mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
+  if (!mi_option_is_enabled(mi_option_verbose)) {
+    if (!mi_option_is_enabled(mi_option_show_errors)) return;
+    if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
+  }
   va_list args;
   va_start(args,fmt);
-  mi_vfprintf(NULL, NULL, "mimalloc: warning: ", fmt, args);
+  mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args);
   va_end(args);
 }
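The new mi_vfprintf_thread path above reduces to a small reusable pattern: build a bounded prefix that includes the calling thread id, then forward the caller's va_list unchanged. Below is a minimal standalone sketch of that pattern, not mimalloc code; the names log_with_thread_prefix and warn_example and the fixed thread id are illustrative stand-ins (mimalloc uses _mi_thread_id() and _mi_is_main_thread()).

#include <stdio.h>
#include <stdarg.h>
#include <string.h>

// prepend "prefix + thread id" so interleaved output from many threads is attributable
static void log_with_thread_prefix(const char* prefix, size_t thread_id,
                                    const char* fmt, va_list args) {
  if (prefix != NULL && strlen(prefix) <= 32) {
    char tprefix[64];  // a <=32 byte prefix plus "thread 0x...: " always fits
    snprintf(tprefix, sizeof(tprefix), "%sthread 0x%zx: ", prefix, thread_id);
    fputs(tprefix, stderr);
  }
  else if (prefix != NULL) {
    fputs(prefix, stderr);
  }
  vfprintf(stderr, fmt, args);   // forward the caller's arguments unchanged
}

static void warn_example(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  log_with_thread_prefix("example: warning: ", (size_t)0x1234, fmt, args);
  va_end(args);
}

int main(void) {
  warn_example("out of memory (%zu bytes requested)\n", (size_t)1024);
  return 0;
}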


@@ -68,6 +68,7 @@ terms of the MIT license. A copy of the license can be found in the file
    large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
 ----------------------------------------------------------- */
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
 
 static void* mi_align_up_ptr(void* p, size_t alignment) {
   return (void*)_mi_align_up((uintptr_t)p, alignment);
@@ -283,21 +284,35 @@ static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats
   if (addr == NULL || size == 0) return true; // || _mi_os_is_huge_reserved(addr)
   bool err = false;
 #if defined(_WIN32)
+  DWORD errcode = 0;
   err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
+  if (err) { errcode = GetLastError(); }
+  if (errcode == ERROR_INVALID_ADDRESS) {
+    // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside
+    // the memory region returned by VirtualAlloc; in that case we need to free using
+    // the start of the region.
+    MEMORY_BASIC_INFORMATION info = { 0, 0 };
+    VirtualQuery(addr, &info, sizeof(info));
+    if (info.AllocationBase < addr) {
+      errcode = 0;
+      err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
+      if (err) { errcode = GetLastError(); }
+    }
+  }
+  if (errcode != 0) {
+    _mi_warning_message("unable to release OS memory: error code 0x%x, addr: %p, size: %zu\n", errcode, addr, size);
+  }
 #elif defined(MI_USE_SBRK) || defined(__wasi__)
-  err = 0; // sbrk heap cannot be shrunk
+  err = false; // sbrk heap cannot be shrunk
 #else
   err = (munmap(addr, size) == -1);
-#endif
-  if (was_committed) _mi_stat_decrease(&stats->committed, size);
-  _mi_stat_decrease(&stats->reserved, size);
   if (err) {
-    _mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size);
-    return false;
-  }
-  else {
-    return true;
+    _mi_warning_message("unable to release OS memory: %s, addr: %p, size: %zu\n", strerror(errno), addr, size);
   }
+#endif
+  if (was_committed) { _mi_stat_decrease(&stats->committed, size); }
+  _mi_stat_decrease(&stats->reserved, size);
+  return !err;
 }
 
 #if !(defined(__wasi__) || defined(MI_USE_SBRK) || defined(MAP_ALIGNED))
@@ -328,7 +343,7 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
         return NULL;
       }
       */
-      _mi_warning_message("unable to allocate hinted aligned OS memory (%zu bytes, error code: %x, address: %p, alignment: %d, flags: %x)\n", size, GetLastError(), hint, try_alignment, flags);
+      _mi_warning_message("unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %d, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
     }
   }
 #endif
@@ -342,7 +357,7 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
     param.Pointer = &reqs;
     void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
     if (p != NULL) return p;
-    _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: %x, address: %p, alignment: %d, flags: %x)\n", size, GetLastError(), addr, try_alignment, flags);
+    _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %d, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
     // fall through on error
   }
 #endif
@@ -354,6 +369,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
   mi_assert_internal(!(large_only && !allow_large));
   static _Atomic(size_t) large_page_try_ok; // = 0;
   void* p = NULL;
+  // Try to allocate large OS pages (2MiB) if allowed or required.
   if ((large_only || use_large_os_page(size, try_alignment))
       && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
     size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
@@ -373,12 +389,13 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
       }
     }
   }
+  // Fall back to regular page allocation
   if (p == NULL) {
     *is_large = ((flags&MEM_LARGE_PAGES) != 0);
     p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
   }
   if (p == NULL) {
-    _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: %i, address: %p, large only: %d, allow large: %d)\n", size, GetLastError(), addr, large_only, allow_large);
+    _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %d, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large);
   }
   return p;
 }
@@ -692,7 +709,7 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, boo
 #if defined(_WIN32)
   int flags = MEM_RESERVE;
-  if (commit) flags |= MEM_COMMIT;
+  if (commit) { flags |= MEM_COMMIT; }
   p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
 #elif defined(MI_USE_SBRK) || defined(__wasi__)
   MI_UNUSED(allow_large);
@@ -727,41 +744,23 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
   // if not aligned, free it, overallocate, and unmap around it
   if (((uintptr_t)p % alignment != 0)) {
     mi_os_mem_free(p, size, commit, stats);
+    _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (%zu bytes, address: %p, commit: %d, is_large: %d)\n", size, p, commit, *is_large);
     if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
-    size_t over_size = size + alignment;
+    const size_t over_size = size + alignment;
 #if _WIN32
-    // over-allocate and than re-allocate exactly at an aligned address in there.
-    // this may fail due to threads allocating at the same time so we
-    // retry this at most 3 times before giving up.
-    // (we can not decommit around the overallocation on Windows, because we can only
-    //  free the original pointer, not one pointing inside the area)
-    int flags = MEM_RESERVE;
-    if (commit) flags |= MEM_COMMIT;
-    for (int tries = 0; tries < 3; tries++) {
-      // over-allocate to determine a virtual memory range
-      p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
-      if (p == NULL) return NULL; // error
-      if (((uintptr_t)p % alignment) == 0) {
-        // if p happens to be aligned, just decommit the left-over area
-        _mi_os_decommit((uint8_t*)p + size, over_size - size, stats);
-        break;
-      }
-      else {
-        // otherwise free and allocate at an aligned address in there
-        mi_os_mem_free(p, over_size, commit, stats);
-        void* aligned_p = mi_align_up_ptr(p, alignment);
-        p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
-        if (p != NULL) {
-          _mi_stat_increase(&stats->reserved, size);
-          if (commit) { _mi_stat_increase(&stats->committed, size); }
-        }
-        if (p == aligned_p) break; // success!
-        if (p != NULL) { // should not happen?
-          mi_os_mem_free(p, size, commit, stats);
-          p = NULL;
-        }
-      }
-    }
+    // over-allocate uncommitted (virtual) memory
+    p = mi_os_mem_alloc(over_size, 0 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, stats);
+    if (p == NULL) return NULL;
+
+    // set p to the aligned part in the full region
+    // note: this is dangerous on Windows as VirtualFree needs the actual region pointer
+    // but in mi_os_mem_free we handle this (hopefully exceptional) situation.
+    p = mi_align_up_ptr(p, alignment);
+
+    // explicitly commit only the aligned part
+    if (commit) {
+      _mi_os_commit(p, size, NULL, stats);
+    }
 #else
     // overallocate...
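The two os.c changes above work together: mi_os_mem_alloc_aligned may now hand out a pointer that lies inside a larger reserved region, and mi_os_mem_free recovers the region start via VirtualQuery when VirtualFree fails with ERROR_INVALID_ADDRESS. Below is a standalone Win32 sketch of that reserve / align / commit / free sequence; it is illustrative only, not mimalloc code, and the sizes and variable names are made up.

#include <windows.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const size_t size      = 1 << 20;   // 1 MiB payload
  const size_t alignment = 1 << 22;   // want a 4 MiB aligned block
  const size_t over_size = size + alignment;

  // 1. reserve (but do not commit) an over-sized region
  void* base = VirtualAlloc(NULL, over_size, MEM_RESERVE, PAGE_READWRITE);
  if (base == NULL) return 1;

  // 2. align up inside the reservation and commit only the part that is needed
  uintptr_t aligned = ((uintptr_t)base + alignment - 1) & ~(uintptr_t)(alignment - 1);
  void* p = VirtualAlloc((void*)aligned, size, MEM_COMMIT, PAGE_READWRITE);
  if (p == NULL) return 1;
  printf("reserved at %p, aligned usable block at %p\n", base, p);

  // 3. to release, VirtualFree needs the reservation base, not the interior pointer;
  //    VirtualQuery's AllocationBase recovers it, as mi_os_mem_free now does
  MEMORY_BASIC_INFORMATION info;
  VirtualQuery(p, &info, sizeof(info));
  return VirtualFree(info.AllocationBase, 0, MEM_RELEASE) ? 0 : 1;
}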