mirror of https://github.com/microsoft/mimalloc.git
synced 2025-07-07 20:08:41 +03:00

Merge branch 'dev' into dev-trace

commit 0dafa1e0a0

15 changed files with 142 additions and 98 deletions
src/alloc-override-osx.c

@@ -43,7 +43,7 @@ extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_import));

 static size_t zone_size(malloc_zone_t* zone, const void* p) {
   MI_UNUSED(zone);
-  //if (!mi_is_in_heap_region(p)){ return 0; } // not our pointer, bail out
+  if (!mi_is_in_heap_region(p)){ return 0; } // not our pointer, bail out
   return mi_usable_size(p);
 }
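Returning 0 from zone_size() matters beyond statistics: on macOS the malloc layer consults a zone's size() callback to decide whether that zone owns a pointer, and 0 means "not mine", letting free() fall through to the zone that actually allocated the block. A minimal sketch of that dispatch idea (find_owning_zone and the zones array are hypothetical, not Apple's actual libmalloc code):

#include <malloc/malloc.h>
#include <stddef.h>

// Sketch: dispatch a pointer to the first registered zone that claims it.
// A zone claims a pointer by returning a non-zero size for it.
static malloc_zone_t* find_owning_zone(void* p, malloc_zone_t** zones, size_t n) {
  for (size_t i = 0; i < n; i++) {
    if (zones[i]->size(zones[i], p) > 0) return zones[i];  // 0 == "not mine"
  }
  return NULL;
}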
@@ -64,7 +64,7 @@ static void* zone_valloc(malloc_zone_t* zone, size_t size) {

 static void zone_free(malloc_zone_t* zone, void* p) {
   MI_UNUSED(zone);
-  mi_free(p);
+  mi_cfree(p);
 }

 static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) {
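The switch from mi_free to mi_cfree makes zone_free defensive: mi_cfree first checks that the pointer is actually mimalloc-owned and ignores it otherwise, which is the right behavior in an interposed zone where pointers from another allocator can arrive. A sketch of the same pattern using the public API (the wrapper itself is illustrative; mi_check_owned and mi_free are real mimalloc functions):

#include <mimalloc.h>

// Illustrative checked-free wrapper: only release pointers mimalloc owns.
static void free_if_ours(void* p) {
  if (p != NULL && mi_check_owned(p)) {
    mi_free(p);
  }
  // otherwise: the pointer belongs to another allocator; leave it alone
}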
@@ -373,7 +373,7 @@ __attribute__((used)) static const struct mi_interpose_s _mi_zone_interposes[]

   MI_INTERPOSE_MI(_malloc_fork_child),
   MI_INTERPOSE_MI(_malloc_fork_parent),
   MI_INTERPOSE_MI(_malloc_fork_prepare),

   MI_INTERPOSE_ZONE(zone_batch_free),
   MI_INTERPOSE_ZONE(zone_batch_malloc),
   MI_INTERPOSE_ZONE(zone_calloc),
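These MI_INTERPOSE_* entries all expand to records in the Mach-O __DATA,__interpose section, which dyld scans at load time to rebind every call to the original function onto the replacement. The technique in isolation, outside mimalloc's macros (my_malloc is illustrative):

#include <stdio.h>
#include <stdlib.h>

// Replacement that logs and forwards to the real malloc.
static void* my_malloc(size_t n) {
  fprintf(stderr, "malloc(%zu)\n", n);
  return malloc(n);
}

// A dyld interpose record pairs { replacement, original }; dyld rebinds
// callers of the original to the replacement when this image is loaded.
__attribute__((used)) static struct { const void* replacement; const void* target; }
  my_interpose[] __attribute__((section("__DATA,__interpose"))) = {
    { (const void*)&my_malloc, (const void*)&malloc },
};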
src/alloc-override.c

@@ -16,6 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file

 #if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32))

 #if defined(__APPLE__)
+#include <AvailabilityMacros.h>
 mi_decl_externc void vfree(void* p);
 mi_decl_externc size_t malloc_size(const void* p);
 mi_decl_externc size_t malloc_good_size(size_t size);

@@ -77,7 +78,9 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;

   MI_INTERPOSE_MI(valloc),
   MI_INTERPOSE_MI(malloc_size),
   MI_INTERPOSE_MI(malloc_good_size),
+  #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
   MI_INTERPOSE_MI(aligned_alloc),
+  #endif
   #ifdef MI_OSX_ZONE
   // we interpose malloc_default_zone in alloc-override-osx.c so we can use mi_free safely
   MI_INTERPOSE_MI(free),
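The new AvailabilityMacros guard exists because aligned_alloc only appeared in macOS 10.15: interposing a symbol the linked SDK does not declare would break builds against older SDKs. The gating pattern on its own (a sketch; the branch bodies stand in for any 10.15-only reference):

#include <AvailabilityMacros.h>

#if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
  // Compiled only when the SDK knows about 10.15 symbols such as aligned_alloc.
#else
  // Older SDK: referencing aligned_alloc here would fail to compile or link.
#endif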
@@ -91,15 +94,18 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;

   #ifdef __cplusplus
   extern "C" {
-    void _ZdlPv(void* p); // delete
-    void _ZdaPv(void* p); // delete[]
-    void _ZdlPvm(void* p, size_t n); // delete
-    void _ZdaPvm(void* p, size_t n); // delete[]
-    void* _Znwm(size_t n); // new
-    void* _Znam(size_t n); // new[]
-    void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new nothrow
-    void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new[] nothrow
-  }
-  #endif
+  #endif
+  void _ZdlPv(void* p); // delete
+  void _ZdaPv(void* p); // delete[]
+  void _ZdlPvm(void* p, size_t n); // delete
+  void _ZdaPvm(void* p, size_t n); // delete[]
+  void* _Znwm(size_t n); // new
+  void* _Znam(size_t n); // new[]
+  void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new nothrow
+  void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new[] nothrow
+  #ifdef __cplusplus
+  }
+  #endif
   __attribute__((used)) static struct mi_interpose_s _mi_cxx_interposes[] __attribute__((section("__DATA, __interpose"))) =
   {
     MI_INTERPOSE_FUN(_ZdlPv,mi_free),
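The declarations above spell out Itanium-ABI mangled names because the interpose table needs plain C symbols for the C++ operators. For reference (64-bit mangling; verifiable with, e.g., echo _ZnwmRKSt9nothrow_t | c++filt):

// _Znwm                  operator new(unsigned long)
// _Znam                  operator new[](unsigned long)
// _ZdlPv                 operator delete(void*)
// _ZdaPv                 operator delete[](void*)
// _ZdlPvm                operator delete(void*, unsigned long)
// _ZdaPvm                operator delete[](void*, unsigned long)
// _ZnwmRKSt9nothrow_t    operator new(unsigned long, std::nothrow_t const&)
// _ZnamRKSt9nothrow_t    operator new[](unsigned long, std::nothrow_t const&)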
@@ -111,7 +117,6 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;

     MI_INTERPOSE_FUN(_ZnwmRKSt9nothrow_t,mi_new_nothrow),
     MI_INTERPOSE_FUN(_ZnamRKSt9nothrow_t,mi_new_nothrow),
   };
-  #endif // __cplusplus

 #elif defined(_MSC_VER)
   // cannot override malloc unless using a dll.
@@ -161,7 +166,9 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;

   void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
   void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); };
   void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); };
+  void operator delete (void* p, std::align_val_t al, const std::nothrow_t& tag) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
+  void operator delete[](void* p, std::align_val_t al, const std::nothrow_t& tag) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }

   void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
   void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
   void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
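The two added overloads matter for exception safety: when a nothrow aligned new succeeds but the object's constructor throws, the runtime invokes the matching nothrow aligned delete, so an allocator that overrides one must supply both. A small C++ usage sketch, since this hunk is C++ (Widget is illustrative):

#include <new>

struct Widget {
  alignas(64) double data[8];
  Widget() { /* if this threw, the matching nothrow aligned delete would run */ }
};

int main() {
  Widget* w = new (std::nothrow) Widget();  // operator new(size_t, align_val_t, nothrow_t)
  delete w;                                 // sized + aligned operator delete
  return 0;
}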
src/alloc.c

@@ -546,6 +546,7 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)

   #if (MI_DEBUG>0 || MI_SECURE>=4)
   if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
     return NULL;
   }
   #endif
   return segment;
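The now-reported check works because every mimalloc segment stores a cookie derived from its own address and a per-process random secret; a stray or forged pointer lands in memory whose cookie field cannot match. A simplified sketch of the idea (names and the keyed mix are illustrative, not mimalloc's exact internals):

#include <stdint.h>
#include <stddef.h>

static uintptr_t process_secret;  // initialized at startup from a CSPRNG

typedef struct segment_s {
  uintptr_t cookie;  // set to ptr_cookie(self) when the segment is created
} segment_t;

static uintptr_t ptr_cookie(const segment_t* s) {
  return ((uintptr_t)s ^ process_secret);
}

static int segment_is_valid(const segment_t* s) {
  return (s != NULL && s->cookie == ptr_cookie(s));
}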
src/options.c

@@ -19,8 +19,8 @@ terms of the MIT license. A copy of the license can be found in the file

 #endif

-static size_t mi_max_error_count   = 16; // stop outputting errors after this
-static size_t mi_max_warning_count = 16; // stop outputting warnings after this
+static long mi_max_error_count   = 16; // stop outputting errors after this (use < 0 for no limit)
+static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit)

 static void mi_add_stderr_output(void);
@@ -163,10 +163,22 @@ void mi_option_disable(mi_option_t option) {

 static void mi_out_stderr(const char* msg, void* arg) {
   MI_UNUSED(arg);
   if (msg == NULL) return;
   #ifdef _WIN32
   // on windows with redirection, the C runtime cannot handle locale dependent output
   // after the main thread closes so we use direct console output.
-  if (!_mi_preloading()) { _cputs(msg); }
+  if (!_mi_preloading()) {
+    // _cputs(msg); // _cputs cannot be used as it aborts when failing to lock the console
+    static HANDLE hcon = INVALID_HANDLE_VALUE;
+    if (hcon == INVALID_HANDLE_VALUE) {
+      hcon = GetStdHandle(STD_ERROR_HANDLE);
+    }
+    const size_t len = strlen(msg);
+    if (hcon != INVALID_HANDLE_VALUE && len > 0 && len < UINT32_MAX) {
+      DWORD written = 0;
+      WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL);
+    }
+  }
   #else
   fputs(msg, stderr);
   #endif
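The replacement avoids the C runtime entirely, which is the point: after the CRT begins shutting down (or when it cannot lock the console), _cputs and fputs are unsafe, while raw Win32 handle writes remain valid. The same pattern in isolation, with one wrinkle worth noting: WriteConsoleA only succeeds on a real console handle, so a redirected stderr needs WriteFile instead (a sketch):

#include <windows.h>
#include <string.h>

// Write msg to stderr without touching the C runtime.
static void raw_stderr_write(const char* msg) {
  HANDLE h = GetStdHandle(STD_ERROR_HANDLE);
  const size_t len = strlen(msg);
  if (h == INVALID_HANDLE_VALUE || h == NULL || len == 0 || len >= UINT32_MAX) return;
  DWORD written = 0;
  if (!WriteConsoleA(h, msg, (DWORD)len, &written, NULL)) {
    WriteFile(h, msg, (DWORD)len, &written, NULL);  // handle was redirected to a file/pipe
  }
}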
@@ -322,11 +334,22 @@ void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {

   va_end(args);
 }

+static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) {
+  if (prefix != NULL && strlen(prefix) <= 32 && !_mi_is_main_thread()) {
+    char tprefix[64];
+    snprintf(tprefix, sizeof(tprefix), "%sthread 0x%zx: ", prefix, _mi_thread_id());
+    mi_vfprintf(out, arg, tprefix, fmt, args);
+  }
+  else {
+    mi_vfprintf(out, arg, prefix, fmt, args);
+  }
+}
+
 void _mi_trace_message(const char* fmt, ...) {
   if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher
   va_list args;
   va_start(args, fmt);
-  mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args);
+  mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args);
   va_end(args);
 }
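mi_vfprintf_thread tags messages from non-main threads with the thread id so interleaved output can be attributed; the strlen(prefix) <= 32 guard ensures the composed prefix always fits the 64-byte buffer. The composition step on its own (illustrative; the id would come from _mi_thread_id, pthread_self, or GetCurrentThreadId):

#include <stdio.h>
#include <string.h>

static void log_tagged(const char* prefix, unsigned long thread_id, const char* msg) {
  char tprefix[64];
  if (prefix != NULL && strlen(prefix) <= 32) {  // leaves room for "thread 0x...: "
    snprintf(tprefix, sizeof(tprefix), "%sthread 0x%lx: ", prefix, thread_id);
    fprintf(stderr, "%s%s", tprefix, msg);
  }
  else {
    fprintf(stderr, "%s%s", (prefix == NULL ? "" : prefix), msg);
  }
}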
@@ -339,17 +362,21 @@ void _mi_verbose_message(const char* fmt, ...) {

 }

 static void mi_show_error_message(const char* fmt, va_list args) {
-  if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
-  if (mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
-  mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
+  if (!mi_option_is_enabled(mi_option_verbose)) {
+    if (!mi_option_is_enabled(mi_option_show_errors)) return;
+    if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
+  }
+  mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args);
 }

 void _mi_warning_message(const char* fmt, ...) {
-  if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
-  if (mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
+  if (!mi_option_is_enabled(mi_option_verbose)) {
+    if (!mi_option_is_enabled(mi_option_show_errors)) return;
+    if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
+  }
   va_list args;
   va_start(args,fmt);
-  mi_vfprintf(NULL, NULL, "mimalloc: warning: ", fmt, args);
+  mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args);
   va_end(args);
 }
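The restructured guards encode two rules: verbose mode always prints (bypassing the cap), and a negative maximum disables the cap, which the old unsigned size_t counters could not express. The core rate-limit idiom reduced to essentials (a sketch with C11 atomics instead of mimalloc's mi_atomic wrappers):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long error_count = 0;
static long max_error_count = 16;  // < 0 means: no limit

static void report_error(const char* msg) {
  // atomically claim a slot; once past the cap, further messages are dropped
  if (max_error_count >= 0 &&
      atomic_fetch_add_explicit(&error_count, 1, memory_order_acq_rel) + 1 > max_error_count) {
    return;
  }
  fprintf(stderr, "error: %s\n", msg);
}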
src/os.c (107 changed lines)

@@ -67,7 +67,8 @@ terms of the MIT license. A copy of the license can be found in the file

    On windows initializes support for aligned allocation and
    large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
 ----------------------------------------------------------- */
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);

 static void* mi_align_up_ptr(void* p, size_t alignment) {
   return (void*)_mi_align_up((uintptr_t)p, alignment);
@@ -294,24 +295,38 @@ static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* stats)

   if (addr == NULL || size == 0) return true; // || _mi_os_is_huge_reserved(addr)
   bool err = false;
 #if defined(_WIN32)
+  DWORD errcode = 0;
   err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
+  if (err) { errcode = GetLastError(); }
+  if (errcode == ERROR_INVALID_ADDRESS) {
+    // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside
+    // the memory region returned by VirtualAlloc; in that case we need to free using
+    // the start of the region.
+    MEMORY_BASIC_INFORMATION info = { 0, 0 };
+    VirtualQuery(addr, &info, sizeof(info));
+    if (info.AllocationBase < addr) {
+      errcode = 0;
+      err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
+      if (err) { errcode = GetLastError(); }
+    }
+  }
+  if (errcode != 0) {
+    _mi_warning_message("unable to release OS memory: error code 0x%x, addr: %p, size: %zu\n", errcode, addr, size);
+  }
 #elif defined(MI_USE_SBRK) || defined(__wasi__)
-  err = 0; // sbrk heap cannot be shrunk
+  err = false; // sbrk heap cannot be shrunk
 #else
   err = (munmap(addr, size) == -1);
+  if (err) {
+    _mi_warning_message("unable to release OS memory: %s, addr: %p, size: %zu\n", strerror(errno), addr, size);
+  }
 #endif
-  if (was_committed) _mi_stat_decrease(&stats->committed, size);
-  _mi_stat_decrease(&stats->reserved, size);
-  if (err) {
-    _mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size);
-    return false;
-  }
-  else {
-    return true;
-  }
+  if (was_committed) { _mi_stat_decrease(&stats->committed, size); }
+  _mi_stat_decrease(&stats->reserved, size);
+  return !err;
 }

-#if !defined(MI_USE_SBRK) && !defined(__wasi__)
+#if !(defined(__wasi__) || defined(MI_USE_SBRK) || defined(MAP_ALIGNED))
 static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size);
 #endif
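The Windows branch can now release a region even when handed an interior pointer: VirtualFree(MEM_RELEASE) demands the exact base address VirtualAlloc returned, so on ERROR_INVALID_ADDRESS the code looks up the region's AllocationBase via VirtualQuery and retries from there. That recovery step in isolation (a sketch of the same Win32 calls):

#include <windows.h>

// Release an allocation when p may point inside it rather than at its base.
static BOOL free_possibly_interior(void* p) {
  if (VirtualFree(p, 0, MEM_RELEASE)) return TRUE;
  if (GetLastError() == ERROR_INVALID_ADDRESS) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery(p, &info, sizeof(info)) > 0 && info.AllocationBase < p) {
      return VirtualFree(info.AllocationBase, 0, MEM_RELEASE);
    }
  }
  return FALSE;
}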
@@ -328,15 +343,8 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags)

   if (hint != NULL) {
     void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
     if (p != NULL) return p;
-    // for robustness always fall through in case of an error
-    /*
-    DWORD err = GetLastError();
-    if (err != ERROR_INVALID_ADDRESS &&   // If linked with multiple instances, we may have tried to allocate at an already allocated area (#210)
-        err != ERROR_INVALID_PARAMETER) { // Windows7 instability (#230)
-      return NULL;
-    }
-    */
-    _mi_warning_message("unable to allocate hinted aligned OS memory (%zu bytes, error code: %x, address: %p, alignment: %d, flags: %x)\n", size, GetLastError(), hint, try_alignment, flags);
+    _mi_warning_message("unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
+    // fall through on error
   }
 }
 #endif
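Besides dropping the dead commented-out branch, the new message fixes its format specifiers: try_alignment is a size_t, so %d (which reads an int) was mismatched on 64-bit targets, and the 0x prefixes make the hex values unambiguous. For example:

#include <stdio.h>

int main(void) {
  size_t try_alignment = 65536;
  unsigned int errcode = 0xC0000005u;
  // was:  printf("error code: %x, alignment: %d\n", errcode, try_alignment);  // %d mismatches size_t
  printf("error code: 0x%x, alignment: %zu\n", errcode, try_alignment);
  return 0;
}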
@@ -350,7 +358,7 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags)

     param.Pointer = &reqs;
     void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
     if (p != NULL) return p;
-    _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: %x, address: %p, alignment: %d, flags: %x)\n", size, GetLastError(), addr, try_alignment, flags);
+    _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
     // fall through on error
   }
 #endif
@@ -362,6 +370,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large)

   mi_assert_internal(!(large_only && !allow_large));
   static _Atomic(size_t) large_page_try_ok; // = 0;
   void* p = NULL;
+  // Try to allocate large OS pages (2MiB) if allowed or required.
   if ((large_only || use_large_os_page(size, try_alignment))
       && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
     size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
@@ -381,12 +390,13 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large)

       }
     }
   }
+  // Fall back to regular page allocation
   if (p == NULL) {
     *is_large = ((flags&MEM_LARGE_PAGES) != 0);
     p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
   }
   if (p == NULL) {
-    _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: %i, address: %p, large only: %d, allow large: %d)\n", size, GetLastError(), addr, large_only, allow_large);
+    _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large);
   }
   return p;
 }
@@ -662,7 +672,7 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size)

   if (hint%try_alignment != 0) return NULL;
   return (void*)hint;
 }
-#elif defined(__wasi__) || defined(MI_USE_SBRK)
+#elif defined(__wasi__) || defined(MI_USE_SBRK) || defined(MAP_ALIGNED)
 // no need for mi_os_get_aligned_hint
 #else
 static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
@@ -695,7 +705,7 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats)

 #if defined(_WIN32)
   int flags = MEM_RESERVE;
-  if (commit) flags |= MEM_COMMIT;
+  if (commit) { flags |= MEM_COMMIT; }
   p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
 #elif defined(MI_USE_SBRK) || defined(__wasi__)
   MI_UNUSED(allow_large);
@@ -719,6 +729,7 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats)

 static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
   mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
   mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+  mi_assert_internal(is_large != NULL);
   if (!commit) allow_large = false;
   if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
   size = _mi_align_up(size, _mi_os_page_size());
@@ -726,45 +737,27 @@

   // try first with a hint (this will be aligned directly on Win 10+ or BSD)
   void* p = mi_os_mem_alloc(size, alignment, commit, allow_large, is_large, stats);
   if (p == NULL) return NULL;

   // if not aligned, free it, overallocate, and unmap around it
   if (((uintptr_t)p % alignment != 0)) {
     mi_os_mem_free(p, size, commit, stats);
+    _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (%zu bytes, address: %p, alignment: %zu, commit: %d)\n", size, p, alignment, commit);
     if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
-    size_t over_size = size + alignment;
+    const size_t over_size = size + alignment;

 #if _WIN32
-    // over-allocate and than re-allocate exactly at an aligned address in there.
-    // this may fail due to threads allocating at the same time so we
-    // retry this at most 3 times before giving up.
-    // (we can not decommit around the overallocation on Windows, because we can only
-    //  free the original pointer, not one pointing inside the area)
-    int flags = MEM_RESERVE;
-    if (commit) flags |= MEM_COMMIT;
-    for (int tries = 0; tries < 3; tries++) {
-      // over-allocate to determine a virtual memory range
-      p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
-      if (p == NULL) return NULL; // error
-      if (((uintptr_t)p % alignment) == 0) {
-        // if p happens to be aligned, just decommit the left-over area
-        _mi_os_decommit((uint8_t*)p + size, over_size - size, stats);
-        break;
-      }
-      else {
-        // otherwise free and allocate at an aligned address in there
-        mi_os_mem_free(p, over_size, commit, stats);
-        void* aligned_p = mi_align_up_ptr(p, alignment);
-        p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
-        if (p != NULL) {
-          _mi_stat_increase(&stats->reserved, size);
-          if (commit) { _mi_stat_increase(&stats->committed, size); }
-        }
-        if (p == aligned_p) break; // success!
-        if (p != NULL) { // should not happen?
-          mi_os_mem_free(p, size, commit, stats);
-          p = NULL;
-        }
-      }
-    }
+    // over-allocate uncommitted (virtual) memory
+    p = mi_os_mem_alloc(over_size, 0 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, stats);
+    if (p == NULL) return NULL;
+
+    // set p to the aligned part in the full region
+    // note: this is dangerous on Windows as VirtualFree needs the actual region pointer
+    // but in mi_os_mem_free we handle this (hopefully exceptional) situation.
+    p = mi_align_up_ptr(p, alignment);
+
+    // explicitly commit only the aligned part
+    if (commit) {
+      _mi_os_commit(p, size, NULL, stats);
+    }
 #else
     // overallocate...
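The new Windows path replaces the three-try reallocation loop with a single step: reserve size + alignment bytes of uncommitted address space, round the pointer up to the alignment inside that region, and commit only the aligned part; the interior-pointer handling added to mi_os_mem_free above is what makes releasing such a region possible later. The pointer arithmetic at the heart of this (equivalent to mi_align_up_ptr):

#include <stdint.h>
#include <stddef.h>

// Round p up to the next multiple of alignment (a power of two).
// Reserving size + alignment bytes guarantees the aligned pointer plus size
// bytes still fits: at most alignment - 1 bytes are skipped at the front.
static void* align_up_ptr(void* p, size_t alignment) {
  const uintptr_t mask = (uintptr_t)alignment - 1;
  return (void*)(((uintptr_t)p + mask) & ~mask);
}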
src/page.c (12 changed lines)

@@ -252,7 +252,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size)

   // a fresh page was found, initialize it
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   mi_page_init(heap, page, block_size, heap->tld);
-  _mi_stat_increase(&heap->tld->stats.pages, 1);
+  mi_heap_stat_increase(heap, pages, 1);
   if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
   mi_assert_expensive(_mi_page_is_valid(page));
   return page;
@@ -688,7 +688,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)

     page = next;
   } // for each page

-  mi_stat_counter_increase(heap->tld->stats.searches, count);
+  mi_heap_stat_counter_increase(heap, searches, count);

   if (page == NULL) {
     _mi_heap_collect_retired(heap, false); // perhaps make a page available
@@ -780,12 +780,12 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {

     mi_page_set_heap(page, NULL);

     if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
-      _mi_stat_increase(&heap->tld->stats.giant, bsize);
-      _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);
+      mi_heap_stat_increase(heap, giant, bsize);
+      mi_heap_stat_counter_increase(heap, giant_count, 1);
     }
     else {
-      _mi_stat_increase(&heap->tld->stats.huge, bsize);
-      _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
+      mi_heap_stat_increase(heap, huge, bsize);
+      mi_heap_stat_counter_increase(heap, huge_count, 1);
     }
   }
   return page;
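All three page.c hunks make the same substitution: direct pokes at heap->tld->stats fields become mi_heap_stat_increase / mi_heap_stat_counter_increase calls, keeping call sites uniform and letting a build without statistics compile the updates away. A sketch of that macro pattern (simplified and illustrative, not mimalloc's exact definitions):

// One build flag turns every stat update into a no-op.
#if MY_STATS
  #define heap_stat_increase(heap, field, amount) \
    ((heap)->tld->stats.field.current += (long)(amount))
#else
  #define heap_stat_increase(heap, field, amount) ((void)(heap), (void)(amount))
#endif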
src/random.c

@@ -195,6 +195,7 @@ static bool os_random_buf(void* buf, size_t buf_len) {

 #elif defined(__APPLE__)
 #include <AvailabilityMacros.h>
 #if defined(MAC_OS_X_VERSION_10_10) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_10
+#include <CommonCrypto/CommonCryptoError.h>
 #include <CommonCrypto/CommonRandom.h>
 #endif
 static bool os_random_buf(void* buf, size_t buf_len) {
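The guard keeps random.c building against pre-10.10 SDKs, where CCRandomGenerateBytes and the status codes from CommonCryptoError.h do not exist. Typical use of that API when it is available (a sketch; error handling reduced to the status check):

#include <CommonCrypto/CommonCryptoError.h>
#include <CommonCrypto/CommonRandom.h>
#include <stdbool.h>
#include <stddef.h>

static bool fill_random(void* buf, size_t len) {
  // CCRandomGenerateBytes returns kCCSuccess (0) when buf is fully filled.
  return (CCRandomGenerateBytes(buf, len) == kCCSuccess);
}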