improved malloc zone handling on macOSX (not working yet)
commit f3c47c7c91 (parent 757dcc8411)
6 changed files with 44 additions and 22 deletions
@@ -298,7 +298,7 @@ mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing hea
 #endif

 #if defined(MI_TLS_SLOT)
-static inline void* mi_tls_slot(size_t slot);                   // forward declaration
+static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept;  // forward declaration
 #elif defined(MI_TLS_PTHREAD_SLOT_OFS)
 #include <pthread.h>
 static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
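
Note: the only change in this hunk is that `mi_tls_slot` now carries `mi_attr_noexcept`. For context, such a slot accessor is typically used to fetch the thread's default heap without going through pthread_getspecific; the helper below is only an illustrative sketch (the helper name and the assumption about what the slot stores are mine, not part of this commit):

    // Illustrative sketch only (hypothetical helper, not mimalloc's code):
    // assume the TLS slot holds the thread's default heap pointer.
    #if defined(MI_TLS_SLOT)
    static inline mi_heap_t* mi_heap_from_tls_slot(void) {
      return (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
    }
    #endif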
@@ -14,6 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #error "this file should only be included on macOS"
 #endif

+#warning "malloc zones do not seem to work for now; use MI_INTERPOSE instead"
 /* ------------------------------------------------------
    Override system malloc on macOS
    This is done through the malloc zone interface.
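
Note: the malloc zone interface referred to above works by filling in a `malloc_zone_t` record from `<malloc/malloc.h>` with function pointers and passing it to `malloc_zone_register`. A minimal sketch of that wiring, reusing the `zone_*` callbacks defined below (simplified: the version field, introspection table, and default-zone promotion are left out):

    #include <malloc/malloc.h>

    static malloc_zone_t my_zone;        // illustrative zone instance, not mimalloc's

    static void my_register_zone(void) {
      my_zone.zone_name = "my-allocator";
      my_zone.size    = &zone_size;      // returns 0 for pointers we do not own
      my_zone.malloc  = &zone_malloc;
      my_zone.calloc  = &zone_calloc;
      my_zone.valloc  = &zone_valloc;
      my_zone.free    = &zone_free;
      my_zone.realloc = &zone_realloc;
      my_zone.destroy = &zone_destroy;
      malloc_zone_register(&my_zone);    // registers it, but does not make it the default
    }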
@@ -35,34 +36,42 @@ extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_im
 ------------------------------------------------------ */

 static size_t zone_size(malloc_zone_t* zone, const void* p) {
+  UNUSED(zone); UNUSED(p);
   return 0; // as we cannot guarantee that `p` comes from us, just return 0
 }

 static void* zone_malloc(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone);
   return mi_malloc(size);
 }

 static void* zone_calloc(malloc_zone_t* zone, size_t count, size_t size) {
+  UNUSED(zone);
   return mi_calloc(count, size);
 }

 static void* zone_valloc(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone);
   return mi_malloc_aligned(size, _mi_os_page_size());
 }

 static void zone_free(malloc_zone_t* zone, void* p) {
+  UNUSED(zone);
   return mi_free(p);
 }

 static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) {
+  UNUSED(zone);
   return mi_realloc(p, newsize);
 }

 static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) {
+  UNUSED(zone);
   return mi_malloc_aligned(size,alignment);
 }

 static void zone_destroy(malloc_zone_t* zone) {
+  UNUSED(zone);
   // todo: ignore for now?
 }

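Note: the added `UNUSED(...)` statements only silence unused-parameter warnings; the zone callbacks ignore their `zone` argument and forward straight to the corresponding `mi_` functions. The macro comes from mimalloc's internal header and amounts to something like the line below (a sketch; see mimalloc-internal.h for the exact definition):

    #define UNUSED(x)  (void)(x)   // evaluate and discard, suppressing -Wunused-parameter
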
@@ -83,11 +92,13 @@ static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) {
 }

 static size_t zone_pressure_relief(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone); UNUSED(size);
   mi_collect(false);
   return 0;
 }

 static void zone_free_definite_size(malloc_zone_t* zone, void* p, size_t size) {
+  UNUSED(size);
   zone_free(zone,p);
 }

@@ -102,34 +113,43 @@ static kern_return_t intro_enumerator(task_t task, void* p,
                                       vm_range_recorder_t recorder)
 {
   // todo: enumerate all memory
+  UNUSED(task); UNUSED(p); UNUSED(type_mask); UNUSED(zone_address);
+  UNUSED(reader); UNUSED(recorder);
   return KERN_SUCCESS;
 }

 static size_t intro_good_size(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone);
   return mi_good_size(size);
 }

 static boolean_t intro_check(malloc_zone_t* zone) {
+  UNUSED(zone);
   return true;
 }

 static void intro_print(malloc_zone_t* zone, boolean_t verbose) {
+  UNUSED(zone); UNUSED(verbose);
   mi_stats_print(NULL);
 }

 static void intro_log(malloc_zone_t* zone, void* p) {
+  UNUSED(zone); UNUSED(p);
   // todo?
 }

 static void intro_force_lock(malloc_zone_t* zone) {
+  UNUSED(zone);
   // todo?
 }

 static void intro_force_unlock(malloc_zone_t* zone) {
+  UNUSED(zone);
   // todo?
 }

 static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
+  UNUSED(zone);
   // todo...
   stats->blocks_in_use = 0;
   stats->size_in_use = 0;
@@ -138,6 +158,7 @@ static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
 }

 static boolean_t intro_zone_locked(malloc_zone_t* zone) {
+  UNUSED(zone);
   return false;
 }

@@ -161,7 +182,6 @@ static malloc_zone_t* mi_get_default_zone()
   }
 }

 static void __attribute__((constructor)) _mi_macos_override_malloc()
 {
   static malloc_introspection_t intro;
@@ -201,6 +221,7 @@ static void __attribute__((constructor)) _mi_macos_override_malloc()
   zone.free_definite_size = &zone_free_definite_size;
   zone.pressure_relief = &zone_pressure_relief;
   intro.zone_locked = &intro_zone_locked;
+  intro.statistics = &intro_statistics;

   // force the purgeable zone to exist to avoid strange bugs
   if (malloc_default_purgeable_zone) {
@@ -225,6 +246,7 @@ static void __attribute__((constructor)) _mi_macos_override_malloc()
     malloc_zone_unregister(purgeable_zone);
     malloc_zone_register(purgeable_zone);
   }

 }

 #endif // MI_MALLOC_OVERRIDE
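
Note: on macOS the default malloc zone is effectively the first entry in the registered-zone list, so a replacement zone is normally promoted by registering it and then unregistering and re-registering the zones that should sit behind it; the purgeable-zone shuffle above follows that pattern. A rough sketch of the idiom (hypothetical helper, error handling omitted):

    // Illustrative sketch of promoting `zone` to be the process-wide default zone.
    static void my_make_default_zone(malloc_zone_t* zone) {
      malloc_zone_t* old_default = malloc_default_zone();
      malloc_zone_register(zone);              // append our zone to the zone list
      if (old_default != zone) {
        malloc_zone_unregister(old_default);   // removing the old default ...
        malloc_zone_register(old_default);     // ... and re-adding it places it after ours
      }
    }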
@@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)"
 #endif

-#if defined(MI_MALLOC_OVERRIDE) && !defined(_WIN32)
+#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32) || (defined(__MACH__) && !defined(MI_INTERPOSE)))

 // ------------------------------------------------------
 // Override system malloc
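
Note: MI_INTERPOSE, which the #warning in alloc-override-osx.c now recommends over malloc zones, uses dyld interposing: the image emits (replacement, original) pairs into a `__DATA,__interpose` section and dyld rebinds the originals at load time. A generic sketch of the mechanism (not mimalloc's exact macro or table):

    #include <stdlib.h>
    #include <mimalloc.h>

    // Each entry tells dyld: wherever `target` would be bound, bind `replacement` instead.
    struct my_interpose_s {
      const void* replacement;
      const void* target;
    };

    __attribute__((used)) static const struct my_interpose_s my_interposers[]
      __attribute__((section("__DATA,__interpose"))) = {
      { (const void*)&mi_malloc,  (const void*)&malloc  },
      { (const void*)&mi_calloc,  (const void*)&calloc  },
      { (const void*)&mi_realloc, (const void*)&realloc },
      { (const void*)&mi_free,    (const void*)&free    },
    };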
@@ -68,10 +68,10 @@ terms of the MIT license. A copy of the license can be found in the file
   // we just override new/delete which does work in a static library.
 #else
   // On all other systems forward to our API
-  void* malloc(size_t size)               mi_attr_noexcept  MI_FORWARD1(mi_malloc, size);
-  void* calloc(size_t size, size_t n)     mi_attr_noexcept  MI_FORWARD2(mi_calloc, size, n);
-  void* realloc(void* p, size_t newsize)  mi_attr_noexcept  MI_FORWARD2(mi_realloc, p, newsize);
-  void  free(void* p)                     mi_attr_noexcept  MI_FORWARD0(mi_free, p);
+  void* malloc(size_t size)               MI_FORWARD1(mi_malloc, size);
+  void* calloc(size_t size, size_t n)     MI_FORWARD2(mi_calloc, size, n);
+  void* realloc(void* p, size_t newsize)  MI_FORWARD2(mi_realloc, p, newsize);
+  void  free(void* p)                     MI_FORWARD0(mi_free, p);
 #endif

 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__MACH__)
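
Note: the MI_FORWARDn macros turn these declarations into thin wrappers around the corresponding `mi_` functions, either by emitting a small forwarding body or, where supported, an alias attribute; this hunk only drops `mi_attr_noexcept` from the plain C entry points. A simplified sketch of the forwarding-body flavour, with hypothetical macro names so it is not mistaken for the real definitions:

    #include <stddef.h>     // size_t
    #include <mimalloc.h>   // mi_malloc, mi_realloc, mi_free

    #define MY_FORWARD1(fun,x)    { return fun(x); }
    #define MY_FORWARD2(fun,x,y)  { return fun(x,y); }
    #define MY_FORWARD0(fun,x)    { fun(x); }        // for the void-returning free

    void* malloc(size_t size)               MY_FORWARD1(mi_malloc, size)
    void* realloc(void* p, size_t newsize)  MY_FORWARD2(mi_realloc, p, newsize)
    void  free(void* p)                     MY_FORWARD0(mi_free, p)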
@@ -99,8 +99,8 @@ terms of the MIT license. A copy of the license can be found in the file
   void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { UNUSED(tag); return mi_new_nothrow(n); }

   #if (__cplusplus >= 201402L || _MSC_VER >= 1916)
-  void operator delete  (void* p, std::size_t n) MI_FORWARD02(mi_free_size,p,n);
-  void operator delete[](void* p, std::size_t n) MI_FORWARD02(mi_free_size,p,n);
+  void operator delete  (void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
+  void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
   #endif

   #if (__cplusplus > 201402L || defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5))
src/alloc.c (16 lines changed)
@@ -212,7 +212,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
   size_t delta;
   bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
   mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
   return (ok ? bsize - delta : 0);
 }

 static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
@@ -259,7 +259,7 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
   mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
   padding->delta = (uint32_t)new_delta;
 }
 #else
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   UNUSED(page);
   UNUSED(block);
@@ -359,7 +359,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
     }
     else if (mi_unlikely(mi_page_is_in_full(page))) {
       _mi_page_unfull(page);
     }
   }
   else {
     _mi_free_block_mt(page,block);
@@ -401,7 +401,7 @@ void mi_free(void* p) mi_attr_noexcept
       "(this may still be a valid very large allocation (over 64MiB))\n", p);
     if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
       _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
     }
   }
 #endif
 #if (MI_DEBUG!=0 || MI_SECURE>=4)
@@ -421,11 +421,11 @@ void mi_free(void* p) mi_attr_noexcept
   mi_heap_stat_decrease(heap, malloc, bsize);
   if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
     mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
   }
 #endif

   if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     if (mi_unlikely(mi_check_is_double_free(page,block))) return;
     mi_check_padding(page, block);
     #if (MI_DEBUG!=0)
@@ -436,7 +436,7 @@ void mi_free(void* p) mi_attr_noexcept
     page->used--;
     if (mi_unlikely(mi_page_all_free(page))) {
       _mi_page_retire(page);
     }
   }
   else {
     // non-local, aligned blocks, or a full page; use the more generic path
@@ -473,7 +473,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
   const mi_segment_t* const segment = _mi_ptr_segment(p);
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
   const mi_block_t* const block = (const mi_block_t*)p;
   const size_t size = mi_page_usable_size_of(page, block);
   if (mi_unlikely(mi_page_has_aligned(page))) {
     ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
@@ -34,7 +34,7 @@ const mi_page_t _mi_page_empty = {

 #if defined(MI_PADDING) && (MI_INTPTR_SIZE >= 8)
 #define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
 #elif defined(MI_PADDING)
 #define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
 #else
 #define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
@@ -190,7 +190,7 @@ static bool _mi_heap_init(void) {
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
   heap->tld = tld;
   tld->heap_backing = heap;
   tld->segments.stats = &tld->stats;
   tld->segments.os = &tld->os;
@@ -421,9 +421,9 @@ static void mi_process_load(void) {
   volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
   UNUSED(dummy);
   #endif
   os_preloading = false;
   atexit(&mi_process_done);
   _mi_options_init();
   mi_process_init();
   //mi_stats_reset();
   if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");
@@ -38,7 +38,7 @@ static bool allow_large_objects = true; // allow very large objects?
 static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`?


-#ifdef USE_STD_MALLOC
+#ifndef USE_STD_MALLOC
 #define custom_calloc(n,s)    calloc(n,s)
 #define custom_realloc(p,s)   realloc(p,s)
 #define custom_free(p)        free(p)