improved malloc zone handling on macOS (not working yet)

daan 2020-02-02 21:03:09 -08:00
parent 757dcc8411
commit f3c47c7c91
6 changed files with 44 additions and 22 deletions

View file

@@ -298,7 +298,7 @@ mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
 #endif
 #if defined(MI_TLS_SLOT)
-static inline void* mi_tls_slot(size_t slot);                   // forward declaration
+static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept;  // forward declaration
 #elif defined(MI_TLS_PTHREAD_SLOT_OFS)
 #include <pthread.h>
 static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {

View file

@@ -14,6 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #error "this file should only be included on macOS"
 #endif
+#warning "malloc zones do not seem to work for now; use MI_INTERPOSE instead"
 /* ------------------------------------------------------
    Override system malloc on macOS
    This is done through the malloc zone interface.
@@ -35,34 +36,42 @@ extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_import));
 ------------------------------------------------------ */
 static size_t zone_size(malloc_zone_t* zone, const void* p) {
+  UNUSED(zone); UNUSED(p);
   return 0; // as we cannot guarantee that `p` comes from us, just return 0
 }
 static void* zone_malloc(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone);
   return mi_malloc(size);
 }
 static void* zone_calloc(malloc_zone_t* zone, size_t count, size_t size) {
+  UNUSED(zone);
   return mi_calloc(count, size);
 }
 static void* zone_valloc(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone);
   return mi_malloc_aligned(size, _mi_os_page_size());
 }
 static void zone_free(malloc_zone_t* zone, void* p) {
+  UNUSED(zone);
   return mi_free(p);
 }
 static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) {
+  UNUSED(zone);
   return mi_realloc(p, newsize);
 }
 static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) {
+  UNUSED(zone);
   return mi_malloc_aligned(size,alignment);
 }
 static void zone_destroy(malloc_zone_t* zone) {
+  UNUSED(zone);
   // todo: ignore for now?
 }
@@ -83,11 +92,13 @@ static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) {
 }
 static size_t zone_pressure_relief(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone); UNUSED(size);
   mi_collect(false);
   return 0;
 }
 static void zone_free_definite_size(malloc_zone_t* zone, void* p, size_t size) {
+  UNUSED(size);
   zone_free(zone,p);
 }
@@ -102,34 +113,43 @@ static kern_return_t intro_enumerator(task_t task, void* p,
                                       vm_range_recorder_t recorder)
 {
   // todo: enumerate all memory
+  UNUSED(task); UNUSED(p); UNUSED(type_mask); UNUSED(zone_address);
+  UNUSED(reader); UNUSED(recorder);
   return KERN_SUCCESS;
 }
 static size_t intro_good_size(malloc_zone_t* zone, size_t size) {
+  UNUSED(zone);
   return mi_good_size(size);
 }
 static boolean_t intro_check(malloc_zone_t* zone) {
+  UNUSED(zone);
   return true;
 }
 static void intro_print(malloc_zone_t* zone, boolean_t verbose) {
+  UNUSED(zone); UNUSED(verbose);
   mi_stats_print(NULL);
 }
 static void intro_log(malloc_zone_t* zone, void* p) {
+  UNUSED(zone); UNUSED(p);
   // todo?
 }
 static void intro_force_lock(malloc_zone_t* zone) {
+  UNUSED(zone);
   // todo?
 }
 static void intro_force_unlock(malloc_zone_t* zone) {
+  UNUSED(zone);
   // todo?
 }
 static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
+  UNUSED(zone);
   // todo...
   stats->blocks_in_use = 0;
   stats->size_in_use = 0;
@@ -138,6 +158,7 @@ static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
 }
 static boolean_t intro_zone_locked(malloc_zone_t* zone) {
+  UNUSED(zone);
   return false;
 }
@@ -161,7 +182,6 @@ static malloc_zone_t* mi_get_default_zone()
   }
 }
 static void __attribute__((constructor)) _mi_macos_override_malloc()
 {
   static malloc_introspection_t intro;
@@ -201,6 +221,7 @@ static void __attribute__((constructor)) _mi_macos_override_malloc()
   zone.free_definite_size = &zone_free_definite_size;
   zone.pressure_relief = &zone_pressure_relief;
   intro.zone_locked = &intro_zone_locked;
+  intro.statistics = &intro_statistics;
   // force the purgeable zone to exist to avoid strange bugs
   if (malloc_default_purgeable_zone) {
@@ -225,6 +246,7 @@ static void __attribute__((constructor)) _mi_macos_override_malloc()
     malloc_zone_unregister(purgeable_zone);
     malloc_zone_register(purgeable_zone);
   }
 }
 #endif // MI_MALLOC_OVERRIDE
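
For readers who have not used the zone interface this file programs against, here is a minimal, self-contained sketch of the same registration dance: fill a malloc_zone_t with callbacks, register it, then unregister and re-register the previous default zone so the new zone moves to slot 0 and becomes the default. Unlike the mimalloc code, this sketch merely forwards to the captured system zone, and all names (my_malloc, sys_zone, register_my_zone, demo_zone) are invented for illustration; the batch, statistics, and pressure-relief callbacks that a production override wires up are omitted.

// zone-demo.c -- illustrative sketch only; not mimalloc code
#include <malloc/malloc.h>
#include <string.h>

static malloc_zone_t* sys_zone;   // the original default zone, captured before we take over

static size_t my_size(malloc_zone_t* z, const void* p)        { (void)z; return sys_zone->size(sys_zone, p); }
static void*  my_malloc(malloc_zone_t* z, size_t n)           { (void)z; return malloc_zone_malloc(sys_zone, n); }
static void*  my_calloc(malloc_zone_t* z, size_t c, size_t n) { (void)z; return malloc_zone_calloc(sys_zone, c, n); }
static void*  my_valloc(malloc_zone_t* z, size_t n)           { (void)z; return malloc_zone_valloc(sys_zone, n); }
static void   my_free(malloc_zone_t* z, void* p)              { (void)z; malloc_zone_free(sys_zone, p); }
static void*  my_realloc(malloc_zone_t* z, void* p, size_t n) { (void)z; return malloc_zone_realloc(sys_zone, p, n); }
static void   my_destroy(malloc_zone_t* z)                    { (void)z; }  // our zone is never destroyed

// fork() locks every registered zone through its introspection table,
// so force_lock/force_unlock must at least exist (no-ops here).
static void   my_force_lock(malloc_zone_t* z)                 { (void)z; }
static void   my_force_unlock(malloc_zone_t* z)               { (void)z; }
static size_t my_good_size(malloc_zone_t* z, size_t n)        { (void)z; return n; }

__attribute__((constructor))
static void register_my_zone(void) {
  static malloc_introspection_t intro;   // static: must stay valid for the lifetime of the process
  static malloc_zone_t zone;
  sys_zone = malloc_default_zone();      // capture the real allocator first

  memset(&intro, 0, sizeof(intro));
  intro.good_size    = &my_good_size;
  intro.force_lock   = &my_force_lock;
  intro.force_unlock = &my_force_unlock;

  memset(&zone, 0, sizeof(zone));
  zone.version    = 4;                   // version 4: memalign/free_definite_size/pressure_relief not required
  zone.zone_name  = "demo_zone";
  zone.size       = &my_size;
  zone.malloc     = &my_malloc;
  zone.calloc     = &my_calloc;
  zone.valloc     = &my_valloc;
  zone.free       = &my_free;
  zone.realloc    = &my_realloc;
  zone.destroy    = &my_destroy;
  zone.introspect = &intro;

  malloc_zone_register(&zone);           // add our zone ...
  malloc_zone_unregister(sys_zone);      // ... then unregister and re-register the old default,
  malloc_zone_register(sys_zone);        // which pushes it behind ours so ours becomes zone 0
}

Even with this dance, whether the new zone really ends up as the default varies across macOS releases, which is what the #warning at the top of the file is about; the interpose-based override touched in the next file is the alternative path.
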

View file

@@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)"
 #endif
-#if defined(MI_MALLOC_OVERRIDE) && !defined(_WIN32)
+#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32) || (defined(__MACH__) && !defined(MI_INTERPOSE)))
 // ------------------------------------------------------
 // Override system malloc
@@ -68,10 +68,10 @@ terms of the MIT license. A copy of the license can be found in the file
 // we just override new/delete which does work in a static library.
 #else
 // On all other systems forward to our API
-void* malloc(size_t size) mi_attr_noexcept              MI_FORWARD1(mi_malloc, size);
-void* calloc(size_t size, size_t n) mi_attr_noexcept    MI_FORWARD2(mi_calloc, size, n);
-void* realloc(void* p, size_t newsize) mi_attr_noexcept MI_FORWARD2(mi_realloc, p, newsize);
-void  free(void* p) mi_attr_noexcept                    MI_FORWARD0(mi_free, p);
+void* malloc(size_t size)              MI_FORWARD1(mi_malloc, size);
+void* calloc(size_t size, size_t n)    MI_FORWARD2(mi_calloc, size, n);
+void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize);
+void  free(void* p)                    MI_FORWARD0(mi_free, p);
 #endif
 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__MACH__)
@@ -99,8 +99,8 @@ terms of the MIT license. A copy of the license can be found in the file
 void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { UNUSED(tag); return mi_new_nothrow(n); }
 #if (__cplusplus >= 201402L || _MSC_VER >= 1916)
-void operator delete  (void* p, std::size_t n)          MI_FORWARD02(mi_free_size,p,n);
-void operator delete[](void* p, std::size_t n)          MI_FORWARD02(mi_free_size,p,n);
+void operator delete  (void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
+void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
 #endif
 #if (__cplusplus > 201402L || defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5))
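
The widened condition in the first hunk means that on macOS these forwarding overrides are only compiled when MI_INTERPOSE is defined, matching the #warning in the zone file. For background, here is a minimal sketch of the dyld __interpose mechanism itself; the names my_malloc, interpose_s, interposers, and alloc_count are invented for this sketch, while mimalloc's actual MI_INTERPOSE table pairs malloc, calloc, realloc, and free with their mi_ counterparts in the same way.

// interpose-demo.c -- illustrative sketch only; not mimalloc code
#include <stdio.h>
#include <stdlib.h>

static _Atomic size_t alloc_count = 0;

static void* my_malloc(size_t size) {
  alloc_count++;        // no stdio here: printing may allocate and re-enter the interposer
  return malloc(size);  // dyld does not rebind references inside the interposing image, so this reaches libc
}

// Each tuple asks dyld to rebind references to `target` in other images to `replacement`.
struct interpose_s {
  const void* replacement;
  const void* target;
};

__attribute__((used)) static const struct interpose_s interposers[]
  __attribute__((section("__DATA,__interpose"))) = {
  { (const void*)&my_malloc, (const void*)&malloc },
};

__attribute__((destructor)) static void report(void) {
  fprintf(stderr, "interposed malloc calls: %zu\n", (size_t)alloc_count);
}

Built as a dynamic library (for example with clang -dynamiclib interpose-demo.c -o libdemo.dylib) and injected via DYLD_INSERT_LIBRARIES, this routes every malloc call made by the host process through my_malloc without touching the zone list, which is presumably why the #warning recommends MI_INTERPOSE while the zone path is not working.
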

View file

@@ -212,7 +212,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
   size_t delta;
   bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
   mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
   return (ok ? bsize - delta : 0);
 }
 static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
@@ -259,7 +259,7 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
   mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
   padding->delta = (uint32_t)new_delta;
 }
 #else
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   UNUSED(page);
   UNUSED(block);
@@ -359,7 +359,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
     }
     else if (mi_unlikely(mi_page_is_in_full(page))) {
       _mi_page_unfull(page);
     }
   }
   else {
     _mi_free_block_mt(page,block);
@@ -401,7 +401,7 @@ void mi_free(void* p) mi_attr_noexcept
       "(this may still be a valid very large allocation (over 64MiB))\n", p);
     if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
       _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
     }
   }
 #endif
 #if (MI_DEBUG!=0 || MI_SECURE>=4)
@@ -421,11 +421,11 @@ void mi_free(void* p) mi_attr_noexcept
   mi_heap_stat_decrease(heap, malloc, bsize);
   if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
     mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
   }
 #endif
   if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     if (mi_unlikely(mi_check_is_double_free(page,block))) return;
     mi_check_padding(page, block);
 #if (MI_DEBUG!=0)
@@ -436,7 +436,7 @@ void mi_free(void* p) mi_attr_noexcept
     page->used--;
     if (mi_unlikely(mi_page_all_free(page))) {
       _mi_page_retire(page);
     }
   }
   else {
     // non-local, aligned blocks, or a full page; use the more generic path
@@ -473,7 +473,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
   const mi_segment_t* const segment = _mi_ptr_segment(p);
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
   const mi_block_t* const block = (const mi_block_t*)p;
   const size_t size = mi_page_usable_size_of(page, block);
   if (mi_unlikely(mi_page_has_aligned(page))) {
     ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);

View file

@@ -34,7 +34,7 @@ const mi_page_t _mi_page_empty = {
 #if defined(MI_PADDING) && (MI_INTPTR_SIZE >= 8)
 #define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
 #elif defined(MI_PADDING)
 #define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
 #else
 #define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
@@ -190,7 +190,7 @@ static bool _mi_heap_init(void) {
     heap->cookie = _mi_heap_random_next(heap) | 1;
     heap->keys[0] = _mi_heap_random_next(heap);
     heap->keys[1] = _mi_heap_random_next(heap);
     heap->tld = tld;
     tld->heap_backing = heap;
     tld->segments.stats = &tld->stats;
     tld->segments.os = &tld->os;
@@ -421,9 +421,9 @@ static void mi_process_load(void) {
   volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
   UNUSED(dummy);
 #endif
   os_preloading = false;
   atexit(&mi_process_done);
   _mi_options_init();
   mi_process_init();
   //mi_stats_reset();-
   if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");

View file

@@ -38,7 +38,7 @@ static bool allow_large_objects = true; // allow very large objects?
 static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`?
-#ifdef USE_STD_MALLOC
+#ifndef USE_STD_MALLOC
 #define custom_calloc(n,s)  calloc(n,s)
 #define custom_realloc(p,s) realloc(p,s)
 #define custom_free(p)      free(p)