fix compilation with C++, fix overrides in C++ to adhere to the spec (issue #26)

daan 2019-07-07 18:11:21 -07:00
parent dd59a917ce
commit c3528203b5
10 changed files with 133 additions and 35 deletions

View file

@@ -30,12 +30,14 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_FORWARD2(fun,x,y) MI_FORWARD(fun)
#define MI_FORWARD3(fun,x,y,z) MI_FORWARD(fun)
#define MI_FORWARD0(fun,x) MI_FORWARD(fun)
#define MI_FORWARD02(fun,x,y) MI_FORWARD(fun)
#else
// use forwarding by calling our `mi_` function
#define MI_FORWARD1(fun,x) { return fun(x); }
#define MI_FORWARD2(fun,x,y) { return fun(x,y); }
#define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); }
#define MI_FORWARD0(fun,x) { fun(x); }
#define MI_FORWARD02(fun,x,y) { fun(x,y); }
#endif
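
For illustration (not part of the diff): on the non-aliasing branch these macros simply generate a small body that calls the corresponding `mi_` function. A minimal sketch of the expansion, with `mi_malloc`/`mi_free` stubbed via the C heap so it compiles and runs standalone:

// Minimal sketch of how the forwarding macros expand on the non-aliasing branch
// (illustrative only; mi_malloc/mi_free are stubbed with the C heap here).
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

static void* mi_malloc_stub(std::size_t n) { return std::malloc(n); }
static void  mi_free_stub(void* p)         { std::free(p); }

// `void operator delete(void* p) noexcept MI_FORWARD0(mi_free,p);` expands to:
void operator delete(void* p) noexcept { mi_free_stub(p); }
// `void* operator new(std::size_t n, const std::nothrow_t& tag) noexcept MI_FORWARD1(mi_malloc,n);` expands to:
void* operator new(std::size_t n, const std::nothrow_t&) noexcept { return mi_malloc_stub(n); }

int main() {
  int* p = new (std::nothrow) int(7);   // goes through the expanded nothrow operator new
  if (p != nullptr) std::printf("%d\n", *p);
  delete p;                             // goes through the expanded operator delete
  return 0;
}
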
#if defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_INTERPOSE)
@@ -81,17 +83,42 @@ terms of the MIT license. A copy of the license can be found in the file
#include <new>
void operator delete(void* p) noexcept MI_FORWARD0(mi_free,p);
void operator delete[](void* p) noexcept MI_FORWARD0(mi_free,p);
void* operator new(std::size_t n) noexcept(false) MI_FORWARD1(mi_malloc,n);
void* operator new[](std::size_t n) noexcept(false) MI_FORWARD1(mi_malloc,n);
#if (__cplusplus >= 201703L)
void* operator new( std::size_t n, std::align_val_t align) noexcept(false) MI_FORWARD2(mi_malloc_aligned,n,align);
void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false) MI_FORWARD2(mi_malloc_aligned,n,align);
void* operator new(std::size_t n) noexcept(false) { return mi_new(n); }
void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); }
void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept MI_FORWARD1(mi_malloc, n);
void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept MI_FORWARD1(mi_malloc, n);
#if (__cplusplus >= 201402L)
void operator delete (void* p, std::size_t sz) MI_FORWARD02(mi_free_size,p,sz);
void operator delete[](void* p, std::size_t sz) MI_FORWARD02(mi_free_size,p,sz);
#endif
#if (__cplusplus > 201402L || defined(__cpp_aligned_new))
void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void operator delete (void* p, std::size_t sz, std::align_val_t al) noexcept { mi_free_size_aligned(p, sz, static_cast<size_t>(al)); };
void operator delete[](void* p, std::size_t sz, std::align_val_t al) noexcept { mi_free_size_aligned(p, sz, static_cast<size_t>(al)); };
void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n,al); }
void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n,al); }
void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_malloc_aligned(n, static_cast<size_t>(al)); }
void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_malloc_aligned(n, static_cast<size_t>(al)); }
#endif
#else
// ------------------------------------------------------
// With a C compiler we override the new/delete operators
// by defining the mangled C++ names of the operators (as
// With a C compiler we cannot override the new/delete operators
// as the standard requires calling into `get_new_handler` and/or
// throwing C++ exceptions (and we cannot do that from C). So, we
// hope that the standard `new` uses `malloc` internally, which will be
// redirected anyway.
// ------------------------------------------------------
#if 0
// ------------------------------------------------------
// Override by defining the mangled C++ names of the operators (as
// used by GCC and CLang).
// See <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
// ------------------------------------------------------
@@ -110,6 +137,7 @@ terms of the MIT license. A copy of the license can be found in the file
#else
#error "define overloads for new/delete for this platform (just for performance, can be skipped)"
#endif
#endif
#endif // __cplusplus
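
As a usage illustration (not part of the diff): with these overrides linked in, the plain, nothrow, sized (C++14) and aligned (C++17) forms of new and delete all route into mimalloc. The mapping in the comments below follows the overrides above; the test program itself is hypothetical:

// Illustrative only: which mi_ function each form of new/delete reaches with the
// overrides above in place (built as C++17 against mimalloc).
#include <new>
#include <cstdio>

struct alignas(64) Wide { double d[8]; };

int main() {
  int* p = new int(42);               // operator new       -> mi_new (throws std::bad_alloc on OOM)
  delete p;                           // operator delete    -> mi_free (or mi_free_size for sized delete)

  Wide* w = new Wide();               // aligned new        -> mi_new_aligned(sizeof(Wide), 64)
  delete w;                           // aligned delete     -> mi_free_size_aligned / mi_free_aligned

  int* q = new (std::nothrow) int;    // nothrow new        -> mi_malloc, returns nullptr on OOM
  delete q;
  std::puts("all allocations went through mimalloc");
  return 0;
}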

View file

@@ -111,7 +111,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
bool use_delayed;
do {
tfreex = tfree = page->thread_free;
tfreex.value = tfree.value = page->thread_free.value;
use_delayed = (tfree.delayed == MI_USE_DELAYED_FREE);
if (mi_unlikely(use_delayed)) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
@@ -143,7 +143,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// and reset the MI_DELAYED_FREEING flag
do {
tfreex = tfree = page->thread_free;
tfreex.value = tfree.value = page->thread_free.value;
tfreex.delayed = MI_NO_DELAYED_FREE;
} while (!mi_atomic_compare_exchange((volatile uintptr_t*)&page->thread_free, tfreex.value, tfree.value));
}
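
The switch from `tfreex = tfree = page->thread_free` to copying the raw `.value` word is likely part of the C++ compile fix: C++ rejects copy-assignment from a volatile struct/union (the implicitly-declared operator= takes a non-volatile const reference), whereas copying the underlying `uintptr_t` member compiles in both C and C++ and keeps the snapshot an explicit single-word read for the subsequent compare-and-swap. A minimal sketch of the pattern (simplified names, not mimalloc's actual definitions):

#include <cstdint>

// Simplified stand-in for mi_thread_free_t: one word overlaid with bit-fields.
typedef union {
  uintptr_t value;
  struct { uintptr_t delayed : 2; uintptr_t head : 8 * sizeof(uintptr_t) - 2; } s;
} thread_free_t;

// With a `volatile thread_free_t` field, `local = *tf;` does not compile as C++,
// but copying the plain uintptr_t member does.
static void clear_delayed(volatile thread_free_t* tf) {
  thread_free_t tfree, tfreex;
  do {
    tfreex.value = tfree.value = tf->value;  // one-word snapshot of the shared state
    tfreex.s.delayed = 0;                    // modify the local copy (e.g. MI_NO_DELAYED_FREE)
  } while (!__atomic_compare_exchange_n(&tf->value, &tfree.value, tfreex.value,
                                        false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
  // __atomic_compare_exchange_n is the GCC/Clang builtin standing in for
  // mi_atomic_compare_exchange here.
}

The same `.value` form appears again in `_mi_page_use_delayed_free` and `mi_page_thread_free_collect` further below.
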
@@ -282,8 +282,11 @@ size_t mi_usable_size(void* p) mi_attr_noexcept {
#ifdef __cplusplus
void* _mi_externs[] = {
(void*)&_mi_page_malloc,
(void*)&mi_malloc_small,
(void*)&mi_malloc,
(void*)&mi_malloc_small,
(void*)&mi_heap_malloc,
(void*)&mi_heap_zalloc,
(void*)&mi_heap_malloc_small
};
@@ -294,6 +297,24 @@ void* _mi_externs[] = {
// Allocation extensions
// ------------------------------------------------------
void mi_free_size(void* p, size_t size) mi_attr_noexcept {
UNUSED_RELEASE(size);
mi_assert(size <= mi_usable_size(p));
mi_free(p);
}
void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
UNUSED_RELEASE(alignment);
mi_assert(((uintptr_t)p % alignment) == 0);
mi_free_size(p,size);
}
void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
UNUSED_RELEASE(alignment);
mi_assert(((uintptr_t)p % alignment) == 0);
mi_free(p);
}
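
For illustration (not part of the diff): these extensions let C++'s sized and aligned delete pass their hints down to the allocator. With assertions disabled the hints are ignored (`UNUSED_RELEASE`) and the calls behave like `mi_free`; debug builds assert that the hints are consistent with the allocation. A hypothetical standalone usage:

// Hypothetical usage of the new free extensions (not from the diff).
#include <mimalloc.h>

int main() {
  void* p = mi_malloc(100);
  mi_free_size(p, 100);               // size hint: must not exceed mi_usable_size(p)

  void* q = mi_malloc_aligned(256, 64);
  mi_free_size_aligned(q, 256, 64);   // size and alignment hints, checked only in debug builds

  void* r = mi_malloc_aligned(32, 32);
  mi_free_aligned(r, 32);             // alignment hint only
  return 0;
}
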
extern inline void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_mul_overflow(count,size,&total)) return NULL;
@@ -444,3 +465,40 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#ifdef __cplusplus
#include <new>
static mi_decl_noinline void* mi_new_try(std::size_t n) noexcept(false) {
void* p;
do {
std::new_handler h = std::get_new_handler();
if (h==NULL) throw std::bad_alloc();
h();
// and try again
p = mi_malloc(n);
} while (p==NULL);
return p;
}
// split out `new_try` for better assembly code
void* mi_new(std::size_t n) noexcept(false) {
void* p = mi_malloc(n);
if (mi_likely(p != NULL)) return p;
else return mi_new_try(n);
}
// for aligned allocation it's fine as it is not inlined anyway
void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false) {
void* p;
while ((p = mi_malloc_aligned(n,static_cast<size_t>(alignment))) == NULL) {
std::new_handler h = std::get_new_handler();
if (h==NULL) throw std::bad_alloc();
h();
// and try again
};
return p;
}
#endif
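
For illustration (not part of the diff): the loop in `mi_new_try` / `mi_new_aligned` implements the behaviour the standard requires of `operator new`, namely to keep invoking the installed new-handler before failing, and to throw `std::bad_alloc` only when no handler is installed (or the handler itself throws). A hypothetical example of that protocol from the caller's side:

// Hypothetical test of the new-handler protocol that mi_new now follows.
#include <cstdint>
#include <cstdio>
#include <new>

static int g_calls = 0;

static void my_new_handler() {
  // A real handler would release memory so the retried allocation can succeed;
  // here we give up after a few attempts by throwing, as the standard allows.
  if (++g_calls > 3) throw std::bad_alloc();
  std::fprintf(stderr, "new-handler invoked (%d)\n", g_calls);
}

int main() {
  std::set_new_handler(my_new_handler);
  try {
    void* p = ::operator new(SIZE_MAX / 2);   // almost certainly fails, triggering the handler
    ::operator delete(p);
  } catch (const std::bad_alloc&) {
    std::puts("bad_alloc after the handler gave up");
  }
  return 0;
}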

View file

@@ -84,7 +84,7 @@ typedef enum mi_collect_e {
static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
UNUSED(arg2);
UNUSED(heap);
mi_collect_t collect = (mi_collect_t)arg_collect;
mi_collect_t collect = *((mi_collect_t*)arg_collect);
_mi_page_free_collect(page);
if (mi_page_all_free(page)) {
// no more used blocks, free the page. TODO: should we retire here and be less aggressive?
@@ -131,7 +131,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
_mi_heap_delayed_free(heap);
// collect all pages owned by this thread
mi_heap_visit_pages(heap, &mi_heap_page_collect, (void*)(collect), NULL);
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != ABANDON || heap->thread_delayed_free == NULL );
// collect segment caches
@@ -480,7 +480,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
if (visitor == NULL) return false;
return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, visitor, arg);
return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
}
// Just to pass arguments
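
The two changes above are C++ conversion fixes: in the callback, converting the `void*` argument back to the enum (`(mi_collect_t)arg_collect`) is accepted by a C compiler but is ill-formed in C++ (there is no cast from a pointer to an enumeration type), so the address of the local `collect` is passed instead and dereferenced through a `mi_collect_t*`. The visitor function pointer conversely needs an explicit `(void*)` cast, since C++ has no implicit conversion from a function pointer to `void*` and the conversion is only conditionally supported, hence the `:-{` remark. A minimal sketch of the argument-passing pattern (simplified names, not mimalloc's real signatures):

#include <cstdio>

enum collect_t { NORMAL, FORCE, ABANDON };

typedef bool (page_visit_fun)(void* page, void* arg);

// Stand-in for mi_heap_visit_pages: calls the visitor with an opaque argument.
static bool visit_pages(page_visit_fun* fn, void* arg) {
  void* page = nullptr;                 // a real implementation would walk the page queues
  return fn(page, arg);
}

static bool page_collect(void* page, void* arg) {
  collect_t collect = *static_cast<collect_t*>(arg);   // read the enum back through its address
  std::printf("collect mode %d on page %p\n", (int)collect, page);
  return true;
}

int main() {
  collect_t collect = FORCE;
  // Smuggling the enum through the void* argument itself cannot be undone in C++;
  // passing `&collect` works in both C and C++.
  return visit_pages(&page_collect, &collect) ? 0 : 1;
}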

View file

@@ -426,7 +426,7 @@ static void mi_process_done(void) {
// C++: use static initialization to detect process start
static bool _mi_process_init(void) {
mi_process_init();
return (mi_main_thread_id != 0);
return (_mi_heap_main.thread_id != 0);
}
static bool mi_initialized = _mi_process_init();
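
The change above makes the C++ path report success from `_mi_heap_main.thread_id` instead of the removed `mi_main_thread_id` global. The surrounding trick, for illustration (not part of the diff): giving a namespace-scope object an initializer that calls a function forces that function to run during static initialization, before `main`, which is how mimalloc detects process start when built as C++. A minimal sketch with made-up names:

// Hypothetical illustration of the static-initialization trick (names are made up).
#include <cstdio>

static bool process_init() {
  std::puts("process_init: runs during static initialization, before main()");
  return true;    // the real code returns whether the main heap's thread id is set
}

// Dynamic initialization of a namespace-scope object calls process_init() at start-up.
static bool initialized = process_init();

int main() {
  std::printf("main: initialized = %s\n", initialized ? "true" : "false");
  return 0;
}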

View file

@@ -114,7 +114,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, bool enable) {
mi_thread_free_t tfreex;
do {
tfreex = tfree = page->thread_free;
tfreex.value = tfree.value = page->thread_free.value;
tfreex.delayed = (enable ? MI_USE_DELAYED_FREE : MI_NO_DELAYED_FREE);
if (mi_unlikely(tfree.delayed == MI_DELAYED_FREEING)) {
mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
@@ -140,7 +140,7 @@ static void mi_page_thread_free_collect(mi_page_t* page)
mi_thread_free_t tfree;
mi_thread_free_t tfreex;
do {
tfreex = tfree = page->thread_free;
tfreex.value = tfree.value = page->thread_free.value;
head = (mi_block_t*)((uintptr_t)tfree.head << MI_TF_PTR_SHIFT);
tfreex.head = 0;
} while (!mi_atomic_compare_exchange((volatile uintptr_t*)&page->thread_free, tfreex.value, tfree.value));