Add ability to register a custom error function that is called on various error conditions, including ENOMEM

daan 2020-01-17 19:59:55 -08:00
parent 3e982a3813
commit dc58388968
20 changed files with 342 additions and 127 deletions
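The new hook is a single registration call plus a callback that receives the error code and a user-supplied argument (see the mi_register_error and _mi_error_message changes below). A minimal usage sketch, assuming the matching mi_error_fun typedef and mi_register_error declaration are exported from mimalloc.h:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <mimalloc.h>

// Called by mimalloc with the error code and the registered argument.
// Codes used in this commit: EINVAL, EAGAIN, EFAULT, ENOMEM, EOVERFLOW.
static void my_error_handler(int err, void* arg) {
  fprintf(stderr, "[%s] mimalloc error: %d\n", (const char*)arg, err);
  if (err == EFAULT) abort();  // treat corrupted meta-data as fatal
}

int main(void) {
  mi_register_error(&my_error_handler, (void*)"demo");
  void* p = mi_malloc(16);
  mi_free(p);
  mi_free(p);  // double free: in debug/secure builds the handler is called with EAGAIN
  return 0;
}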

@@ -79,7 +79,7 @@ mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, siz
 mi_decl_allocator void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(count, size, &total)) return NULL;
+  if (mi_count_size_overflow(count, size, &total)) return NULL;
   return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
 }
@@ -168,13 +168,13 @@ mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_
 mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(newcount, size, &total)) return NULL;
+  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
   return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
 }
 mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(newcount, size, &total)) return NULL;
+  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
   return mi_heap_rezalloc_aligned(heap, p, total, alignment);
 }
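The mi_count_size_overflow helper that replaces mi_mul_overflow throughout is defined in one of the changed headers not shown on this page. A sketch of its likely shape, assuming it wraps the old overflow check and reports EOVERFLOW through the new error path (everything beyond the names visible in the diff is an assumption):

// Hypothetical sketch only; the real definition is not among the hunks shown here.
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
  if (count == 1) {  // common fast path: no multiplication needed
    *total = size;
    return false;
  }
  else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
    _mi_error_message(EOVERFLOW, "allocation request too large (%zu * %zu bytes)\n", count, size);
    *total = SIZE_MAX;
    return true;  // callers return NULL on overflow
  }
  else return false;
}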

@@ -146,7 +146,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
       mi_list_contains(page, page->local_free, block) ||
       mi_list_contains(page, mi_page_thread_free(page), block))
   {
-    _mi_fatal_error("double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
+    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
     return true;
   }
   return false;
@@ -300,7 +300,7 @@ void mi_free(void* p) mi_attr_noexcept
 {
 #if (MI_DEBUG>0)
   if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
-    _mi_error_message("trying to free an invalid (unaligned) pointer: %p\n", p);
+    _mi_error_message(EINVAL, "trying to free an invalid (unaligned) pointer: %p\n", p);
     return;
   }
 #endif
@@ -310,16 +310,16 @@ void mi_free(void* p) mi_attr_noexcept
 #if (MI_DEBUG!=0)
   if (mi_unlikely(!mi_is_in_heap_region(p))) {
-    _mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: 0x%p\n"
+    _mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: %p\n"
       "(this may still be a valid very large allocation (over 64MiB))\n", p);
     if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
-      _mi_warning_message("(yes, the previous pointer 0x%p was valid after all)\n", p);
+      _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
     }
   }
 #endif
 #if (MI_DEBUG!=0 || MI_SECURE>=4)
   if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
-    _mi_error_message("trying to free a pointer that does not point to a valid heap space: %p\n", p);
+    _mi_error_message(EINVAL, "trying to free a pointer that does not point to a valid heap space: %p\n", p);
     return;
   }
 #endif
@@ -432,7 +432,7 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
 extern inline mi_decl_allocator void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(count,size,&total)) return NULL;
+  if (mi_count_size_overflow(count,size,&total)) return NULL;
   return mi_heap_zalloc(heap,total);
 }
@@ -443,7 +443,7 @@ mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
 // Uninitialized `calloc`
 extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(count, size, &total)) return NULL;
+  if (mi_count_size_overflow(count, size, &total)) return NULL;
   return mi_heap_malloc(heap, total);
 }
@@ -484,7 +484,7 @@ mi_decl_allocator void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize
 mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(count, size, &total)) return NULL;
+  if (mi_count_size_overflow(count, size, &total)) return NULL;
   return mi_heap_realloc(heap, p, total);
 }
@@ -502,7 +502,7 @@ mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsiz
 mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
   size_t total;
-  if (mi_mul_overflow(count, size, &total)) return NULL;
+  if (mi_count_size_overflow(count, size, &total)) return NULL;
   return mi_heap_rezalloc(heap, p, total);
 }
@@ -570,7 +570,6 @@ char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
 #define PATH_MAX MAX_PATH
 #endif
 #include <windows.h>
-#include <errno.h>
 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
   // todo: use GetFullPathNameW to allow longer file names
   char buf[PATH_MAX];
@@ -645,10 +644,6 @@ static bool mi_try_new_handler(bool nothrow) {
   }
 }
 #else
-#include <errno.h>
-#ifndef ENOMEM
-#define ENOMEM 12
-#endif
 typedef void (*std_new_handler_t)();
 #if (defined(__GNUC__) || defined(__clang__))
@@ -668,7 +663,7 @@ std_new_handler_t mi_get_new_handler() {
 static bool mi_try_new_handler(bool nothrow) {
   std_new_handler_t h = mi_get_new_handler();
   if (h==NULL) {
-    if (!nothrow) exit(ENOMEM);
+    if (!nothrow) exit(ENOMEM); // cannot throw in plain C, use exit as we are out of memory anyway.
     return false;
   }
   else {
@@ -718,7 +713,7 @@ void* mi_new_aligned_nothrow(size_t size, size_t alignment) {
 void* mi_new_n(size_t count, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_mul_overflow(count, size, &total))) {
+  if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
     mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }

@@ -229,18 +229,18 @@ void _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats) {
     mi_arena_t* arena = (mi_arena_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &mi_arenas[arena_idx]));
     mi_assert_internal(arena != NULL);
     if (arena == NULL) {
-      _mi_fatal_error("trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+      _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
       return;
     }
     mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
     if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
-      _mi_fatal_error("trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+      _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
       return;
     }
     const size_t blocks = mi_block_count_of_size(size);
     bool ones = mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
     if (!ones) {
-      _mi_fatal_error("trying to free an already freed block: %p, size %zu\n", p, size);
+      _mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size);
       return;
     };
   }

@@ -157,7 +157,7 @@ static bool _mi_heap_init(void) {
     // use `_mi_os_alloc` to allocate directly from the OS
     mi_thread_data_t* td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t),&_mi_stats_main); // Todo: more efficient allocation?
     if (td == NULL) {
-      _mi_error_message("failed to allocate thread local heap memory\n");
+      _mi_error_message(ENOMEM, "failed to allocate thread local heap memory\n");
       return false;
     }
     mi_tld_t* tld = &td->tld;

@@ -287,14 +287,10 @@ void _mi_verbose_message(const char* fmt, ...) {
   va_end(args);
 }
-void _mi_error_message(const char* fmt, ...) {
+static void mi_show_error_message(const char* fmt, va_list args) {
   if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
   if (mi_atomic_increment(&error_count) > mi_max_error_count) return;
-  va_list args;
-  va_start(args,fmt);
-  mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
-  va_end(args);
-  mi_assert(false);
+  mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
 }
 void _mi_warning_message(const char* fmt, ...) {
@@ -314,14 +310,40 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, co
 }
 #endif
-mi_attr_noreturn void _mi_fatal_error(const char* fmt, ...) {
+// --------------------------------------------------------
+// Errors
+// --------------------------------------------------------
+static mi_error_fun* volatile mi_error_handler; // = NULL
+static volatile _Atomic(void*) mi_error_arg;    // = NULL
+static void mi_error_default(int err) {
+  UNUSED(err);
+#if (MI_SECURE>0)
+  if (err==EFAULT) { // abort on serious errors in secure mode (corrupted meta-data)
+    abort();
+  }
+#endif
+}
+void mi_register_error(mi_error_fun* fun, void* arg) {
+  mi_error_handler = fun; // can be NULL
+  mi_atomic_write_ptr(&mi_error_arg, arg);
+}
+void _mi_error_message(int err, const char* fmt, ...) {
+  // show detailed error message
   va_list args;
   va_start(args, fmt);
-  mi_vfprintf(NULL, NULL, "mimalloc: fatal: ", fmt, args);
+  mi_show_error_message(fmt, args);
   va_end(args);
-  #if (MI_SECURE>=0)
-  abort();
-  #endif
+  // and call the error handler which may abort (or return normally)
+  if (mi_error_handler != NULL) {
+    mi_error_handler(err, mi_atomic_read_ptr(&mi_error_arg));
+  }
+  else {
+    mi_error_default(err);
+  }
 }
 // --------------------------------------------------------
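Note the behavioural change: the old _mi_fatal_error always aborted, while the new default (mi_error_default) only aborts on EFAULT in secure builds; other errors are reported and the failing call returns normally. A sketch of an application handler that restores a stricter policy, using only the error codes that appear in this commit:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <mimalloc.h>

// Abort on corruption and invalid frees, but let out-of-memory and
// oversized requests return NULL to the caller.
static void strict_error_handler(int err, void* arg) {
  (void)arg;
  switch (err) {
    case EFAULT:    // corrupted free list or meta-data
    case EAGAIN:    // double free
    case EINVAL:    // invalid pointer passed to mi_free
      fprintf(stderr, "mimalloc: fatal error %d, aborting\n", err);
      abort();
    case ENOMEM:    // out of memory
    case EOVERFLOW: // allocation request too large
    default:
      break;        // the failing allocation simply returns NULL
  }
}

// Install early, e.g. at the start of main():
//   mi_register_error(&strict_error_handler, NULL);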

@@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc-atomic.h"
-#include <string.h> // strerror
+#include <errno.h>
 #if defined(_WIN32)
 #include <windows.h>
@@ -655,7 +655,7 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
   if (err != 0) { err = errno; }
   #endif
   if (err != 0) {
-    _mi_warning_message("%s error: start: 0x%p, csize: 0x%x, err: %i\n", commit ? "commit" : "decommit", start, csize, err);
+    _mi_warning_message("%s error: start: %p, csize: 0x%x, err: %i\n", commit ? "commit" : "decommit", start, csize, err);
     mi_mprotect_hint(err);
   }
   mi_assert_internal(err == 0);
@@ -719,7 +719,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   int err = madvise(start, csize, MADV_DONTNEED);
   #endif
   if (err != 0) {
-    _mi_warning_message("madvise reset error: start: 0x%p, csize: 0x%x, errno: %i\n", start, csize, errno);
+    _mi_warning_message("madvise reset error: start: %p, csize: 0x%x, errno: %i\n", start, csize, errno);
   }
   //mi_assert(err == 0);
   if (err != 0) return false;
@@ -774,7 +774,7 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) {
   if (err != 0) { err = errno; }
   #endif
   if (err != 0) {
-    _mi_warning_message("mprotect error: start: 0x%p, csize: 0x%x, err: %i\n", start, csize, err);
+    _mi_warning_message("mprotect error: start: %p, csize: 0x%x, err: %i\n", start, csize, err);
     mi_mprotect_hint(err);
   }
   return (err == 0);
@@ -961,7 +961,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
     if (p != addr) {
       // no success, issue a warning and break
       if (p != NULL) {
-        _mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
+        _mi_warning_message("could not allocate contiguous huge page %zu at %p\n", page, addr);
         _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
       }
       break;

@@ -175,7 +175,7 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
   }
   // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
   if (count > max_count) {
-    _mi_fatal_error("corrupted thread-free list\n");
+    _mi_error_message(EFAULT, "corrupted thread-free list\n");
     return; // the thread-free items cannot be freed
   }
@@ -796,7 +796,8 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
   mi_page_t* page;
   if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {
     if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
-      page = NULL;
+      _mi_error_message(EOVERFLOW, "allocation request is too large (%zu b requested)\n", size);
+      return NULL;
     }
     else {
       page = mi_huge_page_alloc(heap,size);
@@ -806,7 +807,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
     // otherwise find a page with free blocks in our size segregated queues
     page = mi_find_free_page(heap,size);
   }
-  if (page == NULL) return NULL; // out of memory
+  if (mi_unlikely(page == NULL)) { // out of memory
+    _mi_error_message(ENOMEM, "cannot allocate memory (%zu bytes requested)\n", size);
+    return NULL;
+  }
   mi_assert_internal(mi_page_immediate_available(page));
   mi_assert_internal(mi_page_block_size(page) >= size);
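These two messages are where the generic allocation path now reports oversized and out-of-memory requests. A small test sketch (hypothetical test function; assumes a handler registered as in the first example) exercising the new reporting:

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <mimalloc.h>

static int last_err = 0;
static void capture_error(int err, void* arg) { (void)arg; last_err = err; }

static void test_error_reporting(void) {
  mi_register_error(&capture_error, NULL);

  // Larger than PTRDIFF_MAX: _mi_malloc_generic reports EOVERFLOW and returns NULL.
  void* big = mi_malloc((size_t)PTRDIFF_MAX + 1);
  assert(big == NULL && last_err == EOVERFLOW);

  // A count*size overflow takes the mi_count_size_overflow path and also returns NULL.
  last_err = 0;
  void* arr = mi_mallocn(SIZE_MAX, 4);
  assert(arr == NULL);
}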