Follow-up to the reallocarr/reallocarray BSD implementations.

Here, the OpenBSD malloc_conceal API: not much different from classic
malloc/calloc, but the contents are not dumped into core files.
This commit is contained in:
David CARLIER 2022-01-07 22:07:49 +00:00
parent 43e5cd2671
commit 93797b8b86
5 changed files with 60 additions and 0 deletions

View file

@ -248,6 +248,9 @@ void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray
// NetBSD-style reallocarr: returns 0 on success / errno value on failure.
int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); }
// Legacy aligned-allocation entry points, forwarded to the mi_ equivalents.
void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
// OpenBSD conceal/freezero interface: allocations whose contents are kept
// out of core dumps, and a free that zeroes the memory first.
void* malloc_conceal(size_t size) { return mi_malloc_conceal(size); }
void* calloc_conceal(size_t count, size_t size) { return mi_calloc_conceal(count, size); }
void freezero(void* p, size_t size) { mi_freezero(p, size); }
#if defined(__GLIBC__) && defined(__linux__)
// forward __libc interface (needed for glibc-based Linux distributions)

View file

@ -119,6 +119,49 @@ extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
return mi_heap_malloc(mi_get_default_heap(), size);
}
// OpenBSD `malloc_conceal` semantics: allocate like mi_malloc, then (best
// effort) advise the kernel to exclude the memory from core dumps.
// NOTE(review): madvise operates at page granularity and expects a
// page-aligned address — for sub-page allocations the advice may fail or
// cover neighboring data; confirm intended platform behavior.
mi_decl_restrict void* mi_malloc_conceal(size_t size) mi_attr_noexcept {
  void* blk = mi_heap_malloc(mi_get_default_heap(), size);
  if (mi_likely(blk != NULL)) {
    // Return value deliberately ignored: concealment is best effort.
    #if defined(MADV_DONTDUMP)
    madvise(blk, size, MADV_DONTDUMP);   // Linux
    #elif defined(MADV_NOCORE)
    madvise(blk, size, MADV_NOCORE);     // FreeBSD
    #endif
  }
  return blk;
}
// OpenBSD `calloc_conceal` semantics: zero-initialized allocation whose
// contents are (best effort) excluded from core dumps.
mi_decl_restrict void* mi_calloc_conceal(size_t count, size_t size) mi_attr_noexcept {
  void* p = mi_heap_calloc(mi_get_default_heap(), count, size);
  if (mi_likely(p != NULL)) {
    // Fix: advise over the WHOLE allocation (count*size bytes), not just
    // `size` bytes — otherwise most of the buffer remained dumpable.
    // calloc semantics require the count*size overflow check to have
    // already happened in mi_heap_calloc (it returned non-NULL), so the
    // multiplication cannot wrap here.
    // NOTE(review): madvise works at page granularity — sub-page
    // allocations may not be fully concealable; verify per platform.
    #if defined(MADV_DONTDUMP)
    madvise(p, count * size, MADV_DONTDUMP);   // Linux
    #elif defined(MADV_NOCORE)
    madvise(p, count * size, MADV_NOCORE);     // FreeBSD
    #endif
  }
  return p;
}
// OpenBSD `freezero` semantics: zero the first `size` bytes of the block,
// then free it.
// Fix: the original called mi_free(p) BEFORE the memset — writing through
// `p` after it has been freed is a use-after-free. Zero first, free last.
void mi_freezero(void* p, size_t size) mi_attr_noexcept {
  #if (MI_DEBUG==0)
  // Guard against NULL: memset(NULL, …) is undefined behavior, while
  // mi_free(NULL) below is a harmless no-op.
  if (p != NULL && size > 0) {
    #if defined(_MSC_VER)
    SecureZeroMemory(p, size);
    #else
    // reusing memset return value and using memory fence
    // so memset call is generated regardless of the optimisation level
    p = memset(p, 0, size);
    __asm__ volatile("" :: "r"(p) : "memory");
    #endif
  }
  #else
  // In debug builds mi_free presumably pattern-fills freed memory itself;
  // skip the explicit zeroing. -- TODO confirm against mi_free debug path.
  MI_UNUSED(size);
  #endif
  mi_free(p);
}
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
// note: we need to initialize the whole usable block size to zero, not just the requested size,