merge with dev

commit ff2fe673e5
11 changed files with 348 additions and 242 deletions
@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // Aligned Allocation
 // ------------------------------------------------------
 
-static void* mi_base_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero MI_SOURCE_XPARAM) mi_attr_noexcept {
+static mi_decl_restrict void* mi_base_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero MI_SOURCE_XPARAM) mi_attr_noexcept {
   // note: we don't require `size > offset`, we just guarantee that
   // the address at offset is aligned regardless of the allocated size.
   mi_assert(alignment > 0 && alignment % sizeof(void*) == 0);
@ -64,34 +64,34 @@ static void* mi_base_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 }
 
 
-MI_ALLOC_API3(void*, malloc_aligned_at, mi_heap_t*, heap, size_t, size, size_t, alignment, size_t, offset)
+MI_ALLOC_API3(mi_decl_restrict void*, malloc_aligned_at, mi_heap_t*, heap, size_t, size, size_t, alignment, size_t, offset)
 {
   return mi_base_malloc_zero_aligned_at(heap, size, alignment, offset, false MI_SOURCE_XARG);
 }
 
-MI_ALLOC_API2(void*, malloc_aligned, mi_heap_t*,heap, size_t, size, size_t, alignment)
+MI_ALLOC_API2(mi_decl_restrict void*, malloc_aligned, mi_heap_t*,heap, size_t, size, size_t, alignment)
 {
   return mi_base_malloc_zero_aligned_at(heap, size, alignment, 0, false MI_SOURCE_XARG);
 }
 
-MI_ALLOC_API3(void*, zalloc_aligned_at, mi_heap_t*, heap, size_t, size, size_t, alignment, size_t, offset)
+MI_ALLOC_API3(mi_decl_restrict void*, zalloc_aligned_at, mi_heap_t*, heap, size_t, size, size_t, alignment, size_t, offset)
 {
   return mi_base_malloc_zero_aligned_at(heap, size, alignment, offset, true MI_SOURCE_XARG);
 }
 
-MI_ALLOC_API2(void*, zalloc_aligned, mi_heap_t*,heap, size_t, size, size_t, alignment)
+MI_ALLOC_API2(mi_decl_restrict void*, zalloc_aligned, mi_heap_t*,heap, size_t, size, size_t, alignment)
 {
   return mi_base_malloc_zero_aligned_at(heap, size, alignment, 0, true MI_SOURCE_XARG);
 }
 
-MI_ALLOC_API4(void*, calloc_aligned_at, mi_heap_t*, heap, size_t, count, size_t, size, size_t, alignment, size_t, offset)
+MI_ALLOC_API4(mi_decl_restrict void*, calloc_aligned_at, mi_heap_t*, heap, size_t, count, size_t, size, size_t, alignment, size_t, offset)
 {
   size_t total;
   if (mi_count_size_overflow(count, size, &total)) return NULL;
   return mi_base_malloc_zero_aligned_at(heap, total, alignment, offset, true MI_SOURCE_XARG);
 }
 
-MI_ALLOC_API3(void*, calloc_aligned, mi_heap_t*, heap, size_t, count, size_t, size, size_t, alignment)
+MI_ALLOC_API3(mi_decl_restrict void*, calloc_aligned, mi_heap_t*, heap, size_t, count, size_t, size, size_t, alignment)
 {
   size_t total;
   if (mi_count_size_overflow(count, size, &total)) return NULL;
@ -99,7 +99,6 @@ MI_ALLOC_API3(void*, calloc_aligned, mi_heap_t*, heap, size_t, count, size_t, si
 }
 
 
-
 static void* mi_base_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero MI_SOURCE_XPARAM) mi_attr_noexcept {
   mi_assert(alignment > 0);
   if (alignment <= sizeof(uintptr_t)) return _mi_base_realloc_zero(heap,p,newsize,zero MI_SOURCE_XARG);
@ -138,6 +137,7 @@ static void* mi_base_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsi
   return mi_base_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero MI_SOURCE_XARG);
 }
 
+
 MI_ALLOC_API4(void*, realloc_aligned_at, mi_heap_t*, heap, void*, p, size_t, newsize, size_t, alignment, size_t, offset)
 {
   return mi_base_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false MI_SOURCE_XARG);
@ -165,6 +165,7 @@ MI_ALLOC_API5(void*, recalloc_aligned_at, mi_heap_t*, heap, void*, p, size_t, ne
   return mi_base_realloc_zero_aligned_at(heap, p, total, alignment, offset, true MI_SOURCE_XARG);
 }
 
+
 MI_ALLOC_API4(void*, recalloc_aligned, mi_heap_t*, heap, void*, p, size_t, newcount, size_t, size, size_t, alignment)
 {
   size_t total;
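
The comment in the first hunk states the key contract of the `_at` variants: the allocation does not require `size > offset`; only the address *at* `offset` is guaranteed to be aligned. A minimal usage sketch against the public mimalloc API (the sizes and the 16-byte offset are illustrative, not from the patch):

    #include <mimalloc.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      // 1000 bytes where the byte at offset 16 is 64-byte aligned; the block
      // start itself need not be aligned.
      void* p = mi_malloc_aligned_at(1000, 64, 16);
      assert(p != NULL && ((uintptr_t)p + 16) % 64 == 0);
      mi_free(p);
      return 0;
    }
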
@ -56,7 +56,7 @@ MI_SOURCE_API3(void*, reallocarray, void*, p, size_t, count, size_t, size)
   return newp;
 }
 
-MI_SOURCE_API2(void*, memalign, size_t, alignment, size_t, size)
+MI_SOURCE_API2(mi_decl_restrict void*, memalign, size_t, alignment, size_t, size)
 {
   void* p;
   if (alignment <= MI_MAX_ALIGN_SIZE) {
@ -69,12 +69,12 @@ MI_SOURCE_API2(void*, memalign, size_t, alignment, size_t, size)
   return p;
 }
 
-MI_SOURCE_API1(void*, valloc, size_t, size)
+MI_SOURCE_API1(mi_decl_restrict void*, valloc, size_t, size)
 {
   return MI_SOURCE_ARG(mi_malloc_aligned, size, _mi_os_page_size());
 }
 
-MI_SOURCE_API1(void*, pvalloc, size_t, size)
+MI_SOURCE_API1(mi_decl_restrict void*, pvalloc, size_t, size)
 {
   size_t psize = _mi_os_page_size();
   if (size >= SIZE_MAX - psize) return NULL; // overflow
@ -82,7 +82,7 @@ MI_SOURCE_API1(void*, pvalloc, size_t, size)
   return MI_SOURCE_ARG(mi_malloc_aligned, asize, psize);
 }
 
-MI_SOURCE_API2(void*, aligned_alloc, size_t, alignment, size_t, size)
+MI_SOURCE_API2(mi_decl_restrict void*, aligned_alloc, size_t, alignment, size_t, size)
 {
   if (alignment==0 || !_mi_is_power_of_two(alignment)) return NULL;
   if ((size&(alignment-1)) != 0) return NULL; // C11 requires integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
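
The `aligned_alloc` hunk keeps the C11 preconditions visible: the alignment must be a nonzero power of two and `size` an integral multiple of it. A standalone sketch of that argument check (the helper name is illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    // C11 aligned_alloc contract: alignment is a nonzero power of two and
    // size is an integral multiple of alignment.
    static bool c11_aligned_alloc_ok(size_t alignment, size_t size) {
      if (alignment == 0 || (alignment & (alignment - 1)) != 0) return false;
      return (size & (alignment - 1)) == 0; // valid because alignment is a power of two
    }
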
@ -128,8 +128,7 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
   return mi_base_posix_memalign(p, alignment, size MI_SOURCE_XRET());
 }
 
-
-MI_SOURCE_API1(unsigned short*, wcsdup, const unsigned short*, s)
+MI_SOURCE_API1(mi_decl_restrict unsigned short*, wcsdup, const unsigned short*, s)
 {
   if (s==NULL) return NULL;
   size_t len;
@ -142,7 +141,7 @@ MI_SOURCE_API1(unsigned short*, wcsdup, const unsigned short*, s)
   return p;
 }
 
-MI_SOURCE_API1(unsigned char*, mbsdup, const unsigned char*, s)
+MI_SOURCE_API1(mi_decl_restrict unsigned char*, mbsdup, const unsigned char*, s)
 {
   return (unsigned char*)MI_SOURCE_ARG(mi_strdup,(const char*)s);
 }
@ -215,19 +214,19 @@ int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name)
 
 
 #ifndef NDEBUG
-mi_decl_restrict void* dbg_mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset, mi_source_t __mi_source) mi_attr_noexcept { // Microsoft
+void* dbg_mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset, mi_source_t __mi_source) mi_attr_noexcept { // Microsoft
   return dbg_mi_recalloc_aligned_at(p, newcount, size, alignment, offset, __mi_source);
 }
 
-mi_decl_restrict void* dbg_mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment, mi_source_t __mi_source) mi_attr_noexcept { // Microsoft
+void* dbg_mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment, mi_source_t __mi_source) mi_attr_noexcept { // Microsoft
   return dbg_mi_recalloc_aligned(p, newcount, size, alignment, __mi_source);
 }
 #endif
 
-mi_decl_restrict void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
+void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
   return MI_SOURCE_RET(mi_recalloc_aligned_at,p, newcount, size, alignment, offset);
 }
 
-mi_decl_restrict void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
+void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
   return MI_SOURCE_RET(mi_recalloc_aligned,p, newcount, size, alignment);
 }

src/alloc.c (42 changes)

@ -61,7 +61,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
 }
 
 // allocate a small block
-MI_ALLOC_API1(void*, malloc_small, mi_heap_t*, heap, size_t, size)
+MI_ALLOC_API1(mi_decl_restrict void*, malloc_small, mi_heap_t*, heap, size_t, size)
 {
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
@ -80,7 +80,7 @@ MI_ALLOC_API1(void*, malloc_small, mi_heap_t*, heap, size_t, size)
 
 
 // The main allocation function
-MI_ALLOC_API1(void*, malloc, mi_heap_t*, heap, size_t, size)
+MI_ALLOC_API1(mi_decl_restrict void*, malloc, mi_heap_t*, heap, size_t, size)
 {
   if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
     return mi_base_malloc_small(heap, size MI_SOURCE_XARG);
@ -120,7 +120,7 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
 }
 
 // zero initialized small block
-MI_ALLOC_API1(void*, zalloc_small, mi_heap_t*, heap, size_t, size)
+MI_ALLOC_API1(mi_decl_restrict void*, zalloc_small, mi_heap_t*, heap, size_t, size)
 {
   void* p = mi_base_malloc_small(heap, size MI_SOURCE_XARG);
   if (p != NULL) {
@ -137,7 +137,7 @@ mi_decl_restrict void* _mi_base_malloc_zero(mi_heap_t* heap, size_t size, bool z
   return p;
 }
 
-MI_ALLOC_API1(void*, zalloc, mi_heap_t*,heap, size_t,size)
+MI_ALLOC_API1(mi_decl_restrict void*, zalloc, mi_heap_t*,heap, size_t,size)
 {
   return _mi_base_malloc_zero(heap, size, true MI_SOURCE_XARG);
 }
@ -519,7 +519,7 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
   mi_free(p);
 }
 
-MI_ALLOC_API2(void*, calloc, mi_heap_t*, heap, size_t, count, size_t, size)
+MI_ALLOC_API2(mi_decl_restrict void*, calloc, mi_heap_t*, heap, size_t, count, size_t, size)
 {
   size_t total;
   if (mi_count_size_overflow(count,size,&total)) return NULL;
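
`calloc` and the other counted variants reject `count * size` when the multiplication would wrap. A sketch of such a check in portable C (names are illustrative; the patch's own `mi_count_size_overflow` may differ in detail):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    // Returns true when count*size overflows size_t; otherwise stores the product.
    static bool count_size_overflow(size_t count, size_t size, size_t* total) {
      if (size != 0 && count > SIZE_MAX / size) return true; // would wrap
      *total = count * size;
      return false;
    }
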
@ -527,7 +527,7 @@ MI_ALLOC_API2(void*, calloc, mi_heap_t*, heap, size_t, count, size_t, size)
 }
 
 // Uninitialized `calloc`
-MI_ALLOC_API2(void*, mallocn, mi_heap_t*, heap, size_t, count, size_t, size)
+MI_ALLOC_API2(mi_decl_restrict void*, mallocn, mi_heap_t*, heap, size_t, count, size_t, size)
 {
   size_t total;
   if (mi_count_size_overflow(count, size, &total)) return NULL;
@ -536,14 +536,19 @@ MI_ALLOC_API2(void*, mallocn, mi_heap_t*, heap, size_t, count, size_t, size)
 
 
 // Expand in place or fail
-mi_decl_restrict void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
+MI_ALLOC_API2(void*, expand, mi_heap_t*, heap, void*, p, size_t, newsize)
+{
+  UNUSED(heap);
+#ifndef NDEBUG
+  UNUSED(__mi_source);
+#endif
   if (p == NULL) return NULL;
   size_t size = mi_usable_size(p);
   if (newsize > size) return NULL;
   return p; // it fits
 }
 
-mi_decl_restrict void* _mi_base_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero MI_SOURCE_XPARAM) {
+void* _mi_base_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero MI_SOURCE_XPARAM) {
   if (p == NULL) return _mi_base_malloc_zero(heap,newsize,zero MI_SOURCE_XARG);
   size_t size = mi_usable_size(p);
   if (newsize <= size && newsize >= (size / 2)) {
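
`mi_expand` never moves memory: it returns `p` only when the requested size already fits within the usable size of the block, and NULL otherwise. A usage sketch against the public API:

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      char* buf = (char*)mi_malloc(100);
      // mi_usable_size(buf) is >= 100, so this "expansion" succeeds in place...
      if (mi_expand(buf, mi_usable_size(buf)) != NULL) printf("fits in place\n");
      // ...but growing past the usable size fails rather than reallocating.
      if (mi_expand(buf, 10000) == NULL) printf("would need a realloc\n");
      mi_free(buf);
      return 0;
    }
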
@ -601,7 +606,7 @@ MI_ALLOC_API3(void*, recalloc, mi_heap_t*, heap, void*, p, size_t, count, size_t
 // ------------------------------------------------------
 
 // `strdup` using mi_malloc
-MI_ALLOC_API1(char*, strdup, mi_heap_t*,heap, const char*,s)
+MI_ALLOC_API1(mi_decl_restrict char*, strdup, mi_heap_t*,heap, const char*,s)
 {
   if (s == NULL) return NULL;
   size_t n = strlen(s);
@ -610,9 +615,8 @@ MI_ALLOC_API1(char*, strdup, mi_heap_t*,heap, const char*,s)
   return t;
 }
 
-
 // `strndup` using mi_malloc
-MI_ALLOC_API2(char*, strndup, mi_heap_t*, heap, const char*, s, size_t, n)
+MI_ALLOC_API2(mi_decl_restrict char*, strndup, mi_heap_t*, heap, const char*, s, size_t, n)
 {
   if (s == NULL) return NULL;
   size_t m = strlen(s);
@ -632,7 +636,7 @@ MI_ALLOC_API2(char*, strndup, mi_heap_t*, heap, const char*, s, size_t, n)
 #define PATH_MAX MAX_PATH
 #endif
 #include <windows.h>
-MI_ALLOC_API2(char*, realpath, mi_heap_t*, heap, const char*, fname, char*, resolved_name)
+MI_ALLOC_API2(mi_decl_restrict char*, realpath, mi_heap_t*, heap, const char*, fname, char*, resolved_name)
 {
   // todo: use GetFullPathNameW to allow longer file names
   char buf[PATH_MAX];
@ -663,7 +667,7 @@ static size_t mi_path_max() {
   return path_max;
 }
 
-MI_ALLOC_API2(char*, realpath, mi_heap_t*, heap, const char*, fname, char*, resolved_name)
+MI_ALLOC_API2(mi_decl_restrict char*, realpath, mi_heap_t*, heap, const char*, fname, char*, resolved_name)
 {
   if (resolved_name != NULL) {
     return realpath(fname,resolved_name);
@ -734,7 +738,7 @@ static bool mi_try_new_handler(bool nothrow) {
 }
 #endif
 
-static mi_decl_noinline void* mi_base_try_new(size_t size, bool nothrow MI_SOURCE_XPARAM) {
+static mi_decl_noinline mi_decl_restrict void* mi_base_try_new(size_t size, bool nothrow MI_SOURCE_XPARAM) {
   void* p = NULL;
   while(p == NULL && mi_try_new_handler(nothrow)) {
     p = MI_SOURCE_ARG(mi_malloc, size);
@ -742,14 +746,14 @@ static mi_decl_noinline void* mi_base_try_new(size_t size, bool nothrow MI_SOU
   return p;
 }
 
-MI_NEW_API1(void*, new, size_t, size)
+MI_NEW_API1(mi_decl_restrict void*, new, size_t, size)
 {
   void* p = MI_SOURCE_ARG(mi_malloc, size);
   if (mi_unlikely(p == NULL)) return mi_base_try_new(size, false MI_SOURCE_XARG);
   return p;
 }
 
-MI_NEW_API1(void*, new_nothrow, size_t, size)
+MI_SOURCE_API1(mi_decl_restrict void*, new_nothrow, size_t, size)
 {
   void* p = MI_SOURCE_ARG(mi_malloc, size);
   if (mi_unlikely(p == NULL)) return mi_base_try_new(size, true MI_SOURCE_XARG);
@ -757,7 +761,7 @@ MI_NEW_API1(void*, new_nothrow, size_t, size)
 }
 
 
-MI_NEW_API2(void*, new_aligned, size_t, size, size_t, alignment)
+MI_NEW_API2(mi_decl_restrict void*, new_aligned, size_t, size, size_t, alignment)
 {
   void* p;
   do {
@ -767,7 +771,7 @@ MI_NEW_API2(void*, new_aligned, size_t, size, size_t, alignment)
   return p;
 }
 
-MI_NEW_API2(void*, new_aligned_nothrow, size_t, size, size_t, alignment)
+MI_SOURCE_API2(mi_decl_restrict void*, new_aligned_nothrow, size_t, size, size_t, alignment)
 {
   void* p;
   do {
@ -777,7 +781,7 @@ MI_NEW_API2(void*, new_aligned_nothrow, size_t, size, size_t, alignment)
   return p;
 }
 
-MI_NEW_API2(void*, new_n, size_t, count, size_t, size)
+MI_NEW_API2(mi_decl_restrict void*, new_n, size_t, count, size_t, size)
 {
   size_t total;
   if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
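
`mi_base_try_new` implements the C++ `operator new` retry protocol in C: on failure, invoke the installed new-handler and retry while the handler keeps making progress. A self-contained sketch of the pattern (`oom_handler` is illustrative, not the patch's `mi_try_new_handler`):

    #include <stdbool.h>
    #include <stdlib.h>

    static bool oom_handler(void) { return false; } // hypothetical: nothing left to release

    static void* try_new_sketch(size_t size) {
      void* p = malloc(size);
      while (p == NULL && oom_handler()) { // retry only while the handler can free memory
        p = malloc(size);
      }
      return p; // the nothrow flavor returns NULL; the throwing one would raise bad_alloc here
    }
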
src/heap.c (56 changes)

@ -191,7 +191,7 @@ mi_heap_t* mi_heap_get_backing(void) {
 
 mi_heap_t* mi_heap_new(void) {
   mi_heap_t* bheap = mi_heap_get_backing();
-  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);
+  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
   if (heap==NULL) return NULL;
   memcpy(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->tld = bheap->tld;
@ -201,6 +201,9 @@ mi_heap_t* mi_heap_new(void) {
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
   heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
+  // push on the thread local heaps list
+  heap->next = heap->tld->heaps;
+  heap->tld->heaps = heap;
   return heap;
 }
@ -223,6 +226,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
 
 // called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
 static void mi_heap_free(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
   mi_assert_internal(mi_heap_is_initialized(heap));
   if (mi_heap_is_backing(heap)) return; // dont free the backing heap
 
@ -230,6 +234,22 @@ static void mi_heap_free(mi_heap_t* heap) {
   if (mi_heap_is_default(heap)) {
     _mi_heap_set_default_direct(heap->tld->heap_backing);
   }
+
+  // remove ourselves from the thread local heaps list
+  // linear search but we expect the number of heaps to be relatively small
+  mi_heap_t* prev = NULL;
+  mi_heap_t* curr = heap->tld->heaps;
+  while (curr != heap && curr != NULL) {
+    prev = curr;
+    curr = curr->next;
+  }
+  mi_assert_internal(curr == heap);
+  if (curr == heap) {
+    if (prev != NULL) { prev->next = heap->next; }
+    else { heap->tld->heaps = heap->next; }
+  }
+  mi_assert_internal(heap->tld->heaps != NULL);
+
   // and free the used memory
   mi_free(heap);
 }
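
With this change every heap created by `mi_heap_new` is linked into a per-thread `heaps` list, so that thread exit can find and delete it (see the `_mi_heap_done` hunk in src/init.c below). A typical lifecycle from the public API:

    #include <mimalloc.h>

    int main(void) {
      mi_heap_t* heap = mi_heap_new();          // now also pushed on the thread-local heaps list
      char* s = mi_heap_strdup(heap, "hello");  // allocate from this heap
      mi_free(s);
      mi_heap_delete(heap);                     // unlinks the heap; live blocks migrate to the backing heap
      return 0;
    }
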
@ -286,6 +306,7 @@ void _mi_heap_destroy_pages(mi_heap_t* heap) {
 }
 
 void mi_heap_destroy(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
   mi_assert(mi_heap_is_initialized(heap));
   mi_assert(heap->no_reclaim);
   mi_assert_expensive(mi_heap_is_valid(heap));
@ -312,38 +333,37 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   mi_assert_internal(heap!=NULL);
   if (from==NULL || from->page_count == 0) return;
 
-  // unfull all full pages in the `from` heap
-  mi_page_t* page = from->pages[MI_BIN_FULL].first;
-  while (page != NULL) {
-    mi_page_t* next = page->next;
-    _mi_page_unfull(page);
-    page = next;
-  }
-  mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);
-
-  // free outstanding thread delayed free blocks
+  // reduce the size of the delayed frees
   _mi_heap_delayed_free(from);
 
-  // transfer all pages by appending the queues; this will set
-  // a new heap field which is ok as all pages are unfull'd and thus
-  // other threads won't access this field anymore (see `mi_free_block_mt`)
-  for (size_t i = 0; i < MI_BIN_FULL; i++) {
+  // transfer all pages by appending the queues; this will set a new heap field
+  // so threads may do delayed frees in either heap for a while.
+  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
+  // so after this only the new heap will get delayed frees
+  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
     mi_page_queue_t* pq = &heap->pages[i];
     mi_page_queue_t* append = &from->pages[i];
     size_t pcount = _mi_page_queue_append(heap, pq, append);
     heap->page_count += pcount;
     from->page_count -= pcount;
   }
-  mi_assert_internal(from->thread_delayed_free == NULL);
   mi_assert_internal(from->page_count == 0);
 
+  // and do outstanding delayed frees in the `from` heap
+  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
+  // it turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
+  // the regular `_mi_free_delayed_block` which is safe.
+  _mi_heap_delayed_free(from);
+  mi_assert_internal(from->thread_delayed_free == NULL);
+
   // and reset the `from` heap
   mi_heap_reset_pages(from);
 }
 
 // Safe delete a heap without freeing any still allocated blocks in that heap.
 void mi_heap_delete(mi_heap_t* heap)
 {
+  mi_assert(heap != NULL);
   mi_assert(mi_heap_is_initialized(heap));
   mi_assert_expensive(mi_heap_is_valid(heap));
   if (!mi_heap_is_initialized(heap)) return;
src/init.c (23 changes)

@ -97,6 +97,7 @@ const mi_heap_t _mi_heap_empty = {
   { 0, 0 },        // keys
   { {0}, {0}, 0 },
   0,               // page count
+  NULL,            // next
   false
 };
 
@ -111,7 +112,7 @@ extern mi_heap_t _mi_heap_main;
 
 static mi_tld_t tld_main = {
   0, false,
-  &_mi_heap_main,
+  &_mi_heap_main, &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
     0, 0, 0, 0, 0, 0, NULL,
     tld_main_stats, tld_main_os
@ -130,6 +131,7 @@ mi_heap_t _mi_heap_main = {
   { 0, 0 },                 // the key of the main heap can be fixed (unlike page keys that need to be secure!)
   { {0x846ca68b}, {0}, 0 }, // random
   0,                        // page count
+  NULL,                     // next heap
   false                     // can reclaim
 };
 
@ -192,6 +194,7 @@ static bool _mi_heap_init(void) {
   heap->keys[1] = _mi_heap_random_next(heap);
   heap->tld = tld;
   tld->heap_backing = heap;
+  tld->heaps = heap;
   tld->segments.stats = &tld->stats;
   tld->segments.os = &tld->os;
   tld->os.stats = &tld->stats;
@ -207,12 +210,24 @@ static bool _mi_heap_done(mi_heap_t* heap) {
   // reset default heap
   _mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t*)&_mi_heap_empty);
 
-  // todo: delete all non-backing heaps?
-
-  // switch to backing heap and free it
+  // switch to backing heap
   heap = heap->tld->heap_backing;
   if (!mi_heap_is_initialized(heap)) return false;
 
+
+  // delete all non-backing heaps in this thread
+  mi_heap_t* curr = heap->tld->heaps;
+  while (curr != NULL) {
+    mi_heap_t* next = curr->next; // save `next` as `curr` will be freed
+    if (curr != heap) {
+      mi_assert_internal(!mi_heap_is_backing(curr));
+      mi_heap_delete(curr);
+    }
+    curr = next;
+  }
+  mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL);
+  mi_assert_internal(mi_heap_is_backing(heap));
+
   // collect if not the main thread
   if (heap != &_mi_heap_main) {
     _mi_heap_collect_abandon(heap);
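
The deletion loop in `_mi_heap_done` reads `curr->next` before calling `mi_heap_delete(curr)` because the delete frees `curr` itself. The same pattern, reduced to its essentials (types and helpers here are illustrative):

    typedef struct node_s { struct node_s* next; } node_t;

    static void delete_all(node_t* head, node_t* keep, void (*del)(node_t*)) {
      for (node_t* curr = head; curr != NULL; ) {
        node_t* next = curr->next;   // must be saved: `del` frees curr
        if (curr != keep) del(curr);
        curr = next;
      }
    }
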
@ -329,6 +329,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
   mi_page_set_in_full(page, mi_page_queue_is_full(to));
 }
 
+// Only called from `mi_heap_absorb`.
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
   mi_assert_internal(mi_heap_contains_queue(heap,pq));
   mi_assert_internal(pq->block_size == append->block_size);
@ -339,6 +340,10 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue
   size_t count = 0;
   for (mi_page_t* page = append->first; page != NULL; page = page->next) {
     mi_page_set_heap(page,heap);
+    // set it to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
+    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
+    // that after appending only the new heap will be used for delayed free operations.
+    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
     count++;
   }
 
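
The new `_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false)` call is what makes `mi_heap_absorb` safe: it spins while another thread is in the transient `MI_DELAYED_FREEING` state, so after the append only the new heap receives delayed frees. A C11-atomics sketch of that kind of state transition (the enum and function are illustrative, not mimalloc's exact encoding):

    #include <stdatomic.h>

    typedef enum { USE_DELAYED_FREE, DELAYED_FREEING, NO_DELAYED_FREE, NEVER_DELAYED_FREE } delayed_t;

    // Move to `want`, but never override the sticky NEVER_DELAYED_FREE state and
    // wait out any thread currently in the transient DELAYED_FREEING state.
    static void set_delayed(_Atomic delayed_t* state, delayed_t want) {
      delayed_t old = atomic_load(state);
      for (;;) {
        while (old == DELAYED_FREEING) { old = atomic_load(state); } // spin: transient
        if (old == NEVER_DELAYED_FREE) return;                       // sticky: leave as-is
        if (atomic_compare_exchange_weak(state, &old, want)) return;
      }
    }
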
@ -105,7 +105,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
 bool _mi_page_is_valid(mi_page_t* page) {
   mi_assert_internal(mi_page_is_valid_init(page));
 #if MI_SECURE
-  mi_assert_internal(page->key != 0);
+  mi_assert_internal(page->keys[0] != 0);
 #endif
   if (mi_page_heap(page)!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);