merge from dev-track

commit 66525ccae3
15 changed files with 221 additions and 45 deletions
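This merge threads the new mi_track_* hooks (added for memory-tracker support, e.g. valgrind) through the allocation, free, and OS-memory paths, and guards debug code that writes into blocks with !MI_TRACK_ENABLED. As a rough mental model — a sketch assuming the valgrind build, not the verbatim contents of mimalloc-track.h — the hooks correspond to valgrind client requests:

    /* sketch: plausible expansions of the tracking hooks under valgrind */
    #include <valgrind/valgrind.h>
    #include <valgrind/memcheck.h>

    #define mi_track_malloc(p,size,zero)    VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE,zero)
    #define mi_track_free(p)                VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE)
    #define mi_track_mem_defined(p,size)    VALGRIND_MAKE_MEM_DEFINED(p,size)
    #define mi_track_mem_undefined(p,size)  VALGRIND_MAKE_MEM_UNDEFINED(p,size)
    #define mi_track_mem_noaccess(p,size)   VALGRIND_MAKE_MEM_NOACCESS(p,size)

In non-tracking builds these compile to no-ops, so the hot paths are unchanged.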
@@ -41,6 +41,11 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
+
+  if (p != aligned_p) {
+    mi_track_free(p);
+    mi_track_malloc(aligned_p,size,zero);
+  }
   return aligned_p;
 }
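When the user-visible pointer differs from the block start, the tracker must be told about the allocation at the address the caller actually receives; hence the mi_track_free/mi_track_malloc re-registration above. For illustration (a hypothetical call showing the offset semantics that the asserts check):

    void* q = mi_malloc_aligned_at(48, 32, 16);  // (q + 16) % 32 == 0; q itself may be unaligned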
@@ -82,6 +87,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
     void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
     mi_assert_internal(p != NULL);
     mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
+    mi_track_malloc(p,size,zero);
     return p;
   }
 }
@@ -29,7 +29,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
 // Override system malloc
 // ------------------------------------------------------
 
-#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !defined(MI_VALGRIND)
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !MI_TRACK_ENABLED
   // gcc, clang: use aliasing to alias the exported function to one of our `mi_` functions
   #if (defined(__GNUC__) && __GNUC__ >= 9)
   #pragma GCC diagnostic ignored "-Wattributes"  // or we get warnings that nodiscard is ignored on a forward
@@ -44,7 +44,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
   #define MI_FORWARD02(fun,x,y)  MI_FORWARD(fun)
 #else
   // otherwise use forwarding by calling our `mi_` function
-  #define MI_FORWARD1(fun,x)     { return fun(x); }
+  #define MI_FORWARD1(fun,x)     { return fun(x); }
   #define MI_FORWARD2(fun,x,y)   { return fun(x,y); }
   #define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); }
   #define MI_FORWARD0(fun,x)     { fun(x); }
@@ -123,10 +123,10 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
   // we just override new/delete which does work in a static library.
 #else
   // On all other systems forward to our API
-  void* malloc(size_t size)              MI_FORWARD1(mi_malloc, size)
-  void* calloc(size_t size, size_t n)    MI_FORWARD2(mi_calloc, size, n)
-  void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize)
-  void  free(void* p)                    MI_FORWARD0(mi_free, p)
+  mi_decl_export void* malloc(size_t size)              MI_FORWARD1(mi_malloc, size)
+  mi_decl_export void* calloc(size_t size, size_t n)    MI_FORWARD2(mi_calloc, size, n)
+  mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize)
+  mi_decl_export void  free(void* p)                    MI_FORWARD0(mi_free, p)
 #endif
 
 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__)
71 src/alloc.c
@@ -12,6 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc-internal.h"
 #include "mimalloc-atomic.h"
 
+
 #include <string.h>  // memset, strlen
 #include <stdlib.h>  // malloc, exit
 
@@ -37,15 +38,20 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   page->free = mi_block_next(page, block);
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
 
+  // allow use of the block internally
+  // note: when tracking we need to avoid ever touching the MI_PADDING since
+  // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`)
+  mi_track_mem_undefined(block, mi_page_usable_block_size(page));
+
   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
-    _mi_memzero_aligned(block, zsize);
+    const size_t zsize = (page->is_zero ? sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size);
+    _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
   }
 
-#if (MI_DEBUG>0)
-  if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); }
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED
+  if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); }
 #elif (MI_SECURE!=0)
   if (!zero) { block->next = 0; } // don't leak internal data
 #endif
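The reworked zeroing keeps the tracker's red-zone untouched: zsize now counts the MI_PADDING_SIZE bytes, and only zsize - MI_PADDING_SIZE bytes are actually written. On an already-zero'd page that is exactly sizeof(block->next), i.e. just the freelist link; on a dirty page it is xblock_size minus the padding (xblock_size includes the padding), so everything up to the red-zone is zeroed while the padding itself is never written.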
@@ -62,10 +68,13 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   }
 #endif
 
-#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
+#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
   mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
   ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
   #if (MI_DEBUG>1)
   mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
   #endif
   padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
   padding->delta  = (uint32_t)(delta);
   uint8_t* fill = (uint8_t*)padding - delta;
@@ -94,6 +103,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
   #endif
+  mi_track_malloc(p,size,zero);
   return p;
 }
 
@@ -122,6 +132,7 @@ extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero
       mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
     }
     #endif
+    mi_track_malloc(p,size,zero);
     return p;
   }
 }
@@ -176,16 +187,19 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
   return false;
 }
 
+#define mi_track_page(page,access)  { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
+
 static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+  bool is_double_free = false;
   mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
   if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
       (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
   {
     // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
     // (continue in separate function to improve code generation)
-    return mi_check_is_double_freex(page, block);
+    is_double_free = mi_check_is_double_freex(page, block);
   }
-  return false;
+  return is_double_free;
 }
 #else
 static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
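Two changes here: a mi_track_page helper is introduced to flip tracker permissions for a whole page at once, and the early return is replaced by a single-exit is_double_free flag. A sketch of how the macro is meant to be used (illustrative; noaccess, defined, and undefined are the suffixes pasted in by ##access):

    mi_track_page(page, noaccess);  // mark the whole page off-limits for the tracker
    mi_track_page(page, defined);   // mark it readable/writable and initialized again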
@@ -199,12 +213,19 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
 // Check for heap block overflow by setting up padding at the end of the block
 // ---------------------------------------------------------------------------
 
-#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
 static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
   *bsize = mi_page_usable_block_size(page);
   const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));
   *delta = padding->delta;
-  return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
+  uint32_t  canary = padding->canary;
+  uintptr_t keys[2];
+  keys[0] = page->keys[0];
+  keys[1] = page->keys[1];
+  bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
+  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+  return ok;
 }
 
 // Return the exact usable size of a block.
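This hunk establishes the access-bracketing pattern reused in the padding and free paths below: briefly lift the red-zone over the mi_padding_t, copy the fields into locals, restore protection, then validate using only the copies. In sketch form (names as in the hunk):

    mi_track_mem_defined(padding, sizeof(mi_padding_t));   // lift the red-zone
    uint32_t canary = padding->canary;                     // copy fields out
    *delta = padding->delta;
    mi_track_mem_noaccess(padding, sizeof(mi_padding_t));  // restore the red-zone
    // ...compare the canary against mi_ptr_encode using only the local copies...

The single exit (return ok; instead of returning mid-function) guarantees the mi_track_mem_noaccess restore always runs.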
@@ -212,7 +233,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
   size_t bsize;
   size_t delta;
   bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
-  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
+  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
   return (ok ? bsize - delta : 0);
 }
 
@@ -226,13 +247,16 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
   *size = bsize - delta;
   uint8_t* fill = (uint8_t*)block + bsize - delta;
   const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
+  mi_track_mem_defined(fill,maxpad);
   for (size_t i = 0; i < maxpad; i++) {
     if (fill[i] != MI_DEBUG_PADDING) {
       *wrong = bsize - delta + i;
-      return false;
+      ok = false;
+      break;
     }
   }
-  return true;
+  mi_track_mem_noaccess(fill,maxpad);
+  return ok;
 }
 
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
@@ -331,14 +355,14 @@ static void mi_stat_huge_free(const mi_page_t* page) {
 // Free
 // ------------------------------------------------------
 
-// multi-threaded free
+// multi-threaded free (or free in huge block)
 static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
   // The padding check may access the non-thread-owned page for the key values.
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
   mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
-  #if (MI_DEBUG!=0)
+  #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED   // note: when tracking, cannot use mi_usable_size with multi-threading
   memset(block, MI_DEBUG_FREED, mi_usable_size(block));
   #endif
 
@@ -393,11 +417,12 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
   // and push it on the free list
   //const size_t bsize = mi_page_block_size(page);
   if mi_likely(local) {
     // owning thread can free a block directly
     if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
-    #if (MI_DEBUG!=0)
+    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
@@ -428,8 +453,9 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
 static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept {
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
-  mi_stat_free(page, block);
-  _mi_free_block(page, local, block);
+  mi_stat_free(page, block);  // stat_free may access the padding
+  mi_track_free(p);
+  _mi_free_block(page, local, block);
 }
 
 // Get the segment data belonging to a pointer
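The reordering is deliberate: as the new comment notes, mi_stat_free may still read the block's padding, so it must run before mi_track_free(p) retires the allocation in the tracker, and only then is the block actually freed.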
@@ -481,20 +507,21 @@ void mi_free(void* p) mi_attr_noexcept
     if mi_unlikely(mi_check_is_double_free(page,block)) return;
     mi_check_padding(page, block);
     mi_stat_free(page, block);
-    #if (MI_DEBUG!=0)
+    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
+    mi_track_free(p);
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
       _mi_page_retire(page);
     }
   }
   else {
     // non-local, aligned blocks, or a full page; use the more generic path
    // note: recalc page in generic to improve code generation
     mi_free_generic(segment, tid == segment->thread_id, p);
   }
 }
 
 bool _mi_free_delayed_block(mi_block_t* block) {
@@ -627,10 +654,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
+  #if !MI_TRACK_ENABLED
   if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
     // todo: adjust potential padding to reflect the new size?
     return p;  // reallocation still fits and not more than 50% waste
   }
+  #endif
   void* newp = mi_heap_malloc(heap,newsize);
   if mi_likely(newp != NULL) {
     if (zero && newsize > size) {
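Under tracking the in-place shortcut is compiled out: returning p without a mi_track_free/mi_track_malloc pair would leave the tracker believing the block still has its old size. In untracked builds the shortcut fires whenever the new size still fits with at most 50% waste, e.g. (hypothetical sizes, ignoring that the usable size may exceed the requested size):

    void* p = mi_malloc(100);
    p = mi_realloc(p, 60);   // 60 <= 100 and 60 >= 100/2: returns p unchanged
    p = mi_realloc(p, 30);   // 30 < 100/2: allocates a smaller block and copies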
2 src/os.c
@@ -986,7 +986,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   else _mi_stat_decrease(&stats->reset, csize);
   if (!reset) return true; // nothing to do on unreset!
 
-#if (MI_DEBUG>1)
+#if (MI_DEBUG>1) && !MI_TRACK_ENABLED
   if (MI_SECURE==0) {
     memset(start, 0, csize); // pretend it is eagerly reset
   }
@@ -619,7 +619,9 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_page_set_heap(page, heap);
   page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
   size_t page_size;
-  _mi_segment_page_start(segment, page, &page_size);
+  const void* page_start = _mi_segment_page_start(segment, page, &page_size);
+  MI_UNUSED(page_start);
+  mi_track_mem_noaccess(page_start,page_size);
   mi_assert_internal(mi_page_block_size(page) <= page_size);
   mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
   mi_assert_internal(page_size / block_size < (1L<<16));
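A freshly initialized page is now marked noaccess in its entirety, so any touch before a block is handed out is reported by the tracker; _mi_page_malloc (see the alloc.c hunk above) re-opens each block with mi_track_mem_undefined as it is allocated. MI_UNUSED keeps non-tracking release builds warning-free, since page_start is then consumed only by the asserts and the (no-op) tracking macro.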
@@ -330,7 +330,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
   }
   mi_assert_internal(!_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));
 
-  #if (MI_DEBUG>=2)
+  #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
   if (*commit) { ((uint8_t*)p)[0] = 0; }
   #endif
 
@@ -376,9 +376,9 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
 
   if (p != NULL) {
     mi_assert_internal((uintptr_t)p % alignment == 0);
-    #if (MI_DEBUG>=2)
+    #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
     if (*commit) { ((uint8_t*)p)[0] = 0; }  // ensure the memory is committed
     #endif
   }
   return p;
 }
@@ -395,7 +395,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
   if (p==NULL) return;
   if (size==0) return;
   size = _mi_align_up(size, _mi_os_page_size());
 
   size_t arena_memid = 0;
   mi_bitmap_index_t bit_idx;
   mem_region_t* region;
@@ -817,6 +817,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
     if (!ok) return NULL; // failed to commit
     mi_commit_mask_set(&commit_mask, &commit_needed_mask);
   }
+  mi_track_mem_undefined(segment,commit_needed);
   segment->memid = memid;
   segment->mem_is_pinned = is_pinned;
   segment->mem_is_large = mem_large;