Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 23:39:31 +03:00)

Commit 1d7a9f62a5 (parent 8f2a5864b8): bug fixes
6 changed files with 34 additions and 32 deletions
include/mimalloc/internal.h

@@ -598,7 +598,7 @@ static inline bool mi_page_mostly_used(const mi_page_t* page) {
 
 static inline bool mi_page_is_abandoned(const mi_page_t* page) {
   // note: the xheap field of an abandoned heap is set to the subproc (for fast reclaim-on-free)
-  return (mi_page_thread_id(page) == 0);
+  return (mi_atomic_load_acquire(&page->xthread_id) == 0);
 }
 
 static inline bool mi_page_is_huge(const mi_page_t* page) {
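The header change above swaps a plain thread-id read for an explicit acquire load when testing whether a page is abandoned. Below is a minimal sketch of the intended acquire/release pairing, written with standard C11 atomics rather than mimalloc's mi_atomic_* wrappers; the page type and field are simplified stand-ins, not mimalloc's actual definitions:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    // Illustrative stand-in for mi_page_t with only the relevant field.
    typedef struct page_s {
      _Atomic(uintptr_t) xthread_id;  // owning thread id; 0 once abandoned
    } page_t;

    // Abandoning thread: the release store publishes all earlier writes
    // to the page before the thread id becomes observable as 0.
    static void page_abandon(page_t* page) {
      atomic_store_explicit(&page->xthread_id, 0, memory_order_release);
    }

    // Freeing thread: the acquire load pairs with the release store, so a
    // thread that observes 0 also observes the abandoned page's state.
    static bool page_is_abandoned(page_t* page) {
      return (atomic_load_explicit(&page->xthread_id, memory_order_acquire) == 0);
    }

    int main(void) {
      page_t p; atomic_init(&p.xthread_id, 1234);
      page_abandon(&p);
      return page_is_abandoned(&p) ? 0 : 1;
    }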
src/arena.c (17 changes)

@@ -646,11 +646,12 @@ bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
   // we only call this when on free (and thus there is still an object alive in the page)
   mi_memid_t memid = page->memid;
   if (!_mi_arena_memid_is_suitable(memid, heap->arena_id)) return false; // don't reclaim between exclusive and non-exclusive arenas
+  if (mi_atomic_load_acquire(&page->xheap) != (uintptr_t)heap->tld->subproc) return false;
 
   if mi_likely(memid.memkind == MI_MEM_ARENA) {
     size_t slice_index;
     mi_arena_t* arena = mi_page_arena(page, &slice_index, NULL);
-    if (arena->subproc != heap->tld->subproc) return false; // only reclaim within the same subprocess
+    //if (arena->subproc != heap->tld->subproc) return false; // only reclaim within the same subprocess
 
     // don't reclaim more from a `free` call than half the current segments
     // this is to prevent a pure free-ing thread to start owning too many segments
@@ -665,6 +666,11 @@ bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
       mi_assert_internal(!mi_page_is_abandoned(page));
       return true;
     }
+    else {
+      if (mi_page_is_abandoned(page)) {
+        mi_assert(false);
+      }
+    }
   }
   else {
     // A page in OS or external memory
@@ -1089,15 +1095,6 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
 
 
 
-/* -----------------------------------------------------------
-  Abandoned pages
------------------------------------------------------------ */
-
-void mi_arena_page_abandon(mi_page_t* page) {
-  mi_assert_internal(mi_page_is_abandoned(page));
-  if (mi_page_is_full(page)) {}
-}
-
 /* -----------------------------------------------------------
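The new early return in _mi_arena_try_reclaim compares the page's xheap field (which, per the header comment above, holds the sub-process of an abandoned page) against the caller's sub-process before any arena lookup; the old per-arena subproc check, now commented out, only covered the MI_MEM_ARENA path. A sketch of that guard under those assumptions, with hypothetical stand-in types:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct subproc_s { int id; } subproc_t;  // hypothetical stand-in

    typedef struct page_s {
      // for an abandoned page this holds the owning sub-process pointer
      _Atomic(uintptr_t) xheap;
    } page_t;

    // Reclaim guard: refuse to reclaim a page that belongs to another
    // sub-process, whether the page lives in arena or OS memory.
    static bool may_reclaim(page_t* page, subproc_t* my_subproc) {
      return (atomic_load_explicit(&page->xheap, memory_order_acquire)
              == (uintptr_t)my_subproc);
    }

    int main(void) {
      subproc_t sp;
      page_t p; atomic_init(&p.xheap, (uintptr_t)&sp);
      return may_reclaim(&p, &sp) ? 0 : 1;
    }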
src/free.c (15 changes)

@@ -225,11 +225,10 @@ static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block
 static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
   // first see if the page was abandoned and if we can reclaim it into our thread
-  if (mi_page_is_abandoned(page) &&
-      (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 ||
-       mi_page_is_singleton(page) // only one block, and we are free-ing it
-      ) &&
-      mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
+  if (mi_page_is_abandoned(page)) {
+    if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 ||
+        mi_page_is_singleton(page)) { // only one block, and we are free-ing it
+      if (mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
   {
     // the page is abandoned, try to reclaim it into our heap
     if (_mi_arena_try_reclaim(mi_heap_get_default(), page)) { // TODO: avoid putting it in the full free queue
@@ -239,9 +238,15 @@ static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block
       return;
     }
     else {
+      if (mi_page_is_abandoned(page)) {
+        mi_assert(false);
+      }
       mi_assert_internal(!mi_page_is_singleton(page)); // we should have succeeded on singleton pages
     }
   }
+    }
+  }
 
   // The padding check may access the non-thread-owned page for the key values.
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
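The diff above splits one &&-chained condition into nested ifs so the failure branch can carry its own diagnostics. For readability, here is the same control flow unwound into a standalone sketch; every helper below is a hypothetical stub standing in for the mimalloc internals named in the diff (mi_page_is_abandoned, _mi_option_get_fast, _mi_arena_try_reclaim, mi_prim_get_default_heap), not the library's API:

    #include <assert.h>
    #include <stdbool.h>

    // Hypothetical stand-ins so the control flow compiles standalone.
    typedef struct page_s { bool abandoned; bool singleton; } page_t;
    static bool reclaim_on_free_enabled(void)     { return true; }
    static bool thread_has_heap(void)             { return true; }
    static bool try_reclaim(page_t* page)         { page->abandoned = false; return true; }
    static void free_in_reclaimed_heap(page_t* p) { (void)p; }
    static void free_multi_threaded(page_t* p)    { (void)p; }

    // Shape of the new reclaim-on-free path in mi_free_block_mt.
    static void free_block_mt(page_t* page) {
      if (page->abandoned) {
        if (reclaim_on_free_enabled() || page->singleton) {
          if (thread_has_heap()) {          // guard against exited threads (issue #944)
            if (try_reclaim(page)) {
              free_in_reclaimed_heap(page); // fast local free
              return;
            }
            else {
              assert(!page->abandoned);     // a failed reclaim should not leave the page abandoned
              assert(!page->singleton);     // we should have succeeded on singleton pages
            }
          }
        }
      }
      free_multi_threaded(page);            // fall back to the cross-thread free path
    }

    int main(void) {
      page_t p = { .abandoned = true, .singleton = true };
      free_block_mt(&p);
      return 0;
    }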
src/init.c

@@ -396,8 +396,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
   tld->heap_backing = bheap;
   tld->heaps = NULL;
   tld->subproc = &mi_subproc_default;
-  tld->tseq = 0;
-  mi_atomic_add_acq_rel(&mi_tcount, 1);
+  tld->tseq = 0; // mi_atomic_add_acq_rel(&mi_tcount, 1);
   tld->os.stats = &tld->stats;
 }
 
src/os.c (7 changes)

@@ -219,11 +219,12 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
   if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
   size = _mi_align_up(size, _mi_os_page_size());
 
-  const bool use_overalloc = (alignment > mi_os_mem_config.alloc_granularity && alignment <= size/8);
+  // try a direct allocation if the alignment is below the default, or if larger than 1/64 fraction of the size (to avoid waste).
+  const bool try_direct_alloc = (alignment <= mi_os_mem_config.alloc_granularity || alignment > size/64);
 
   // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
   void* p = NULL;
-  if (!use_overalloc) {
+  if (try_direct_alloc) {
     p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
   }
 
@@ -234,7 +235,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
   else {
     // if not aligned, free it, overallocate, and unmap around it
     #if !MI_TRACK_ASAN
-    if (!use_overalloc) {
+    if (try_direct_alloc) {
       _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
     }
     #endif
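Besides the rename, the condition is inverted and retuned: a direct aligned allocation is now attempted unless the alignment exceeds the OS allocation granularity while still being at most 1/64 of the size, where previously the over-allocation path was taken for alignments up to 1/8 of the size. A small self-contained illustration of the predicate; the granularity constant here is a hypothetical stand-in for mi_os_mem_config.alloc_granularity:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    // Hypothetical value; 64 KiB is a typical Windows allocation granularity.
    static const size_t alloc_granularity = 64 * 1024;

    // Mirror of the new condition: go direct when the alignment is at or
    // below the granularity, or so large relative to the size that
    // over-allocating by `alignment` extra bytes would waste too much.
    static bool try_direct_alloc(size_t size, size_t alignment) {
      return (alignment <= alloc_granularity || alignment > size / 64);
    }

    int main(void) {
      printf("%d\n", (int)try_direct_alloc(16u*1024*1024, 4096));       // 1: small alignment, go direct
      printf("%d\n", (int)try_direct_alloc(256u*1024*1024, 1024*1024)); // 0: over-allocate and trim
      printf("%d\n", (int)try_direct_alloc(4u*1024*1024, 1024*1024));   // 1: 1 MiB > (4 MiB)/64
      return 0;
    }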
test/test-stress.c

@@ -36,7 +36,7 @@ static int ITER = 400;
 static int THREADS = 8;
 static int SCALE = 25;
 static int ITER = 20;
-#elif defined(xMI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
+#elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
 static int THREADS = 8;
 static int SCALE = 10;
 static int ITER = 10;