mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)

revise free reclaim; ensure unown cannot race with a free

This commit is contained in:
  parent 833b091ff9
  commit 666c089fc8

10 changed files with 281 additions and 79 deletions
@@ -144,6 +144,7 @@ mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_
void _mi_arena_page_free(mi_page_t* page);
void _mi_arena_page_abandon(mi_page_t* page);
void _mi_arena_page_unabandon(mi_page_t* page);
bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page);

bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page);
void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap);
@@ -572,29 +573,6 @@ static inline bool mi_page_is_owned(const mi_page_t* page) {
  return mi_tf_is_owned(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free));
}

// Unown a page that is currently owned
static inline void _mi_page_unown(mi_page_t* page) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_thread_id(page)==0);
  const uintptr_t old = mi_atomic_and_acq_rel(&page->xthread_free, ~((uintptr_t)1));
  mi_assert_internal((old&1)==1); MI_UNUSED(old);
  /*
  mi_thread_free_t tf_new;
  mi_thread_free_t tf_old;
  do {
    tf_old = mi_atomic_load_relaxed(&page->xthread_free);
    mi_assert_internal(mi_tf_is_owned(tf_old));
    tf_new = mi_tf_create(mi_tf_block(tf_old), false);
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tf_old, tf_new));
  */
}

// get ownership if it is not yet owned
static inline bool mi_page_try_claim_ownership(mi_page_t* page) {
  const uintptr_t old = mi_atomic_or_acq_rel(&page->xthread_free, 1);
  return ((old&1)==0);
}


//static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
//  return mi_tf_make(mi_tf_block(tf),delayed);
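These removed helpers (reintroduced further down in revised form) rely on `xthread_free` packing the page-local thread-free list pointer together with an ownership flag in bit 0. The standalone sketch below models that encoding with C11 atomics; the `tf_t` type and the helper names are illustrative stand-ins rather than mimalloc's real definitions, and block pointers are assumed to be aligned so that bit 0 is available.

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

typedef uintptr_t tf_t;                 // packed (block pointer | owned bit)

static inline tf_t  tf_make(void* block, bool owned) { return (uintptr_t)block | (owned ? 1 : 0); }
static inline void* tf_block(tf_t tf)                { return (void*)(tf & ~(uintptr_t)1); }
static inline bool  tf_is_owned(tf_t tf)             { return (tf & 1) != 0; }

// claim ownership: atomically set bit 0; we won if it was previously clear
static inline bool page_try_claim(_Atomic(tf_t)* xtf) {
  const tf_t old = atomic_fetch_or_explicit(xtf, 1, memory_order_acq_rel);
  return ((old & 1) == 0);
}

// the old "unown": blindly clear bit 0, keeping whatever list happens to be there.
// A free that pushed the last block just before this clear leaves an all-free page
// that nobody owns and that may never be revisited, which is the race this commit removes.
static inline void page_unown_unconditional(_Atomic(tf_t)* xtf) {
  (void)atomic_fetch_and_explicit(xtf, ~(uintptr_t)1, memory_order_acq_rel);
}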
@@ -638,7 +616,7 @@ static inline bool mi_page_is_full(mi_page_t* page) {
}

// is more than 7/8th of a page in use?
static inline bool mi_page_mostly_used(const mi_page_t* page) {
static inline bool mi_page_is_mostly_used(const mi_page_t* page) {
  if (page==NULL) return true;
  uint16_t frac = page->reserved / 8U;
  return (page->reserved - page->used <= frac);
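As a quick sanity check of the 7/8 threshold in `mi_page_is_mostly_used`: with, say, 512 reserved blocks, `frac` is 64, so the page counts as mostly used once at most 64 blocks remain free. A minimal standalone version of that arithmetic (the struct is a trimmed stand-in, not mimalloc's `mi_page_t`):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct { uint16_t reserved; uint16_t used; } toy_page_t;

static bool toy_page_is_mostly_used(const toy_page_t* page) {
  if (page == NULL) return true;
  uint16_t frac = page->reserved / 8U;            // 1/8th of the capacity
  return (page->reserved - page->used <= frac);   // free blocks <= 1/8th means "mostly used"
}

int main(void) {
  toy_page_t p = { .reserved = 512, .used = 447 };
  assert(!toy_page_is_mostly_used(&p));   // 65 blocks free: not yet mostly used
  p.used = 448;
  assert(toy_page_is_mostly_used(&p));    // 64 blocks free: exactly at the 7/8 threshold
  return 0;
}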
@@ -646,9 +624,22 @@ static inline bool mi_page_mostly_used(const mi_page_t* page) {

static inline bool mi_page_is_abandoned(const mi_page_t* page) {
  // note: the xheap field of an abandoned heap is set to the subproc (for fast reclaim-on-free)
  return (mi_atomic_load_acquire(&page->xthread_id) == 0);
  return (mi_atomic_load_acquire(&page->xthread_id) <= 1);
}

static inline bool mi_page_is_abandoned_mapped(const mi_page_t* page) {
  return (mi_atomic_load_acquire(&page->xthread_id) == 1);
}

static inline void mi_page_set_abandoned_mapped(mi_page_t* page) {
  mi_atomic_or_acq_rel(&page->xthread_id, (uintptr_t)1);
}

static inline void mi_page_clear_abandoned_mapped(mi_page_t* page) {
  mi_atomic_and_acq_rel(&page->xthread_id, ~(uintptr_t)1);
}


static inline bool mi_page_is_huge(const mi_page_t* page) {
  return (page->block_size > MI_LARGE_MAX_OBJ_SIZE || (mi_memkind_is_os(page->memid.memkind) && page->memid.mem.os.alignment > MI_PAGE_MAX_OVERALLOC_ALIGN));
}
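The adjacent `== 0` and `<= 1` returns are the old and new test: after this change `xthread_id` distinguishes an abandoned page that is still registered ("mapped") in the arena's abandoned-page map from one that is not, using bit 0 while the id is otherwise zero. A simplified standalone reading of that encoding (illustrative names, not the mimalloc header):

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

// xthread_id states as used in this commit (illustrative model):
//   0           : abandoned, not registered in the arena's abandoned-page map
//   1           : abandoned AND registered ("mapped") so allocating threads can find it
//   other value : id of the thread that currently owns the page
typedef _Atomic(uintptr_t) xthread_id_t;

static inline bool page_is_abandoned(xthread_id_t* id) {
  return (atomic_load_explicit(id, memory_order_acquire) <= 1);
}
static inline bool page_is_abandoned_mapped(xthread_id_t* id) {
  return (atomic_load_explicit(id, memory_order_acquire) == 1);
}
// only used while the id is 0 (abandoned), so bit 0 acts as the "mapped" flag
static inline void page_set_abandoned_mapped(xthread_id_t* id) {
  atomic_fetch_or_explicit(id, (uintptr_t)1, memory_order_acq_rel);
}
static inline void page_clear_abandoned_mapped(xthread_id_t* id) {
  atomic_fetch_and_explicit(id, ~(uintptr_t)1, memory_order_acq_rel);
}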
@@ -659,6 +650,51 @@ static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size)
}


// Unown a page that is currently owned
static inline void _mi_page_unown_unconditional(mi_page_t* page) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_thread_id(page)==0);
  const uintptr_t old = mi_atomic_and_acq_rel(&page->xthread_free, ~((uintptr_t)1));
  mi_assert_internal((old&1)==1); MI_UNUSED(old);
  /*
  mi_thread_free_t tf_new;
  mi_thread_free_t tf_old;
  do {
    tf_old = mi_atomic_load_relaxed(&page->xthread_free);
    mi_assert_internal(mi_tf_is_owned(tf_old));
    tf_new = mi_tf_create(mi_tf_block(tf_old), false);
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tf_old, tf_new));
  */
}


// get ownership if it is not yet owned
static inline bool mi_page_try_claim_ownership(mi_page_t* page) {
  const uintptr_t old = mi_atomic_or_acq_rel(&page->xthread_free, 1);
  return ((old&1)==0);
}

static inline void _mi_page_unown(mi_page_t* page) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(mi_page_thread_id(page)==0);
  mi_thread_free_t tf_new;
  mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    mi_assert_internal(mi_tf_is_owned(tf_old));
    while mi_unlikely(mi_tf_block(tf_old) != NULL) {
      _mi_page_free_collect(page, false);  // update used
      if (mi_page_all_free(page)) {        // it may become free just before unowning it
        _mi_arena_page_unabandon(page);
        _mi_arena_page_free(page);
        return;
      }
      tf_old = mi_atomic_load_relaxed(&page->xthread_free);
    }
    mi_assert_internal(mi_tf_block(tf_old)==NULL);
    tf_new = mi_tf_create(NULL, false);
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tf_old, tf_new));
}

//-----------------------------------------------------------
// Page flags

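This revised `_mi_page_unown` is what the commit title refers to: ownership is released only by a CAS that requires the thread-free list to be empty, and if a concurrent `mi_free` pushed blocks in the meantime the loop collects them first and may free the page outright instead of orphaning it. The standalone sketch below models just the release step on the same packed encoding; the names are simplified stand-ins, and the commented caller loop is a paraphrase of the function above rather than mimalloc code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

typedef uintptr_t tf_t;  // packed (thread-free list head | owned bit), as in the earlier sketch

// Release ownership only when the thread-free list is observed empty. Returns false if
// the hand-off did not succeed (a concurrently freed block was detected, or the weak CAS
// failed spuriously); the caller then collects the list and retries.
static inline bool try_release_ownership(_Atomic(tf_t)* xtf) {
  tf_t expected = (tf_t)1;            // empty list, owned bit set
  tf_t desired  = (tf_t)0;            // empty list, unowned
  return atomic_compare_exchange_weak_explicit(xtf, &expected, desired,
                                               memory_order_release, memory_order_relaxed);
}

// Caller loop, mirroring _mi_page_unown (function names here are hypothetical stand-ins):
//   for (;;) {
//     if (try_release_ownership(&page->xthread_free)) return;   // clean hand-off
//     page_free_collect(page);                                  // drain concurrent frees
//     if (page_all_free(page)) { page_unabandon(page); page_free(page); return; }
//   }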
@@ -505,6 +505,10 @@ typedef struct mi_stats_s {
  mi_stat_count_t giant;
  mi_stat_count_t malloc;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t pages_reclaim_on_alloc;
  mi_stat_counter_t pages_reclaim_on_free;
  mi_stat_counter_t pages_reabandon_full;
  mi_stat_counter_t pages_unabandon_busy_wait;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t reset_calls;
33  src/arena.c
@@ -465,6 +465,9 @@ static mi_page_t* mi_arena_page_try_find_abandoned(size_t slice_count, size_t bl
  // we got ownership, clear the abandoned entry (unblocking busy waiters)
  mi_pairmap_clear(pairmap, slice_index);
  mi_atomic_decrement_relaxed(&subproc->abandoned_count[bin]);
  _mi_stat_decrease(&_mi_stats_main.pages_abandoned, 1);
  _mi_stat_counter_increase(&_mi_stats_main.pages_reclaim_on_alloc, 1);

  _mi_page_free_collect(page, false);  // update `used` count
  mi_assert_internal(mi_bitmap_is_clearN(&arena->slices_free, slice_index, slice_count));
  mi_assert_internal(mi_bitmap_is_setN(&arena->slices_committed, slice_index, slice_count));
@@ -646,7 +649,7 @@ void _mi_arena_page_free(mi_page_t* page) {
  Arena abandon
----------------------------------------------------------- */

void _mi_arena_page_abandon(mi_page_t* page) {
static void mi_arena_page_abandon_no_stat(mi_page_t* page) {
  mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(page)==page);
  mi_assert_internal(mi_page_is_owned(page));
@@ -667,6 +670,7 @@ void _mi_arena_page_abandon(mi_page_t* page) {
    mi_assert_internal(mi_bitmap_is_clearN(&arena->slices_purge, slice_index, slice_count));
    // mi_assert_internal(mi_bitmap_is_setN(&arena->slices_dirty, slice_index, slice_count));

    mi_page_set_abandoned_mapped(page);
    bool were_zero = mi_pairmap_set(&arena->pages_abandoned[bin], slice_index);
    MI_UNUSED(were_zero); mi_assert_internal(were_zero);
    mi_atomic_increment_relaxed(&subproc->abandoned_count[bin]);
@@ -676,9 +680,32 @@ void _mi_arena_page_abandon(mi_page_t* page) {
    // leave as is; it will be reclaimed when an object is free'd in the page
  }
  _mi_page_unown(page);
}

void _mi_arena_page_abandon(mi_page_t* page) {
  mi_arena_page_abandon_no_stat(page);
  _mi_stat_increase(&_mi_stats_main.pages_abandoned, 1);
}

bool _mi_arena_page_try_reabandon_to_mapped(mi_page_t* page) {
  mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(page)==page);
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(!mi_page_is_abandoned_mapped(page));
  mi_assert_internal(!mi_page_is_full(page));
  mi_assert_internal(!mi_page_all_free(page));
  mi_assert_internal(!mi_page_is_singleton(page));
  if (mi_page_is_full(page) || mi_page_is_abandoned_mapped(page) || page->memid.memkind != MI_MEM_ARENA) {
    return false;
  }
  else {
    _mi_stat_counter_increase(&_mi_stats_main.pages_reabandon_full, 1);
    mi_arena_page_abandon_no_stat(page);
    return true;
  }
}

// called from `mi_free` if trying to unabandon an abandoned page
void _mi_arena_page_unabandon(mi_page_t* page) {
  mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
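`_mi_arena_page_try_reabandon_to_mapped` takes a page that was abandoned without being registered in the arena's abandoned-page map (a full arena page, for instance, is not worth advertising) and, once frees have made room, re-abandons it as "mapped" so allocating threads can find it again. A rough state model of what this diff implies, with illustrative names only and the abandon condition stated as an assumption:

#include <stdbool.h>

// Illustrative model of the page states implied by this diff (not mimalloc types).
typedef enum {
  PAGE_OWNED,               // xthread_id == some thread id
  PAGE_ABANDONED_UNMAPPED,  // xthread_id == 0: abandoned, invisible to allocation
  PAGE_ABANDONED_MAPPED     // xthread_id == 1: abandoned and registered in the arena pairmap
} page_state_t;

// abandon: in this sketch, full pages stay unmapped (not useful yet), others get mapped
static page_state_t abandon(bool page_is_full) {
  return page_is_full ? PAGE_ABANDONED_UNMAPPED : PAGE_ABANDONED_MAPPED;
}

// reabandon-to-mapped: a later free made room, so advertise the page after all
static page_state_t try_reabandon_to_mapped(page_state_t s, bool still_full, bool arena_backed) {
  if (s == PAGE_ABANDONED_UNMAPPED && !still_full && arena_backed) return PAGE_ABANDONED_MAPPED;
  return s;  // unchanged otherwise
}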
@@ -686,7 +713,8 @@ void _mi_arena_page_unabandon(mi_page_t* page) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));

  if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
  if (mi_page_is_abandoned_mapped(page)) {
    mi_assert_internal(page->memid.memkind==MI_MEM_ARENA);
    // remove from the abandoned map
    size_t bin = _mi_bin(mi_page_block_size(page));
    size_t slice_index;
@@ -699,6 +727,7 @@ void _mi_arena_page_unabandon(mi_page_t* page) {

    // this busy waits until a concurrent reader (from alloc_abandoned) is done
    mi_pairmap_clear_while_not_busy(&arena->pages_abandoned[bin], slice_index);
    mi_page_clear_abandoned_mapped(page);
    mi_atomic_decrement_relaxed(&page->subproc->abandoned_count[bin]);
  }
  else {
14  src/bitmap.c
@@ -80,7 +80,7 @@ static inline bool mi_bfield_atomic_set2(_Atomic(mi_bfield_t)*b, size_t idx, boo
  mi_assert_internal(idx < MI_BFIELD_BITS-1);
  const size_t mask = (mi_bfield_t)0x03 << idx;
  mi_bfield_t old = mi_atomic_load_relaxed(b);
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old|mask));     // try to atomically set the mask bits until success
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old|mask)) { }; // try to atomically set the mask bits until success
  if (all_already_set!=NULL) { *all_already_set = ((old&mask)==mask); }
  return ((old&mask) == 0);
}
@@ -90,7 +90,7 @@ static inline bool mi_bfield_atomic_clear2(_Atomic(mi_bfield_t)*b, size_t idx, b
  mi_assert_internal(idx < MI_BFIELD_BITS-1);
  const size_t mask = (mi_bfield_t)0x03 << idx;
  mi_bfield_t old = mi_atomic_load_relaxed(b);
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old&~mask));     // try to atomically clear the mask bits until success
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old&~mask)) { }; // try to atomically clear the mask bits until success
  if (all_already_clear!=NULL) { *all_already_clear = ((old&mask) == 0); }
  return ((old&mask) == mask);
}
@@ -110,7 +110,7 @@ static inline bool mi_bfield_atomic_xset2(mi_bit_t set, _Atomic(mi_bfield_t)*b,
static inline bool mi_bfield_atomic_set_mask(_Atomic(mi_bfield_t)*b, mi_bfield_t mask, size_t* already_set) {
  mi_assert_internal(mask != 0);
  mi_bfield_t old = mi_atomic_load_relaxed(b);
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old|mask));     // try to atomically set the mask bits until success
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old|mask)) { }; // try to atomically set the mask bits until success
  if (already_set!=NULL) { *already_set = mi_bfield_popcount(old&mask); }
  return ((old&mask) == 0);
}
@@ -119,7 +119,7 @@ static inline bool mi_bfield_atomic_set_mask(_Atomic(mi_bfield_t)*b, mi_bfield_t
static inline bool mi_bfield_atomic_clear_mask(_Atomic(mi_bfield_t)*b, mi_bfield_t mask, size_t* already_clear) {
  mi_assert_internal(mask != 0);
  mi_bfield_t old = mi_atomic_load_relaxed(b);
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old&~mask));     // try to atomically clear the mask bits until success
  while (!mi_atomic_cas_weak_acq_rel(b, &old, old&~mask)) { }; // try to atomically clear the mask bits until success
  if (already_clear!=NULL) { *already_clear = mi_bfield_popcount(~(old&mask)); }
  return ((old&mask) == mask);
}
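The four hunks above only give these CAS retry loops an explicit `{ }` body; the idiom itself is unchanged. For reference, the same pattern written against plain C11 atomics (standalone; the `bfield_t` type and the function name are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t bfield_t;

// Atomically set the `mask` bits; return true if none of them were set before.
static bool bfield_atomic_set_mask(_Atomic(bfield_t)* b, bfield_t mask) {
  bfield_t old = atomic_load_explicit(b, memory_order_relaxed);
  // compare_exchange_weak reloads `old` on failure, so the loop body can stay empty
  while (!atomic_compare_exchange_weak_explicit(b, &old, old | mask,
                                                memory_order_acq_rel, memory_order_relaxed)) { }
  return ((old & mask) == 0);
}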
@@ -1115,12 +1115,14 @@ static inline bool mi_bfield_atomic_clear_while_not_busy(_Atomic(mi_bfield_t)*b,
  mi_assert_internal(idx < MI_BFIELD_BITS-1);
  const mi_bfield_t mask = ((mi_bfield_t)0x03 << idx);
  const mi_bfield_t mask_busy = ((mi_bfield_t)MI_PAIR_BUSY << idx);
  mi_bfield_t old;
  mi_bfield_t bnew;
  mi_bfield_t old = mi_atomic_load_relaxed(b);
  do {
    old = mi_atomic_load_relaxed(b);
    if mi_unlikely((old&mask)==mask_busy) {
      old = mi_atomic_load_acquire(b);
      if ((old&mask)==mask_busy) {
        _mi_stat_counter_increase(&_mi_stats_main.pages_unabandon_busy_wait, 1);
      }
      while ((old&mask)==mask_busy) { // busy wait
        mi_atomic_yield();
        old = mi_atomic_load_acquire(b);
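The functional change here is the new `pages_unabandon_busy_wait` counter, bumped once when a busy pair is first observed before spinning. The spin itself is a common wait pattern: re-read with acquire ordering and yield until the busy marker clears. A standalone rendering of that pattern, using POSIX `sched_yield` as a stand-in for `mi_atomic_yield` and illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>   // sched_yield

// Spin until the 2-bit pair selected by `mask` no longer holds the `busy` marker value.
static uint64_t wait_while_busy(_Atomic(uint64_t)* b, uint64_t mask, uint64_t busy) {
  uint64_t old = atomic_load_explicit(b, memory_order_acquire);
  while ((old & mask) == busy) {   // another thread is still reading this entry
    sched_yield();                 // give it a chance to finish
    old = atomic_load_explicit(b, memory_order_acquire);
  }
  return old;                      // caller proceeds with the observed value
}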
138  src/free.c
@@ -156,23 +156,131 @@ void mi_free(void* p) mi_attr_noexcept
static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_thread_id(page)==0);

  #if 1
  // we own the page now..

  // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
  _mi_arena_page_unabandon(page);  // this must be before collect

  // collect the thread atomic free list
  // safe to collect the thread atomic free list
  _mi_page_free_collect(page, false);  // update `used` count
  #if MI_DEBUG > 1
  if (mi_page_is_singleton(page)) { mi_assert_internal(mi_page_all_free(page)); }
  #endif

  if (mi_page_all_free(page)) {
  // 1. free if the page is free now
  if (mi_page_all_free(page))
  {
    // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
    _mi_arena_page_unabandon(page);
    // we can free the page directly
    _mi_arena_page_free(page);
    return;
  }
  // 2. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
  else if (!mi_page_is_mostly_used(page) &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
           !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
           _mi_arena_page_try_reabandon_to_mapped(page))
  {
    return;
  }
  // 3. if the page is not too full, we can try to reclaim it for ourselves
  else if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 &&
           !mi_page_is_mostly_used(page))
  {
    // the page has still some blocks in use (but not too many)
    // reclaim in our heap if compatible, or otherwise abandon again
    // todo: optimize this check further?
    // note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
    // not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
    mi_heap_t* const heap = mi_prim_get_default_heap();
    if (heap != (mi_heap_t*)&_mi_heap_empty)  // we did not already terminate our thread (can this happen?
    {
      mi_heap_t* const tagheap = _mi_heap_by_tag(heap, page->heap_tag);
      if ((tagheap != NULL) &&                        // don't reclaim across heap object types
          (page->subproc == tagheap->tld->subproc) && // don't reclaim across sub-processes; todo: make this check faster (integrate with _mi_heap_by_tag ? )
          (_mi_arena_memid_is_suitable(page->memid, tagheap->arena_id)) // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
         )
      {
        // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
        _mi_arena_page_unabandon(page);
        _mi_heap_page_reclaim(tagheap, page);
        _mi_stat_counter_increase(&_mi_stats_main.pages_reclaim_on_free, 1);
        return;
      }
    }
  }

  // not reclaimed or free'd, unown again
  _mi_page_unown(page);

  #else
  if (!mi_page_is_abandoned_mapped(page)) {
    // singleton or OS allocated
    if (mi_page_is_singleton(page)) {
      // free singleton pages
      #if MI_DEBUG>1
      _mi_page_free_collect(page, false);  // update `used` count
      mi_assert_internal(mi_page_all_free(page));
      #endif
      // we can free the page directly
      _mi_arena_page_free(page);
      return;
    }
    else {
      // the page has still some blocks in use
      const bool was_full = mi_page_is_full(page);
      _mi_page_free_collect(page,false);  // update used
      if (mi_page_all_free(page)) {
        // no need to unabandon as it is unmapped
        _mi_arena_page_free(page);
        return;
      }
      else if (was_full && _mi_arena_page_reabandon_full(page)) {
        return;
      }
      else if (!mi_page_is_mostly_used(page) && _mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0) {
        // the page has still some blocks in use (but not too many)
        // reclaim in our heap if compatible, or otherwise abandon again
        // todo: optimize this check further?
        // note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
        // not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
        mi_heap_t* const heap = mi_prim_get_default_heap();
        if (heap != (mi_heap_t*)&_mi_heap_empty) { // we did not already terminate our thread (can this happen?
          mi_heap_t* const tagheap = _mi_heap_by_tag(heap, page->heap_tag);
          if ((tagheap != NULL) &&                        // don't reclaim across heap object types
              (page->subproc == tagheap->tld->subproc) && // don't reclaim across sub-processes; todo: make this check faster (integrate with _mi_heap_by_tag ? )
              (_mi_arena_memid_is_suitable(page->memid, tagheap->arena_id)) // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
             )
          {
            _mi_stat_counter_increase(&_mi_stats_main.pages_reclaim_on_free, 1);
            // make it part of our heap (no need to unabandon as is unmapped)
            _mi_heap_page_reclaim(tagheap, page);
            return;
          }
        }
      }
    }
  }
  else {
    // don't reclaim pages that can be found for fresh page allocations
  }

  // not reclaimed or free'd, unown again
  _mi_page_unown(page);
  #endif
}

/*
  // we own the page now..
  // safe to collect the thread atomic free list
  _mi_page_free_collect(page, false);  // update `used` count
  if (mi_page_is_singleton(page)) { mi_assert_internal(mi_page_all_free(page)); }

  if (mi_page_all_free(page)) {
    // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
    _mi_arena_page_unabandon(page);  // this must be before free'ing
    // we can free the page directly
    _mi_arena_page_free(page);
    return;
  }
  else if (!mi_page_is_mostly_used(page)) {
    // the page has still some blocks in use (but not too many)
    // reclaim in our heap if compatible, or otherwise abandon again
    // todo: optimize this check further?
    // note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
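Taken together, the new multi-threaded free path follows a short decision ladder once the freeing thread has claimed ownership of an abandoned page: free it if it became empty, otherwise try to re-advertise it, otherwise try to adopt it into the current heap, otherwise release ownership again. A condensed sketch of that ladder (the declarations are compile-only stand-ins for the mimalloc calls shown above, not real APIs):

#include <stdbool.h>

// Stand-in declarations so the sketch compiles; in mimalloc these are the calls shown above.
typedef struct page_s page_t;
bool page_all_free(page_t* p);
bool page_is_mostly_used(page_t* p);
bool page_is_abandoned_mapped(page_t* p);
bool page_is_arena_backed(page_t* p);
bool reclaim_on_free_enabled(void);
bool try_reabandon_to_mapped(page_t* p);
bool try_reclaim_into_current_heap(page_t* p);
void page_unabandon(page_t* p);
void page_free(page_t* p);
void page_free_collect(page_t* p);
void page_unown(page_t* p);

// The reclaim-on-free ladder: free > reabandon-to-mapped > reclaim > unown.
void free_try_reclaim_mt(page_t* page) {
  page_free_collect(page);                        // drain the thread-free list first
  if (page_all_free(page)) {                      // 1. page became empty: release it
    page_unabandon(page);
    page_free(page);
  }
  else if (!page_is_mostly_used(page) &&          // 2. formerly-full page has room again:
           !page_is_abandoned_mapped(page) &&     //    advertise it to other allocators
           page_is_arena_backed(page) &&
           try_reabandon_to_mapped(page)) {
    // reabandoned as "mapped"; nothing more to do
  }
  else if (reclaim_on_free_enabled() &&           // 3. adopt it into this thread's heap
           !page_is_mostly_used(page) &&          //    if tag/subprocess/arena are compatible
           try_reclaim_into_current_heap(page)) {
    // reclaimed; the page belongs to this heap now
  }
  else {
    page_unown(page);                             // 4. give up ownership again
  }
}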
@@ -188,17 +296,23 @@ static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
          (_mi_arena_memid_is_suitable(page->memid, tagheap->arena_id)) // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
         )
      {
        // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
        _mi_arena_page_unabandon(page);
        _mi_stat_counter_increase(&_mi_stats_main.pages_reclaim_on_free, 1);
        // make it part of our heap
        _mi_heap_page_reclaim(tagheap, page);
        return;
      }
    }

    // we cannot reclaim this page.. abandon it again
    _mi_arena_page_abandon(page);
  }
}

  // we cannot reclaim this page.. leave it abandoned
  // todo: should re-abandon or otherwise a partly used page could never be re-used if the
  // objects in it are not freed explicitly.
  _mi_page_unown(page);
*/


// Push a block that is owned by another thread (or abandoned) on its page-local thread free list.
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
@@ -83,7 +83,7 @@ const mi_page_t _mi_page_empty = {
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
  { 0, 0 } \
  { 0, 0 }, { 0, 0 }, { 0, 0 } \
  MI_STAT_COUNT_END_NULL()

// --------------------------------------------------------
|
|||
{ MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
|
||||
{ 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's
|
||||
{ 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
|
||||
{ 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
|
||||
{ 0, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
|
||||
{ MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
|
||||
{ 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
|
||||
#if defined(MI_VISIT_ABANDONED)
|
||||
|
|
|
@ -811,7 +811,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
|
|||
_mi_page_free(page_candidate, pq);
|
||||
page_candidate = page;
|
||||
}
|
||||
else if (page->used >= page_candidate->used) {
|
||||
else if (page->used >= page_candidate->used && !mi_page_is_mostly_used(page)) {
|
||||
page_candidate = page;
|
||||
}
|
||||
// if we find a non-expandable candidate, or searched for N pages, return with the best candidate
|
||||
|
|
|
@ -331,6 +331,10 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
|
|||
mi_stat_print_ex(&stats->page_committed, "touched", 1, out, arg, "");
|
||||
mi_stat_print_ex(&stats->pages, "pages", -1, out, arg, "");
|
||||
mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg);
|
||||
mi_stat_counter_print(&stats->pages_reclaim_on_alloc, "-reclaima", out, arg);
|
||||
mi_stat_counter_print(&stats->pages_reclaim_on_free, "-reclaimf", out, arg);
|
||||
mi_stat_counter_print(&stats->pages_reabandon_full, "-reabandon", out, arg);
|
||||
mi_stat_counter_print(&stats->pages_unabandon_busy_wait, "-waits", out, arg);
|
||||
mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg);
|
||||
mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg);
|
||||
mi_stat_counter_print(&stats->arena_count, "arenas", out, arg);
|
||||
|
|
|
@ -43,7 +43,13 @@ static int ITER = 10;
|
|||
#elif 0
|
||||
static int THREADS = 4;
|
||||
static int SCALE = 100;
|
||||
static int ITER = 10;
|
||||
#define ALLOW_LARGE false
|
||||
#elif 1
|
||||
static int THREADS = 32;
|
||||
static int SCALE = 50;
|
||||
static int ITER = 50;
|
||||
#define ALLOW_LARGE false
|
||||
#else
|
||||
static int THREADS = 32; // more repeatable if THREADS <= #processors
|
||||
static int SCALE = 50; // scaling factor
|
||||
|
@ -54,7 +60,12 @@ static int ITER = 50; // N full iterations destructing and re-creating a
|
|||
|
||||
#define STRESS // undefine for leak test
|
||||
|
||||
static bool allow_large_objects = false; // allow very large objects? (set to `true` if SCALE>100)
|
||||
#ifndef ALLOW_LARGE
|
||||
#define ALLOW_LARGE true
|
||||
#endif
|
||||
|
||||
static bool allow_large_objects = ALLOW_LARGE; // allow very large objects? (set to `true` if SCALE>100)
|
||||
|
||||
static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`?
|
||||
|
||||
static bool main_participates = false; // main thread participates as a worker too
|
||||
|
@ -332,6 +343,8 @@ int main(int argc, char** argv) {
|
|||
mi_debug_show_arenas(true,true,false);
|
||||
#endif
|
||||
// mi_stats_print(NULL);
|
||||
#else
|
||||
mi_stats_print(NULL); // so we see rss/commit/elapsed
|
||||
#endif
|
||||
//bench_end_program();
|
||||
return 0;
|
||||
|
|