Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-04 22:49:32 +03:00)

commit 66818bf632 (parent 3bbbe6c686)
use atomic yield on delayed-freeing; clarify code

4 changed files with 39 additions and 38 deletions
src/heap.c (46 changed lines)
@@ -76,9 +76,9 @@ static bool mi_heap_is_valid(mi_heap_t* heap) {
 ----------------------------------------------------------- */

 typedef enum mi_collect_e {
-  NORMAL,
-  FORCE,
-  ABANDON
+  MI_NORMAL,
+  MI_FORCE,
+  MI_ABANDON
 } mi_collect_t;

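Note: the rename is not purely cosmetic. The collect logic compares these values with `>=`, so the cases must stay ordered by increasing aggressiveness (MI_NORMAL < MI_FORCE < MI_ABANDON), making `collect >= MI_FORCE` read as "force or abandon". A minimal standalone sketch of that ordered-enum pattern (the names below are illustrative, not mimalloc's):

#include <stdbool.h>
#include <stdio.h>

// illustrative only: the declaration order encodes increasing
// aggressiveness, mirroring MI_NORMAL < MI_FORCE < MI_ABANDON
typedef enum collect_e {
  COLLECT_NORMAL,   // routine, cheap collection
  COLLECT_FORCE,    // free as much as possible
  COLLECT_ABANDON   // thread exits; abandon what cannot be freed
} collect_t;

static void collect(collect_t c) {
  bool force   = (c >= COLLECT_FORCE);    // true for FORCE and ABANDON
  bool abandon = (c == COLLECT_ABANDON);
  printf("force=%d abandon=%d\n", force, abandon);
}

int main(void) {
  collect(COLLECT_NORMAL);   // force=0 abandon=0
  collect(COLLECT_FORCE);    // force=1 abandon=0
  collect(COLLECT_ABANDON);  // force=1 abandon=1
  return 0;
}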
@@ -87,12 +87,13 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   UNUSED(heap);
   mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
   mi_collect_t collect = *((mi_collect_t*)arg_collect);
-  _mi_page_free_collect(page, collect >= ABANDON);
+  _mi_page_free_collect(page, collect >= MI_FORCE);
   if (mi_page_all_free(page)) {
-    // no more used blocks, free the page. TODO: should we retire here and be less aggressive?
-    _mi_page_free(page, pq, collect != NORMAL);
+    // no more used blocks, free the page.
+    // note: this will free retired pages as well.
+    _mi_page_free(page, pq, collect >= MI_FORCE);
   }
-  else if (collect == ABANDON) {
+  else if (collect == MI_ABANDON) {
     // still used blocks but the thread is done; abandon the page
     _mi_page_abandon(page, pq);
   }
@@ -111,61 +112,60 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
 static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
   if (!mi_heap_is_initialized(heap)) return;
-  _mi_deferred_free(heap, collect > NORMAL);
+  _mi_deferred_free(heap, collect >= MI_FORCE);

   // collect (some) abandoned pages
-  if (collect >= NORMAL && !heap->no_reclaim) {
-    if (collect == NORMAL) {
+  if (collect >= MI_NORMAL && !heap->no_reclaim) {
+    if (collect == MI_NORMAL) {
       // this may free some segments (but also take ownership of abandoned pages)
       _mi_segment_try_reclaim_abandoned(heap, false, &heap->tld->segments);
     }
     else if (
   #ifdef NDEBUG
-      collect == FORCE
+      collect == MI_FORCE
   #else
-      collect >= FORCE
+      collect >= MI_FORCE
   #endif
       && _mi_is_main_thread() && mi_heap_is_backing(heap))
     {
-      // the main thread is abandoned, try to free all abandoned segments.
+      // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
       // if all memory is freed by now, all segments should be freed.
       _mi_segment_try_reclaim_abandoned(heap, true, &heap->tld->segments);
     }
   }

   // if abandoning, mark all pages to no longer add to delayed_free
-  if (collect == ABANDON) {
-    //for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
-    //  _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
-    //}
+  if (collect == MI_ABANDON) {
     mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
   }

   // free thread delayed blocks.
-  // (if abandoning, after this there are no more local references into the pages.)
+  // (if abandoning, after this there are no more thread-delayed references into the pages.)
   _mi_heap_delayed_free(heap);

   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
-  mi_assert_internal( collect != ABANDON || mi_atomic_read_ptr(mi_block_t,&heap->thread_delayed_free) == NULL );
+  mi_assert_internal( collect != MI_ABANDON || mi_atomic_read_ptr(mi_block_t,&heap->thread_delayed_free) == NULL );

   // collect segment caches
-  if (collect >= FORCE) {
+  if (collect >= MI_FORCE) {
     _mi_segment_thread_collect(&heap->tld->segments);
   }

 #ifndef NDEBUG
   // collect regions
-  if (collect >= FORCE && _mi_is_main_thread()) {
+  if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
     _mi_mem_collect(&heap->tld->os);
   }
 #endif
 }

 void _mi_heap_collect_abandon(mi_heap_t* heap) {
-  mi_heap_collect_ex(heap, ABANDON);
+  mi_heap_collect_ex(heap, MI_ABANDON);
 }

 void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
-  mi_heap_collect_ex(heap, (force ? FORCE : NORMAL));
+  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
 }

 void mi_collect(bool force) mi_attr_noexcept {
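Note: the public entry points map the boolean `force` onto the internal enum, so callers never see mi_collect_t. A tiny usage sketch of the exported API (mi_malloc, mi_free, and mi_collect are mimalloc's public functions; the surrounding program is illustrative):

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(64);
  mi_free(p);
  mi_collect(false);   // maps to MI_NORMAL: routine collection
  mi_collect(true);    // maps to MI_FORCE: free as much as possible
  return 0;
}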
src/page.c
@@ -126,12 +126,12 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
   mi_thread_free_t tfreex;
   mi_delayed_t     old_delay;
   do {
-    tfree = mi_atomic_read(&page->xthread_free);
+    tfree = mi_atomic_read(&page->xthread_free); // note: must acquire as we can break this loop and not do a CAS
     tfreex = mi_tf_set_delayed(tfree, delay);
     old_delay = mi_tf_delayed(tfree);
     if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) {
       mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
-      tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
+      // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
     }
     else if (delay == old_delay) {
       break; // avoid atomic operation if already equal
@@ -139,7 +139,8 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
     else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) {
       break; // leave never-delayed flag set
     }
-  } while (!mi_atomic_cas_weak(&page->xthread_free, tfreex, tfree));
+  } while ((old_delay == MI_DELAYED_FREEING) ||
+           !mi_atomic_cas_weak(&page->xthread_free, tfreex, tfree));
 }

 /* -----------------------------------------------------------
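Note: this is the change the commit title refers to. When another thread holds the transient MI_DELAYED_FREEING state, the loop now yields and retries (the new `(old_delay == MI_DELAYED_FREEING) ||` term in the while condition) instead of staging a stale expected value so the CAS would deliberately busy-fail. A self-contained sketch of the same wait-then-CAS shape in portable C11 atomics (the states are hypothetical, sched_yield stands in for mi_atomic_yield, and mimalloc uses its own mi_atomic_* wrappers rather than this code):

#include <stdatomic.h>
#include <sched.h>    // POSIX sched_yield(), standing in for mi_atomic_yield()

enum { STATE_IDLE, STATE_BUSY, STATE_DONE };   // hypothetical states

static _Atomic int state = STATE_IDLE;

// Move `state` to `desired`, but never overwrite the transient STATE_BUSY
// phase owned by another thread: yield, re-read, and retry instead.
static void set_state(int desired) {
  int old;
  do {
    old = atomic_load(&state);
    if (old == STATE_BUSY) {
      sched_yield();          // let the owner finish; the loop re-reads `state`
    }
    else if (old == desired) {
      break;                  // already set; skip the atomic RMW entirely
    }
  } while (old == STATE_BUSY ||
           !atomic_compare_exchange_weak(&state, &old, desired));
}

int main(void) {
  set_state(STATE_DONE);
  return 0;
}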
src/segment.c
@@ -824,18 +824,18 @@ static void mi_segments_prepend_abandoned(mi_segment_t* first) {
   // first try if the abandoned list happens to be NULL
   if (mi_atomic_cas_ptr_weak(mi_segment_t, &abandoned, first, NULL)) return;

-  // if not, find the end of the list
+  // if not, find the end of the argument list
   mi_segment_t* last = first;
   while (last->abandoned_next != NULL) {
     last = last->abandoned_next;
   }

   // and atomically prepend
-  mi_segment_t* next;
+  mi_segment_t* anext;
   do {
-    next = mi_atomic_read_ptr_relaxed(mi_segment_t,&abandoned);
-    last->abandoned_next = next;
-  } while (!mi_atomic_cas_ptr_weak(mi_segment_t, &abandoned, first, next));
+    anext = mi_atomic_read_ptr_relaxed(mi_segment_t,&abandoned);
+    last->abandoned_next = anext;
+  } while (!mi_atomic_cas_ptr_weak(mi_segment_t, &abandoned, first, anext));
 }

 static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
@@ -897,14 +897,14 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
       atmost--;
     }
     // split the list and push back the remaining segments
-    mi_segment_t* next = last->abandoned_next;
+    mi_segment_t* anext = last->abandoned_next;
     last->abandoned_next = NULL;
-    mi_segments_prepend_abandoned(next);
+    mi_segments_prepend_abandoned(anext);
   }

   // reclaim all segments that we kept
   while(segment != NULL) {
-    mi_segment_t* const next = segment->abandoned_next; // save the next segment
+    mi_segment_t* const anext = segment->abandoned_next; // save the next segment

     // got it.
     mi_atomic_decrement(&abandoned_count);
@@ -943,7 +943,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
       }
     }
     mi_assert(segment->abandoned == 0);
-    if (segment->used == 0) { // due to page_clear
+    if (segment->used == 0) { // due to page_clear's
       mi_segment_free(segment,false,tld);
     }
     else {
@@ -954,7 +954,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
     }

     // go on
-    segment = next;
+    segment = anext;
   }

   return true;
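Note: the rename to `anext` distinguishes the abandoned-list link from the other `next` variables in these functions. The touched code is a classic lock-free splice: walk the privately owned chain to its tail once, then CAS the chain onto the shared head. A generic C11 sketch of that splice, using a simplified node type in place of mi_segment_t and standard atomics in place of mimalloc's wrappers:

#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s {
  struct node_s* next;
} node_t;

static _Atomic(node_t*) list_head;   // shared lock-free stack head

// Prepend the privately owned chain starting at `first` onto the shared list.
static void prepend_all(node_t* first) {
  if (first == NULL) return;

  // fast path: if the list happens to be empty, install `first` directly
  node_t* expected = NULL;
  if (atomic_compare_exchange_weak(&list_head, &expected, first)) return;

  // find the end of the argument chain (it is private, so plain loads suffice)
  node_t* last = first;
  while (last->next != NULL) last = last->next;

  // atomically prepend: point our tail at the current head, then swing the head
  node_t* head;
  do {
    head = atomic_load_explicit(&list_head, memory_order_relaxed);
    last->next = head;
  } while (!atomic_compare_exchange_weak(&list_head, &head, first));
}

The relaxed read is safe here because the CAS re-validates the head: a failed CAS simply retries with the fresh value.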
test/test-stress.c
@@ -277,12 +277,12 @@ static void run_os_threads(size_t nthreads) {
 #ifdef __cplusplus
 #include <atomic>
 static void* atomic_exchange_ptr(volatile void** p, void* newval) {
-  return std::atomic_exchange_explicit((volatile std::atomic<void*>*)p, newval, std::memory_order_acquire);
+  return std::atomic_exchange((volatile std::atomic<void*>*)p, newval);
 }
 #else
 #include <stdatomic.h>
 static void* atomic_exchange_ptr(volatile void** p, void* newval) {
-  return atomic_exchange_explicit((volatile _Atomic(void*)*)p, newval, memory_order_acquire);
+  return atomic_exchange((volatile _Atomic(void*)*)p, newval);
 }
 #endif
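Note: dropping the explicit `memory_order_acquire` does not weaken the test helper. Plain `atomic_exchange` is defined as `atomic_exchange_explicit` with `memory_order_seq_cst`, the strongest ordering, so the simplified call is if anything stronger. For reference, the two C11 forms below are equivalent (the `slot` variable is illustrative):

#include <stdatomic.h>

static _Atomic(void*) slot;

void* swap(void* newval) {
  return atomic_exchange(&slot, newval);
  // equivalent to:
  // return atomic_exchange_explicit(&slot, newval, memory_order_seq_cst);
}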