Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-04 22:49:32 +03:00)
collect arenas even if not on the main thread (issue #878)
parent bb1fafa1bb
commit 70eb7fb390

2 changed files with 10 additions and 6 deletions
src/arena.c (2 changes)

@@ -709,7 +709,7 @@ static void mi_arenas_unsafe_destroy(void) {
 
 // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
 void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
-  mi_arenas_try_purge(force_purge, true /* visit all */, stats);
+  mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
 }
 
 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
src/heap.c (14 changes)
@@ -121,6 +121,8 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   _mi_deferred_free(heap, collect >= MI_FORCE);
 
+  const bool force = (collect >= MI_FORCE);
+
   // note: never reclaim on collect but leave it to threads that need storage to reclaim
   if (
   #ifdef NDEBUG
@@ -145,22 +147,24 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   _mi_heap_delayed_free_all(heap);
 
   // collect retired pages
-  _mi_heap_collect_retired(heap, collect >= MI_FORCE);
+  _mi_heap_collect_retired(heap, force);
 
   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
 
   // collect segment and thread caches
-  if (collect >= MI_FORCE) {
+  if (force) {
     _mi_segment_thread_collect(&heap->tld->segments);
   }
 
-  // collect arenas on program-exit (or shared library unload)
-  if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
+  // if forced, collect thread data cache on program-exit (or shared library unload)
+  if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
     _mi_thread_data_collect(); // collect thread data cache
-    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
   }
+
+  // collect arenas
+  _mi_arena_collect(force /* force purge? */, &heap->tld->stats);
 }
 
 void _mi_heap_collect_abandon(mi_heap_t* heap) {
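For context, here is a minimal sketch (not part of the commit) of what this change enables at the user level: a forced collection issued from a worker thread now also purges arena memory via _mi_arena_collect, whereas before the commit arena collection only ran for the main thread's backing heap. The sketch assumes only mimalloc's public API (mi_malloc, mi_free, mi_collect) and POSIX threads; the 8 MiB size is an arbitrary example.

// Sketch only: build with mimalloc installed and linked (e.g. -lmimalloc -lpthread).
#include <mimalloc.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static void* worker(void* arg) {
  (void)arg;
  // Allocate and free a large block so some arena memory becomes purgeable.
  void* p = mi_malloc(8 * 1024 * 1024);
  mi_free(p);
  // Forced collection: after this commit, mi_heap_collect_ex also calls
  // _mi_arena_collect(force, ...) here, even though this is not the main thread.
  mi_collect(true);
  return NULL;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  pthread_join(t, NULL);
  return 0;
}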