mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-01 17:24:38 +03:00)
add segment_collect for forced heap_collect
commit 2b7530e183 (parent 32e065bb32)
3 changed files with 8 additions and 13 deletions
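This commit moves segment collection into the per-page path of a forced heap collect: _mi_segment_collect gains the segment to collect as its first parameter, mi_heap_page_collect now calls it for each visited page when collect == MI_FORCE, and the single end-of-collection call in mi_heap_collect_ex is dropped, along with the debug-only assertions in the function body. The hunks below carry no file headers; the three changed files are presumably include/mimalloc/internal.h, src/heap.c, and src/segment.c.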
@@ -155,7 +155,7 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi
 void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
 #endif
 
-void _mi_segment_collect(bool force, mi_segments_tld_t* tld);
+void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld);
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
 void _mi_abandoned_await_readers(void);
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
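In the declaration, _mi_segment_collect now takes the specific segment to collect, matching the new per-page call site in the heap collection code below.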
@@ -104,6 +104,10 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
     // still used blocks but the thread is done; abandon the page
     _mi_page_abandon(page, pq);
   }
+  if (collect == MI_FORCE) {
+    mi_segment_t* segment = _mi_page_segment(page);
+    _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
+  }
   return true; // don't break
 }
 
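For context, the public entry point that reaches this path is mi_collect (or mi_heap_collect) with force set, which maps to collect == MI_FORCE internally. A minimal usage sketch, not part of the commit:

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(64);
  mi_free(p);
  // A forced collect visits every page via mi_heap_page_collect;
  // after this commit that visitor also calls _mi_segment_collect
  // on each page's segment.
  mi_collect(true /* force */);
  return 0;
}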
@@ -153,9 +157,6 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
 
-  // collect segment and thread caches
-  _mi_segment_collect(force, &heap->tld->segments);
-
   // if forced, collect thread data cache on program-exit (or shared library unload)
   if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
     _mi_thread_data_collect(); // collect thread data cache
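With the per-page call in place above, the single _mi_segment_collect call at the end of mi_heap_collect_ex is redundant and is removed.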
@@ -515,15 +515,9 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   _mi_arena_free(segment, segment_size, committed_size, segment->memid, tld->stats);
 }
 
-// called by threads that are terminating to free cached segments
-void _mi_segment_collect(bool force, mi_segments_tld_t* tld) {
-  MI_UNUSED(force); MI_UNUSED(tld);
-  #if MI_DEBUG>=2
-  if (!_mi_is_main_thread()) {
-    mi_assert_internal(tld->pages_purge.first == NULL);
-    mi_assert_internal(tld->pages_purge.last == NULL);
-  }
-  #endif
-}
+// called from `heap_collect`. This can be called per-page.
+void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
+  MI_UNUSED(segment); MI_UNUSED(force); MI_UNUSED(tld);
+}
 
 
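The removed MI_DEBUG>=2 block asserted that on a terminating non-main thread the pages_purge queue was empty; that held when the function ran once after all pages had been freed, but presumably no longer holds now that it can run per-page in the middle of a forced collect, so the body keeps only the MI_UNUSED markers.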