merge from dev

daanx 2024-03-03 17:43:52 -08:00
commit abb8eab9b2
4 changed files with 38 additions and 20 deletions


@@ -132,7 +132,7 @@ mi_decl_cache_align static const mi_tld_t tld_empty = {
0,
false,
NULL, NULL,
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments
{ 0, tld_empty_stats }, // os
{ MI_STATS_NULL } // stats
};
@@ -149,7 +149,7 @@ extern mi_heap_t _mi_heap_main;
static mi_tld_t tld_main = {
0, false,
&_mi_heap_main, & _mi_heap_main,
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments
{ 0, &tld_main.stats }, // os
{ MI_STATS_NULL } // stats
};
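The extra 0 added in both initializers above fills in a new field of mi_segments_tld_t, the per-thread segment bookkeeping record; the later hunks show that it is the reclaim_count counter. A sketch of the assumed field order, inferred from these initializers rather than taken from the real header:

typedef struct mi_segments_tld_s {
  mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // MI_SEGMENT_SPAN_QUEUES_EMPTY
  size_t          count;          // current number of segments
  size_t          peak_count;     // peak number of segments
  size_t          current_size;   // current size of all segments
  size_t          peak_size;      // peak size of all segments
  size_t          reclaim_count;  // reclaimed segments (the newly added 0)
  mi_stats_t*     stats;          // tld_empty_stats / &tld_main.stats
  mi_os_tld_t*    os;             // tld_empty_os / &tld_main.os
} mi_segments_tld_t;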
@@ -248,7 +248,7 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
is_zero = memid.initially_zero;
}
}
if (td != NULL && !is_zero) {
_mi_memzero_aligned(td, offsetof(mi_thread_data_t,memid));
}
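Two details of this hunk are easy to miss: the block may come back from the OS already zeroed (is_zero), and even when it does not, only the bytes up to the memid field are cleared, so the allocation metadata needed to free the block later survives. A self-contained sketch of that offsetof idiom, with hypothetical field names:

#include <stddef.h>
#include <string.h>

typedef struct thread_data_s {
  void* heap;    // fields that must start out zero...
  void* tld;
  int   memid;   // ...while the trailing memid records how this block
} thread_data_t; // itself was allocated, so it must be preserved

static void thread_data_clear(thread_data_t* td) {
  // zero everything up to, but not including, memid
  memset(td, 0, offsetof(thread_data_t, memid));
}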
@@ -427,23 +427,23 @@ void mi_thread_done(void) mi_attr_noexcept {
_mi_thread_done(NULL);
}
void _mi_thread_done(mi_heap_t* heap)
{
// calling with NULL implies using the default heap
if (heap == NULL) {
heap = mi_prim_get_default_heap();
if (heap == NULL) return;
}
// prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
if (!mi_heap_is_initialized(heap)) {
return;
}
// adjust stats
mi_atomic_decrement_relaxed(&thread_count);
_mi_stat_decrease(&_mi_stats_main.threads, 1);
// check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
if (heap->thread_id != _mi_thread_id()) return;
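The mi_heap_is_initialized guard above is what breaks the re-entrancy cycle mentioned in the comment: tearing a heap down can call back into _mi_thread_done through heap_done/heap_set_default_direct (issue #699). A minimal, compilable model of the pattern, with illustrative names rather than the real mimalloc internals:

#include <stdbool.h>

typedef struct my_heap_s { bool initialized; } my_heap_t;

static void thread_done(my_heap_t* heap);

static void heap_teardown(my_heap_t* heap) {
  heap->initialized = false; // flip the flag before any callbacks fire
  thread_done(heap);         // simulate the re-entrant call
}

static void thread_done(my_heap_t* heap) {
  if (!heap->initialized) return; // second entry: nothing left to do
  heap_teardown(heap);
}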
@@ -465,7 +465,7 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
// ensure the default heap is passed to `_mi_thread_done`
// setting to a non-NULL value also ensures `mi_thread_done` is called.
_mi_prim_thread_associate_default_heap(heap);
}
@@ -625,7 +625,7 @@ static void mi_cdecl mi_process_done(void) {
// release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
_mi_prim_thread_done_auto_done();
#ifndef MI_SKIP_COLLECT_ON_EXIT
#if (MI_DEBUG || !defined(MI_SHARED_LIB))
// free all memory if possible on process exit. This is not needed for a stand-alone process
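For context, the preprocessor lines above gate a forced collection on process exit: a stand-alone process can simply let the OS reclaim everything, while a debug build, or mimalloc statically linked into a library that may be unloaded and reloaded, should free memory explicitly. A hedged sketch of the assumed shape inside mi_process_done (mi_collect is the public collection entry point; the exact lines may differ):

#ifndef MI_SKIP_COLLECT_ON_EXIT
  #if (MI_DEBUG || !defined(MI_SHARED_LIB))
  mi_collect(true /* force */);
  #endif
#endif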


@@ -380,6 +380,10 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
segment->thread_id = 0;
_mi_segment_map_freed_at(segment);
mi_segments_track_size(-((long)mi_segment_size(segment)),tld);
if (segment->was_reclaimed) {
tld->reclaim_count--;
segment->was_reclaimed = false;
}
if (MI_SECURE>0) {
// _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set
// unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted
@@ -1075,7 +1079,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
}
// perform delayed decommits (forcing is much slower on mstress)
// Only abandoned segments in arena memory can be reclaimed without a free
// so if a segment is not from an arena we force purge here to be conservative.
const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge);
mi_segment_try_purge(segment, force_purge, tld->stats);
@@ -1085,6 +1089,10 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
segment->thread_id = 0;
segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
if (segment->was_reclaimed) {
tld->reclaim_count--;
segment->was_reclaimed = false;
}
_mi_arena_segment_mark_abandoned(segment);
}
@@ -1171,6 +1179,8 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id());
mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
segment->abandoned_visits = 0;
segment->was_reclaimed = true;
tld->reclaim_count++;
mi_segments_track_size((long)mi_segment_size(segment), tld);
mi_assert_internal(segment->next == NULL);
_mi_stat_decrease(&tld->stats->segments_abandoned, 1);
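Together with the two decrements added earlier (in mi_segment_os_free and mi_segment_abandon), this increment maintains a per-thread invariant: reclaim_count is the number of currently owned segments that were adopted from the abandoned list rather than freshly allocated. A compilable model of the accounting, with deliberately simplified types:

#include <stdbool.h>
#include <stddef.h>

typedef struct seg_s { bool was_reclaimed; } seg_t;
typedef struct tld_s { size_t count; size_t reclaim_count; } tld_t;

static void on_reclaim(seg_t* s, tld_t* tld) { // cf. mi_segment_reclaim
  s->was_reclaimed = true;
  tld->reclaim_count++;
  tld->count++;                                // cf. mi_segments_track_size
}

static void on_release(seg_t* s, tld_t* tld) { // cf. os_free / abandon
  if (s->was_reclaimed) { s->was_reclaimed = false; tld->reclaim_count--; }
  tld->count--;
}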
@@ -1229,6 +1239,9 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
// attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`)
bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
// don't reclaim more from a free than half the current segments
// this is to prevent a pure free-ing thread from ending up owning too many segments
if (heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) return false;
if (_mi_arena_segment_clear_abandoned(segment)) { // atomically unabandon
mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
mi_assert_internal(res == segment);
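A worked example of the new throttle, in the same simplified terms: a thread owning ten segments keeps adopting abandoned segments from frees until half of what it owns came from reclaiming, after which the attempt is declined.

#include <assert.h>
#include <stddef.h>

static int may_reclaim(size_t reclaim_count, size_t count) {
  return reclaim_count * 2 <= count; // negation of the early return above
}

int main(void) {
  assert( may_reclaim(5, 10)); // exactly half: still allowed
  assert(!may_reclaim(6, 10)); // over half: _mi_segment_attempt_reclaim
  return 0;                    // returns false without reclaiming
}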