merge from dev-steal

daanx 2024-11-18 15:08:17 -08:00
commit ea3ac0750e
5 changed files with 24 additions and 21 deletions

@@ -213,9 +213,7 @@
</ClCompile>
<ClCompile Include="..\..\src\alloc-posix.c" />
<ClCompile Include="..\..\src\alloc.c" />
<ClCompile Include="..\..\src\arena-abandoned.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
<ClCompile Include="..\..\src\arena-abandon.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
</ClCompile>

@@ -61,7 +61,7 @@
<ClCompile Include="..\..\src\free.c">
<Filter>Sources</Filter>
</ClCompile>
<ClCompile Include="..\..\src\arena-abandoned.c">
<ClCompile Include="..\..\src\arena-abandon.c">
<Filter>Sources</Filter>
</ClCompile>
</ItemGroup>

@@ -666,9 +666,9 @@ static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
}
else {
// failed size criteria, rewind count (but don't write to an empty heap)
if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
return false;
}
}
}
mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
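For context, the hunk above tweaks the rewind path of the guarded-allocation sampler. Below is a minimal standalone sketch of a countdown sampler with the same rewind behaviour; the sample_state_t struct and use_guarded helper are simplified stand-ins assumed for illustration, not mimalloc's actual mi_heap_t fields or API.

  #include <stdbool.h>
  #include <stddef.h>

  // Hypothetical, simplified sampling state (stand-in for the heap fields).
  typedef struct sample_state_s {
    size_t sample_rate;   // guard roughly one in every `sample_rate` allocations (0 = disabled)
    size_t sample_count;  // countdown; the allocation where it reaches 1 is the sample point
    size_t min_size;      // size criteria for guarded allocations
    size_t max_size;
  } sample_state_t;

  // Returns true if this allocation should be guarded.
  static bool use_guarded(sample_state_t* s, size_t size) {
    if (s->sample_rate == 0) return false;   // sampling disabled
    if (s->sample_count > 1) {               // not yet at the sample point
      s->sample_count--;
      return false;
    }
    if (size >= s->min_size && size <= s->max_size) {
      s->sample_count = s->sample_rate;      // hit: restart the full countdown
      return true;
    }
    // failed the size criteria: rewind to 1 so the next allocation is sampled again,
    // mirroring the `guarded_sample_count = 1` line in the hunk above
    s->sample_count = 1;
    return false;
  }

Initialized with sample_count equal to sample_rate, this guards about one in every sample_rate allocations that fall inside the size window.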

@@ -358,7 +358,7 @@ void _mi_page_unfull(mi_page_t* page) {
mi_page_set_in_full(page, false); // to get the right queue
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
mi_page_set_in_full(page, true);
mi_page_queue_enqueue_from_full(pq, pqfull, page);
}
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
@@ -404,16 +404,19 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
_mi_segment_page_abandon(page,segments_tld);
}
// force abandon a page; this is safe to call
// force abandon a page
void _mi_page_force_abandon(mi_page_t* page) {
mi_heap_t* heap = mi_page_heap(page);
// mark page as not using delayed free
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
// ensure this page is no longer in the heap delayed free list
_mi_heap_delayed_free_all(heap);
if (page->block_size == 0) return; // it may have been freed now
// TODO: can we still access the page as it may have been
// freed and the memory decommitted?
// A way around this is to explicitly unlink this page from
// the heap delayed free list.
if (page->capacity == 0) return; // it may have been freed now
// and now unlink it from the page queue and abandon (or free)
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
@@ -767,7 +770,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
#if defined(MI_MAX_CANDIDATE_SEARCH)
// search up to N pages for a best candidate
// is the local free list non-empty?
const bool immediate_available = mi_page_immediate_available(page);
@@ -782,9 +785,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
// we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages)
if (page_candidate == NULL) {
page_candidate = page;
candidate_count = 0;
}
else if (!mi_page_is_expandable(page) && page->capacity < page_candidate->capacity) {
else if (!mi_page_is_expandable(page) && page->used >= page_candidate->used) {
page_candidate = page;
}
// if we find a non-expandable candidate, or searched for N pages, return with the best candidate
@@ -829,7 +832,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
}
}
else {
mi_assert(pq->first == page);
// mi_assert(pq->first == page);
page->retire_expire = 0;
}
mi_assert_internal(page == NULL || mi_page_immediate_available(page));
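The candidate-search hunks above flip the tie-break in mi_page_queue_find_free_ex from lowest capacity to highest usage. A minimal sketch of that selection loop over a simplified page list follows; the page_t struct, page_is_expandable helper, and the search bound are illustrative assumptions, not mimalloc's real types.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  #define MAX_CANDIDATE_SEARCH  8   // assumed bound, in the spirit of MI_MAX_CANDIDATE_SEARCH

  typedef struct page_s {
    uint16_t used;       // blocks currently allocated from this page
    uint16_t capacity;   // blocks committed so far
    uint16_t reserved;   // blocks reserved in total
    struct page_s* next;
  } page_t;

  static bool page_is_expandable(const page_t* p) {
    return (p->capacity < p->reserved);   // can still commit more blocks
  }

  // Search up to MAX_CANDIDATE_SEARCH pages for a best candidate, preferring
  // non-expandable pages with high usage (per the comment in the hunk above:
  // this reduces commit and increases the chance of freeing up pages).
  static page_t* find_candidate(page_t* head) {
    page_t* candidate = NULL;
    size_t visited = 0;
    for (page_t* p = head; p != NULL && visited < MAX_CANDIDATE_SEARCH; p = p->next, visited++) {
      if (candidate == NULL) {
        candidate = p;
      }
      else if (!page_is_expandable(p) && p->used >= candidate->used) {
        candidate = p;   // higher usage wins (the pre-commit code compared capacity instead)
      }
    }
    return candidate;
  }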

@@ -1263,6 +1263,7 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arena's
const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count
// don't reclaim more from a `free` call than half the current segments
// this is to prevent a pure free-ing thread to start owning too many segments
// (but not for out-of-arena segments as that is the main way to be reclaimed for those)
@@ -1334,7 +1335,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
break;
}
else if (segment->abandoned_visits > 3 && is_suitable && !mi_option_is_enabled(mi_option_target_segments_per_thread)) {
// always reclaim on 3rd visit to limit the abandoned queue length.
// always reclaim on 3rd visit to limit the abandoned segment count.
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
else {
@@ -1395,14 +1396,14 @@ static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* t
mi_assert_internal(segment->used > 0);
if (segment->used == segment->abandoned+1) {
// the last page.. abandon and return as the segment will be abandoned after this
// and we should no longer access it.
_mi_page_force_abandon(page);
return;
}
else {
// abandon and continue
_mi_page_force_abandon(page);
// it might be freed, reset the slice (note: relies on coalesce setting the slice_offset)
slice = mi_slice_first(slice);
}
}
@@ -1422,14 +1423,14 @@ static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* t
}
// try abandon segments.
// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread,0,1024);
// we call this when we are about to add a fresh segment so we should be under our target segment count.
if (target == 0 || tld->count < target) return;
const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75%
// todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
for (int i = 0; i < 16 && tld->count >= min_target; i++) {
@@ -1442,10 +1443,11 @@ static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
}
mi_segment_t* segment = _mi_page_segment(page);
mi_segment_force_abandon(segment, tld);
mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned
}
}
/* -----------------------------------------------------------
Reclaim or allocate
----------------------------------------------------------- */
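As a worked illustration of the thresholds in mi_segments_try_abandon above: abandonment only starts once a thread is at or over its segment target, and then proceeds, at most 16 segments per pass, until the count drops below 75% of the target (for targets above 4). The sketch below simulates just those loop bounds; the function name and the bare counter are assumptions for illustration, not the real per-thread state.

  #include <stddef.h>

  // Simulate the loop bounds of the try-abandon pass: start only at or above
  // `target`, abandon at most 16 segments, stop once below 75% of the target.
  static size_t simulate_try_abandon(size_t target, size_t count) {
    if (target == 0 || count < target) return count;   // under target: nothing to do
    const size_t min_target = (target > 4 ? (target * 3) / 4 : target);  // 75%
    for (int i = 0; i < 16 && count >= min_target; i++) {
      count--;   // stand-in for force-abandoning one segment owned by this thread
    }
    return count;
  }

For example, with target = 8 (so min_target = 6) and 10 owned segments, the pass abandons 5 segments and stops at 5, just under the 75% mark.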