mirror of https://github.com/microsoft/mimalloc.git
(synced 2025-05-06 15:29:31 +03:00)

commit ea3ac0750e: merge from dev-steal
5 changed files with 24 additions and 21 deletions

@@ -213,9 +213,7 @@
 </ClCompile>
 <ClCompile Include="..\..\src\alloc-posix.c" />
 <ClCompile Include="..\..\src\alloc.c" />
-<ClCompile Include="..\..\src\arena-abandoned.c">
-  <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
-  <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+<ClCompile Include="..\..\src\arena-abandon.c">
   <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
   <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
 </ClCompile>

@@ -61,7 +61,7 @@
 <ClCompile Include="..\..\src\free.c">
   <Filter>Sources</Filter>
 </ClCompile>
-<ClCompile Include="..\..\src\arena-abandoned.c">
+<ClCompile Include="..\..\src\arena-abandon.c">
   <Filter>Sources</Filter>
 </ClCompile>
 </ItemGroup>

src/page.c (13 changed lines)

@@ -404,8 +404,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   _mi_segment_page_abandon(page,segments_tld);
 }

-
-// force abandon a page; this is safe to call
+// force abandon a page
 void _mi_page_force_abandon(mi_page_t* page) {
   mi_heap_t* heap = mi_page_heap(page);
   // mark page as not using delayed free

@@ -413,7 +412,11 @@ void _mi_page_force_abandon(mi_page_t* page) {

   // ensure this page is no longer in the heap delayed free list
   _mi_heap_delayed_free_all(heap);
-  if (page->block_size == 0) return; // it may have been freed now
+  // TODO: can we still access the page as it may have been
+  // freed and the memory decommitted?
+  // A way around this is to explicitly unlink this page from
+  // the heap delayed free list.
+  if (page->capacity == 0) return; // it may have been freed now

   // and now unlink it from the page queue and abandon (or free)
   mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);

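The rewritten guard follows a drain-then-recheck pattern: flushing the heap's delayed frees can release the very page being force-abandoned, so a cleared capacity is then used as a liveness check before any further work (the TODO's question, whether the page memory could already be decommitted, is not settled by the check itself). A minimal standalone sketch of that pattern, with simplified stand-in types rather than the real mimalloc definitions:

#include <stddef.h>
#include <stdbool.h>

typedef struct page_s {
  size_t used;       // blocks currently handed out
  size_t capacity;   // committed blocks; treated as 0 once the page is released
} page_t;

// stand-in for _mi_heap_delayed_free_all: flushing pending frees can release the page
static void drain_delayed_frees(page_t* page) {
  if (page->used == 0) { page->capacity = 0; }   // page became empty and was freed
}

// force-abandon sketch: drain first, then re-check before touching page metadata
static bool force_abandon(page_t* page) {
  drain_delayed_frees(page);
  if (page->capacity == 0) return false;  // freed in the meantime; nothing left to abandon
  // ... unlink the page from its queue and abandon (or free) it ...
  return true;
}
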
@@ -784,7 +787,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
       page_candidate = page;
       candidate_count = 0;
     }
-    else if (!mi_page_is_expandable(page) && page->capacity < page_candidate->capacity) {
+    else if (!mi_page_is_expandable(page) && page->used >= page_candidate->used) {
       page_candidate = page;
     }
     // if we find a non-expandable candidate, or searched for N pages, return with the best candidate

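The changed comparison switches the tie-break between candidates from smallest capacity to most blocks in use, so allocation keeps landing on fuller pages while emptier ones get a chance to drain and be freed. A standalone sketch of the selection loop under that heuristic, using simplified stand-in fields rather than the real mi_page_t:

#include <stddef.h>
#include <stdbool.h>

typedef struct page_s {
  size_t used;        // blocks handed out
  size_t capacity;    // blocks committed so far
  size_t reserved;    // blocks the page could grow to
  struct page_s* next;
} page_t;

static bool page_is_expandable(const page_t* p) {
  return (p->capacity < p->reserved);   // can still commit more blocks
}

// walk a page queue and pick a candidate, mirroring the `used >=` preference
static page_t* pick_candidate(page_t* first) {
  page_t* candidate = NULL;
  for (page_t* page = first; page != NULL; page = page->next) {
    if (candidate == NULL) {
      candidate = page;
    }
    else if (!page_is_expandable(page) && page->used >= candidate->used) {
      candidate = page;   // prefer the fuller, non-expandable page
    }
  }
  return candidate;
}

Given two non-expandable pages, the one with more live blocks now wins the candidacy, which concentrates new allocations and leaves nearly empty pages free to retire.
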
@@ -829,7 +832,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     }
   }
   else {
-    mi_assert(pq->first == page);
+    // mi_assert(pq->first == page);
     page->retire_expire = 0;
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));

@@ -1263,6 +1263,7 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
   if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arena's
   const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
   if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count
+
   // don't reclaim more from a `free` call than half the current segments
   // this is to prevent a pure free-ing thread to start owning too many segments
   // (but not for out-of-arena segments as that is the main way to be reclaimed for those)

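The lines around mi_option_target_segments_per_thread cap how many segments a thread will adopt through reclaiming: once it already owns at least the configured target, an abandoned segment is left for another thread. A reduced sketch of just that guard, with the option value and the owned-segment count passed in as plain values (the function name is illustrative, not part of mimalloc):

#include <stddef.h>
#include <stdbool.h>

// Returns true when reclaiming one more abandoned segment is still allowed.
// `target` mirrors mi_option_target_segments_per_thread (0 disables the cap);
// `owned` is the number of segments the current thread already owns.
static bool reclaim_allowed(long target, size_t owned) {
  if (target > 0 && (size_t)target <= owned) {
    return false;   // at or above the per-thread target: don't adopt more
  }
  return true;
}

For example, with a target of 8, a thread that already owns 8 segments stops adopting abandoned ones; a target of 0 keeps the old unlimited behaviour.
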
@@ -1334,7 +1335,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
       break;
     }
     else if (segment->abandoned_visits > 3 && is_suitable && !mi_option_is_enabled(mi_option_target_segments_per_thread)) {
-      // always reclaim on 3rd visit to limit the abandoned queue length.
+      // always reclaim on 3rd visit to limit the abandoned segment count.
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
     else {

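The reworded comment only clarifies what the branch bounds (the number of abandoned segments rather than a queue length); the policy itself is unchanged: a suitable segment that has been visited more than three times while abandoned gets reclaimed outright, unless a per-thread segment target is configured. A compact sketch of that visit-count policy, with simplified stand-in fields and parameters:

#include <stddef.h>
#include <stdbool.h>

typedef struct segment_s {
  size_t abandoned_visits;  // how often this abandoned segment was inspected and skipped
} segment_t;

// Decide whether to reclaim an abandoned segment on this visit.
// `has_free_space` stands for "it can satisfy the current allocation";
// `target_enabled` mirrors mi_option_target_segments_per_thread being set.
static bool should_reclaim_on_visit(segment_t* seg, bool is_suitable,
                                    bool has_free_space, bool target_enabled) {
  seg->abandoned_visits++;
  if (has_free_space && is_suitable) return true;   // useful right now: reclaim it
  if (seg->abandoned_visits > 3 && is_suitable && !target_enabled) {
    return true;   // visited too often: reclaim anyway to bound the abandoned count
  }
  return false;    // leave it abandoned for another thread (or a later visit)
}
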
@@ -1446,6 +1447,7 @@ static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
   }
 }

+
 /* -----------------------------------------------------------
    Reclaim or allocate
 ----------------------------------------------------------- */