diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index 160f1436..dddab777 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -116,7 +116,7 @@
true
Default
../../include
- MI_DEBUG=4;MI_GUARDED=1;%(PreprocessorDefinitions);
+ MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
CompileAsCpp
false
stdcpp20
diff --git a/include/mimalloc.h b/include/mimalloc.h
index e3fecdf1..5916228b 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -148,6 +148,7 @@ typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
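+// Collect memory and try to reduce the segments owned by this thread to roughly `target_thread_owned` bytes
+// (0 falls back to the `target_segments_per_thread` option).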
+mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
mi_decl_export int mi_version(void) mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
@@ -377,6 +378,7 @@ typedef enum mi_option_e {
mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
+ mi_option_target_segments_per_thread, // experimental: abandon segments to stay below this per-thread count, 0 to disable (=0)
_mi_option_last,
// legacy option names
mi_option_large_os_pages = mi_option_allow_large_os_pages,
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 61cf28b2..716386d2 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -178,6 +178,8 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; /
void _mi_page_unfull(mi_page_t* page);
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
+void _mi_page_force_abandon(mi_page_t* page);
+
void _mi_heap_delayed_free_all(mi_heap_t* heap);
bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
@@ -625,9 +627,9 @@ static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
}
else {
// failed size criteria, rewind count (but don't write to an empty heap)
- if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
+ if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
return false;
- }
+ }
}
mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index f7bca137..44074450 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -416,7 +416,8 @@ typedef struct mi_segment_s {
// segment fields
struct mi_segment_s* next; // must be the first (non-constant) segment field -- see `segment.c:segment_init`
struct mi_segment_s* prev;
- bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation)
+ bool was_reclaimed; // true if it was reclaimed (used to limit reclaim-on-free)
+ bool dont_free; // can be temporarily true to ensure the segment is not freed
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t abandoned_visits; // count how often this segment is visited for reclaiming (to force reclaim if it is too long)
diff --git a/src/options.c b/src/options.c
index 6635f661..d565e269 100644
--- a/src/options.c
+++ b/src/options.c
@@ -65,6 +65,7 @@ typedef struct mi_option_desc_s {
#define MI_DEFAULT_ARENA_EAGER_COMMIT 2
#endif
+// in KiB
#ifndef MI_DEFAULT_ARENA_RESERVE
#if (MI_INTPTR_SIZE>4)
#define MI_DEFAULT_ARENA_RESERVE 1024L*1024L
@@ -156,6 +157,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ MI_DEFAULT_GUARDED_SAMPLE_RATE,
UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
{ 0, UNINIT, MI_OPTION(guarded_sample_seed)},
+ { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
};
static void mi_option_init(mi_option_desc_t* desc);
diff --git a/src/page-queue.c b/src/page-queue.c
index 02a8008d..9796f3dc 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -259,8 +259,16 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
heap->page_count++;
}
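+
+// Move a page to the front of its queue (no-op if it is already the first page).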
+static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
+ mi_assert_internal(mi_page_heap(page) == heap);
+ mi_assert_internal(mi_page_queue_contains(queue, page));
+ if (queue->first == page) return;
+ mi_page_queue_remove(queue, page);
+ mi_page_queue_push(heap, queue, page);
+ mi_assert_internal(queue->first == page);
+}
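+
+// Move a page from queue `from` to queue `to`: enqueue it at the end of `to`, or, when `enqueue_at_end`
+// is false, insert it at 2nd place right after the current first page (or at the head if `to` is empty).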
-static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(from, page));
mi_assert_expensive(!mi_page_queue_contains(to, page));
@@ -273,6 +281,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
(mi_page_is_huge(page) && mi_page_queue_is_full(to)));
mi_heap_t* heap = mi_page_heap(page);
+
+ // delete from `from`
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == from->last) from->last = page->prev;
@@ -283,22 +293,59 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_heap_queue_first_update(heap, from);
}
- page->prev = to->last;
- page->next = NULL;
- if (to->last != NULL) {
- mi_assert_internal(heap == mi_page_heap(to->last));
- to->last->next = page;
- to->last = page;
+ // insert into `to`
+ if (enqueue_at_end) {
+ // enqueue at the end
+ page->prev = to->last;
+ page->next = NULL;
+ if (to->last != NULL) {
+ mi_assert_internal(heap == mi_page_heap(to->last));
+ to->last->next = page;
+ to->last = page;
+ }
+ else {
+ to->first = page;
+ to->last = page;
+ mi_heap_queue_first_update(heap, to);
+ }
}
else {
- to->first = page;
- to->last = page;
- mi_heap_queue_first_update(heap, to);
+ if (to->first != NULL) {
+ // enqueue at 2nd place
+ mi_assert_internal(heap == mi_page_heap(to->first));
+ mi_page_t* next = to->first->next;
+ page->prev = to->first;
+ page->next = next;
+ to->first->next = page;
+ if (next != NULL) {
+ next->prev = page;
+ }
+ else {
+ to->last = page;
+ }
+ }
+ else {
+ // enqueue at the head (singleton list)
+ page->prev = NULL;
+ page->next = NULL;
+ to->first = page;
+ to->last = page;
+ mi_heap_queue_first_update(heap, to);
+ }
}
mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
+static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+ mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end */, page);
+}
+
+static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+ // note: we could insert at the front to increase reuse, but it slows down certain benchmarks (like `alloc-test`)
+ mi_page_queue_enqueue_from_ex(to, from, false /* enqueue at the end of the `to` queue? */, page);
+}
+
// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
mi_assert_internal(mi_heap_contains_queue(heap,pq));
diff --git a/src/page.c b/src/page.c
index d6dcfb15..c681d6d0 100644
--- a/src/page.c
+++ b/src/page.c
@@ -357,7 +357,7 @@ void _mi_page_unfull(mi_page_t* page) {
mi_page_set_in_full(page, false); // to get the right queue
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
mi_page_set_in_full(page, true);
- mi_page_queue_enqueue_from(pq, pqfull, page);
+ mi_page_queue_enqueue_from_full(pq, pqfull, page);
}
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
@@ -403,6 +403,27 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
_mi_segment_page_abandon(page,segments_tld);
}
+// force abandon a page
+void _mi_page_force_abandon(mi_page_t* page) {
+ mi_heap_t* heap = mi_page_heap(page);
+ // mark page as not using delayed free
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+
+ // ensure this page is no longer in the heap delayed free list
+ _mi_heap_delayed_free_all(heap);
+ // We can still access the page meta-info even if it is freed as we ensure
+ // in `mi_segment_force_abandon` that the segment is not freed (yet)
+ if (page->capacity == 0) return; // it may have been freed now
+
+ // and now unlink it from the page queue and abandon (or free)
+ mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+ if (mi_page_all_free(page)) {
+ _mi_page_free(page, pq, false);
+ }
+ else {
+ _mi_page_abandon(page, pq);
+ }
+}
// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
@@ -448,6 +469,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
+ #if MI_RETIRE_CYCLES > 0
const size_t bsize = mi_page_block_size(page);
if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
if (pq->last==page && pq->first==page) { // the only page in the queue?
@@ -463,7 +485,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
return; // don't free after all
}
}
-
+ #endif
_mi_page_free(page, pq, false);
}
@@ -709,6 +731,17 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
Find pages with free blocks
-------------------------------------------------------------*/
+// search at most N pages for the best next page to use (the search is often cut short if immediately available blocks are found)
+#define MI_MAX_CANDIDATE_SEARCH (8)
+
+// is the page not yet used up to its reserved space?
+static bool mi_page_is_expandable(const mi_page_t* page) {
+ mi_assert_internal(page != NULL);
+ mi_assert_internal(page->capacity <= page->reserved);
+ return (page->capacity < page->reserved);
+}
+
+
// Find a page with free blocks of `page->block_size`.
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
@@ -716,39 +749,76 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
#if MI_STAT
size_t count = 0;
#endif
+ size_t candidate_count = 0; // we reset this on the first candidate to limit the search
+ mi_page_t* page_candidate = NULL; // a page with free space
mi_page_t* page = pq->first;
+
while (page != NULL)
{
mi_page_t* next = page->next; // remember next
#if MI_STAT
count++;
#endif
+ candidate_count++;
- // 0. collect freed blocks by us and other threads
+ // collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
- // 1. if the page contains free blocks, we are done
- if (mi_page_immediate_available(page)) {
+ #if MI_MAX_CANDIDATE_SEARCH > 1
+ // search up to N pages for a best candidate
+
+ // is the local free list non-empty?
+ const bool immediate_available = mi_page_immediate_available(page);
+
+ // if the page is completely full, move it to the `mi_pages_full`
+ // queue so we don't visit long-lived pages too often.
+ if (!immediate_available && !mi_page_is_expandable(page)) {
+ mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+ mi_page_to_full(page, pq);
+ }
+ else {
+ // the page has free space, make it a candidate
+ // we prefer non-expandable pages with high usage as candidates (to reduce commit, and to increase the chances of freeing up pages)
+ if (page_candidate == NULL) {
+ page_candidate = page;
+ candidate_count = 0;
+ }
+ else if (/* !mi_page_is_expandable(page) && */ page->used >= page_candidate->used) {
+ page_candidate = page;
+ }
+ // if we find a non-expandable candidate, or searched for N pages, return with the best candidate
+ if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
+ mi_assert_internal(page_candidate!=NULL);
+ break;
+ }
+ }
+ #else
+ // first-fit algorithm
+ // If the page contains free blocks or can still be extended, we are done
+ if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
break; // pick this one
}
- // 2. Try to extend
- if (page->capacity < page->reserved) {
- mi_page_extend_free(heap, page, heap->tld);
- mi_assert_internal(mi_page_immediate_available(page));
- break;
- }
-
- // 3. If the page is completely full, move it to the `mi_pages_full`
+ // If the page is completely full, move it to the `mi_pages_full`
// queue so we don't visit long-lived pages too often.
mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
mi_page_to_full(page, pq);
+ #endif
page = next;
} // for each page
mi_heap_stat_counter_increase(heap, searches, count);
+ // set the page to the best candidate
+ if (page_candidate != NULL) {
+ page = page_candidate;
+ }
+ if (page != NULL && !mi_page_immediate_available(page)) {
+ mi_assert_internal(mi_page_is_expandable(page));
+ mi_page_extend_free(heap, page, heap->tld);
+ }
+
if (page == NULL) {
_mi_heap_collect_retired(heap, false); // perhaps make a page available
page = mi_page_fresh(heap, pq);
@@ -758,10 +828,14 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
}
}
else {
- mi_assert(pq->first == page);
+ // move the page to the front of the queue
+ mi_page_queue_move_to_front(heap, pq, page);
page->retire_expire = 0;
+ // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this
}
mi_assert_internal(page == NULL || mi_page_immediate_available(page));
+
+
return page;
}
@@ -769,7 +843,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
- mi_page_queue_t* pq = mi_page_queue(heap,size);
+ mi_page_queue_t* pq = mi_page_queue(heap, size);
+
+ // check the first page: we do this even with candidate search, as otherwise we would re-search every time
mi_page_t* page = pq->first;
if (page != NULL) {
#if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
@@ -788,6 +864,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
return page; // fast path
}
}
+
return mi_page_queue_find_free_ex(heap, pq, true);
}
diff --git a/src/segment.c b/src/segment.c
index 18736818..74abcdbc 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -652,6 +652,10 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
MI_UNUSED(force);
mi_assert(segment != NULL);
+
+ // in `mi_segment_force_abandon` we set this to true to ensure the segment's memory stays valid
+ if (segment->dont_free) return;
+
// don't purge as we are freeing now
mi_segment_remove_all_purges(segment, false /* don't force as we are about to free */, tld);
mi_segment_remove_from_free_queue(segment, tld);
@@ -952,6 +956,9 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
if (segment->subproc != heap->tld->segments.subproc) return false; // only reclaim within the same subprocess
if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arena's
+ const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
+ if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count
+
// don't reclaim more from a `free` call than half the current segments
// this is to prevent a pure free-ing thread to start owning too many segments
// (but not for out-of-arena segments as that is the main way to be reclaimed for those)
@@ -976,6 +983,13 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
_mi_arena_field_cursor_done(&current);
}
+
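+// Return true if the current segment count is below the per-thread target
+// (a target of 0 means no limit); if `ptarget` is non-NULL it receives the clamped target.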
+static bool segment_count_is_within_target(mi_segments_tld_t* tld, size_t* ptarget) {
+ const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024);
+ if (ptarget != NULL) { *ptarget = target; }
+ return (target == 0 || tld->count < target);
+}
+
static long mi_segment_get_reclaim_tries(mi_segments_tld_t* tld) {
// limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
@@ -998,7 +1012,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
mi_segment_t* segment = NULL;
mi_arena_field_cursor_t current;
_mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, &current);
- while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
+ while (segment_count_is_within_target(tld,NULL) && (max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
{
mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
segment->abandoned_visits++;
@@ -1023,8 +1037,8 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
break;
}
- else if (segment->abandoned_visits >= 3 && is_suitable) {
- // always reclaim on 3rd visit to limit the list length.
+ else if (segment->abandoned_visits > 3 && is_suitable) {
+ // always reclaim after more than 3 visits to limit the abandoned segment count.
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
else {
@@ -1038,6 +1052,92 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
}
+/* -----------------------------------------------------------
+ Force abandon a segment that is in use by our thread
+----------------------------------------------------------- */
+
+// force abandon a segment
+static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* tld)
+{
+ mi_assert_internal(segment->abandoned < segment->used);
+ mi_assert_internal(!segment->dont_free);
+
+ // ensure the segment does not get free'd underneath us (so we can check if a page has been freed in `mi_page_force_abandon`)
+ segment->dont_free = true;
+
+ // for all pages
+ for (size_t i = 0; i < segment->capacity; i++) {
+ mi_page_t* page = &segment->pages[i];
+ if (page->segment_in_use) {
+ // abandon the page if it is still in-use (this will free the page if possible as well (but not our segment))
+ mi_assert_internal(segment->used > 0);
+ if (segment->used == segment->abandoned+1) {
+ // the last page.. abandon and return as the segment will be abandoned after this
+ // and we should no longer access it.
+ segment->dont_free = false;
+ _mi_page_force_abandon(page);
+ return;
+ }
+ else {
+ // abandon and continue
+ _mi_page_force_abandon(page);
+ }
+ }
+ }
+ segment->dont_free = false;
+ mi_assert(segment->used == segment->abandoned);
+ mi_assert(segment->used == 0);
+ if (segment->used == 0) { // paranoia
+ // all free now
+ mi_segment_free(segment, false, tld);
+ }
+ else {
+ // perform delayed purges
+ mi_pages_try_purge(false /* force? */, tld);
+ }
+}
+
+
+// try to abandon segments until the owned segment count is (roughly) below the given target.
+// this should be called from `reclaim_or_alloc` so we know all segments are (almost) fully in use.
+static void mi_segments_try_abandon_to_target(mi_heap_t* heap, size_t target, mi_segments_tld_t* tld) {
+ if (target <= 1) return;
+ const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75%
+ // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
+ for (int i = 0; i < 64 && tld->count >= min_target; i++) {
+ mi_page_t* page = heap->pages[MI_BIN_FULL].first;
+ while (page != NULL && mi_page_is_huge(page)) {
+ page = page->next;
+ }
+ if (page==NULL) {
+ break;
+ }
+ mi_segment_t* segment = _mi_page_segment(page);
+ mi_segment_force_abandon(segment, tld);
+ mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned
+ }
+}
+
+// try to abandon segments when the owned segment count has reached the per-thread target.
+// this should be called from `reclaim_or_alloc` so we know all segments are (almost) fully in use.
+static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
+ // we call this when we are about to add a fresh segment, to get (back) under our target segment count.
+ size_t target = 0;
+ if (segment_count_is_within_target(tld, &target)) return;
+ mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
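+// Collect and try to reduce the number of thread-owned segments to roughly `target_size` bytes
+// (if `target_size` is 0, the `target_segments_per_thread` option is used instead).
+// Illustrative use (an example, not part of this change): `mi_collect_reduce(64*1024*1024)` keeps about 64 MiB of thread-owned segments.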
+void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
+ mi_collect(true);
+ mi_heap_t* heap = mi_heap_get_default();
+ mi_segments_tld_t* tld = &heap->tld->segments;
+ size_t target = target_size / MI_SEGMENT_SIZE;
+ if (target == 0) {
+ target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 1, 1024);
+ }
+ mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
/* -----------------------------------------------------------
Reclaim or allocate
----------------------------------------------------------- */
@@ -1047,6 +1147,9 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
mi_assert_internal(page_kind <= MI_PAGE_LARGE);
mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
+ // try to abandon some segments to increase reuse between threads
+ mi_segments_try_abandon(heap,tld);
+
// 1. try to reclaim an abandoned segment
bool reclaimed;
mi_segment_t* segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);