mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-07 07:49:31 +03:00)

add target_segments_per_thread option

This commit is contained in:
  parent edc7ddd37c
  commit 71fec8caf5

5 changed files with 112 additions and 9 deletions
@@ -377,6 +377,7 @@ typedef enum mi_option_e {
   mi_option_guarded_precise,            // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
   mi_option_guarded_sample_rate,        // 1 out of N allocations in the min/max range will be guarded (=1000)
   mi_option_guarded_sample_seed,        // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
+  mi_option_target_segments_per_thread, // experimental (=0)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
@@ -178,6 +178,8 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept;
 void _mi_page_unfull(mi_page_t* page);
 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force);   // free the page
 void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq);            // abandon the page, to be picked up by another thread...
+void _mi_page_force_abandon(mi_page_t* page);
+
 void _mi_heap_delayed_free_all(mi_heap_t* heap);
 bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
 void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
@@ -625,9 +627,9 @@ static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
     }
     else {
       // failed size criteria, rewind count (but don't write to an empty heap)
       if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
       return false;
     }
   }
 }

 mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
@@ -65,6 +65,7 @@ typedef struct mi_option_desc_s {
 #define MI_DEFAULT_ARENA_EAGER_COMMIT 2
 #endif

 // in KiB
 #ifndef MI_DEFAULT_ARENA_RESERVE
 #if (MI_INTPTR_SIZE>4)
 #define MI_DEFAULT_ARENA_RESERVE 1024L*1024L
@@ -151,6 +152,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(guarded_sample_rate)},
 #endif
   { 0, UNINIT, MI_OPTION(guarded_sample_seed)},
+  { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
 };

 static void mi_option_init(mi_option_desc_t* desc);
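Since the default is 0 the option is disabled unless a program opts in. A minimal sketch of how it could be enabled, assuming mimalloc's usual convention that every option can also be set through an environment variable of the same name (here MIMALLOC_TARGET_SEGMENTS_PER_THREAD); the value 4 is purely illustrative:

#include <mimalloc.h>

int main(void) {
  // sketch: set the experimental option before the first allocation,
  // equivalent to running with MIMALLOC_TARGET_SEGMENTS_PER_THREAD=4
  mi_option_set(mi_option_target_segments_per_thread, 4);  // 4 is an example value, not a recommendation
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}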
src/page.c (33 changes)
@@ -357,7 +357,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_page_set_in_full(page, false); // to get the right queue
   mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
   mi_page_set_in_full(page, true);
   mi_page_queue_enqueue_from_full(pq, pqfull, page);
 }

 static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
@@ -403,6 +403,29 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   _mi_segment_page_abandon(page,segments_tld);
 }

+// force abandon a page
+void _mi_page_force_abandon(mi_page_t* page) {
+  mi_heap_t* heap = mi_page_heap(page);
+  // mark page as not using delayed free
+  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+
+  // ensure this page is no longer in the heap delayed free list
+  _mi_heap_delayed_free_all(heap);
+  // TODO: can we still access the page as it may have been
+  // freed and the memory decommitted?
+  // A way around this is to explicitly unlink this page from
+  // the heap delayed free list.
+  if (page->capacity == 0) return; // it may have been freed now
+
+  // and now unlink it from the page queue and abandon (or free)
+  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+  if (mi_page_all_free(page)) {
+    _mi_page_free(page, pq, false);
+  }
+  else {
+    _mi_page_abandon(page, pq);
+  }
+}
+
 // Free a page with no more free blocks
 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
@@ -743,7 +766,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
 #if defined(MI_MAX_CANDIDATE_SEARCH)
   // search up to N pages for a best candidate

   // is the local free list non-empty?
   const bool immediate_available = mi_page_immediate_available(page);
@@ -758,9 +781,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
       // we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages)
       if (page_candidate == NULL) {
         page_candidate = page;
         candidate_count = 0;
       }
-      else if (!mi_page_is_expandable(page) && page->capacity < page_candidate->capacity) {
+      else if (!mi_page_is_expandable(page) && page->used >= page_candidate->used) {
         page_candidate = page;
       }
       // if we find a non-expandable candidate, or searched for N pages, return with the best candidate
@@ -805,7 +828,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     }
   }
   else {
-    mi_assert(pq->first == page);
+    // mi_assert(pq->first == page);
     page->retire_expire = 0;
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
@@ -952,6 +952,9 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
   if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false;  // it is not abandoned
   if (segment->subproc != heap->tld->segments.subproc) return false;   // only reclaim within the same subprocess
   if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false;  // don't reclaim between exclusive and non-exclusive arena's
+  const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
+  if (target > 0 && (size_t)target <= heap->tld->segments.count) return false;  // don't reclaim if going above the target count
+
   // don't reclaim more from a `free` call than half the current segments
   // this is to prevent a pure free-ing thread to start owning too many segments
   // (but not for out-of-arena segments as that is the main way to be reclaimed for those)
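To make the new check concrete, here is how it reads for an assumed setting of 4 (any non-zero value behaves the same way):

// illustrative walk-through of the check above, with target_segments_per_thread = 4:
//   heap->tld->segments.count == 3  ->  4 <= 3 is false: the thread may still reclaim an abandoned segment
//   heap->tld->segments.count == 4  ->  4 <= 4 is true:  return false, reclaiming would go above the target
//   target == 0 (the default)       ->  the check is skipped and reclaiming behaves as before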
@@ -1023,8 +1026,8 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
       result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
       break;
     }
-    else if (segment->abandoned_visits >= 3 && is_suitable) {
-      // always reclaim on 3rd visit to limit the list length.
+    else if (segment->abandoned_visits > 3 && is_suitable && !mi_option_is_enabled(mi_option_target_segments_per_thread)) {
+      // always reclaim on 3rd visit to limit the abandoned segment count.
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
     else {
@@ -1038,6 +1041,75 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
 }

+
+/* -----------------------------------------------------------
+   Force abandon a segment that is in use by our thread
+----------------------------------------------------------- */
+
+// force abandon a segment
+static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* tld)
+{
+  mi_assert_internal(segment->abandoned < segment->used);
+
+  // for all pages
+  for (size_t i = 0; i < segment->capacity; i++) {
+    mi_page_t* page = &segment->pages[i];
+    if (page->segment_in_use) {
+      // ensure used count is up to date and collect potential concurrent frees
+      _mi_page_free_collect(page, false);
+      {
+        // abandon the page if it is still in-use (this will free it if possible as well)
+        mi_assert_internal(segment->used > 0);
+        if (segment->used == segment->abandoned+1) {
+          // the last page.. abandon and return as the segment will be abandoned after this
+          // and we should no longer access it.
+          _mi_page_force_abandon(page);
+          return;
+        }
+        else {
+          // abandon and continue
+          _mi_page_force_abandon(page);
+        }
+      }
+    }
+  }
+  mi_assert(segment->used == segment->abandoned);
+  mi_assert(segment->used == 0);
+  if (segment->used == 0) {
+    // all free now
+    mi_segment_free(segment, false, tld);
+  }
+  else {
+    // perform delayed purges
+    mi_pages_try_purge(false /* force? */, tld);
+  }
+}
+
+
+// try abandon segments.
+// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
+static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
+  const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread,0,1024);
+  // we call this when we are about to add a fresh segment so we should be under our target segment count.
+  if (target == 0 || tld->count < target) return;
+
+  const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75%
+
+  // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
+  for (int i = 0; i < 16 && tld->count >= min_target; i++) {
+    mi_page_t* page = heap->pages[MI_BIN_FULL].first;
+    while (page != NULL && mi_page_is_huge(page)) {
+      page = page->next;
+    }
+    if (page==NULL) {
+      break;
+    }
+    mi_segment_t* segment = _mi_page_segment(page);
+    mi_segment_force_abandon(segment, tld);
+    mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned
+  }
+}
+
+
 /* -----------------------------------------------------------
    Reclaim or allocate
 ----------------------------------------------------------- */
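A small worked example of the thresholds in mi_segments_try_abandon, under the assumption that the option is set to 8 (an arbitrary value):

// illustrative numbers only (target_segments_per_thread = 8):
//   target     = mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024)  ->  8
//   min_target = (target > 4 ? (target*3)/4 : target)                                ->  6  (75% of 8)
// so once a thread owns 8 or more segments, up to 16 full (non-huge) segments are
// force-abandoned per call, until the thread is back below 6 owned segments.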
@@ -1047,6 +1119,9 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
   mi_assert_internal(page_kind <= MI_PAGE_LARGE);
   mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);

+  // try to abandon some segments to increase reuse between threads
+  mi_segments_try_abandon(heap,tld);
+
   // 1. try to reclaim an abandoned segment
   bool reclaimed;
   mi_segment_t* segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
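For orientation, a rough reading of the segment allocation path after this change (a sketch of the ordering only, not the full function body):

// mi_segment_reclaim_or_alloc, in outline:
//   1. mi_segments_try_abandon(heap, tld)  -- shed segments first if the thread is above the target
//   2. mi_segment_try_reclaim(...)         -- then try to adopt an abandoned segment
//   3. otherwise fall back to allocating a fresh segment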