add option to skip delayed frees if there is contention on the lock

Author: Daniel Posluns
Date:   2022-10-04 19:26:37 -07:00
Commit: 567494b2b6 (parent eb29d6b06f)
8 changed files with 23 additions and 8 deletions


@@ -115,7 +115,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); //
 void _mi_heap_delayed_free(mi_heap_t* heap);
 void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
-void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+bool _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never, bool cancel_if_locked);
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
 void _mi_deferred_free(mi_heap_t* heap, bool force);


@@ -325,6 +325,7 @@ typedef enum mi_option_e {
   mi_option_max_errors,
   mi_option_max_warnings,
   mi_option_max_segment_reclaim,
+  mi_option_skip_recycle_if_busy,
   _mi_option_last
 } mi_option_t;


@@ -503,7 +503,15 @@ bool _mi_free_delayed_block(mi_block_t* block) {
   // some blocks may end up in the page `thread_free` list with no blocks in the
   // heap `thread_delayed_free` list which may cause the page to be never freed!
   // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
-  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */);
+  //
+  // Note that on systems with oversubscribed cores, this may cause stalls if the lock
+  // is currently held by another thread that has been suspended. You can set the
+  // option `skip_recycle_if_busy` to mitigate this, at the cost of further delaying
+  // the reuse of those blocks.
+  //
+  if (!_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */, mi_option_is_enabled(mi_option_skip_recycle_if_busy))) {
+    return false;
+  }
   // collect all other non-local frees to ensure up-to-date `used` count
   _mi_page_free_collect(page, false);
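
For illustration only, enabling the new behavior from application code could look like the sketch below. It assumes the existing public mi_option_* API in mimalloc.h applies to the new enum value just like to the other options; if this fork also keeps upstream mimalloc's environment-variable mapping, setting MIMALLOC_SKIP_RECYCLE_IF_BUSY=1 before startup should have the same effect (that mapping is an assumption, not something shown in this diff).

#include <mimalloc.h>

int main(void) {
  // Sketch: opt in to skipping contended delayed-free recycling.
  // mi_option_enable / mi_option_is_enabled are existing mimalloc option APIs;
  // mi_option_skip_recycle_if_busy is the value added by this commit.
  mi_option_enable(mi_option_skip_recycle_if_busy);

  void* p = mi_malloc(64);  // allocation and free behave as usual; only the
  mi_free(p);               // delayed-free recycling path changes under contention
  return 0;
}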


@@ -108,7 +108,7 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
   MI_UNUSED(arg2);
   MI_UNUSED(heap);
   MI_UNUSED(pq);
-  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false, false);
   return true; // don't break
 }
@@ -268,7 +268,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   MI_UNUSED(pq);
   // ensure no more thread_delayed_free will be added
-  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false, false);
   // stats
   const size_t bsize = mi_page_block_size(page);


@@ -93,7 +93,8 @@ static mi_option_desc_t options[_mi_option_last] =
   { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
   { 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
   { 16, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
-  { 8, UNINIT, MI_OPTION(max_segment_reclaim)} // max. number of segment reclaims from the abandoned segments per try.
+  { 8, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. number of segment reclaims from the abandoned segments per try.
+  { 0, UNINIT, MI_OPTION(skip_recycle_if_busy)} // skip recycling memory from other threads on alloc if other thread owns lock
 };
 static void mi_option_init(mi_option_desc_t* desc);


@@ -305,7 +305,7 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue
     // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
     // side effect that it spins until any DELAYED_FREEING is finished. This ensures
     // that after appending only the new heap will be used for delayed free operations.
-    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
+    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false, false);
     count++;
   }


@@ -122,7 +122,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
 }
 #endif
-void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
+bool _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never, bool cancel_if_locked) {
   mi_thread_free_t tfreex;
   mi_delayed_t old_delay;
   mi_thread_free_t tfree;
@@ -131,6 +131,9 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
     tfreex = mi_tf_set_delayed(tfree, delay);
     old_delay = mi_tf_delayed(tfree);
     if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
+      if (cancel_if_locked) {
+        return false;
+      }
       mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
       // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
     }
@@ -142,6 +145,8 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
     }
   } while ((old_delay == MI_DELAYED_FREEING) ||
            !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+  return true;
 }
 /* -----------------------------------------------------------
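
To make the cancel-on-contention logic in _mi_page_use_delayed_free easier to follow, here is a minimal, self-contained model of the same pattern written with C11 atomics. It is an illustrative sketch only: the enum, the set_delayed helper, and the plain retry loop (where the allocator yields instead of spinning hot, and also handles the override_never case) are simplifications, not mimalloc's actual types or code.

#include <stdatomic.h>
#include <stdbool.h>

typedef enum { NO_DELAYED_FREE, USE_DELAYED_FREE, DELAYED_FREEING, NEVER_DELAYED_FREE } delayed_t;

// Try to move `state` to `want`. While another thread holds the transient
// DELAYED_FREEING state (the "lock"), either bail out (cancel_if_locked) or
// keep retrying until it clears.
static bool set_delayed(_Atomic delayed_t* state, delayed_t want, bool cancel_if_locked) {
  for (;;) {
    delayed_t old = atomic_load_explicit(state, memory_order_acquire);
    if (old == DELAYED_FREEING) {
      if (cancel_if_locked) return false;  // caller skips the recycle this time
      continue;                            // the real code yields here instead of spinning hot
    }
    if (old == NEVER_DELAYED_FREE || old == want) return true;  // nothing to change
    if (atomic_compare_exchange_weak_explicit(state, &old, want,
                                              memory_order_release,
                                              memory_order_relaxed)) {
      return true;
    }
  }
}

int main(void) {
  _Atomic delayed_t flag = NO_DELAYED_FREE;
  // Uncontended: succeeds immediately. Under contention with cancel_if_locked=true
  // it would return false and the caller would simply try again on a later pass.
  return set_delayed(&flag, USE_DELAYED_FREE, true) ? 0 : 1;
}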


@@ -1066,7 +1066,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
       // set the heap again and allow heap thread delayed free again.
       mi_page_set_heap(page, heap);
-      _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
+      _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true, false); // override never (after heap is set)
       // TODO: should we not collect again given that we just collected in `check_free`?
       _mi_page_free_collect(page, false); // ensure used count is up to date
       if (mi_page_all_free(page)) {