Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-07 11:58:41 +03:00)
more eager handling of non-local frees
commit 1fdb4b288f (parent 72a39c0bb1)
5 changed files with 44 additions and 20 deletions

src/page.c | 29 (only this file's diff is shown below)
@@ -109,17 +109,19 @@ bool _mi_page_is_valid(mi_page_t* page) {
 #endif
 
-void _mi_page_use_delayed_free(mi_page_t* page, bool enable) {
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay ) {
   mi_thread_free_t tfree;
   mi_thread_free_t tfreex;
 
   do {
     tfreex.value = tfree.value = page->thread_free.value;
-    tfreex.delayed = (enable ? MI_USE_DELAYED_FREE : MI_NO_DELAYED_FREE);
-    if (mi_unlikely(tfree.delayed == MI_DELAYED_FREEING)) {
+    if (mi_unlikely(tfree.delayed < MI_DELAYED_FREEING)) {
+      tfreex.delayed = delay;
+    }
+    else if (mi_unlikely(tfree.delayed == MI_DELAYED_FREEING)) {
       mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
       continue;          // and try again
     }
   }
   while(tfreex.delayed != tfree.delayed && // avoid atomic operation if already equal
        !mi_atomic_compare_exchange((volatile uintptr_t*)&page->thread_free, tfreex.value, tfree.value));
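For readers following along: the new `tfree.delayed < MI_DELAYED_FREEING` test and the `MI_NEVER_DELAYED_FREE` call site further down only make sense if `mi_delayed_t` is an ordered set of per-page states. The sketch below is a minimal standalone model of that state machine under the assumed enum ordering shown (the exact values are not part of this diff, and the real code uses mimalloc's own `mi_atomic_*` wrappers rather than C11 atomics):

#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

// Assumed ordering; consistent with the "< MI_DELAYED_FREEING" comparison above.
typedef enum mi_delayed_e {
  MI_NO_DELAYED_FREE = 0,   // remote frees push straight onto page->thread_free
  MI_USE_DELAYED_FREE,      // remote frees are routed via the owning heap's delayed list
  MI_DELAYED_FREEING,       // a delayed free is in flight; transitions must wait
  MI_NEVER_DELAYED_FREE     // sticky: delayed free is disabled for good (abandoned pages)
} mi_delayed_t;

static _Atomic int page_delayed = MI_NO_DELAYED_FREE;

// Same retry structure as _mi_page_use_delayed_free above (simplified model):
// install `delay` unless a delayed free is in flight, in which case spin.
static void set_delayed(mi_delayed_t delay) {
  for (;;) {
    int cur = atomic_load(&page_delayed);
    if (cur < MI_DELAYED_FREEING) {
      if (cur == (int)delay) return;                                    // nothing to do
      if (atomic_compare_exchange_weak(&page_delayed, &cur, (int)delay)) return;
    }
    else if (cur == MI_DELAYED_FREEING) {
      sched_yield();                                                    // wait and retry
    }
    else {
      return;                                                           // NEVER is sticky
    }
  }
}

int main(void) {
  set_delayed(MI_USE_DELAYED_FREE);
  printf("delayed state = %d\n", atomic_load(&page_delayed));           // prints 1
  return 0;
}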
@@ -272,7 +274,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(page->flags.in_full);
 
-  _mi_page_use_delayed_free(page, false);
+  _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE);
   if (!page->flags.in_full) return;
 
   mi_heap_t* heap = page->heap;
@@ -288,7 +290,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(!mi_page_immediate_available(page));
   mi_assert_internal(!page->flags.in_full);
 
-  _mi_page_use_delayed_free(page, true);
+  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE);
   if (page->flags.in_full) return;
 
   mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
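Why a page that moves to the full queue switches to MI_USE_DELAYED_FREE: a free coming from another thread normally just pushes the block onto page->thread_free, which the owner only notices when it next visits that page, and a page parked in the full queue might never be visited again. Routing remote frees through the owning heap's thread_delayed_free list lets the owner's slow allocation path discover them and un-full the page. The sketch below is an illustrative, self-contained model of that branch only; the real logic lives in mimalloc's multi-threaded free path in src/alloc.c and additionally handles the MI_DELAYED_FREEING handshake:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef enum { NO_DELAYED = 0, USE_DELAYED, FREEING, NEVER_DELAYED } delayed_t;

typedef struct block_s { struct block_s* next; } block_t;

typedef struct heap_s {
  block_t* _Atomic thread_delayed_free;  // picked up by the owner's slow allocation path
} heap_t;

typedef struct page_s {
  heap_t*  heap;
  block_t* _Atomic thread_free;          // blocks freed by other threads
  _Atomic int delayed;                   // delayed_t state for this page
} page_t;

static void push(block_t* _Atomic* list, block_t* b) {
  block_t* old = atomic_load(list);
  do { b->next = old; } while (!atomic_compare_exchange_weak(list, &old, b));
}

// Free a block from a thread that does not own the page.
static void remote_free(page_t* page, block_t* block) {
  if (atomic_load(&page->delayed) == USE_DELAYED) {
    // Full page: hand the block to the owning heap so its next slow-path
    // allocation sees it and can un-full the page.
    push(&page->heap->thread_delayed_free, block);
  } else {
    // Normal case: push onto the page-local remote free list.
    push(&page->thread_free, block);
  }
}

int main(void) {
  heap_t heap = { NULL };
  page_t page = { &heap, NULL, USE_DELAYED };
  block_t b;
  remote_free(&page, &b);
  printf("delayed list non-empty: %d\n", heap.thread_delayed_free != NULL);  // prints 1
  return 0;
}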
@@ -305,8 +307,8 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(pq == mi_page_queue_of(page));
   mi_assert_internal(page->heap != NULL);
-  mi_assert_internal(page->thread_free.delayed == MI_NO_DELAYED_FREE);
 
+  _mi_page_use_delayed_free(page,MI_NEVER_DELAYED_FREE);
   #if MI_DEBUG>1
   // check there are no references left..
   for (mi_block_t* block = (mi_block_t*)page->heap->thread_delayed_free; block != NULL; block = mi_block_nextx(page->heap->cookie,block)) {
@@ -330,7 +332,14 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(pq == mi_page_queue_of(page));
   mi_assert_internal(mi_page_all_free(page));
-  mi_assert_internal(page->thread_free.delayed != MI_DELAYED_FREEING);
+  #if MI_DEBUG>1
+  // check if we can safely free
+  mi_thread_free_t free;
+  free.value = page->thread_free.value;
+  free.delayed = MI_NEVER_DELAYED_FREE;
+  free.value = mi_atomic_exchange(&page->thread_free.value, free.value);
+  mi_assert_internal(free.delayed != MI_DELAYED_FREEING);
+  #endif
 
   page->flags.has_aligned = false;
 
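The debug block above replaces a plain assertion with an exchange-then-assert: it atomically swaps MI_NEVER_DELAYED_FREE into the flag and checks that the previous state was not MI_DELAYED_FREEING, so a remote free cannot be racing with the page being freed. A minimal standalone illustration of the same pattern with C11 atomics (the real code goes through mimalloc's mi_atomic_exchange wrapper; the enum ordering is the same assumption as above):

#include <assert.h>
#include <stdatomic.h>

enum { NO_DELAYED, USE_DELAYED, FREEING, NEVER_DELAYED };   // assumed ordering

static _Atomic int delayed = NO_DELAYED;

void on_page_free(void) {
  // Pin the final state and observe the previous one in a single atomic step,
  // so a concurrent transition cannot slip in between the check and the update.
  int prev = atomic_exchange(&delayed, NEVER_DELAYED);
  assert(prev != FREEING);   // no remote free may be mid-flight at this point
}

int main(void) {
  on_page_free();
  return 0;
}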
@@ -717,10 +726,12 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
   mi_assert_internal(page->block_size >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse)
-  void* p = _mi_page_malloc(heap, page, size);
+  return _mi_page_malloc(heap, page, size);
+  /*
   if (page->used == page->reserved) {
     // needed for huge pages to free reliably from other threads.
     mi_page_to_full(page,mi_page_queue_of(page));
   }
   return p;
+  */
 }
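The hunk above disables the old workaround that force-moved a just-filled (huge) page into the full queue so remote frees would be noticed; with the more eager handling of non-local frees in this commit, _mi_malloc_generic simply returns the allocation. The workload that exercises all of this is a block allocated on one thread and freed on another. A small usage example against mimalloc's public API (file name and build line are illustrative):

// cc demo.c -lmimalloc -lpthread
#include <mimalloc.h>
#include <pthread.h>
#include <stdio.h>

static void* free_it(void* p) {
  mi_free(p);                      // a non-local free: p belongs to the main thread's heap
  return NULL;
}

int main(void) {
  void* p = mi_malloc(1 << 20);    // allocated from the main thread's heap
  pthread_t t;
  pthread_create(&t, NULL, free_it, p);
  pthread_join(t, NULL);
  printf("cross-thread free completed\n");
  return 0;
}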