Merge branch 'dev-steal' into dev-slice-steal

This commit is contained in:
daanx 2024-10-09 14:35:49 -07:00
commit 03fbaedec5
2 changed files with 65 additions and 26 deletions

View file

@@ -265,7 +265,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
} }
static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) {
mi_assert_internal(page != NULL); mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(from, page)); mi_assert_expensive(mi_page_queue_contains(from, page));
mi_assert_expensive(!mi_page_queue_contains(to, page)); mi_assert_expensive(!mi_page_queue_contains(to, page));
@@ -278,6 +278,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
(mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to))); (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));
mi_heap_t* heap = mi_page_heap(page); mi_heap_t* heap = mi_page_heap(page);
// delete from `from`
if (page->prev != NULL) page->prev->next = page->next; if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev; if (page->next != NULL) page->next->prev = page->prev;
if (page == from->last) from->last = page->prev; if (page == from->last) from->last = page->prev;
@@ -288,22 +290,58 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_heap_queue_first_update(heap, from); mi_heap_queue_first_update(heap, from);
} }
page->prev = to->last; // insert into `to`
page->next = NULL; if (enqueue_at_end) {
if (to->last != NULL) { // enqueue at the end
mi_assert_internal(heap == mi_page_heap(to->last)); page->prev = to->last;
to->last->next = page; page->next = NULL;
to->last = page; if (to->last != NULL) {
mi_assert_internal(heap == mi_page_heap(to->last));
to->last->next = page;
to->last = page;
}
else {
to->first = page;
to->last = page;
mi_heap_queue_first_update(heap, to);
}
} }
else { else {
to->first = page; if (to->first != NULL) {
to->last = page; // enqueue at 2nd place
mi_heap_queue_first_update(heap, to); mi_assert_internal(heap == mi_page_heap(to->first));
mi_page_t* next = to->first->next;
page->prev = to->first;
page->next = next;
to->first->next = page;
if (next != NULL) {
next->prev = page;
}
else {
to->last = page;
}
}
else {
// enqueue at the head (singleton list)
page->prev = NULL;
page->next = NULL;
to->first = page;
to->last = page;
mi_heap_queue_first_update(heap, to);
}
} }
mi_page_set_in_full(page, mi_page_queue_is_full(to)); mi_page_set_in_full(page, mi_page_queue_is_full(to));
} }
// Move `page` from queue `from` to the END of queue `to`
// (thin wrapper around `mi_page_queue_enqueue_from_ex` with `enqueue_at_end = true`;
// preserves the behavior of the original, pre-refactor `mi_page_queue_enqueue_from`).
static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
mi_page_queue_enqueue_from_ex(to, from, true, page);
}
// Move `page` from queue `from` to the FRONT of queue `to`
// (wrapper around `mi_page_queue_enqueue_from_ex` with `enqueue_at_end = false`;
// note: per the `_ex` implementation this inserts at 2nd place when `to` is
// non-empty, keeping `to->first` stable so `mi_heap_queue_first_update` is avoided).
static void mi_page_queue_enqueue_from_at_start(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
mi_page_queue_enqueue_from_ex(to, from, false, page);
}
// Only called from `mi_heap_absorb`. // Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
mi_assert_internal(mi_heap_contains_queue(heap,pq)); mi_assert_internal(mi_heap_contains_queue(heap,pq));

View file

@@ -358,7 +358,7 @@ void _mi_page_unfull(mi_page_t* page) {
mi_page_set_in_full(page, false); // to get the right queue mi_page_set_in_full(page, false); // to get the right queue
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
mi_page_set_in_full(page, true); mi_page_set_in_full(page, true);
mi_page_queue_enqueue_from(pq, pqfull, page); mi_page_queue_enqueue_from_at_start(pq, pqfull, page); // insert at the start to increase the chance of reusing full pages (?)
} }
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
@@ -718,14 +718,17 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
Find pages with free blocks Find pages with free blocks
-------------------------------------------------------------*/ -------------------------------------------------------------*/
// search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
#define MI_MAX_CANDIDATE_SEARCH (16) #define MI_MAX_CANDIDATE_SEARCH (16)
static inline bool mi_page_is_expandable(const mi_page_t* page) { // is the page not yet used up to its reserved space?
static bool mi_page_is_expandable(const mi_page_t* page) {
mi_assert_internal(page != NULL); mi_assert_internal(page != NULL);
mi_assert_internal(page->capacity <= page->reserved); mi_assert_internal(page->capacity <= page->reserved);
return (page->capacity < page->reserved); return (page->capacity < page->reserved);
} }
// Find a page with free blocks of `page->block_size`. // Find a page with free blocks of `page->block_size`.
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{ {
@@ -743,14 +746,17 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
count++; count++;
#endif #endif
candidate_count++; candidate_count++;
#if defined(MI_MAX_CANDIDATE_SEARCH)
// 0. collect freed blocks by us and other threads
_mi_page_free_collect(page, false); // todo: should we free empty pages?
// collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
#if defined(MI_MAX_CANDIDATE_SEARCH)
// search up to N pages for a best candidate
// is the local free list non-empty? // is the local free list non-empty?
const bool immediate_available = mi_page_immediate_available(page); const bool immediate_available = mi_page_immediate_available(page);
// 1. If the page is completely full, move it to the `mi_pages_full` // if the page is completely full, move it to the `mi_pages_full`
// queue so we don't visit long-lived pages too often. // queue so we don't visit long-lived pages too often.
if (!immediate_available && !mi_page_is_expandable(page)) { if (!immediate_available && !mi_page_is_expandable(page)) {
mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
@@ -766,25 +772,20 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
else if (!mi_page_is_expandable(page) && page->used > page_candidate->used) { else if (!mi_page_is_expandable(page) && page->used > page_candidate->used) {
page_candidate = page; page_candidate = page;
} }
// if we find a non-expandable candidate, or searched for N pages, return with the best candidate
if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) { if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
mi_assert_internal(page_candidate!=NULL); mi_assert_internal(page_candidate!=NULL);
break; break;
} }
} }
#else #else
// 1. if the page contains free blocks, we are done // first-fit algorithm
if (mi_page_immediate_available(page)) { // If the page contains free blocks, we are done
if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
break; // pick this one break; // pick this one
} }
// 2. Try to extend // If the page is completely full, move it to the `mi_pages_full`
if (page->capacity < page->reserved) {
mi_page_extend_free(heap, page, heap->tld);
mi_assert_internal(mi_page_immediate_available(page));
break;
}
// 3. If the page is completely full, move it to the `mi_pages_full`
// queue so we don't visit long-lived pages too often. // queue so we don't visit long-lived pages too often.
mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
mi_page_to_full(page, pq); mi_page_to_full(page, pq);