Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 23:39:31 +03:00)

Merge branch 'dev-steal' into dev-slice-steal

Commit 03fbaedec5: 2 changed files with 65 additions and 26 deletions
src/page-queue.c

```diff
@@ -265,7 +265,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
 }
 
 
-static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(from, page));
   mi_assert_expensive(!mi_page_queue_contains(to, page));
@@ -278,6 +278,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
                      (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));
 
   mi_heap_t* heap = mi_page_heap(page);
+
+  // delete from `from`
   if (page->prev != NULL) page->prev->next = page->next;
   if (page->next != NULL) page->next->prev = page->prev;
   if (page == from->last) from->last = page->prev;
@@ -288,6 +290,9 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
     mi_heap_queue_first_update(heap, from);
   }
 
+  // insert into `to`
+  if (enqueue_at_end) {
+    // enqueue at the end
     page->prev = to->last;
     page->next = NULL;
     if (to->last != NULL) {
@@ -300,10 +305,43 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
       to->last = page;
       mi_heap_queue_first_update(heap, to);
     }
+  }
+  else {
+    if (to->first != NULL) {
+      // enqueue at 2nd place
+      mi_assert_internal(heap == mi_page_heap(to->first));
+      mi_page_t* next = to->first->next;
+      page->prev = to->first;
+      page->next = next;
+      to->first->next = page;
+      if (next != NULL) {
+        next->prev = page;
+      }
+      else {
+        to->last = page;
+      }
+    }
+    else {
+      // enqueue at the head (singleton list)
+      page->prev = NULL;
+      page->next = NULL;
+      to->first = page;
+      to->last = page;
+      mi_heap_queue_first_update(heap, to);
+    }
+  }
 
   mi_page_set_in_full(page, mi_page_queue_is_full(to));
 }
 
+static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+  mi_page_queue_enqueue_from_ex(to, from, true, page);
+}
+
+static void mi_page_queue_enqueue_from_at_start(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+  mi_page_queue_enqueue_from_ex(to, from, false, page);
+}
+
 // Only called from `mi_heap_absorb`.
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
   mi_assert_internal(mi_heap_contains_queue(heap,pq));
```
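The `enqueue_at_end` flag is the core of this change: `mi_page_queue_enqueue_from` keeps the old append-at-the-tail behaviour, while the new `mi_page_queue_enqueue_from_at_start` inserts the page just behind the current head (it only becomes the head when the queue is empty), so `to->first` stays unchanged and the 2nd-place branch never needs to call `mi_heap_queue_first_update`. The sketch below is a minimal, self-contained model of those two insertion modes on a toy doubly-linked queue; `node_t`, `queue_t`, and `enqueue_ex` are hypothetical stand-ins, not mimalloc's `mi_page_t`/`mi_page_queue_t`.

```c
// Toy model of the two insertion modes of mi_page_queue_enqueue_from_ex.
// node_t/queue_t/enqueue_ex are hypothetical illustration types, not mimalloc's.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node_s {
  int id;
  struct node_s* prev;
  struct node_s* next;
} node_t;

typedef struct queue_s {
  node_t* first;
  node_t* last;
} queue_t;

static void enqueue_ex(queue_t* q, node_t* n, bool at_end) {
  if (at_end) {                        // append at the tail (old behaviour)
    n->prev = q->last; n->next = NULL;
    if (q->last != NULL) q->last->next = n; else q->first = n;
    q->last = n;
  }
  else if (q->first != NULL) {         // insert at 2nd place: the head stays the same
    node_t* next = q->first->next;
    n->prev = q->first; n->next = next;
    q->first->next = n;
    if (next != NULL) next->prev = n; else q->last = n;
  }
  else {                               // empty queue: becomes the singleton head
    n->prev = NULL; n->next = NULL;
    q->first = n; q->last = n;
  }
}

int main(void) {
  node_t a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
  queue_t q = { NULL, NULL };
  enqueue_ex(&q, &a, true);   // queue: [1]
  enqueue_ex(&q, &b, true);   // queue: [1, 2]
  enqueue_ex(&q, &c, false);  // queue: [1, 3, 2]  (inserted behind the head)
  for (node_t* n = q.first; n != NULL; n = n->next) printf("%d ", n->id);
  printf("\n");
  return 0;
}
```

Running it prints `1 3 2`: the node enqueued with `at_end == false` lands in second place while the head is left alone, which is what `_mi_page_unfull` in src/page.c below now uses when it moves a page out of the full queue.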
src/page.c (31 changed lines)
```diff
@@ -358,7 +358,7 @@ void _mi_page_unfull(mi_page_t* page) {
   mi_page_set_in_full(page, false); // to get the right queue
   mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
   mi_page_set_in_full(page, true);
-  mi_page_queue_enqueue_from(pq, pqfull, page);
+  mi_page_queue_enqueue_from_at_start(pq, pqfull, page); // insert at the start to increase the chance of reusing full pages (?)
 }
 
 static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
```
```diff
@@ -718,14 +718,17 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   Find pages with free blocks
 -------------------------------------------------------------*/
 
+// search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
 #define MI_MAX_CANDIDATE_SEARCH (16)
 
-static inline bool mi_page_is_expandable(const mi_page_t* page) {
+// is the page not yet used up to its reserved space?
+static bool mi_page_is_expandable(const mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_internal(page->capacity <= page->reserved);
   return (page->capacity < page->reserved);
 }
 
+
 // Find a page with free blocks of `page->block_size`.
 static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
 {
```
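`mi_page_is_expandable` only compares the page's `capacity` (roughly, the number of blocks made available so far) against its `reserved` count (the total the page was sized for): while `capacity < reserved`, more blocks can still be carved out of the page without visiting another one. A standalone sketch of that check, using a hypothetical `toy_page_t` rather than mimalloc's `mi_page_t`:

```c
// Minimal sketch of the capacity/reserved relation behind mi_page_is_expandable.
// toy_page_t is a hypothetical stand-in for mimalloc's mi_page_t.
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct toy_page_s {
  uint16_t capacity;   // blocks made available (extended) so far
  uint16_t reserved;   // total blocks this page was sized for
} toy_page_t;

static bool toy_page_is_expandable(const toy_page_t* page) {
  assert(page->capacity <= page->reserved);
  return (page->capacity < page->reserved);
}

int main(void) {
  toy_page_t fresh = { .capacity = 16,  .reserved = 128 };  // can still be extended
  toy_page_t spent = { .capacity = 128, .reserved = 128 };  // fully extended
  printf("fresh: %d, spent: %d\n",
         toy_page_is_expandable(&fresh), toy_page_is_expandable(&spent));  // 1, 0
  return 0;
}
```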
```diff
@@ -743,14 +746,17 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     count++;
     #endif
     candidate_count++;
+
+    // collect freed blocks by us and other threads
+    _mi_page_free_collect(page, false);
+
     #if defined(MI_MAX_CANDIDATE_SEARCH)
-    // 0. collect freed blocks by us and other threads
-    _mi_page_free_collect(page, false); // todo: should we free empty pages?
+    // search up to N pages for a best candidate
 
     // is the local free list non-empty?
     const bool immediate_available = mi_page_immediate_available(page);
 
-    // 1. If the page is completely full, move it to the `mi_pages_full`
+    // if the page is completely full, move it to the `mi_pages_full`
     // queue so we don't visit long-lived pages too often.
     if (!immediate_available && !mi_page_is_expandable(page)) {
       mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
```
```diff
@@ -766,25 +772,20 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
       else if (!mi_page_is_expandable(page) && page->used > page_candidate->used) {
         page_candidate = page;
       }
+      // if we find a non-expandable candidate, or searched for N pages, return with the best candidate
       if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
         mi_assert_internal(page_candidate!=NULL);
         break;
       }
     }
     #else
-    // 1. if the page contains free blocks, we are done
-    if (mi_page_immediate_available(page)) {
+    // first-fit algorithm
+    // If the page contains free blocks, we are done
+    if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
       break; // pick this one
     }
 
-    // 2. Try to extend
-    if (page->capacity < page->reserved) {
-      mi_page_extend_free(heap, page, heap->tld);
-      mi_assert_internal(mi_page_immediate_available(page));
-      break;
-    }
-
-    // 3. If the page is completely full, move it to the `mi_pages_full`
+    // If the page is completely full, move it to the `mi_pages_full`
     // queue so we don't visit long-lived pages too often.
     mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
     mi_page_to_full(page, pq);
```
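Taken together, the `MI_MAX_CANDIDATE_SEARCH` path above no longer settles for the first usable page: it remembers a best candidate, preferring fuller non-expandable pages, and stops early either when a page has blocks immediately available or once N candidates have been scanned. The condensed sketch below mirrors that selection policy on a hypothetical `toy_page_t` list; it deliberately leaves out the rest of the real loop (collecting freed blocks, moving full pages to `mi_pages_full`, extending or allocating pages).

```c
// Condensed sketch of the bounded best-candidate search; toy_page_t and
// find_best_candidate are hypothetical illustrations, not mimalloc's actual API.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_CANDIDATE_SEARCH 16

typedef struct toy_page_s {
  struct toy_page_s* next;
  size_t used;                 // blocks currently in use
  bool   immediate_available;  // is the local free list non-empty?
  bool   expandable;           // capacity < reserved?
} toy_page_t;

static toy_page_t* find_best_candidate(toy_page_t* first) {
  toy_page_t* candidate = NULL;
  size_t candidate_count = 0;
  for (toy_page_t* page = first; page != NULL; page = page->next) {
    candidate_count++;
    // a page whose free list already has blocks cuts the search short
    if (page->immediate_available) return page;
    // otherwise remember the fullest non-expandable page seen so far
    if (candidate == NULL || (!page->expandable && page->used > candidate->used)) {
      candidate = page;
    }
    // give up after scanning N candidates and settle for the best one so far
    if (candidate_count > TOY_MAX_CANDIDATE_SEARCH) break;
  }
  return candidate;  // NULL only if the queue was empty
}

int main(void) {
  // three pages, none with immediately available blocks: the fullest
  // non-expandable page (used = 90) is chosen as the candidate
  toy_page_t p3 = { NULL, 10, false, true  };
  toy_page_t p2 = { &p3,  90, false, false };
  toy_page_t p1 = { &p2,  40, false, false };
  printf("candidate used = %zu\n", find_best_candidate(&p1)->used);  // 90
  return 0;
}
```

Concentrating new allocations in fuller pages appears intended to let lightly used pages drain completely so they can be freed or reused, at the cost of scanning up to `MI_MAX_CANDIDATE_SEARCH` pages on the allocation slow path.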