increase retire limit, collect retired pages

daan 2020-01-03 17:06:41 -08:00
parent f9ca88f71c
commit eeb623e6af
3 changed files with 58 additions and 27 deletions

include/mimalloc-types.h

@@ -187,7 +187,8 @@ typedef struct mi_page_s {
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
uint16_t reserved; // number of blocks reserved in memory
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
- bool is_zero; // `true` if the blocks in the free list are zero initialized
+ uint8_t is_zero:1; // `true` if the blocks in the free list are zero initialized
+ uint8_t retire_expire:7; // expiration count for retired blocks
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
#ifdef MI_ENCODE_FREELIST
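
The bool flag is narrowed to a 1-bit field so the new 7-bit retire_expire counter fits in the same byte and mi_page_s does not grow. A minimal standalone sketch of the packing (page_bits_t is an illustrative stand-in, not the real struct); note that a 7-bit counter tops out at 127:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the packed byte in mi_page_s. */
typedef struct page_bits_s {
  uint8_t is_zero       : 1;  /* free-list blocks are zero initialized */
  uint8_t retire_expire : 7;  /* countdown before a retired page is freed (max 127) */
} page_bits_t;

int main(void) {
  /* On mainstream ABIs the two fields share a single byte. */
  static_assert(sizeof(page_bits_t) == 1, "flag and counter packed into one byte");
  page_bits_t p = { .is_zero = 1, .retire_expire = 2 };
  p.retire_expire--;  /* one collection pass */
  printf("is_zero=%u retire_expire=%u\n", (unsigned)p.is_zero, (unsigned)p.retire_expire);
  return 0;
}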

src/init.c

@@ -12,8 +12,12 @@ terms of the MIT license. A copy of the license can be found in the file
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
- 0, false, false, false, false, 0, 0,
- { 0 }, false,
+ 0, false, false, false, false,
+ 0, // capacity
+ 0, // reserved capacity
+ { 0 }, // flags
+ false, // is_zero
+ 0, // retire_expire
NULL, // free
#if MI_ENCODE_FREELIST
{ 0, 0 },
@@ -83,11 +87,11 @@ const mi_heap_t _mi_heap_empty = {
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY,
ATOMIC_VAR_INIT(NULL),
0, // tid
0, // cookie
{ 0, 0 }, // keys
{ {0}, {0}, 0 },
0, // page count
false
};
@@ -106,7 +110,7 @@ static mi_tld_t tld_main = {
{ MI_STATS_NULL } // stats
};
#if MI_INTPTR_SIZE==8
#define MI_INIT_COOKIE (0xCDCDCDCDCDCDCDCDUL)
#else
#define MI_INIT_COOKIE (0xCDCDCDCDUL)
@@ -121,8 +125,8 @@ mi_heap_t _mi_heap_main = {
MI_INIT_COOKIE, // initial cookie
{ MI_INIT_COOKIE, MI_INIT_COOKIE }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
{ {0}, {0}, 0 }, // random
0, // page count
false // can reclaim
};
bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
@@ -136,7 +140,7 @@ mi_stats_t _mi_stats_main = { MI_STATS_NULL };
typedef struct mi_thread_data_s {
mi_heap_t heap; // must come first due to cast in `_mi_heap_done`
mi_tld_t tld;
} mi_thread_data_t;
// Initialize the thread local default heap, called from `mi_thread_init`
@@ -158,7 +162,7 @@ static bool _mi_heap_init(void) {
mi_heap_t* heap = &td->heap;
memcpy(heap, &_mi_heap_empty, sizeof(*heap));
heap->thread_id = _mi_thread_id();
_mi_random_init(&heap->random);
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->key[0] = _mi_heap_random_next(heap);
heap->key[1] = _mi_heap_random_next(heap);
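
Note the `| 1` when seeding the cookie: whatever _mi_random_init produces, the cookie ends up odd and therefore never zero. A tiny standalone illustration of that pattern (sample values invented):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Whatever the RNG yields (even 0), OR-ing with 1 makes the cookie odd and nonzero. */
  uint64_t samples[] = { 0x0ULL, 0x2aULL, 0xdeadbeef00000000ULL };
  for (int i = 0; i < 3; i++) {
    uint64_t cookie = samples[i] | 1;
    printf("0x%llx -> 0x%llx\n", (unsigned long long)samples[i], (unsigned long long)cookie);
  }
  return 0;
}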
@@ -402,7 +406,7 @@ void mi_process_init(void) mi_attr_noexcept {
_mi_heap_main.thread_id = _mi_thread_id();
_mi_verbose_message("process init: 0x%zx\n", _mi_heap_main.thread_id);
_mi_random_init(&_mi_heap_main.random);
#ifndef __APPLE__ // TODO: fix this? cannot update cookie if allocation already happened..
_mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main);
_mi_heap_main.key[0] = _mi_heap_random_next(&_mi_heap_main);

src/page.c

@@ -229,7 +229,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_expensive(mi_page_is_valid_init(page));
mi_assert_internal(page->heap == NULL);
mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
mi_assert_internal(!page->is_reset);
_mi_page_free_collect(page,false);
mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
mi_page_queue_push(heap, pq, page);
@@ -342,7 +342,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(pq == mi_page_queue_of(page));
mi_assert_internal(page->heap != NULL);
#if MI_DEBUG > 1
mi_heap_t* pheap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap));
#endif
@@ -392,7 +392,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_stat_decrease(&page->heap->tld->stats.huge, page->block_size);
}
}
// remove from the page list
// (no need to do _mi_heap_delayed_free first as all blocks are already free)
mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
@@ -420,20 +420,40 @@ void _mi_page_retire(mi_page_t* page) {
// (or we end up retiring and re-allocating most of the time)
// NOTE: refine this more: we should not retire if this
// is the only page left with free blocks. It is not clear
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
- if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
- // if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
- if (pq->last==page && pq->first==page) {
+ if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) {
+ if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
- return; // don't retire after all
+ page->retire_expire = 2;
+ mi_assert_internal(mi_page_all_free(page));
+ return; // don't free after all
}
}
_mi_page_free(page, pq, false);
}
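
The behavioral change: a fully free page that is the only page of a small size class is no longer kept unconditionally; it is stamped with retire_expire = 2 and freed later only if it stays unused, avoiding the retire/re-allocate churn described in the comment above. A simplified sketch of the decision, with stand-in types rather than the real mimalloc structures:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins, not mimalloc's real structures. */
typedef struct page_s {
  size_t         block_size;
  unsigned       retire_expire;  /* grace countdown; 0 = not retired */
  struct page_s* prev;
  struct page_s* next;
} page_t;

typedef struct page_queue_s { page_t* first; page_t* last; } page_queue_t;

#define SMALL_SIZE_MAX 1024  /* assumed stand-in for MI_SMALL_SIZE_MAX */

/* Returns true when the free page is kept with a grace period, false when it
   should be freed immediately (mirrors the new _mi_page_retire logic). */
bool page_retire_deferred(page_queue_t* pq, page_t* page) {
  if (page->block_size <= SMALL_SIZE_MAX &&
      pq->first == page && pq->last == page) {  /* the only page in its queue? */
    page->retire_expire = 2;  /* defer: two collection rounds to get reused */
    return true;
  }
  return false;  /* not the only small page: free it right away */
}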
// free retired pages: we don't need to look at the entire queues
// since we only retire pages that are the last one in a queue.
static void mi_page_retired_collect(mi_heap_t* heap) {
for(mi_page_queue_t* pq = heap->pages; pq->block_size <= MI_SMALL_SIZE_MAX; pq++) {
mi_page_t* page = pq->first;
if (page != NULL && page->retire_expire != 0) {
if (mi_page_all_free(page)) {
page->retire_expire--;
if (page->retire_expire == 0) {
_mi_page_free(pq->first, pq, false);
}
}
else {
page->retire_expire = 0;
}
}
}
}
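
Since retire_expire starts at 2 and is decremented once per collection pass (and only while the page remains fully free), a retired page survives at least one pass before it is actually freed; any allocation from it first resets the counter, as the fast path further below shows. A self-contained sketch of that countdown (types and names are illustrative):

#include <stdbool.h>
#include <stddef.h>

typedef struct page_s {
  bool     all_free;       /* would be mi_page_all_free(page) in mimalloc */
  unsigned retire_expire;  /* 0 = not retired */
} page_t;

/* One pass over a retired queue head, mirroring mi_page_retired_collect:
   count down while the page stays fully free; cancel if it was used again. */
bool retired_collect_one(page_t* page) {
  if (page == NULL || page->retire_expire == 0) return false;
  if (page->all_free) {
    page->retire_expire--;
    return page->retire_expire == 0;  /* true: free the page now */
  }
  page->retire_expire = 0;            /* reused in the meantime: keep it */
  return false;
}

int main(void) {
  page_t p = { .all_free = true, .retire_expire = 2 };
  /* Two idle collection passes: the page survives the first, is freed on the second. */
  retired_collect_one(&p);               /* retire_expire 2 -> 1 */
  bool freed = retired_collect_one(&p);  /* 1 -> 0: free the page */
  return freed ? 0 : 1;
}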
/* -----------------------------------------------------------
Initialize the initial free list in a page.
@@ -499,7 +519,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co
}
// prepend to the free list (usually NULL)
mi_block_set_next(page, blocks[current], page->free); // end of the list
page->free = free_start;
}
static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t extend, mi_stats_t* const stats)
@@ -513,15 +533,15 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
const size_t bsize = page->block_size;
mi_block_t* const start = mi_page_block_at(page, page_area, page->capacity);
// initialize a sequential free list
mi_block_t* const last = mi_page_block_at(page, page_area, page->capacity + extend - 1);
mi_block_t* block = start;
while(block <= last) {
mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
mi_block_set_next(page,block,next);
block = next;
}
// prepend to free list (usually `NULL`)
mi_block_set_next(page, last, page->free);
page->free = start;
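
The loop above threads the free list through the newly extended range: each block points bsize bytes ahead, and the last block is linked to the previous head. A plain standalone sketch of the same idea (mi_block_set_next may additionally XOR-encode the pointer under MI_ENCODE_FREELIST; this sketch stores it raw, and all names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct block_s { struct block_s* next; } block_t;

/* Carve `count` blocks of `bsize` bytes out of `area`, link them in address
   order, and prepend the run to an existing free-list head. */
block_t* freelist_extend(void* area, size_t bsize, size_t count, block_t* old_head) {
  block_t* const start = (block_t*)area;
  block_t* const last  = (block_t*)((uint8_t*)area + (count - 1) * bsize);
  for (block_t* b = start; b <= last; b = b->next) {
    b->next = (block_t*)((uint8_t*)b + bsize);  /* next block is bsize bytes further */
  }
  last->next = old_head;  /* end of the run points at the previous head (usually NULL) */
  return start;           /* new head of the free list */
}

int main(void) {
  void* area = malloc(8 * 64);
  block_t* free_list = freelist_extend(area, 64, 8, NULL);
  size_t n = 0;
  for (block_t* b = free_list; b != NULL; b = b->next) n++;
  printf("%zu blocks on the free list\n", n);  /* prints 8 */
  free(area);
  return 0;
}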
@@ -619,6 +639,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page->thread_freed == 0);
mi_assert_internal(page->next == NULL);
mi_assert_internal(page->prev == NULL);
mi_assert_internal(page->retire_expire == 0);
mi_assert_internal(!mi_page_has_aligned(page));
#if (MI_ENCODE_FREELIST)
mi_assert_internal(page->key != 0);
@@ -699,8 +720,12 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
}
else {
mi_assert(pq->first == page);
page->retire_expire = 0;
}
mi_assert_internal(page == NULL || mi_page_immediate_available(page));
// finally collect retired pages
mi_page_retired_collect(heap);
return page;
}
@@ -719,6 +744,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
_mi_page_free_collect(page,false);
}
if (mi_page_immediate_available(page)) {
page->retire_expire = 0;
return page; // fast path
}
}
@@ -759,7 +785,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
// that frees the block can free the whole page and segment directly.
static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
size_t block_size = _mi_os_good_alloc_size(size);
mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
if (page != NULL) {
mi_assert_internal(mi_page_immediate_available(page));
@@ -777,7 +803,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
_mi_stat_increase(&heap->tld->stats.huge, block_size);
_mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
}
}
return page;
}
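
For context, this path serves allocations above the huge threshold with a dedicated page whose segment can be freed directly by whichever thread frees the block, per the comment above. A hedged usage sketch against mimalloc's public API (the 8 MiB request size is only assumed to exceed that threshold):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  /* A very large request is served by mi_huge_page_alloc internally:
     one dedicated page, counted in the heap's huge statistics. */
  void* p = mi_malloc(8 * 1024 * 1024);
  if (p != NULL) {
    printf("huge block at %p, usable size %zu\n", p, mi_usable_size(p));
    mi_free(p);  /* any thread may free it; page and segment are released directly */
  }
  return 0;
}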