merge from dev

daan 2020-03-16 16:07:52 -07:00
commit 2d52b967bc
9 changed files with 72 additions and 14 deletions


@@ -67,6 +67,11 @@ MI_ALLOC_API1(inline mi_decl_restrict void*, malloc_small, mi_heap_t*, heap, size_t, size)
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
   mi_assert(size <= MI_SMALL_SIZE_MAX);
+  #if (MI_PADDING)
+  if (size == 0) {
+    size = sizeof(void*);
+  }
+  #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
   void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE  MI_SOURCE_XARG);
   mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
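
Note on the hunk above: when MI_PADDING is enabled, a zero-byte request is first bumped to sizeof(void*) so that size + MI_PADDING_SIZE still maps to a valid small-page bin and the padding canary has a properly sized block in front of it. A minimal sketch of the adjustment, with a stand-in macro for MI_PADDING_SIZE (toy code, not mimalloc's definitions):

    #include <stddef.h>
    #include <stdint.h>

    #define PAD_SIZE sizeof(uint64_t)   /* stand-in for MI_PADDING_SIZE */

    /* mirrors the hunk: bump a 0-byte request before the bin lookup */
    static size_t padded_size(size_t size) {
      if (size == 0) size = sizeof(void*);
      return size + PAD_SIZE;
    }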


@@ -97,6 +97,7 @@ const mi_heap_t _mi_heap_empty = {
   { 0, 0 },                 // keys
   { {0}, {0}, 0 },
   0,                        // page count
+  MI_BIN_FULL, 0,           // page retired min/max
   NULL,                     // next
   false
 };
@@ -131,6 +132,7 @@ mi_heap_t _mi_heap_main = {
   { 0, 0 },                 // the key of the main heap can be fixed (unlike page keys that need to be secure!)
   { {0x846ca68b}, {0}, 0 }, // random
   0,                        // page count
+  MI_BIN_FULL, 0,           // page retired min/max
   NULL,                     // next heap
   false                     // can reclaim
 };
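
The two initializer hunks above correspond to two fields added to mi_heap_t (the full struct lives in mimalloc-types.h; this is a hedged excerpt, not the complete definition). Initializing min to MI_BIN_FULL and max to 0 encodes an empty range, since min > max means no bin currently holds a retired page:

    typedef struct mi_heap_s {
      /* ... existing fields ... */
      size_t page_count;        // page count
      size_t page_retired_min;  // smallest bin with a retired page
      size_t page_retired_max;  // largest bin with a retired page
      struct mi_heap_s* next;   // next heap
      bool   no_reclaim;        // can reclaim
    } mi_heap_t;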
@@ -241,7 +243,9 @@ static bool _mi_heap_done(mi_heap_t* heap) {
     mi_assert_internal(heap->tld->segments.count == 0);
     _mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main);
   }
-  #if (MI_DEBUG > 0)
+  #if 0
+  // never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
+  // there may still be delete/free calls after mi_fls_done is called. Issue #207
   else {
     _mi_heap_destroy_pages(heap);
     mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
@@ -483,6 +487,10 @@ static void mi_process_done(void) {
   if (process_done) return;
   process_done = true;

+  #if defined(_WIN32) && !defined(MI_SHARED_LIB)
+  FlsSetValue(mi_fls_key, NULL);  // don't call main-thread callback
+  FlsFree(mi_fls_key);            // call thread-done on all threads to prevent dangling callback pointer if statically linked with a DLL; Issue #208
+  #endif
   #ifndef NDEBUG
   mi_collect(true);
   #endif
@@ -490,7 +498,7 @@ static void mi_process_done(void) {
       mi_option_is_enabled(mi_option_verbose)) {
     mi_stats_print(NULL);
   }
-  mi_allocator_done();
+  mi_allocator_done();
   _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
   os_preloading = true; // don't call the C runtime anymore
 }
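
For context on the FLS hunk: on Windows, FlsAlloc registers a callback that the OS runs for every thread or fiber still holding a non-NULL value in the slot, and FlsFree fires those callbacks before releasing the key. Clearing the main thread's slot first therefore skips its own callback, while FlsFree drains the rest so no dangling callback pointer survives a statically linked unload. A hedged sketch of the pattern (mi_fls_key is the key mimalloc allocates at startup; the helper names here are illustrative):

    #include <windows.h>

    static DWORD fls_key;

    static VOID WINAPI thread_done(PVOID value) {
      (void)value;  /* runs per thread that stored a non-NULL slot value */
    }

    void fls_init(void) { fls_key = FlsAlloc(&thread_done); }

    void fls_shutdown(void) {
      FlsSetValue(fls_key, NULL);  // clear our own slot: no main-thread callback
      FlsFree(fls_key);            // fires thread_done for remaining threads,
                                   // then invalidates the key
    }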


@@ -209,7 +209,12 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags)
   // on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
   void* hint;
   if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
-    return VirtualAlloc(hint, size, flags, PAGE_READWRITE);
+    void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
+    if (p != NULL) return p;
+    DWORD err = GetLastError();
+    if (err != ERROR_INVALID_ADDRESS) {  // if linked with multiple instances, we may have tried to allocate at an already allocated area
+      return NULL;
+    }
   }
   #endif
   #if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
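
The hunk above changes the hint path from fail-hard to fall-through: if another (statically linked) allocator instance already mapped the hinted address, VirtualAlloc fails with ERROR_INVALID_ADDRESS and the function continues to the unhinted paths below. A toy sketch of the same retry pattern (not mimalloc's code):

    #include <stddef.h>
    #include <windows.h>

    static void* alloc_with_hint(void* hint, size_t size, DWORD flags) {
      if (hint != NULL) {
        void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
        if (p != NULL) return p;
        if (GetLastError() != ERROR_INVALID_ADDRESS) return NULL; // real failure
        /* else: the hint was taken by someone else; retry without it */
      }
      return VirtualAlloc(NULL, size, flags, PAGE_READWRITE);
    }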


@@ -380,7 +380,8 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   _mi_segment_page_free(page, force, segments_tld);
 }

-#define MI_MAX_RETIRE_SIZE (4*MI_SMALL_SIZE_MAX)
+#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
+#define MI_RETIRE_CYCLES   (16)

 // Retire a page with no more used blocks
 // Important to not retire too quickly though as new
@@ -405,7 +406,13 @@ void _mi_page_retire(mi_page_t* page) {
   if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-      page->retire_expire = 16;
+      page->retire_expire = MI_RETIRE_CYCLES;
+      mi_heap_t* heap = mi_page_heap(page);
+      mi_assert_internal(pq >= heap->pages);
+      const size_t index = pq - heap->pages;
+      mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
+      if (index < heap->page_retired_min) heap->page_retired_min = index;
+      if (index > heap->page_retired_max) heap->page_retired_max = index;
       mi_assert_internal(mi_page_all_free(page));
       return; // don't free after all
     }
@@ -415,22 +422,32 @@ void _mi_page_retire(mi_page_t* page) {
 }

 // free retired pages: we don't need to look at the entire queues
-// since we only retire pages that are the last one in a queue.
+// since we only retire pages that are at the head position in a queue.
 void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
-  for(mi_page_queue_t* pq = heap->pages; pq->block_size <= MI_MAX_RETIRE_SIZE; pq++) {
-    mi_page_t* page = pq->first;
+  size_t min = MI_BIN_FULL;
+  size_t max = 0;
+  for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
+    mi_page_queue_t* pq = &heap->pages[bin];
+    mi_page_t* page = pq->first;
     if (page != NULL && page->retire_expire != 0) {
       if (mi_page_all_free(page)) {
         page->retire_expire--;
         if (force || page->retire_expire == 0) {
           _mi_page_free(pq->first, pq, force);
         }
+        else {
+          // keep retired, update min/max
+          if (bin < min) min = bin;
+          if (bin > max) max = bin;
+        }
       }
       else {
         page->retire_expire = 0;
       }
     }
   }
+  heap->page_retired_min = min;
+  heap->page_retired_max = max;
 }
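
The effect of the two retire hunks, in miniature: _mi_page_retire records the bin index of every page it leaves retired, and _mi_heap_collect_retired then scans only the [page_retired_min, page_retired_max] range instead of every queue up to MI_MAX_RETIRE_SIZE, rebuilding the range from whatever is still pending. A standalone toy version under assumed names (one candidate page per bin, not the real data structures):

    #include <stddef.h>
    #include <stdbool.h>

    #define NBINS 74  /* stand-in for MI_BIN_FULL */

    typedef struct { int retire_expire; bool all_free; } toy_page_t;

    static toy_page_t bins[NBINS];
    static size_t retired_min = NBINS;  /* min > max encodes "nothing retired" */
    static size_t retired_max = 0;

    static void toy_retire(size_t bin) {  /* mirrors _mi_page_retire */
      bins[bin].retire_expire = 16;       /* MI_RETIRE_CYCLES */
      if (bin < retired_min) retired_min = bin;
      if (bin > retired_max) retired_max = bin;
    }

    static void toy_collect_retired(bool force) {  /* mirrors _mi_heap_collect_retired */
      size_t min = NBINS, max = 0;
      for (size_t bin = retired_min; bin <= retired_max && bin < NBINS; bin++) {
        toy_page_t* pg = &bins[bin];
        if (pg->retire_expire != 0 && pg->all_free) {
          pg->retire_expire--;
          if (force || pg->retire_expire == 0) { /* free the page here */ }
          else { if (bin < min) min = bin; if (bin > max) max = bin; }
        }
      }
      retired_min = min;  /* shrink (or empty) the scan range */
      retired_max = max;
    }

Since a page is only retired when it is the sole page in its queue, each bin has at most one retired candidate, and on a quiet heap the collect loop becomes a no-op as soon as min > max.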