From 6d11e59250fd13992dc6901b4c6fddaf5f17185e Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 14 Aug 2019 07:46:38 -0700 Subject: [PATCH 1/8] fix to avoid potential linear behavior in page collect --- include/mimalloc-internal.h | 2 +- src/heap.c | 4 +-- src/page.c | 50 ++++++++++++++++++++----------------- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 151cd001..f6f2e2ae 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -77,7 +77,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay); size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); void _mi_deferred_free(mi_heap_t* heap, bool force); -void _mi_page_free_collect(mi_page_t* page); +void _mi_page_free_collect(mi_page_t* page,bool force); void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments size_t _mi_bin_size(uint8_t bin); // for stats diff --git a/src/heap.c b/src/heap.c index c18902b1..768cab96 100644 --- a/src/heap.c +++ b/src/heap.c @@ -85,7 +85,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t UNUSED(arg2); UNUSED(heap); mi_collect_t collect = *((mi_collect_t*)arg_collect); - _mi_page_free_collect(page); + _mi_page_free_collect(page, collect >= ABANDON); if (mi_page_all_free(page)) { // no more used blocks, free the page. TODO: should we retire here and be less aggressive? 
_mi_page_free(page, pq, collect != NORMAL); @@ -428,7 +428,7 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v mi_assert(page != NULL); if (page == NULL) return true; - _mi_page_free_collect(page); + _mi_page_free_collect(page,true); mi_assert_internal(page->local_free == NULL); if (page->used == 0) return true; diff --git a/src/page.c b/src/page.c index 7ac7535e..9d645b6c 100644 --- a/src/page.c +++ b/src/page.c @@ -137,7 +137,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay ) { // Note: The exchange must be done atomically as this is used right after // moving to the full list in `mi_page_collect_ex` and we need to // ensure that there was no race where the page became unfull just before the move. -static void mi_page_thread_free_collect(mi_page_t* page) +static void _mi_page_thread_free_collect(mi_page_t* page) { mi_block_t* head; mi_thread_free_t tfree; @@ -152,47 +152,51 @@ static void mi_page_thread_free_collect(mi_page_t* page) if (head == NULL) return; // find the tail - uint16_t count = 1; + uintptr_t count = 1; mi_block_t* tail = head; mi_block_t* next; while ((next = mi_block_next(page,tail)) != NULL) { count++; tail = next; } - - // and prepend to the free list - mi_block_set_next(page,tail, page->free); - page->free = head; + // and append the current local free list + mi_block_set_next(page,tail, page->local_free); + page->local_free = head; // update counts now mi_atomic_subtract(&page->thread_freed, count); page->used -= count; } -void _mi_page_free_collect(mi_page_t* page) { +void _mi_page_free_collect(mi_page_t* page, bool force) { mi_assert_internal(page!=NULL); - //if (page->free != NULL) return; // avoid expensive append - // free the local free list + // collect the thread free list + if (force || mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation + _mi_page_thread_free_collect(page); + } + + // and the local free list if (page->local_free != NULL) { - 
if (mi_likely(page->free == NULL)) { + if (mi_unlikely(page->free == NULL)) { // usual case page->free = page->local_free; + page->local_free = NULL; } - else { - mi_block_t* tail = page->free; + else if (force) { + // append -- only on shutdown (force) as this is a linear operation + mi_block_t* tail = page->local_free; mi_block_t* next; while ((next = mi_block_next(page, tail)) != NULL) { tail = next; } - mi_block_set_next(page, tail, page->local_free); - } - page->local_free = NULL; - } - // and the thread free list - if (mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation - mi_page_thread_free_collect(page); + mi_block_set_next(page, tail, page->free); + page->free = page->local_free; + page->local_free = NULL; + } } + + mi_assert_internal(!force || page->local_free == NULL); } @@ -205,7 +209,7 @@ void _mi_page_free_collect(mi_page_t* page) { void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { mi_assert_expensive(mi_page_is_valid_init(page)); mi_assert_internal(page->heap == NULL); - _mi_page_free_collect(page); + _mi_page_free_collect(page,false); mi_page_queue_t* pq = mi_page_queue(heap, page->block_size); mi_page_queue_push(heap, pq, page); mi_assert_expensive(_mi_page_is_valid(page)); @@ -304,7 +308,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { if (mi_page_is_in_full(page)) return; mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page); - mi_page_thread_free_collect(page); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set + _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set } @@ -595,7 +599,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p count++; // 0. collect freed blocks by us and other threads - _mi_page_free_collect(page); + _mi_page_free_collect(page,false); // 1. 
if the page contains free blocks, we are done if (mi_page_immediate_available(page)) { @@ -662,7 +666,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { mi_assert_internal(mi_page_immediate_available(page)); } else { - _mi_page_free_collect(page); + _mi_page_free_collect(page,false); } if (mi_page_immediate_available(page)) { return page; // fast path From 42dedb00ec65f5497d1ff29b6120e113ac31fbe5 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 19 Aug 2019 11:10:06 -0700 Subject: [PATCH 2/8] initial implementation of mi_os_reserve_huge_pages --- include/mimalloc-atomic.h | 3 + include/mimalloc-internal.h | 2 + include/mimalloc.h | 4 +- src/options.c | 2 +- src/os.c | 174 +++++++++++++++++++++++++++++++----- src/stats.c | 14 +-- test/test-stress.c | 3 + 7 files changed, 169 insertions(+), 33 deletions(-) diff --git a/include/mimalloc-atomic.h b/include/mimalloc-atomic.h index d504634c..b20f47b6 100644 --- a/include/mimalloc-atomic.h +++ b/include/mimalloc-atomic.h @@ -68,6 +68,9 @@ static inline void* mi_atomic_exchange_ptr(volatile void** p, void* exchange) { return (void*)mi_atomic_exchange((volatile uintptr_t*)p, (uintptr_t)exchange); } +static inline intptr_t mi_atomic_iread(volatile intptr_t* p) { + return (intptr_t)mi_atomic_read( (volatile uintptr_t*)p ); +} #ifdef _MSC_VER #define WIN32_LEAN_AND_MEAN diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index f6f2e2ae..9fc6ed5d 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -91,6 +91,8 @@ uintptr_t _mi_heap_random(mi_heap_t* heap); // "stats.c" void _mi_stats_done(mi_stats_t* stats); +double _mi_clock_end(double start); +double _mi_clock_start(void); // "alloc.c" void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic` diff --git a/include/mimalloc.h b/include/mimalloc.h index c6b7b5f8..4e82548a 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -195,7 +195,7 
@@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_ mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; - +mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept; // ------------------------------------------------------ // Convenience @@ -227,7 +227,7 @@ typedef enum mi_option_e { mi_option_secure, mi_option_eager_commit, mi_option_eager_region_commit, - mi_option_large_os_pages, // implies eager commit + mi_option_large_os_pages, // implies eager commit mi_option_page_reset, mi_option_cache_reset, mi_option_reset_decommits, diff --git a/src/options.c b/src/options.c index cd7e5da1..339a7546 100644 --- a/src/options.c +++ b/src/options.c @@ -53,7 +53,7 @@ static mi_option_desc_t options[_mi_option_last] = // the following options are experimental and not all combinations make sense. { 1, UNINIT, "eager_commit" }, // note: if eager_region_commit is on, this should be on too. #ifdef _WIN32 // and BSD? - { 0, UNINIT, "eager_region_commit" }, // don't commit too eagerly on windows (just for looks...) + { 1, UNINIT, "eager_region_commit" }, #else { 1, UNINIT, "eager_region_commit" }, #endif diff --git a/src/os.c b/src/os.c index bee5ac64..83dd37d7 100644 --- a/src/os.c +++ b/src/os.c @@ -34,6 +34,9 @@ terms of the MIT license. 
A copy of the license can be found in the file ----------------------------------------------------------- */ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); +static bool mi_os_is_huge_reserved(void* p); +static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit); + static void* mi_align_up_ptr(void* p, size_t alignment) { return (void*)_mi_align_up((uintptr_t)p, alignment); } @@ -161,7 +164,7 @@ void _mi_os_init() { static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats) { - if (addr == NULL || size == 0) return true; + if (addr == NULL || size == 0 || mi_os_is_huge_reserved(addr)) return true; bool err = false; #if defined(_WIN32) err = (VirtualFree(addr, 0, MEM_RELEASE) == 0); @@ -237,12 +240,13 @@ static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) { return (void*)aligned_base; } #else -static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { +#define MI_OS_USE_MMAP +static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { void* p = NULL; #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED) // on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations static volatile intptr_t aligned_base = ((intptr_t)1 << 42); // starting at 4TiB - if (try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) { + if (addr==NULL && try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) { intptr_t hint = mi_atomic_add(&aligned_base,size) - size; if (hint%try_alignment == 0) { p = mmap((void*)hint,size,protect_flags,flags,fd,0); @@ -251,7 +255,8 @@ static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags, } #endif if (p==NULL) { - p = mmap(NULL,size,protect_flags,flags,fd,0); + p = mmap(addr,size,protect_flags,flags,fd,0); + if (p==MAP_FAILED) p = NULL; } return p; } @@ -305,19 +310,15 @@ static void* mi_unix_mmap(size_t size, size_t 
try_alignment, int protect_flags) #endif if (lflags != flags) { // try large OS page allocation - p = mi_unix_mmapx(size, try_alignment, protect_flags, lflags, lfd); - if (p == MAP_FAILED) { + p = mi_unix_mmapx(NULL, size, try_alignment, protect_flags, lflags, lfd); + if (p == NULL) { mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations - p = NULL; // and fall back to regular mmap } } } } if (p == NULL) { - p = mi_unix_mmapx(size, try_alignment, protect_flags, flags, fd); - if (p == MAP_FAILED) { - p = NULL; - } + p = mi_unix_mmapx(NULL,size, try_alignment, protect_flags, flags, fd); #if defined(MADV_HUGEPAGE) // Many Linux systems don't allow MAP_HUGETLB but they support instead // transparent huge pages (TPH). It is not required to call `madvise` with MADV_HUGE @@ -325,7 +326,7 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) // in that case -- in particular for our large regions (in `memory.c`). // However, some systems only allow TPH if called with explicit `madvise`, so // when large OS pages are enabled for mimalloc, we call `madvice` anyways. - else if (use_large_os_page(size, try_alignment)) { + if (use_large_os_page(size, try_alignment)) { madvise(p, size, MADV_HUGEPAGE); } #endif @@ -340,17 +341,19 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); if (size == 0) return NULL; - void* p = NULL; -#if defined(_WIN32) - int flags = MEM_RESERVE; - if (commit) flags |= MEM_COMMIT; - p = mi_win_virtual_alloc(NULL, size, try_alignment, flags); -#elif defined(__wasi__) - p = mi_wasm_heap_grow(size, try_alignment); -#else - int protect_flags = (commit ? 
(PROT_WRITE | PROT_READ) : PROT_NONE); - p = mi_unix_mmap(size, try_alignment, protect_flags); -#endif + void* p = mi_os_alloc_from_huge_reserved(size,try_alignment,commit); + if (p != NULL) return p; + + #if defined(_WIN32) + int flags = MEM_RESERVE; + if (commit) flags |= MEM_COMMIT; + p = mi_win_virtual_alloc(NULL, size, try_alignment, flags); + #elif defined(__wasi__) + p = mi_wasm_heap_grow(size, try_alignment); + #else + int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); + p = mi_unix_mmap(size, try_alignment, protect_flags); + #endif _mi_stat_increase(&stats->mmap_calls, 1); if (p != NULL) { _mi_stat_increase(&stats->reserved, size); @@ -664,3 +667,128 @@ bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) { return mi_os_mem_free(start, size, stats); #endif } + + +/* ---------------------------------------------------------------------------- + +-----------------------------------------------------------------------------*/ +#define MI_HUGE_OS_PAGE_SIZE ((size_t)1 << 30) // 1GiB + +typedef struct mi_huge_info_s { + uint8_t* start; + ptrdiff_t reserved; + volatile ptrdiff_t used; +} mi_huge_info_t; + +static mi_huge_info_t os_huge_reserved = { NULL, 0, 0 }; + +static bool mi_os_is_huge_reserved(void* p) { + return (os_huge_reserved.start != NULL && + (uint8_t*)p >= os_huge_reserved.start && + (uint8_t*)p < os_huge_reserved.start + os_huge_reserved.reserved); +} + +static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit) +{ + // only allow large aligned allocations + if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL; + if (try_alignment > MI_SEGMENT_SIZE) return NULL; + if (!commit) return NULL; + if (os_huge_reserved.start==NULL) return NULL; + if (mi_atomic_iread(&os_huge_reserved.used) >= os_huge_reserved.reserved) return NULL; // already full + + // always aligned + mi_assert_internal( os_huge_reserved.used % MI_SEGMENT_SIZE == 0 ); + mi_assert_internal( 
(uintptr_t)os_huge_reserved.start % MI_SEGMENT_SIZE == 0 );
+
+  // try to reserve space
+  ptrdiff_t next = mi_atomic_add( &os_huge_reserved.used, (ptrdiff_t)size );
+  if (next > os_huge_reserved.reserved) {
+    // "free" our over-allocation
+    mi_atomic_add( &os_huge_reserved.used, -((ptrdiff_t)size) );
+    return NULL;
+  }
+
+  // success!
+  uint8_t* p = os_huge_reserved.start + next - (ptrdiff_t)size;
+  mi_assert_internal( (uintptr_t)p % MI_SEGMENT_SIZE == 0 );
+  return p;
+}
+
+/*
+static void mi_os_free_huge_reserved() {
+  uint8_t* addr = os_huge_reserved.start;
+  size_t total = os_huge_reserved.reserved;
+  os_huge_reserved.reserved = 0;
+  os_huge_reserved.start = NULL;
+  for( size_t current = 0; current < total; current += MI_HUGE_OS_PAGE_SIZE) {
+    _mi_os_free(addr + current, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
+  }
+}
+*/
+
+#if !(MI_INTPTR_SIZE >= 8 && (defined(_WIN32) || defined(MI_OS_USE_MMAP)))
+int mi_reserve_huge_os_pages(size_t pages, double max_secs) {
+  return -2; // cannot allocate
+}
+#else
+int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept
+{
+  if (max_secs==0) return -1; // timeout
+  if (pages==0) return 0; // ok
+
+  // Allocate one page at the time but try to place them contiguously
+  // We allocate one page at the time to be able to abort if it takes too long
+  double start_t = _mi_clock_start();
+  uint8_t* start = (uint8_t*)((uintptr_t)1 << 43); // 8TiB virtual start address
+  uint8_t* addr = start;  // current top of the allocations
+  for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) {
+    void* p = NULL;
+    // OS specific calls to allocate huge OS pages
+    #ifdef _WIN32
+    p = mi_win_virtual_allocx(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE);
+    #elif defined(MI_OS_USE_MMAP) && defined(MAP_HUGETLB)
+    int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
+    #ifdef MAP_HUGE_1GB
+    flags |= MAP_HUGE_1GB;
+    #elif defined(MAP_HUGE_2MB)
+    flags |= MAP_HUGE_2MB;
+    #endif
+    p = 
mi_unix_mmapx(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_WRITE|PROT_READ, flags, -1); + #endif + // Did we succeed at a contiguous address? + if (p != addr) { + if (p != NULL) { + _mi_warning_message("could not allocate contiguous huge page at 0x%p\n", addr); + _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main ); + } + else { + #ifdef _WIN32 + int err = GetLastError(); + #else + int err = errno; + #endif + _mi_warning_message("could not allocate huge page at 0x%p, error: %i\n", addr, err); + } + return -2; + } + // success, record it + if (page==0) { + os_huge_reserved.start = addr; + } + os_huge_reserved.reserved += MI_HUGE_OS_PAGE_SIZE; + _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE ); + _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); + + // check for timeout + double elapsed = _mi_clock_end(start_t); + if (elapsed > max_secs) return (-1); // timeout + if (page >= 1) { + double estimate = ((elapsed / (double)(page+1)) * (double)pages); + if (estimate > 1.5*max_secs) return (-1); // seems like we are going to timeout + } + } + return 0; +} +#endif + diff --git a/src/stats.c b/src/stats.c index e7d398b2..ba06b10c 100644 --- a/src/stats.c +++ b/src/stats.c @@ -276,8 +276,8 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n _mi_fprintf(out,"\n"); } -static double mi_clock_end(double start); -static double mi_clock_start(void); +double _mi_clock_end(double start); +double _mi_clock_start(void); static double mi_time_start = 0.0; static mi_stats_t* mi_stats_get_default(void) { @@ -289,7 +289,7 @@ void mi_stats_reset(void) mi_attr_noexcept { mi_stats_t* stats = mi_stats_get_default(); if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); } memset(&_mi_stats_main, 0, sizeof(mi_stats_t)); - mi_time_start = mi_clock_start(); + mi_time_start = _mi_clock_start(); } static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) { @@ -301,11 +301,11 @@ static void 
mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) { } void mi_stats_print(FILE* out) mi_attr_noexcept { - mi_stats_print_ex(mi_stats_get_default(),mi_clock_end(mi_time_start),out); + mi_stats_print_ex(mi_stats_get_default(),_mi_clock_end(mi_time_start),out); } void mi_thread_stats_print(FILE* out) mi_attr_noexcept { - _mi_stats_print(mi_stats_get_default(), mi_clock_end(mi_time_start), out); + _mi_stats_print(mi_stats_get_default(), _mi_clock_end(mi_time_start), out); } @@ -350,7 +350,7 @@ static double mi_clock_now(void) { static double mi_clock_diff = 0.0; -static double mi_clock_start(void) { +double _mi_clock_start(void) { if (mi_clock_diff == 0.0) { double t0 = mi_clock_now(); mi_clock_diff = mi_clock_now() - t0; @@ -358,7 +358,7 @@ static double mi_clock_start(void) { return mi_clock_now(); } -static double mi_clock_end(double start) { +double _mi_clock_end(double start) { double end = mi_clock_now(); return (end - start - mi_clock_diff); } diff --git a/test/test-stress.c b/test/test-stress.c index 511679ac..2b799f33 100644 --- a/test/test-stress.c +++ b/test/test-stress.c @@ -154,6 +154,9 @@ int main(int argc, char** argv) { if (n > 0) N = n; } printf("start with %i threads with a %i%% load-per-thread\n", THREADS, N); + int res = mi_reserve_huge_os_pages(4,1); + printf("(reserve huge: %i\n)", res); + //bench_start_program(); memset((void*)transfer, 0, TRANSFERS*sizeof(void*)); run_os_threads(THREADS); From 741f37e1f0619085e8bbb65b4354caf03f70918a Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 19 Aug 2019 11:17:00 -0700 Subject: [PATCH 3/8] always check large_os_pages enabled option so it can be changed during the running of the program --- src/os.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/os.c b/src/os.c index 83dd37d7..93864e1f 100644 --- a/src/os.c +++ b/src/os.c @@ -70,7 +70,7 @@ size_t _mi_os_large_page_size() { static bool use_large_os_page(size_t size, size_t alignment) { // if we have access, check the size and 
alignment requirements - if (large_os_page_size == 0) return false; + if (large_os_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false; return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0); } From 0e639addb089a01424c589ba0c5de4ea46a1063b Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 19 Aug 2019 14:14:50 -0700 Subject: [PATCH 4/8] add environment option to reserve huge pages upfront --- include/mimalloc.h | 1 + src/init.c | 6 ++++++ src/options.c | 1 + src/os.c | 3 ++- 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 4e82548a..dea4b808 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -228,6 +228,7 @@ typedef enum mi_option_e { mi_option_eager_commit, mi_option_eager_region_commit, mi_option_large_os_pages, // implies eager commit + mi_option_reserve_huge_os_pages, mi_option_page_reset, mi_option_cache_reset, mi_option_reset_decommits, diff --git a/src/init.c b/src/init.c index 4fc5d602..4cb12806 100644 --- a/src/init.c +++ b/src/init.c @@ -422,6 +422,12 @@ static void mi_process_load(void) { const char* msg = NULL; mi_allocator_init(&msg); if (msg != NULL) _mi_verbose_message(msg); + + if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + size_t pages = mi_option_get(mi_option_reserve_huge_os_pages); + double max_secs = (double)pages / 10.0; // 0.1s per page + mi_reserve_huge_os_pages(pages, max_secs); + } } // Initialize the process; called by thread_init or the process loader diff --git a/src/options.c b/src/options.c index 339a7546..1890c865 100644 --- a/src/options.c +++ b/src/options.c @@ -58,6 +58,7 @@ static mi_option_desc_t options[_mi_option_last] = { 1, UNINIT, "eager_region_commit" }, #endif { 0, UNINIT, "large_os_pages" }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's + { 0, UNINIT, "reserve_huge_os_pages" }, { 0, UNINIT, "page_reset" }, { 0, UNINIT, "cache_reset" }, { 0, UNINIT, 
"reset_decommits" }, // note: cannot enable this if secure is on diff --git a/src/os.c b/src/os.c index 93864e1f..2f2e5398 100644 --- a/src/os.c +++ b/src/os.c @@ -108,7 +108,7 @@ void _mi_os_init(void) { } // Try to see if large OS pages are supported unsigned long err = 0; - bool ok = mi_option_is_enabled(mi_option_large_os_pages); + bool ok = mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages); if (ok) { // To use large pages on Windows, we first need access permission // Set "Lock pages in memory" permission in the group policy editor @@ -788,6 +788,7 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept if (estimate > 1.5*max_secs) return (-1); // seems like we are going to timeout } } + _mi_verbose_message("reserved %zu huge pages\n", pages); return 0; } #endif From a8b24472cbf425b5c73879dab2506280aef6a633 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 19 Aug 2019 18:16:12 -0700 Subject: [PATCH 5/8] use 1GiB huge pages on windows when reserving upfront --- src/os.c | 132 +++++++++++++++++++++++++++------------------ test/test-stress.c | 4 +- 2 files changed, 81 insertions(+), 55 deletions(-) diff --git a/src/os.c b/src/os.c index 2f2e5398..fb4d9b95 100644 --- a/src/os.c +++ b/src/os.c @@ -84,11 +84,13 @@ static size_t mi_os_good_alloc_size(size_t size, size_t alignment) { #if defined(_WIN32) // We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016. // So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility) -// Same for DiscardVirtualMemory. 
(hide MEM_EXTENDED_PARAMETER to compile with older SDK's)
-typedef PVOID(__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
-typedef DWORD(__stdcall *PDiscardVirtualMemory)(PVOID,SIZE_T);
+// NtAllocateVirtualMemoryEx is used for huge OS page allocation (1GiB)
+// We hide MEM_EXTENDED_PARAMETER to compile with older SDK's.
+#include <winternl.h>
+typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
+typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ PVOID, ULONG);
 static PVirtualAlloc2 pVirtualAlloc2 = NULL;
-static PDiscardVirtualMemory pDiscardVirtualMemory = NULL;
+static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
 
 void _mi_os_init(void) {
   // get the page size
@@ -103,7 +105,11 @@ void _mi_os_init(void) {
     // use VirtualAlloc2FromApp if possible as it is available to Windows store apps
     pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2FromApp");
     if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2");
-    pDiscardVirtualMemory = (PDiscardVirtualMemory)GetProcAddress(hDll, "DiscardVirtualMemory");
+    FreeLibrary(hDll);
+  }
+  hDll = LoadLibrary(TEXT("ntdll.dll"));
+  if (hDll != NULL) {
+    pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
     FreeLibrary(hDll);
   }
   // Try to see if large OS pages are supported
@@ -188,25 +194,46 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
 #ifdef _WIN32
 static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
 #if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
+  // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
+  if ((size % ((uintptr_t)1 << 30)) == 0 /* 1GiB multiple */
+      && (flags & MEM_LARGE_PAGES) != 0 && (flags & MEM_COMMIT) != 0
+      && 
(addr != NULL || try_alignment == 0 || try_alignment % _mi_os_page_size() == 0) + && pNtAllocateVirtualMemoryEx != NULL) + { + #ifndef MEM_EXTENDED_PARAMETER_NONPAGED_HUGE + #define MEM_EXTENDED_PARAMETER_NONPAGED_HUGE (0x10) + #endif + MEM_EXTENDED_PARAMETER param = { 0, 0 }; + param.Type = 5; // == MemExtendedParameterAttributeFlags; + param.ULong64 = MEM_EXTENDED_PARAMETER_NONPAGED_HUGE; + SIZE_T psize = size; + void* base = addr; + NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags | MEM_RESERVE, PAGE_READWRITE, ¶m, 1); + if (err == 0) { + return base; + } + // else fall back to regular large OS pages + } + + // on modern Windows try use VirtualAlloc2 for aligned allocation if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) { - // on modern Windows try use VirtualAlloc2 for aligned allocation MEM_ADDRESS_REQUIREMENTS reqs = { 0 }; reqs.Alignment = try_alignment; MEM_EXTENDED_PARAMETER param = { 0 }; param.Type = MemExtendedParameterAddressRequirements; param.Pointer = &reqs; - return (*pVirtualAlloc2)(addr, NULL, size, flags, PAGE_READWRITE, ¶m, 1); + return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, ¶m, 1); } #endif return VirtualAlloc(addr, size, flags, PAGE_READWRITE); } -static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags) { +static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only) { static volatile uintptr_t large_page_try_ok = 0; void* p = NULL; - if (use_large_os_page(size, try_alignment)) { + if (large_only || use_large_os_page(size, try_alignment)) { uintptr_t try_ok = mi_atomic_read(&large_page_try_ok); - if (try_ok > 0) { + if (!large_only && try_ok > 0) { // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive. // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times. 
mi_atomic_compare_exchange(&large_page_try_ok, try_ok - 1, try_ok); @@ -214,6 +241,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, else { // large OS pages must always reserve and commit. p = mi_win_virtual_allocx(addr, size, try_alignment, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE | flags); + if (large_only) return p; // fall back to non-large page allocation on error (`p == NULL`). if (p == NULL) { mi_atomic_write(&large_page_try_ok,10); // on error, don't try again for the next N allocations @@ -261,7 +289,7 @@ static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int pr return p; } -static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) { +static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only) { void* p = NULL; #if !defined(MAP_ANONYMOUS) #define MAP_ANONYMOUS MAP_ANON @@ -283,10 +311,10 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99) fd = VM_MAKE_TAG(100); #endif - if (use_large_os_page(size, try_alignment)) { + if (large_only || use_large_os_page(size, try_alignment)) { static volatile uintptr_t large_page_try_ok = 0; uintptr_t try_ok = mi_atomic_read(&large_page_try_ok); - if (try_ok > 0) { + if (!large_only && try_ok > 0) { // If the OS is not configured for large OS pages, or the user does not have // enough permission, the `mmap` will always fail (but it might also fail for other reasons). 
// Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times @@ -302,15 +330,24 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) #ifdef MAP_HUGETLB lflags |= MAP_HUGETLB; #endif - #ifdef MAP_HUGE_2MB - lflags |= MAP_HUGE_2MB; + #ifdef MAP_HUGE_1GB + if ((size % ((uintptr_t)1 << 30)) == 0) { + lflags |= MAP_HUGE_1GB; + } + else #endif + { + #ifdef MAP_HUGE_2MB + lflags |= MAP_HUGE_2MB; + #endif + } #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB; #endif - if (lflags != flags) { + if (large_only || lflags != flags) { // try large OS page allocation - p = mi_unix_mmapx(NULL, size, try_alignment, protect_flags, lflags, lfd); + p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd); + if (large_only) return p; if (p == NULL) { mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations } @@ -318,7 +355,7 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) } } if (p == NULL) { - p = mi_unix_mmapx(NULL,size, try_alignment, protect_flags, flags, fd); + p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd); #if defined(MADV_HUGEPAGE) // Many Linux systems don't allow MAP_HUGETLB but they support instead // transparent huge pages (TPH). 
It is not required to call `madvise` with MADV_HUGE @@ -341,18 +378,18 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); if (size == 0) return NULL; - void* p = mi_os_alloc_from_huge_reserved(size,try_alignment,commit); + void* p = mi_os_alloc_from_huge_reserved(size, try_alignment, commit); if (p != NULL) return p; - + #if defined(_WIN32) int flags = MEM_RESERVE; if (commit) flags |= MEM_COMMIT; - p = mi_win_virtual_alloc(NULL, size, try_alignment, flags); + p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false); #elif defined(__wasi__) p = mi_wasm_heap_grow(size, try_alignment); #else int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); - p = mi_unix_mmap(size, try_alignment, protect_flags); + p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false); #endif _mi_stat_increase(&stats->mmap_calls, 1); if (p != NULL) { @@ -402,7 +439,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, // otherwise free and allocate at an aligned address in there mi_os_mem_free(p, over_size, stats); void* aligned_p = mi_align_up_ptr(p, alignment); - p = mi_win_virtual_alloc(aligned_p, size, alignment, flags); + p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false); if (p == aligned_p) break; // success! if (p != NULL) { // should not happen? 
mi_os_mem_free(p, size, stats); @@ -557,18 +594,9 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) #endif #if defined(_WIN32) - // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory - // (but this is for an access pattern that immediately reuses the memory) - if (mi_option_is_enabled(mi_option_reset_discards) && pDiscardVirtualMemory != NULL) { - DWORD ok = (*pDiscardVirtualMemory)(start, csize); - mi_assert_internal(ok == ERROR_SUCCESS); - if (ok != ERROR_SUCCESS) return false; - } - else { - void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE); - mi_assert_internal(p == start); - if (p != start) return false; - } + void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE); + mi_assert_internal(p == start); + if (p != start) return false; #else #if defined(MADV_FREE) static int advice = MADV_FREE; @@ -736,30 +764,28 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept { if (max_secs==0) return -1; // timeout if (pages==0) return 0; // ok - + if (os_huge_reserved.start != NULL) return -2; // already reserved + // Allocate one page at the time but try to place them contiguously // We allocate one page at the time to be able to abort if it takes too long double start_t = _mi_clock_start(); uint8_t* start = (uint8_t*)((uintptr_t)1 << 43); // 8TiB virtual start address uint8_t* addr = start; // current top of the allocations for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) { - void* p = NULL; - // OS specific calls to allocate huge OS pages + // allocate large pages + void* p = NULL; #ifdef _WIN32 - p = mi_win_virtual_allocx(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE); - #elif defined(MI_OS_USE_MMAP) && defined(MAP_HUGETLB) - int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB; - #ifdef MAP_HUGE_1GB - flags |= MAP_HUGE_1GB - #elif defined(MAP_HUGE_2MB) - flags |= MAP_HUGE_2MB; - #endif - 
p = mi_unix_mmapx(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_WRITE|PROT_READ, flags, -1); - #endif + p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true); + #elif defined(MI_OS_USE_MMAP) + p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true); + #else + // always fail + #endif + // Did we succeed at a contiguous address? if (p != addr) { if (p != NULL) { - _mi_warning_message("could not allocate contiguous huge page at 0x%p\n", addr); + _mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr); _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main ); } else { @@ -768,7 +794,7 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept #else int err = errno; #endif - _mi_warning_message("could not allocate huge page at 0x%p, error: %i\n", addr, err); + _mi_warning_message("could not allocate huge page %zu at 0x%p, error: %i\n", page, addr, err); } return -2; } @@ -777,9 +803,9 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept os_huge_reserved.start = addr; } os_huge_reserved.reserved += MI_HUGE_OS_PAGE_SIZE; - _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE ); - _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); - + _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); + _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE); + // check for timeout double elapsed = _mi_clock_end(start_t); if (elapsed > max_secs) return (-1); // timeout diff --git a/test/test-stress.c b/test/test-stress.c index 2b799f33..ad487538 100644 --- a/test/test-stress.c +++ b/test/test-stress.c @@ -154,8 +154,8 @@ int main(int argc, char** argv) { if (n > 0) N = n; } printf("start with %i threads with a %i%% load-per-thread\n", THREADS, N); - int res = mi_reserve_huge_os_pages(4,1); - printf("(reserve huge: %i\n)", res); + //int res = mi_reserve_huge_os_pages(4,1); + 
//printf("(reserve huge: %i\n)", res); //bench_start_program(); memset((void*)transfer, 0, TRANSFERS*sizeof(void*)); From 228c5e5507ef93e460329801df672609aa38adf5 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 19 Aug 2019 18:20:51 -0700 Subject: [PATCH 6/8] allow larger large objects to better use segment space --- include/mimalloc-types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 94188cc0..c0778f87 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -93,7 +93,7 @@ terms of the MIT license. A copy of the license can be found in the file #define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) #define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128kb on 64-bit -#define MI_LARGE_OBJ_SIZE_MAX (MI_LARGE_PAGE_SIZE/4) // 1Mb on 64-bit +#define MI_LARGE_OBJ_SIZE_MAX (MI_LARGE_PAGE_SIZE/2) // 2Mb on 64-bit #define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX>>MI_INTPTR_SHIFT) #define MI_HUGE_OBJ_SIZE_MAX (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE) // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c) From d81b800e126650595bff34b5e208802e8ac645b0 Mon Sep 17 00:00:00 2001 From: daan Date: Tue, 20 Aug 2019 08:58:53 -0700 Subject: [PATCH 7/8] optimize stat counter increase --- src/stats.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/stats.c b/src/stats.c index e7d398b2..39015f94 100644 --- a/src/stats.c +++ b/src/stats.c @@ -28,11 +28,14 @@ void _mi_stats_done(mi_stats_t* stats) { Statistics operations ----------------------------------------------------------- */ +static bool mi_is_in_main(void* stat) { + return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main + && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t))); +} + static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { if (amount == 0) return; - bool in_main = ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main - && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + 
sizeof(mi_stats_t))); - if (in_main) + if (mi_is_in_main(stat)) { // add atomically (for abandoned pages) int64_t current = mi_atomic_add(&stat->current,amount); @@ -58,11 +61,16 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { } void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) { - mi_atomic_add( &stat->count, 1 ); - mi_atomic_add( &stat->total, (int64_t)amount ); + if (mi_is_in_main(stat)) { + mi_atomic_add( &stat->count, 1 ); + mi_atomic_add( &stat->total, (int64_t)amount ); + } + else { + stat->count++; + stat->total += amount; + } } - void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) { mi_stat_update(stat, (int64_t)amount); } From ff88361329d16dc629c61abe63a285e5c977ce80 Mon Sep 17 00:00:00 2001 From: daan Date: Tue, 20 Aug 2019 09:45:50 -0700 Subject: [PATCH 8/8] lower block size for keeping retired pages --- src/page.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/page.c b/src/page.c index 41e41b89..549ced38 100644 --- a/src/page.c +++ b/src/page.c @@ -71,7 +71,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) { mi_assert_internal(page->block_size > 0); mi_assert_internal(page->used <= page->capacity); mi_assert_internal(page->capacity <= page->reserved); - + mi_segment_t* segment = _mi_page_segment(page); uint8_t* start = _mi_page_start(segment,page,NULL); mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL)); @@ -390,7 +390,7 @@ void _mi_page_retire(mi_page_t* page) { // is the only page left with free blocks. It is not clear // how to check this efficiently though... for now we just check // if its neighbours are almost fully used. 
- if (mi_likely(page->block_size <= MI_MEDIUM_OBJ_SIZE_MAX)) { + if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) { if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) { _mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1); return; // dont't retire after all @@ -734,10 +734,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept // call potential deferred free routines _mi_deferred_free(heap, false); - + // free delayed frees from other threads _mi_heap_delayed_free(heap); - + // huge allocation? mi_page_t* page; if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {