diff --git a/CMakeLists.txt b/CMakeLists.txt
index 72f83e17..09204a08 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -43,7 +43,7 @@ set(mi_sources
     src/init.c)
 
 # -----------------------------------------------------------------------------
-# Converience: set default build type depending on the build directory
+# Convenience: set default build type depending on the build directory
 # -----------------------------------------------------------------------------
 
 if (NOT CMAKE_BUILD_TYPE)
@@ -224,7 +224,7 @@ else()
 endif()
 
 string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC)
-if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel)$"))
+if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$"))
   set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version
 endif()
 if(MI_BUILD_SHARED)
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index bb76b4a3..8cb49a61 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -463,7 +463,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
   return _mi_segment_page_of(_mi_ptr_segment(p), p);
 }
 
-// Get the block size of a page (special cased for huge objects)
+// Get the block size of a page (special case for huge objects)
 static inline size_t mi_page_block_size(const mi_page_t* page) {
   const size_t bsize = page->xblock_size;
   mi_assert_internal(bsize > 0);
diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index 2118dfbe..82d74f7f 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -105,11 +105,11 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Main tuning parameters for segment and page sizes
 // Sizes for 64-bit, divide by two for 32-bit
-#define MI_SEGMENT_SLICE_SHIFT            (13 + MI_INTPTR_SHIFT)         // 64kb
-#define MI_SEGMENT_SHIFT                  ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 8mb
+#define MI_SEGMENT_SLICE_SHIFT            (13 + MI_INTPTR_SHIFT)         // 64KiB
+#define MI_SEGMENT_SHIFT                  ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 8MiB
 
-#define MI_SMALL_PAGE_SHIFT               (MI_SEGMENT_SLICE_SHIFT)       // 64kb
-#define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)     // 512kb
+#define MI_SMALL_PAGE_SHIFT               (MI_SEGMENT_SLICE_SHIFT)       // 64KiB
+#define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)     // 512KiB
 
 // Derived constants
@@ -122,12 +122,12 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_SMALL_PAGE_SIZE                (1ULL<<MI_SMALL_PAGE_SHIFT)
-  MI_PAGE_HUGE,     // huge blocks (>16mb) are put into a single page in a single segment.
+  MI_PAGE_HUGE,     // huge blocks (> 16 MiB) are put into a single page in a single segment.
 } mi_page_kind_t;
 
 typedef enum mi_segment_kind_e {
@@ -355,7 +355,7 @@ typedef struct mi_random_cxt_s {
 } mi_random_ctx_t;
 
-// In debug mode there is a padding stucture at the end of the blocks to check for buffer overflows
+// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
 #if (MI_PADDING)
 typedef struct mi_padding_s {
   uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
diff --git a/src/arena.c b/src/arena.c
index 3f93c590..abc8b7ec 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -345,7 +345,7 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
   }
-  _mi_verbose_message("reserved %zu kb memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
+  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
   return 0;
 }
 
@@ -389,10 +389,10 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
   size_t pages_reserved = 0;
   void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize);
   if (p==NULL || pages_reserved==0) {
-    _mi_warning_message("failed to reserve %zu gb huge pages\n", pages);
+    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
     return ENOMEM;
   }
-  _mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages);
+  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
 
   if (!mi_manage_os_memory(p, hsize, true, true, true, numa_node)) {
     _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
diff --git a/src/heap.c b/src/heap.c
index cba0fd06..1831fce8 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -333,7 +333,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
   Safe Heap delete
 ----------------------------------------------------------- */
 
-// Tranfer the pages from one heap to the other
+// Transfer the pages from one heap to the other
 static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   mi_assert_internal(heap!=NULL);
   if (from==NULL || from->page_count == 0) return;
diff --git a/src/os.c b/src/os.c
index a79f7d7d..122d7669 100644
--- a/src/os.c
+++ b/src/os.c
@@ -204,7 +204,7 @@ void _mi_os_init(void) {
 }
 #elif defined(__wasi__)
 void _mi_os_init() {
-  os_page_size = 0x10000; // WebAssembly has a fixed page size: 64KB
+  os_page_size = 0x10000; // WebAssembly has a fixed page size: 64KiB
   os_alloc_granularity = 16;
 }
 #else
@@ -638,6 +638,10 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
       mi_os_mem_free(p, over_size, commit, stats);
       void* aligned_p = mi_align_up_ptr(p, alignment);
       p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
+      if (p != NULL) {
+        _mi_stat_increase(&stats->reserved, size);
+        if (commit) { _mi_stat_increase(&stats->committed, size); }
+      }
       if (p == aligned_p) break; // success!
       if (p != NULL) { // should not happen?
         mi_os_mem_free(p, size, commit, stats);
@@ -1024,7 +1028,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     else {
       // fall back to regular large pages
       mi_huge_pages_available = false; // don't try further huge pages
-      _mi_warning_message("unable to allocate using huge (1gb) pages, trying large (2mb) pages instead (status 0x%lx)\n", err);
+      _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
     }
   }
   // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation
@@ -1067,7 +1071,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     // see:
     long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
     if (err != 0) {
-      _mi_warning_message("failed to bind huge (1gb) pages to numa node %d: %s\n", numa_node, strerror(errno));
+      _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d: %s\n", numa_node, strerror(errno));
     }
   }
   return p;
diff --git a/src/page.c b/src/page.c
index 82d5dd65..2a53b0b5 100644
--- a/src/page.c
+++ b/src/page.c
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 /* -----------------------------------------------------------
   The core of the allocator. Every segment contains
-  pages of a {certain block size. The main function
+  pages of a certain block size. The main function
   exported is `mi_malloc_generic`.
 ----------------------------------------------------------- */
 
diff --git a/src/random.c b/src/random.c
index a9e4c9cc..ce6db7c6 100644
--- a/src/random.c
+++ b/src/random.c
@@ -198,8 +198,10 @@ static bool os_random_buf(void* buf, size_t buf_len) {
   arc4random_buf(buf, buf_len);
   return true;
 }
-#elif defined(__linux__)
+#elif defined(__linux__) || defined(__HAIKU__)
+#if defined(__linux__)
 #include
+#endif
 #include
 #include
 #include
diff --git a/src/segment.c b/src/segment.c
index c6036c4a..6ae3d9af 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -18,9 +18,6 @@ static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_st
 /* --------------------------------------------------------------------------------
   Segment allocation
 
-  In any case the memory for a segment is virtual and usually committed on demand.
-  (i.e. we are careful to not touch the memory until we actually allocate a block there)
-
   If a thread ends, it "abandons" pages with used blocks
   and there is an abandoned segment list whose segments can
   be reclaimed by still running threads, much like work-stealing.
diff --git a/src/stats.c b/src/stats.c
index 37f65da4..e5e26f76 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -133,25 +133,29 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
 // unit == 0: count as decimal
 // unit < 0 : count in binary
 static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) {
-  char buf[32];
+  char buf[32]; buf[0] = 0;
   int len = 32;
-  const char* suffix = (unit <= 0 ? " " : "b");
+  const char* suffix = (unit <= 0 ? " " : "B");
   const int64_t base = (unit == 0 ? 1000 : 1024);
   if (unit>0) n *= unit;
 
   const int64_t pos = (n < 0 ? -n : n);
   if (pos < base) {
-    snprintf(buf, len, "%d %s ", (int)n, suffix);
+    if (n!=1 || suffix[0] != 'B') {  // skip printing 1 B for the unit column
+      snprintf(buf, len, "%d %-3s", (int)n, (n==0 ? "" : suffix));
+    }
   }
   else {
-    int64_t divider = base;
-    const char* magnitude = "k";
-    if (pos >= divider*base) { divider *= base; magnitude = "m"; }
-    if (pos >= divider*base) { divider *= base; magnitude = "g"; }
+    int64_t divider = base;
+    const char* magnitude = "K";
+    if (pos >= divider*base) { divider *= base; magnitude = "M"; }
+    if (pos >= divider*base) { divider *= base; magnitude = "G"; }
     const int64_t tens = (n / (divider/10));
     const long whole = (long)(tens/10);
     const long frac1 = (long)(tens%10);
-    snprintf(buf, len, "%ld.%ld %s%s", whole, (frac1 < 0 ? -frac1 : frac1), magnitude, suffix);
+    char unitdesc[16];
+    snprintf(unitdesc, 16, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix);
+    snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc);
   }
   _mi_fprintf(out, arg, (fmt==NULL ? "%11s" : fmt), buf);
 }
@@ -221,7 +225,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char*
 
 static void mi_print_header(mi_output_fun* out, void* arg ) {
-  _mi_fprintf(out, arg, "%10s: %10s %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count ");
+  _mi_fprintf(out, arg, "%10s: %10s %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count ");
 }
 
 #if MI_STAT>1
@@ -524,6 +528,7 @@ static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msec
   while (get_next_area_info(tid.team, &c, &mem) == B_OK) {
     *peak_rss += mem.ram_size;
   }
+  *page_faults = 0;
 #elif defined(__APPLE__)
   *peak_rss = rusage.ru_maxrss; // BSD reports in bytes
   struct mach_task_basic_info info;
diff --git a/test/test-api.c b/test/test-api.c
index 53e7f3d8..55c80431 100644
--- a/test/test-api.c
+++ b/test/test-api.c
@@ -83,7 +83,7 @@ int main(void) {
     void* p = mi_malloc(0); mi_free(p);
   });
   CHECK_BODY("malloc-nomem1",{
-    result = (mi_malloc(SIZE_MAX/2) == NULL);
+    result = (mi_malloc((size_t)PTRDIFF_MAX + (size_t)1) == NULL);
   });
   CHECK_BODY("malloc-null",{
     mi_free(NULL);