diff --git a/src/arena.c b/src/arena.c index 7bf8099b..724fc52c 100644 --- a/src/arena.c +++ b/src/arena.c @@ -283,7 +283,7 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec _mi_warning_message("failed to reserve %zu gb huge pages\n", pages); return ENOMEM; } - _mi_verbose_message("reserved %zu gb huge pages on numa node %i (of the %zu gb requested)\n", pages_reserved, numa_node, pages); + _mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages); size_t bcount = mi_block_count_of_size(hsize); size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS); diff --git a/src/os.c b/src/os.c index b8dfaa70..970eeb94 100644 --- a/src/os.c +++ b/src/os.c @@ -851,7 +851,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) else { // fall back to regular large pages mi_huge_pages_available = false; // don't try further huge pages - _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err); + _mi_warning_message("unable to allocate using huge (1gb) pages, trying large (2mb) pages instead (status 0x%lx)\n", err); } } // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation @@ -892,7 +892,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) // see: long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0); if (err != 0) { - _mi_warning_message("failed to bind huge (1GiB) pages to NUMA node %d: %s\n", numa_node, strerror(errno)); + _mi_warning_message("failed to bind huge (1gb) pages to numa node %d: %s\n", numa_node, strerror(errno)); } } return p; diff --git a/src/segment.c b/src/segment.c index c7a9662b..01a8a693 100644 --- a/src/segment.c +++ b/src/segment.c @@ -247,6 +247,7 @@ static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, m static void mi_page_unreset(mi_segment_t* 
segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) { mi_assert_internal(page->is_reset); + mi_assert_internal(page->is_committed); mi_assert_internal(!segment->mem_is_fixed); page->is_reset = false; size_t psize; @@ -779,10 +780,14 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a // note: must come after setting `segment_in_use` to false but before block_size becomes 0 //mi_page_reset(segment, page, 0 /*used_size*/, tld); - // zero the page data, but not the segment fields and block_size (for page size calculations) + // zero the page data, but not the segment fields and capacity, and block_size (for page size calculations) uint32_t block_size = page->xblock_size; + uint16_t capacity = page->capacity; + uint16_t reserved = page->reserved; ptrdiff_t ofs = offsetof(mi_page_t,capacity); memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs); + page->capacity = capacity; + page->reserved = reserved; page->xblock_size = block_size; segment->used--; @@ -790,6 +795,9 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a if (allow_reset) { mi_pages_reset_add(segment, page, tld); } + + page->capacity = 0; // after reset these can be zero'd now + page->reserved = 0; } void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)