Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-04 22:49:32 +03:00)
improve messages; fix reset size calculation on large pages
commit 8422ab125d
parent 724602b78b
3 changed files with 12 additions and 4 deletions
src/arena.c

@@ -283,7 +283,7 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
     _mi_warning_message("failed to reserve %zu gb huge pages\n", pages);
     return ENOMEM;
   }
-  _mi_verbose_message("reserved %zu gb huge pages on numa node %i (of the %zu gb requested)\n", pages_reserved, numa_node, pages);
+  _mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages);
 
   size_t bcount = mi_block_count_of_size(hsize);
   size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
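For context on the hunk above: mi_reserve_huge_os_pages_at is part of mimalloc's public API and pre-reserves 1GiB huge OS pages on a specific NUMA node, with the timeout given in milliseconds. A minimal usage sketch, not from this commit, assuming mimalloc is linked and <mimalloc.h> is available:

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // Try to reserve 4 x 1GiB huge pages on NUMA node 0, giving up after ~10 seconds.
  // On failure the function itself logs a warning (as in the hunk above) and
  // returns a non-zero error code such as ENOMEM.
  int err = mi_reserve_huge_os_pages_at(4, 0, 10000);
  if (err != 0) {
    fprintf(stderr, "huge page reservation failed: %d\n", err);
  }
  // Subsequent allocations can be served from the reserved memory.
  void* p = mi_malloc(16 * 1024 * 1024);
  mi_free(p);
  return 0;
}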
src/os.c
@@ -851,7 +851,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     else {
       // fall back to regular large pages
       mi_huge_pages_available = false; // don't try further huge pages
-      _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
+      _mi_warning_message("unable to allocate using huge (1gb) pages, trying large (2mb) pages instead (status 0x%lx)\n", err);
     }
   }
   // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation
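The warning above is emitted on the Windows path when 1GiB huge-page allocation fails and the allocator falls back to 2MiB large pages. A rough, standalone illustration of such a large-page allocation (not mimalloc code; it assumes the process holds SeLockMemoryPrivilege and that the large-page size is a power of two):

#include <windows.h>
#include <stdio.h>

// Allocate `size` bytes backed by large (typically 2MiB) pages, or NULL on failure.
static void* alloc_large_pages(size_t size) {
  size_t large = GetLargePageMinimum();        // 0 if large pages are unsupported
  if (large == 0) return NULL;
  size = (size + large - 1) & ~(large - 1);    // size must be a multiple of the large page size
  void* p = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
  if (p == NULL) {
    fprintf(stderr, "large page allocation failed (error 0x%lx)\n", GetLastError());
  }
  return p;
}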
@@ -892,7 +892,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     // see: <https://lkml.org/lkml/2017/2/9/875>
     long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
     if (err != 0) {
-      _mi_warning_message("failed to bind huge (1GiB) pages to NUMA node %d: %s\n", numa_node, strerror(errno));
+      _mi_warning_message("failed to bind huge (1gb) pages to numa node %d: %s\n", numa_node, strerror(errno));
     }
   }
   return p;
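mi_os_mbind above wraps the Linux mbind system call: build a node mask and request MPOL_PREFERRED placement for an already-mapped region. A self-contained sketch of that pattern using a raw syscall (not mimalloc code; the helper name os_mbind is made up, the MPOL_PREFERRED value is as documented in mbind(2), and nodes are assumed to be below 64):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>

#ifndef MPOL_PREFERRED
#define MPOL_PREFERRED 1   // documented value, see mbind(2) / <numaif.h>
#endif

// Prefer placing [addr, addr+len) on NUMA node `node` (node < 64 assumed here).
static long os_mbind(void* addr, size_t len, unsigned long node) {
  unsigned long nodemask = 1UL << node;
  return syscall(SYS_mbind, addr, len, MPOL_PREFERRED, &nodemask, 8 * sizeof(nodemask), 0);
}

int main(void) {
  size_t size = 8u * 1024 * 1024;
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  if (os_mbind(p, size, 0) != 0) {
    fprintf(stderr, "failed to bind to numa node 0: %s\n", strerror(errno));
  }
  munmap(p, size);
  return 0;
}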
src/segment.c

@@ -247,6 +247,7 @@ static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, m
 static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld)
 {
   mi_assert_internal(page->is_reset);
+  mi_assert_internal(page->is_committed);
   mi_assert_internal(!segment->mem_is_fixed);
   page->is_reset = false;
   size_t psize;
@@ -779,10 +780,14 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a
   // note: must come after setting `segment_in_use` to false but before block_size becomes 0
   //mi_page_reset(segment, page, 0 /*used_size*/, tld);
 
-  // zero the page data, but not the segment fields and block_size (for page size calculations)
+  // zero the page data, but not the segment fields and capacity, and block_size (for page size calculations)
   uint32_t block_size = page->xblock_size;
+  uint16_t capacity = page->capacity;
+  uint16_t reserved = page->reserved;
   ptrdiff_t ofs = offsetof(mi_page_t,capacity);
   memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
+  page->capacity = capacity;
+  page->reserved = reserved;
   page->xblock_size = block_size;
   segment->used--;
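The hunk above relies on a common C idiom: clear a struct from a chosen field to its end with a single offsetof-based memset, saving and restoring the few fields that later page-size calculations still need. A minimal standalone sketch with a stand-in struct (the real mi_page_t layout differs):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct page_s {
  uint8_t  segment_idx;   // fields before `capacity` are left untouched
  uint16_t capacity;      // first field that gets cleared
  uint16_t reserved;
  uint32_t xblock_size;
  void*    free_list;     // ...remaining bookkeeping fields
} page_t;

static void page_clear_tail(page_t* page) {
  // save the fields that must survive the wipe
  uint16_t capacity   = page->capacity;
  uint16_t reserved   = page->reserved;
  uint32_t block_size = page->xblock_size;
  // zero everything from `capacity` to the end of the struct in one memset
  ptrdiff_t ofs = offsetof(page_t, capacity);
  memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
  // restore the preserved fields
  page->capacity    = capacity;
  page->reserved    = reserved;
  page->xblock_size = block_size;
}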
@@ -790,6 +795,9 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a
   if (allow_reset) {
     mi_pages_reset_add(segment, page, tld);
   }
+
+  page->capacity = 0; // after reset these can be zero'd now
+  page->reserved = 0;
 }
 
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)