always use MI_HUGE_BLOCK_SIZE for pages in huge segments

daanx 2023-05-17 13:27:04 -07:00
parent 4c6814bc68
commit 09f171b4f5
7 changed files with 72 additions and 25 deletions


@@ -450,24 +450,32 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
   return _mi_segment_page_of(_mi_ptr_segment(p), p);
 }
+static inline bool mi_segment_is_huge(const mi_segment_t* segment) {
+  return (segment->page_kind == MI_PAGE_HUGE);
+}
+
+static inline bool mi_page_is_huge(const mi_page_t* page) {
+  bool huge = mi_segment_is_huge(_mi_page_segment(page));
+  mi_assert_internal((huge && page->xblock_size == MI_HUGE_BLOCK_SIZE) || (!huge && page->xblock_size <= MI_LARGE_OBJ_SIZE_MAX));
+  return huge;
+}
+
 // Get the block size of a page (special case for huge objects)
 static inline size_t mi_page_block_size(const mi_page_t* page) {
   const size_t bsize = page->xblock_size;
   mi_assert_internal(bsize > 0);
   if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
+    mi_assert_internal(bsize <= MI_LARGE_OBJ_SIZE_MAX);
     return bsize;
   }
   else {
+    mi_assert_internal(mi_page_is_huge(page));
     size_t psize;
     _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
     return psize;
   }
 }
-static inline bool mi_page_is_huge(const mi_page_t* page) {
-  return (_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
-}
 
 // Get the usable block size of a page without fixed padding.
 // This may still include internal padding due to alignment and rounding up size classes.
 static inline size_t mi_page_usable_block_size(const mi_page_t* page) {

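Note: the hunk above relies on a sentinel encoding. xblock_size is a 32-bit field, and a huge page stores a sentinel instead of a real size; the true block size is then derived from the segment on demand. Below is a standalone sketch of the scheme (not mimalloc source): the types are simplified, the area_size field is a hypothetical stand-in for what _mi_segment_page_start computes, and the 2 GiB sentinel value is an assumption.

#include <stdint.h>
#include <stddef.h>

#define HUGE_BLOCK_SIZE ((uint32_t)(2U*1024*1024*1024))  // sentinel; 2 GiB value assumed

typedef struct page_s {
  uint32_t xblock_size;  // exact block size, or HUGE_BLOCK_SIZE for huge pages
  size_t   area_size;    // hypothetical stand-in for the segment's usable page area
} page_t;

static size_t page_block_size(const page_t* page) {
  if (page->xblock_size < HUGE_BLOCK_SIZE) {
    return page->xblock_size;  // common case: the size is stored directly
  }
  // huge page: a single block spans the whole usable area, so the size comes
  // from the segment geometry rather than from the 32-bit field
  return page->area_size;
}

Keeping the field at 32 bits keeps the page header small; the sentinel trades an extra segment lookup on the rare huge path for that compactness.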

@@ -91,7 +91,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 10,   UNINIT, MI_OPTION(arena_purge_mult) },      // purge delay multiplier for arenas
   { 1,    UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
-  { 0,    UNINIT, MI_OPTION(remap_threshold) },       // size in KiB after which realloc starts using OS remap (0 to disable auto remap)
+  { 1024, UNINIT, MI_OPTION(remap_threshold) },       // size in KiB after which realloc starts using OS remap (0 to disable auto remap)
 };
 
 static bool mi_option_is_size_in_kib(mi_option_t option) {

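Note: the default moves from 0 (auto remap disabled) to 1024 KiB. The threshold stays tunable through the regular option interface; a small usage sketch (mi_option_remap_threshold exists only on this branch, and the value here is arbitrary):

#include <mimalloc.h>

int main(void) {
  // the threshold is in KiB: reallocs that grow past it may switch to OS remap
  mi_option_set(mi_option_remap_threshold, 512);
  // per the option's own comment, 0 disables automatic remap entirely
  long kib = mi_option_get(mi_option_remap_threshold);
  return (kib == 512 ? 0 : 1);
}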

@@ -231,9 +231,9 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
-  mi_assert_internal(page->xblock_size == queue->block_size ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
-                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+  mi_assert_internal((page->xblock_size == queue->block_size) ||
+                     (mi_page_queue_is_huge(queue)) ||   // note: not "&& page->xblock_size > MI_LARGE_OBJ_SIZE_MAX" since it could be due to page alignment
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
   // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);

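Note: the dropped xblock_size > MI_LARGE_OBJ_SIZE_MAX conjunct matters for over-aligned allocations: a block far below the large-object limit can still live in a huge page when its alignment forces a dedicated page. A hedged illustration (the alignment is chosen for the example; the exact size that forces a huge page depends on build-time segment parameters):

#include <mimalloc.h>
#include <stdint.h>
#include <assert.h>

int main(void) {
  // a modest 200-byte block with a multi-MiB alignment request: serving it
  // may require a dedicated (huge) page even though 200 is far below
  // MI_LARGE_OBJ_SIZE_MAX, which is why the queue assertion was relaxed
  void* p = mi_malloc_aligned(200, 8*1024*1024);
  assert(p != NULL && ((uintptr_t)p % (8*1024*1024)) == 0);
  mi_free(p);
  return 0;
}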

@@ -129,7 +129,13 @@ bool _mi_page_is_valid(mi_page_t* page) {
   {
     mi_page_queue_t* pq = mi_page_queue_of(page);
     mi_assert_internal(mi_page_queue_contains(pq, page));
-    mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+    if (mi_segment_is_huge(segment)) {
+      mi_assert_internal(mi_page_queue_is_huge(pq) || mi_page_queue_is_full(pq));
+    }
+    else {
+      mi_assert_internal(pq->block_size == mi_page_block_size(page) || mi_page_is_in_full(page));
+      mi_assert_internal(mi_page_block_size(page) <= MI_LARGE_OBJ_SIZE_MAX);
+    }
     mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
   }
 }
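
Note: restated outside the allocator, the tightened check reads as a simple predicate; a sketch whose parameters are stand-ins for the queries used above:

#include <stdbool.h>
#include <stddef.h>

// Sketch: a page's size bookkeeping is valid iff either it lives in a huge
// segment (then it must sit on the huge or the full queue), or its block size
// matches its queue (or the page is in the full queue) and stays within the
// large-object limit.
static bool page_size_is_valid(bool segment_is_huge, bool on_huge_queue,
                               bool on_full_queue, size_t queue_block_size,
                               size_t block_size, size_t large_obj_max) {
  if (segment_is_huge) {
    return on_huge_queue || on_full_queue;
  }
  return (queue_block_size == block_size || on_full_queue)
         && block_size <= large_obj_max;
}
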
@@ -283,12 +289,13 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
   mi_assert_internal(pq!=NULL || page->xblock_size != 0);
   mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
   // a fresh page was found, initialize it
-  const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
-  mi_assert_internal(full_block_size >= block_size);
-  mi_page_init(heap, page, full_block_size, heap->tld);
+  const size_t xblock_size = ((pq == NULL || mi_page_is_huge(page)) ? MI_HUGE_BLOCK_SIZE : block_size);  //((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+  //mi_assert_internal(xblock_size >= block_size);
+  mi_page_init(heap, page, xblock_size, heap->tld);
+  mi_assert_internal(mi_page_block_size(page) >= block_size);
   mi_heap_stat_increase(heap, pages, 1);
   if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
   mi_assert_expensive(_mi_page_is_valid(page));
@@ -683,9 +690,16 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   const void* page_start = _mi_segment_page_start(segment, page, block_size, &page_size, NULL);
   MI_UNUSED(page_start);
   mi_track_mem_noaccess(page_start,page_size);
-  page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE);
-  mi_assert_internal(page_size / block_size < (1L<<16));
-  page->reserved = (uint16_t)(page_size / block_size);
+  if (segment->page_kind == MI_PAGE_HUGE) {
+    page->xblock_size = MI_HUGE_BLOCK_SIZE;
+    page->reserved = 1;
+  }
+  else {
+    mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
+    page->xblock_size = (uint32_t)block_size;
+    mi_assert_internal(page_size / block_size < (1L << 16));
+    page->reserved = (uint16_t)(page_size / block_size);
+  }
   mi_assert_internal(page->reserved > 0);
   #if (MI_PADDING || MI_ENCODE_FREELIST)
   page->keys[0] = _mi_heap_random_next(heap);
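
Note: the init split can be summarized in isolation. A huge page holds exactly one block spanning the whole page, so reserved is 1 and the sentinel replaces a size that may not fit in 32 bits; a normal page keeps the exact size and a 16-bit block count. A self-contained sketch (sentinel value assumed as before, names hypothetical):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <assert.h>

#define HUGE_BLOCK_SIZE ((uint32_t)(2U*1024*1024*1024))  // assumed sentinel

typedef struct page_sizes_s { uint32_t xblock_size; uint16_t reserved; } page_sizes_t;

static page_sizes_t init_page_sizes(bool is_huge, size_t block_size, size_t page_size) {
  page_sizes_t r;
  if (is_huge) {
    r.xblock_size = HUGE_BLOCK_SIZE;  // real size is derived from the segment later
    r.reserved = 1;                   // a huge page carries a single block
  }
  else {
    assert(block_size < HUGE_BLOCK_SIZE && page_size / block_size < (1UL << 16));
    r.xblock_size = (uint32_t)block_size;
    r.reserved = (uint16_t)(page_size / block_size);  // block count fits in 16 bits
  }
  return r;
}
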
@@ -848,7 +862,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
   mi_page_queue_t* pq = mi_page_queue(heap, MI_HUGE_OBJ_SIZE_MAX); // not block_size as that can be low if the page_alignment > 0
   mi_assert_internal(mi_page_queue_is_huge(pq));
   #endif
-  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size,page_alignment);
+  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
   if (page != NULL) {
     const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding already
     mi_assert_internal(bsize >= size);
@@ -934,11 +948,15 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-  if mi_unlikely(zero && page->xblock_size == 0) {
+  if mi_unlikely(zero && mi_page_is_huge(page)) {
     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
     void* p = _mi_page_malloc(heap, page, size, false);
     mi_assert_internal(p != NULL);
-    if (!page->free_is_zero) {
+    if (page->free_is_zero) {
+      ((mi_block_t*)p)->next = 0;
+      mi_track_mem_defined(p, mi_page_usable_block_size(page));
+    }
+    else {
       _mi_memzero_aligned(p, mi_page_usable_block_size(page));
     }
     return p;

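Note: the reworked zero path exploits pages whose free memory is known to be zero. After a block is popped from the free list, the only dirty bytes are the link word written into it, so a single word is cleared instead of the whole block. A simplified sketch (the block layout is reduced to the embedded link):

#include <string.h>
#include <stddef.h>
#include <stdbool.h>

typedef struct block_s { struct block_s* next; } block_t;

static void zero_fresh_block(void* p, size_t usable_size, bool free_is_zero) {
  if (free_is_zero) {
    ((block_t*)p)->next = NULL;  // memory was already zero; clear only the link word
  }
  else {
    memset(p, 0, usable_size);   // stand-in for _mi_memzero_aligned
  }
}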

@@ -1235,7 +1235,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   // overallocate to accommodate large alignments.
   size_t psize;
   uint8_t* start = _mi_segment_page_start(segment, page, 0, &psize, NULL);
-  page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  page->xblock_size = MI_HUGE_BLOCK_SIZE;  // ensure it goes into the huge page queue; was: (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize)
 
   // reset the part of the page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
   if (page_alignment >= MI_ALIGN_HUGE && segment->allow_decommit && page->is_committed) {
@@ -1314,7 +1314,7 @@ mi_block_t* _mi_segment_huge_page_expand(mi_segment_t* segment, mi_page_t* page,
   size_t psize = 0;
   _mi_segment_page_start(segment, page, 0, &psize, NULL);
   mi_assert_internal(psize >= newsize);
-  page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  page->xblock_size = MI_HUGE_BLOCK_SIZE;  // was: (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize)
   return block;
 }
@@ -1363,7 +1363,7 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
   size_t psize = 0;
   _mi_segment_page_start(newsegment, newpage, 0, &psize, NULL);
   mi_assert_internal(psize >= newsize);
-  newpage->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  newpage->xblock_size = MI_HUGE_BLOCK_SIZE;  // was: (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize)
   mi_assert_internal(mi_page_block_size(newpage) >= newsize);
   _mi_heap_huge_page_attach(heap, newpage);

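Note: at the API level these expand/remap paths are exercised by growing a single allocation across the remap threshold; a hedged driver mirroring the new tests below (the sizes are taken from those tests, the threshold is arbitrary):

#include <mimalloc.h>
#include <string.h>

int main(void) {
  mi_option_set(mi_option_remap_threshold, 100);  // in KiB
  char* p = (char*)mi_malloc(356);
  p = (char*)mi_realloc(p, 583);
  memset(p, 0, 583);
  p = (char*)mi_realloc(p, 1500705);  // grows past the threshold: may be remapped
  p = (char*)mi_realloc(p, 3000711);  // grows again within a huge page
  mi_free(p);
  return 0;
}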

@@ -19,13 +19,15 @@ static void negative_stat(void);
 static void alloc_huge(void);
 static void test_heap_walk(void);
 static void test_remap(bool start_remappable);
+static void test_remap2(void);
 
 int main() {
   mi_version();
   mi_stats_reset();
-  test_remap(true);
+  test_remap2();
+  //test_remap(true);
 
   // detect double frees and heap corruption
   // double_free1();
   // double_free2();
@@ -219,6 +221,14 @@ static void test_heap_walk(void) {
   mi_heap_visit_blocks(heap, true, &test_visit, NULL);
 }
 
+static void test_remap2(void) {
+  int* p = (int*)mi_malloc(356);
+  p = (int*)mi_realloc(p, 583);
+  memset(p, '\0', 580);
+  p = (int*)mi_realloc(p, 1500705);
+  p = (int*)mi_realloc(p, 3000711);
+  mi_free(p);
+}
 
 static void test_remap(bool start_remappable) {
   const size_t iterN = 100;


@@ -27,6 +27,7 @@ we therefore test the API over various inputs. Please add more tests :-)
 #include <stdbool.h>
 #include <stdint.h>
 #include <errno.h>
+#include <string.h>
 
 #ifdef __cplusplus
 #include <vector>
@@ -59,6 +60,7 @@ bool mem_is_zero(uint8_t* p, size_t size) {
 // ---------------------------------------------------------------------------
 int main(void) {
   mi_option_disable(mi_option_verbose);
+  mi_option_set(mi_option_remap_threshold, 100 /* in KiB */);
 
   // ---------------------------------------------------
   // Malloc
@@ -284,6 +286,15 @@ int main(void) {
     mi_free(p);
   };
+  CHECK_BODY("realloc-huge") {   // By Jason Gibson
+    int* p = (int*)mi_malloc(356);
+    p = (int*)mi_realloc(p, 583);
+    memset(p, '\0', 580);
+    p = (int*)mi_realloc(p, 1500705);
+    p = (int*)mi_realloc(p, 3000711);
+    mi_free(p);
+  };
+
   // ---------------------------------------------------
   // Heaps
   // ---------------------------------------------------