fix decommit for huge objects

Daan Leijen 2022-11-23 09:50:29 -08:00
parent 20880807ce
commit 9e56567d23
4 changed files with 39 additions and 27 deletions

@@ -395,9 +395,10 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     #endif
   }
   #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED      // note: when tracking, cannot use mi_usable_size with multi-threading
-  memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+  if (segment->kind != MI_SEGMENT_HUGE) {     // not for huge segments as we just reset the content
+    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+  }
   #endif
   // Try to put the block on either the page-local thread free list, or the heap delayed free list.
@@ -449,7 +450,9 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
   #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
-  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+  if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
+    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+  }
   #endif
   mi_block_set_next(page, block, page->local_free);
   page->local_free = block;
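Both hunks above guard the debug fill: once a huge page's contents have been reset or decommitted by _mi_segment_huge_page_reset, writing the MI_DEBUG_FREED pattern over the whole block would touch (and re-commit) memory that was just given back to the OS, so the memset is now skipped for huge segments/pages on both the multi-threaded and the local free path. A minimal sketch of an allocation that exercises this path, assuming a 64 MiB request is large enough to land in a MI_SEGMENT_HUGE segment in this build:

// Hedged sketch (not part of the commit): in a debug build (MI_DEBUG!=0) the
// free below no longer writes the MI_DEBUG_FREED pattern into the huge block.
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(64u * 1024 * 1024);   // assumed: large enough for a huge segment
  mi_free(p);                               // debug fill is skipped for huge pages
  return 0;
}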

@@ -1522,25 +1522,23 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   #if MI_HUGE_PAGE_ABANDON
   segment->thread_id = 0; // huge segments are immediately abandoned
   #endif
-  if (page_alignment > 0) {
-    size_t psize;
-    uint8_t* p = _mi_segment_page_start(segment, page, &psize);
-    uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)p, page_alignment);
-    mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
-    mi_assert_internal(psize - (aligned_p - p) >= size);
-    if (!segment->allow_decommit) {
-      // decommit the part of the page that is unused; this can be quite large (close to MI_SEGMENT_SIZE)
-      uint8_t* decommit_start = p + sizeof(mi_block_t); // for the free list
-      ptrdiff_t decommit_size = aligned_p - decommit_start;
-      mi_segment_decommit(segment, decommit_start, decommit_size, &_mi_stats_main);
-    }
-  }
   // for huge pages we initialize the xblock_size as we may
   // overallocate to accommodate large alignments.
   size_t psize;
-  _mi_segment_page_start(segment, page, &psize);
+  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
   page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
+  if (page_alignment > 0 && segment->allow_decommit) {
+    uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
+    mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
+    mi_assert_internal(psize - (aligned_p - start) >= size);
+    uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
+    ptrdiff_t decommit_size = aligned_p - decommit_start;
+    _mi_os_decommit(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
+  }
   return page;
 }
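The rewritten block decommits the unused prefix of an over-aligned huge page directly with _mi_os_decommit (only when the segment allows decommit), keeping just the first sizeof(mi_block_t) bytes committed for the free-list link. A standalone sketch of the same pointer arithmetic, with made-up addresses and a local align_up_ helper standing in for _mi_align_up:

#include <stdint.h>
#include <stdio.h>

// Local stand-in for _mi_align_up; align must be a power of two.
static uintptr_t align_up_(uintptr_t x, uintptr_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main(void) {
  uintptr_t start          = 0x40000040u;   // assumed page start inside a huge segment
  uintptr_t page_alignment = 0x1000000u;    // assumed 16 MiB requested alignment
  uintptr_t aligned_p      = align_up_(start, page_alignment);
  uintptr_t decommit_start = start + 16;    // keep room for the free-list link (stand-in for sizeof(mi_block_t))
  uintptr_t decommit_size  = aligned_p - decommit_start;
  // [decommit_start, aligned_p) is never handed out to the caller, so it can be
  // decommitted; the user block itself begins at aligned_p.
  printf("decommit %zu bytes of unused prefix\n", (size_t)decommit_size);
  return 0;
}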
@@ -1579,13 +1577,10 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
   mi_assert_internal(segment == _mi_page_segment(page));
   mi_assert_internal(page->used == 1); // this is called just before the free
   mi_assert_internal(page->free == NULL);
-  const size_t csize = mi_page_block_size(page) - sizeof(mi_block_t);
-  uint8_t* p = ( uint8_t*)block + sizeof(mi_block_t);
-  if (segment->allow_decommit) {
-    mi_segment_decommit(segment, p, csize, &_mi_stats_main);
-  }
-  else {
-    _mi_os_reset(p, csize, &_mi_stats_main);
-  }
+  const size_t csize = mi_usable_size(block) - sizeof(mi_block_t);
+  uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
+  _mi_os_decommit(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
 }
 #endif
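_mi_segment_huge_page_reset now always decommits the block payload (everything past the free-list header) via _mi_os_decommit, and sizes it from mi_usable_size(block) rather than mi_page_block_size(page), since an over-aligned huge block can be smaller than the page that was over-allocated for it. A small hedged sketch, using only the public API and assumed sizes, showing that the usable size of such a block tracks the request rather than the full page:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  const size_t size      = 8u * 1024 * 1024;    // assumed request size
  const size_t alignment = 32u * 1024 * 1024;   // assumed over-alignment that forces the huge path
  void* p = mi_malloc_aligned(size, alignment);
  // The segment behind p may be much larger than `size` because of the
  // alignment; mi_usable_size(p) reports what the caller may actually use,
  // which is why the reset above sizes the decommit from the block, not the page.
  printf("usable size: %zu bytes\n", mi_usable_size(p));
  mi_free(p);
  return 0;
}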