fix stats for large objects that were off by the block size padding

Daan Leijen 2022-04-07 11:08:54 -07:00
parent 332346b685
commit 0cda8b02d5
2 changed files with 16 additions and 21 deletions
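
The patch below makes the malloc and free paths agree on both the statistic bucket (normal vs. large vs. huge) and the size they record for it: both now classify a block by its usable block size, i.e. the block size without the fixed padding. A minimal sketch of that shared classification, assuming mimalloc's internal definitions (mi_heap_t, mi_page_t, the stat macros); the helper name mi_stat_bucket_adjust is made up for illustration and is not part of the commit:

#include "mimalloc-internal.h"   // assumed: internal header name of 2022-era mimalloc

// Hypothetical helper: pick the statistic bucket from the usable block size
// and adjust it; `add` is true on allocation, false on free.
static void mi_stat_bucket_adjust(mi_heap_t* heap, const mi_page_t* page, bool add) {
  const size_t bsize = mi_page_usable_block_size(page);   // block size minus the fixed padding
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    if (add) mi_heap_stat_increase(heap, normal, bsize);
    else     mi_heap_stat_decrease(heap, normal, bsize);
  }
  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    if (add) mi_heap_stat_increase(heap, large, bsize);
    else     mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    if (add) mi_heap_stat_increase(heap, huge, bsize);
    else     mi_heap_stat_decrease(heap, huge, bsize);
  }
}

On the allocation side this corresponds to the MI_MEDIUM_OBJ_SIZE_MAX check in _mi_page_malloc and to the bsize computed in mi_large_huge_page_alloc; on the free side it corresponds to the extended if/else chain in mi_stat_free below.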

@@ -45,7 +45,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   #if (MI_STAT>0)
   const size_t bsize = mi_page_usable_block_size(page);
-  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
     mi_heap_stat_increase(heap, normal, bsize);
     mi_heap_stat_counter_increase(heap, normal_count, 1);
     #if (MI_STAT>1)
@@ -297,20 +297,26 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
-#if (MI_STAT < 2)
+  #if (MI_STAT < 2)
   MI_UNUSED(block);
-#endif
+  #endif
   mi_heap_t* const heap = mi_heap_get_default();
-  const size_t bsize = mi_page_usable_block_size(page);
-#if (MI_STAT>1)
+  const size_t bsize = mi_page_usable_block_size(page);
+  #if (MI_STAT>1)
   const size_t usize = mi_page_usable_size_of(page, block);
   mi_heap_stat_decrease(heap, malloc, usize);
-#endif
-  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+  #endif
+  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
     mi_heap_stat_decrease(heap, normal, bsize);
-#if (MI_STAT > 1)
+    #if (MI_STAT > 1)
     mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
-#endif
+    #endif
+  }
+  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    mi_heap_stat_decrease(heap, large, bsize);
+  }
+  else {
+    mi_heap_stat_decrease(heap, huge, bsize);
   }
 }
 #else
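
For readability, this is roughly how mi_stat_free reads once the hunk above is applied (assembled from the added lines; indentation and the exact placement of the preprocessor guards are approximate):

static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
  #endif
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    // small and medium blocks count towards the "normal" statistics
    mi_heap_stat_decrease(heap, normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    // large blocks now decrease the "large" statistic here, per block,
    // instead of once per page in _mi_page_free
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}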

@@ -368,17 +368,6 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   mi_page_set_has_aligned(page, false);
   mi_heap_t* heap = mi_page_heap(page);
   const size_t bsize = mi_page_block_size(page);
-  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
-    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
-      mi_heap_stat_decrease(heap, large, bsize);
-    }
-    else {
-      // not strictly necessary as we never get here for a huge page
-      mi_assert_internal(false);
-      mi_heap_stat_decrease(heap, huge, bsize);
-    }
-  }
   // remove from the page list
   // (no need to do _mi_heap_delayed_free first as all blocks are already free)
@@ -791,7 +780,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
   mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size);
   if (page != NULL) {
-    const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding
+    const size_t bsize = mi_page_usable_block_size(page); // note: includes padding
     mi_assert_internal(mi_page_immediate_available(page));
     mi_assert_internal(bsize >= size);
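
A quick way to observe the effect from outside the allocator (a sketch, not part of the commit): build mimalloc with statistics enabled (MI_STAT > 0, typically the default for debug builds), allocate and free blocks in a few size ranges, and print the heap statistics; after this fix the large and huge rows should balance out instead of drifting by the padding size.

#include <stdlib.h>
#include <mimalloc.h>

int main(void) {
  // Sizes chosen to span several size classes; which bucket (normal/large/huge)
  // each one lands in depends on the build configuration.
  const size_t sizes[] = { 64, 64 * 1024, 1024 * 1024, 8 * 1024 * 1024 };
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
    void* p = mi_malloc(sizes[i]);
    if (p == NULL) return EXIT_FAILURE;
    mi_free(p);
  }
  mi_stats_print(NULL);   // NULL prints to the default output
  return EXIT_SUCCESS;
}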