fix page_bin and pages statistics; merge thread-local statistics regularly with main statistics; bump version to 3.0.4

This commit is contained in:
Daan 2025-05-20 18:15:36 -07:00
parent 15b2d1cbd6
commit 3301ba09b4
10 changed files with 35 additions and 24 deletions

View file

@@ -1,6 +1,6 @@
set(mi_version_major 3) set(mi_version_major 3)
set(mi_version_minor 0) set(mi_version_minor 0)
set(mi_version_patch 3) set(mi_version_patch 4)
set(mi_version ${mi_version_major}.${mi_version_minor}) set(mi_version ${mi_version_major}.${mi_version_minor})
set(PACKAGE_VERSION ${mi_version}) set(PACKAGE_VERSION ${mi_version})

View file

@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H #ifndef MIMALLOC_H
#define MIMALLOC_H #define MIMALLOC_H
#define MI_MALLOC_VERSION 303 // major + 2 digits minor #define MI_MALLOC_VERSION 304 // major + 2 digits minor
// ------------------------------------------------------ // ------------------------------------------------------
// Compiler specific attributes // Compiler specific attributes

View file

@@ -181,7 +181,7 @@ void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld
void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld); void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld);
mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment); mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
void _mi_arenas_page_free(mi_page_t* page); void _mi_arenas_page_free(mi_page_t* page, mi_tld_t* tld);
void _mi_arenas_page_abandon(mi_page_t* page, mi_tld_t* tld); void _mi_arenas_page_abandon(mi_page_t* page, mi_tld_t* tld);
void _mi_arenas_page_unabandon(mi_page_t* page); void _mi_arenas_page_unabandon(mi_page_t* page);
bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page); bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page);
@@ -215,6 +215,7 @@ void _mi_page_free_collect_partly(mi_page_t* page, mi_block_t* head);
void _mi_page_init(mi_heap_t* heap, mi_page_t* page); void _mi_page_init(mi_heap_t* heap, mi_page_t* page);
bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq); bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq);
size_t _mi_page_bin(const mi_page_t* page); // for stats
size_t _mi_bin_size(size_t bin); // for stats size_t _mi_bin_size(size_t bin); // for stats
size_t _mi_bin(size_t size); // for stats size_t _mi_bin(size_t size); // for stats
@@ -840,7 +841,7 @@ static inline bool _mi_page_unown(mi_page_t* page) {
_mi_page_free_collect(page, false); // update used _mi_page_free_collect(page, false); // update used
if (mi_page_all_free(page)) { // it may become free just before unowning it if (mi_page_all_free(page)) { // it may become free just before unowning it
_mi_arenas_page_unabandon(page); _mi_arenas_page_unabandon(page);
_mi_arenas_page_free(page); _mi_arenas_page_free(page,NULL);
return true; return true;
} }
tf_old = mi_atomic_load_relaxed(&page->xthread_free); tf_old = mi_atomic_load_relaxed(&page->xthread_free);

View file

@@ -587,8 +587,8 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_
} }
// Allocate a fresh page // Allocate a fresh page
static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment, static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_size, size_t block_alignment,
mi_arena_t* req_arena, size_t tseq, bool commit) mi_arena_t* req_arena, bool commit, mi_tld_t* tld)
{ {
const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page
const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN); const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
@@ -602,7 +602,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
!os_align && // not large alignment !os_align && // not large alignment
slice_count <= MI_ARENA_MAX_OBJ_SLICES) // and not too large slice_count <= MI_ARENA_MAX_OBJ_SLICES) // and not too large
{ {
page = (mi_page_t*)mi_arenas_try_alloc(subproc, slice_count, page_alignment, commit, allow_large, req_arena, tseq, &memid); page = (mi_page_t*)mi_arenas_try_alloc(tld->subproc, slice_count, page_alignment, commit, allow_large, req_arena, tld->thread_seq, &memid);
if (page != NULL) { if (page != NULL) {
mi_assert_internal(mi_bitmap_is_clearN(memid.mem.arena.arena->pages, memid.mem.arena.slice_index, memid.mem.arena.slice_count)); mi_assert_internal(mi_bitmap_is_clearN(memid.mem.arena.arena->pages, memid.mem.arena.slice_index, memid.mem.arena.slice_count));
mi_bitmap_set(memid.mem.arena.arena->pages, memid.mem.arena.slice_index); mi_bitmap_set(memid.mem.arena.arena->pages, memid.mem.arena.slice_index);
@@ -710,6 +710,11 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
// register in the page map // register in the page map
_mi_page_map_register(page); _mi_page_map_register(page);
// stats
mi_tld_stat_increase(tld, pages, 1);
mi_tld_stat_increase(tld, page_bins[_mi_page_bin(page)], 1);
mi_assert_internal(_mi_ptr_page(page)==page); mi_assert_internal(_mi_ptr_page(page)==page);
mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page); mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
mi_assert_internal(mi_page_block_size(page) == block_size); mi_assert_internal(mi_page_block_size(page) == block_size);
@@ -733,7 +738,7 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
const long commit_on_demand = mi_option_get(mi_option_page_commit_on_demand); const long commit_on_demand = mi_option_get(mi_option_page_commit_on_demand);
const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) || // always commit small pages const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) || // always commit small pages
(commit_on_demand == 2 && _mi_os_has_overcommit()) || (commit_on_demand == 0)); (commit_on_demand == 2 && _mi_os_has_overcommit()) || (commit_on_demand == 0));
page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq, commit); page = mi_arenas_page_alloc_fresh(slice_count, block_size, 1, req_arena, commit, tld);
if (page != NULL) { if (page != NULL) {
mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count); mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
_mi_page_init(heap, page); _mi_page_init(heap, page);
@@ -755,7 +760,7 @@ static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_s
const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, _mi_os_secure_guard_page_size()) + _mi_os_secure_guard_page_size()); const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, _mi_os_secure_guard_page_size()) + _mi_os_secure_guard_page_size());
#endif #endif
mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq, true /* commit singletons always */); mi_page_t* page = mi_arenas_page_alloc_fresh(slice_count, block_size, block_alignment, req_arena, true /* commit singletons always */, tld);
if (page == NULL) return NULL; if (page == NULL) return NULL;
mi_assert(page->reserved == 1); mi_assert(page->reserved == 1);
@@ -794,7 +799,7 @@ mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t bloc
return page; return page;
} }
void _mi_arenas_page_free(mi_page_t* page) { void _mi_arenas_page_free(mi_page_t* page, mi_tld_t* tld /* can be NULL */) {
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(_mi_ptr_page(page)==page); mi_assert_internal(_mi_ptr_page(page)==page);
mi_assert_internal(mi_page_is_owned(page)); mi_assert_internal(mi_page_is_owned(page));
@@ -802,6 +807,10 @@ void _mi_arenas_page_free(mi_page_t* page) {
mi_assert_internal(mi_page_is_abandoned(page)); mi_assert_internal(mi_page_is_abandoned(page));
mi_assert_internal(page->next==NULL && page->prev==NULL); mi_assert_internal(page->next==NULL && page->prev==NULL);
if (tld==NULL) { tld = _mi_thread_tld(); }
mi_tld_stat_decrease(tld, page_bins[_mi_page_bin(page)], 1);
mi_tld_stat_decrease(tld, pages, 1);
#if MI_DEBUG>1 #if MI_DEBUG>1
if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) { if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
size_t bin = _mi_bin(mi_page_block_size(page)); size_t bin = _mi_bin(mi_page_block_size(page));

View file

@@ -233,7 +233,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
// first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish) // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
_mi_arenas_page_unabandon(page); _mi_arenas_page_unabandon(page);
// we can free the page directly // we can free the page directly
_mi_arenas_page_free(page); _mi_arenas_page_free(page,NULL);
return; return;
} }
@@ -301,7 +301,7 @@ static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free) {
_mi_page_free_collect(page,false); // update used _mi_page_free_collect(page,false); // update used
if (mi_page_all_free(page)) { // it may become free just before unowning it if (mi_page_all_free(page)) { // it may become free just before unowning it
_mi_arenas_page_unabandon(page); _mi_arenas_page_unabandon(page);
_mi_arenas_page_free(page); _mi_arenas_page_free(page,NULL);
return true; return true;
} }
tf_expect = mi_atomic_load_relaxed(&page->xthread_free); tf_expect = mi_atomic_load_relaxed(&page->xthread_free);

View file

@@ -117,17 +117,20 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
// python/cpython#112532: we may be called from a thread that is not the owner of the heap // python/cpython#112532: we may be called from a thread that is not the owner of the heap
// const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id()); // const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
// if (_mi_is_main_thread()) { mi_debug_show_arenas(true, false, false); }
// collect retired pages // collect retired pages
_mi_heap_collect_retired(heap, force); _mi_heap_collect_retired(heap, force);
// if (_mi_is_main_thread()) { mi_debug_show_arenas(true, false, false); }
// collect all pages owned by this thread // collect all pages owned by this thread
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL); mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
// collect arenas (this is program wide so don't force purges on abandonment of threads) // collect arenas (this is program wide so don't force purges on abandonment of threads)
//mi_atomic_storei64_release(&heap->tld->subproc->purge_expire, 1); //mi_atomic_storei64_release(&heap->tld->subproc->purge_expire, 1);
_mi_arenas_collect(collect == MI_FORCE /* force purge? */, collect >= MI_FORCE /* visit all? */, heap->tld); _mi_arenas_collect(collect == MI_FORCE /* force purge? */, collect >= MI_FORCE /* visit all? */, heap->tld);
// merge statistics
_mi_stats_merge_from(&_mi_subproc()->stats, &heap->tld->stats);
} }
void _mi_heap_collect_abandon(mi_heap_t* heap) { void _mi_heap_collect_abandon(mi_heap_t* heap) {
@@ -320,7 +323,6 @@ mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
MI_UNUSED(arg1); MI_UNUSED(arg1);
MI_UNUSED(arg2); MI_UNUSED(arg2);
MI_UNUSED(heap);
MI_UNUSED(pq); MI_UNUSED(pq);
// ensure no more thread_delayed_free will be added // ensure no more thread_delayed_free will be added
@@ -352,7 +354,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
page->next = NULL; page->next = NULL;
page->prev = NULL; page->prev = NULL;
mi_page_set_heap(page, NULL); mi_page_set_heap(page, NULL);
_mi_arenas_page_free(page); _mi_arenas_page_free(page, heap->tld);
return true; // keep going return true; // keep going
} }

View file

@@ -167,7 +167,7 @@ bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq) {
return true; return true;
} }
static size_t mi_page_bin(const mi_page_t* page) { size_t _mi_page_bin(const mi_page_t* page) {
const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
mi_assert_internal(bin <= MI_BIN_FULL); mi_assert_internal(bin <= MI_BIN_FULL);
return bin; return bin;

View file

@@ -322,9 +322,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
else if (pq != NULL) { else if (pq != NULL) {
mi_page_queue_push(heap, pq, page); mi_page_queue_push(heap, pq, page);
} }
mi_heap_stat_increase(heap, pages, 1);
mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size); mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
mi_heap_stat_increase(heap, page_bins[mi_page_bin(page)], 1);
mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_expensive(_mi_page_is_valid(page));
return page; return page;
} }
@@ -394,11 +392,10 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) {
mi_page_queue_remove(pq, page); mi_page_queue_remove(pq, page);
// and free it // and free it
mi_heap_t* heap = page->heap; mi_tld_t* const tld = page->heap->tld;
mi_heap_stat_decrease(heap, page_bins[mi_page_bin(page)], 1);
mi_page_set_heap(page,NULL); mi_page_set_heap(page,NULL);
_mi_arenas_page_free(page); _mi_arenas_page_free(page,tld);
_mi_arenas_collect(false, false, heap->tld); // allow purging _mi_arenas_collect(false, false, tld); // allow purging
} }
#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE #define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE

View file

@@ -379,7 +379,7 @@ static mi_msecs_t mi_process_start; // = 0
// return thread local stats // return thread local stats
static mi_stats_t* mi_get_tld_stats(void) { static mi_stats_t* mi_get_tld_stats(void) {
return &mi_heap_get_default()->tld->stats; return &_mi_thread_tld()->stats;
} }
void mi_stats_reset(void) mi_attr_noexcept { void mi_stats_reset(void) mi_attr_noexcept {
@@ -596,6 +596,7 @@ static void mi_heap_buf_print_counter_value(mi_heap_buf_t* hbuf, const char* nam
#define MI_STAT_COUNTER(stat) mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat); #define MI_STAT_COUNTER(stat) mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat);
char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept { char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept {
mi_stats_merge();
mi_heap_buf_t hbuf = { NULL, 0, 0, true }; mi_heap_buf_t hbuf = { NULL, 0, 0, true };
if (output_size > 0 && output_buf != NULL) { if (output_size > 0 && output_buf != NULL) {
_mi_memzero(output_buf, output_size); _mi_memzero(output_buf, output_size);

View file

@@ -372,6 +372,7 @@ int main(int argc, char** argv) {
mi_free(json); mi_free(json);
} }
#endif #endif
mi_collect(true);
mi_stats_print(NULL); mi_stats_print(NULL);
#endif #endif
//bench_end_program(); //bench_end_program();