Mirror of https://github.com/microsoft/mimalloc.git
fix free stats

commit 5e95ebc7a0 (parent c9abfe8253)

6 changed files with 40 additions and 14 deletions
@@ -223,6 +223,7 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
 // void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
 void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
+void _mi_stat_free(const mi_page_t* page, const mi_block_t* block);

 // "libc.c"
 #include <stdarg.h>
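The new prototype promotes the statistics helper from a file-local function in free.c to an internal function that other translation units can call. A single-file sketch of that pattern (the names below are made up for illustration; mimalloc itself uses its `_mi_` prefix for such internal functions):

#include <stddef.h>
#include <stdio.h>

/* what the shared internal header would declare (previously a `static` helper) */
void demo_stat_free(size_t size);

/* code in another translation unit can now account frees too */
static void free_from_elsewhere(size_t size) {
  demo_stat_free(size);   /* previously impossible: the helper was file-local */
}

/* the defining file drops `static` so the symbol is visible externally */
void demo_stat_free(size_t size) {
  printf("accounted a free of %zu bytes\n", size);
}

int main(void) {
  free_from_elsewhere(64);
  return 0;
}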
src/arena.c (11 lines changed)
@@ -622,6 +622,10 @@ void _mi_arena_page_free(mi_page_t* page) {
   mi_assert_internal(mi_page_all_free(page));
   mi_assert_internal(page->next==NULL);

+  #if MI_STAT > 1
+  _mi_page_free_collect(page, true);
+  #endif
+
   #if MI_DEBUG>1
   if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
     size_t bin = _mi_bin(mi_page_block_size(page));
@@ -665,7 +669,6 @@ void _mi_arena_page_abandon(mi_page_t* page) {
     mi_assert_internal(mi_bitmap_is_clearN(&arena->slices_purge, slice_index, slice_count));
     // mi_assert_internal(mi_bitmap_is_setN(&arena->slices_dirty, slice_index, slice_count));

-    _mi_page_unown(page);
     bool were_zero = mi_pairmap_set(&arena->pages_abandoned[bin], slice_index);
     MI_UNUSED(were_zero); mi_assert_internal(were_zero);
     mi_atomic_increment_relaxed(&subproc->abandoned_count[bin]);
@@ -673,8 +676,9 @@ void _mi_arena_page_abandon(mi_page_t* page) {
   else {
     // page is full (or a singleton), page is OS/externally allocated
     // leave as is; it will be reclaimed when an object is free'd in the page
-    _mi_page_unown(page);
   }
-}
+  _mi_page_unown(page);
+  mi_stat_increase(_mi_stats_main.pages_abandoned, 1);
+}

 // called from `mi_free` if trying to unabandon an abandoned page
@@ -704,6 +708,7 @@ void _mi_arena_page_unabandon(mi_page_t* page) {
     // nothing to do
     // TODO: maintain count of these as well?
   }
+  mi_stat_decrease(_mi_stats_main.pages_abandoned, 1);
 }

 /*
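Together these two hunks keep the pages_abandoned statistic balanced: every page counted in _mi_arena_page_abandon is counted back out in _mi_arena_page_unabandon. A minimal, self-contained sketch of such a paired current/peak counter (plain C11 atomics, not mimalloc's own stat machinery):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic long current; _Atomic long peak; } stat_count_t;

static void stat_increase(stat_count_t* s) {
  long v = atomic_fetch_add(&s->current, 1) + 1;   /* new current value */
  long p = atomic_load(&s->peak);
  while (v > p && !atomic_compare_exchange_weak(&s->peak, &p, v)) {
    /* another thread raised the peak concurrently; retry with the fresh value in p */
  }
}

static void stat_decrease(stat_count_t* s) {
  atomic_fetch_sub(&s->current, 1);
}

static stat_count_t pages_abandoned;   /* zero-initialized */

int main(void) {
  stat_increase(&pages_abandoned);     /* a page is abandoned */
  stat_decrease(&pages_abandoned);     /* the page is reclaimed (un-abandoned) */
  printf("current=%ld peak=%ld\n",
         atomic_load(&pages_abandoned.current),
         atomic_load(&pages_abandoned.peak));
  return 0;
}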
src/free.c (21 lines changed)
@@ -16,7 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block);
 static bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block);
 static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block);
-static void mi_stat_free(const mi_page_t* page, const mi_block_t* block);
+// static void _mi_stat_free(const mi_page_t* page, const mi_block_t* block);


 // ------------------------------------------------------
@@ -33,7 +33,7 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
   // checks
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
-  if (track_stats) { mi_stat_free(page, block); }
+  if (track_stats) { _mi_stat_free(page, block); }
   #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED
   memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
   #endif
@@ -199,9 +199,20 @@ static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
   }
 }

-// Push a block that is owned by another thread on its page-local thread free list.
+// Push a block that is owned by another thread (or abandoned) on its page-local thread free list.
 static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
+  // adjust stats (after padding check and potentially recursive `mi_free` above)
+  _mi_stat_free(page, block);   // stat_free may access the padding
+  mi_track_free_size(block, mi_page_usable_size_of(page, block));
+
+  // _mi_padding_shrink(page, block, sizeof(mi_block_t));
+  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN   // note: when tracking, cannot use mi_usable_size with multi-threading
+  size_t dbgsize = mi_usable_size(block);
+  if (dbgsize > MI_MiB) { dbgsize = MI_MiB; }
+  _mi_memset_aligned(block, MI_DEBUG_FREED, dbgsize);
+  #endif
+
   // push atomically on the page thread free list
   mi_thread_free_t tf_new;
   mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free);
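This is the heart of the commit: cross-thread frees previously bypassed the statistics, so they are now accounted in mi_free_block_mt before the block is pushed onto the page's thread-free list. A rough, self-contained sketch of that ordering (plain C11 atomics; page_t, block_t and freed_bytes below are invented for illustration, not mimalloc's types):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct block_s { struct block_s* next; } block_t;

typedef struct page_s {
  _Atomic(block_t*) thread_free;   /* blocks freed by non-owning threads */
  _Atomic(size_t)   freed_bytes;   /* hypothetical per-page free statistic */
  size_t            block_size;
} page_t;

static void free_block_mt(page_t* page, block_t* block) {
  /* 1. account the free while this thread still owns the block */
  atomic_fetch_add_explicit(&page->freed_bytes, page->block_size, memory_order_relaxed);

  /* 2. push onto the page's thread-free list; after this the owning thread may reuse it */
  block_t* old_head = atomic_load_explicit(&page->thread_free, memory_order_relaxed);
  do {
    block->next = old_head;
  } while (!atomic_compare_exchange_weak_explicit(&page->thread_free, &old_head, block,
                                                  memory_order_release, memory_order_relaxed));
}

int main(void) {
  page_t page;
  page.block_size = 64;
  atomic_init(&page.thread_free, NULL);
  atomic_init(&page.freed_bytes, 0);

  block_t* b = malloc(sizeof(block_t));
  free_block_mt(&page, b);
  printf("freed bytes accounted: %zu\n", atomic_load(&page.freed_bytes));

  free(atomic_load(&page.thread_free));   /* clean up the sketch's single block */
  return 0;
}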
@@ -532,7 +543,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {

 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
-static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+void _mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   #if (MI_STAT < 2)
   MI_UNUSED(block);
   #endif
|
@ -554,7 +565,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
|
|||
}
|
||||
}
|
||||
#else
|
||||
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
|
||||
void _mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
|
||||
MI_UNUSED(page); MI_UNUSED(block);
|
||||
}
|
||||
#endif
|
||||
|
|
@@ -158,7 +158,7 @@ static mi_option_desc_t options[_mi_option_last] =
   UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0, UNINIT, MI_OPTION(guarded_sample_seed)},
   { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
-  { 1, UNINIT, MI_OPTION(eager_abandon) },
+  { 0, UNINIT, MI_OPTION(eager_abandon) },
 };

 static void mi_option_init(mi_option_desc_t* desc);
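The default for eager_abandon flips from 1 to 0, so the behaviour becomes opt-in. If the option is exposed in mimalloc.h as mi_option_eager_abandon (an assumption; option names in the dev branches can change), it could be re-enabled at startup as sketched below, or via the corresponding MIMALLOC_EAGER_ABANDON environment variable that mimalloc derives from the option name:

#include <mimalloc.h>

int main(void) {
  /* assumption: the option is exposed under this name in mimalloc.h */
  mi_option_set_enabled(mi_option_eager_abandon, true);

  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}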
@@ -189,10 +189,11 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
   size_t count = 1;
   mi_block_t* tail = head;
   mi_block_t* next;
-  while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
+  while( (next = mi_block_next(page,tail)) != NULL && count <= max_count) {
     count++;
     tail = next;
   }

   // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
   if (count > max_count) {
     _mi_error_message(EFAULT, "corrupted thread-free list\n");
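The loop above bounds the walk of the thread-free list by the page capacity, so a corrupted (for example, cyclic) list is reported instead of hanging the collector. A standalone sketch of the same bounded-traversal idea (illustrative only, not mimalloc code):

#include <stddef.h>
#include <stdio.h>

typedef struct node_s { struct node_s* next; } node_t;

/* Returns the number of nodes, or 0 if the walk exceeds max_count nodes,
   which indicates corruption such as a cycle introduced by a double free. */
static size_t bounded_list_length(node_t* head, size_t max_count) {
  size_t count = 0;
  for (node_t* n = head; n != NULL; n = n->next) {
    if (++count > max_count) {
      fprintf(stderr, "corrupted free list (more than %zu nodes)\n", max_count);
      return 0;
    }
  }
  return count;
}

int main(void) {
  node_t a, b, c;
  a.next = &b; b.next = &c; c.next = NULL;
  printf("length: %zu\n", bounded_list_length(&a, 8));   /* prints 3 */

  c.next = &a;                                           /* introduce a cycle */
  printf("length: %zu\n", bounded_list_length(&a, 8));   /* reports corruption, prints 0 */
  return 0;
}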
@@ -40,10 +40,10 @@ static int ITER = 20;
 static int THREADS = 8;
 static int SCALE = 10;
 static int ITER = 10;
-#elif 0
+#elif 1
 static int THREADS = 4;
 static int SCALE = 100;
-static int ITER = 20;
+static int ITER = 50;
 #else
 static int THREADS = 32; // more repeatable if THREADS <= #processors
 static int SCALE = 25; // scaling factor
@@ -227,7 +227,7 @@ static void test_stress(void) {
     run_os_threads(THREADS, &stress);
     #if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
     // switch between arena and OS allocation for testing
-    mi_option_set_enabled(mi_option_disallow_arena_alloc, (n%2)==1);
+    // mi_option_set_enabled(mi_option_disallow_arena_alloc, (n%2)==1);
     #endif
     #ifdef HEAP_WALK
     size_t total = 0;
@@ -248,7 +248,14 @@ static void test_stress(void) {
     { printf("- iterations left: %3d\n", ITER - (n + 1)); }
     #endif
   }
-}
+  // clean up
+  for (int i = 0; i < TRANSFERS; i++) {
+    void* p = atomic_exchange_ptr(&transfer[i], NULL);
+    if (p != NULL) {
+      free_items(p);
+    }
+  }
+}

 #ifndef STRESS
 static void leak(intptr_t tid) {
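The stress test now frees whatever is still parked in the transfer slots before returning, claiming each slot with an atomic exchange so a pointer cannot be freed twice. A minimal sketch of that claim-then-free pattern using plain C11 atomics (the slot count and names here are invented, unlike the test's own transfer array and atomic_exchange_ptr helper):

#include <stdatomic.h>
#include <stdlib.h>

#define SLOT_COUNT 16
static _Atomic(void*) slots[SLOT_COUNT];

static void cleanup_slots(void) {
  for (int i = 0; i < SLOT_COUNT; i++) {
    void* p = atomic_exchange(&slots[i], NULL);   /* claim exclusive ownership of the slot */
    if (p != NULL) {
      free(p);                                    /* safe: no other thread can see p anymore */
    }
  }
}

int main(void) {
  slots[3] = malloc(32);   /* something left behind, as the stress loop may leave */
  cleanup_slots();
  return 0;
}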
@@ -320,6 +327,7 @@ int main(int argc, char** argv) {

 #ifndef USE_STD_MALLOC
   #ifndef NDEBUG
   // mi_debug_show_arenas(true, true, false);
   mi_collect(true);
   mi_debug_show_arenas(true,true,false);
   #endif