Merge branch 'dev' into dev2

This commit is contained in:
Daan Leijen 2025-03-02 17:06:35 -08:00
commit 2d81b6fee9
9 changed files with 37 additions and 11 deletions

View file

@@ -554,7 +554,8 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
// reset expire (if not already set concurrently)
mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
_mi_stat_counter_increase(&_mi_stats_main.arena_purges, 1);
// potential purges scheduled, walk through the bitmap
bool any_purged = false;
bool full_purge = true;

View file

@@ -77,9 +77,9 @@ const mi_page_t _mi_page_empty = {
{ 0 }, { 0 }, { 0 }, { 0 }, \
{ 0 }, { 0 }, { 0 }, { 0 }, \
\
{ 0 }, { 0 }, { 0 }, { 0 }, \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
{ 0 }, { 0 }, { 0 }, { 0 }, \
{ 0 }, { 0 }, { 0 }, { 0 }, { 0 }, \
MI_INIT4(MI_STAT_COUNT_NULL), \
{ 0 }, { 0 }, { 0 }, { 0 }, \
\
{ MI_INIT4(MI_STAT_COUNT_NULL) }, \
{ { 0 }, { 0 }, { 0 }, { 0 } }, \
@@ -118,6 +118,7 @@ mi_decl_hidden mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
{ {0}, {0}, 0, true }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
0, 0, // generic count
NULL, // next
false, // can reclaim
0, // tag
@@ -166,6 +167,7 @@ mi_decl_cache_align mi_heap_t _mi_heap_main = {
{ {0x846ca68b}, {0}, 0, true }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
0, 0, // generic count
NULL, // next heap
false, // can reclaim
0, // tag

View file

@@ -162,6 +162,7 @@ static mi_option_desc_t options[_mi_option_last] =
UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
{ 0, UNINIT, MI_OPTION(guarded_sample_seed)},
{ 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
{ 10000, UNINIT, MI_OPTION(generic_collect) }, // collect heaps every N (=10000) generic allocation calls
};
static void mi_option_init(mi_option_desc_t* desc);

View file

@@ -989,11 +989,23 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
}
mi_assert_internal(mi_heap_is_initialized(heap));
// call potential deferred free routines
_mi_deferred_free(heap, false);
// do administrative tasks every N generic mallocs
if mi_unlikely(++heap->generic_count >= 100) {
heap->generic_collect_count += heap->generic_count;
heap->generic_count = 0;
// call potential deferred free routines
_mi_deferred_free(heap, false);
// free delayed frees from other threads (but skip contended ones)
_mi_heap_delayed_free_partial(heap);
// free delayed frees from other threads (but skip contended ones)
_mi_heap_delayed_free_partial(heap);
// collect every once in a while (10000 by default)
const long generic_collect = mi_option_get_clamp(mi_option_generic_collect, 1, 1000000L);
if (heap->generic_collect_count >= generic_collect) {
heap->generic_collect_count = 0;
mi_heap_collect(heap, false /* force? */);
}
}
// find (or allocate) a page of the right size
mi_page_t* page = mi_find_page(heap, size, huge_alignment);