merge from dev

Daan Leijen 2025-03-02 17:16:28 -08:00
commit a447a1a297
8 changed files with 33 additions and 19 deletions

View file

@@ -433,6 +433,7 @@
<ClInclude Include="$(ProjectDir)..\..\include\mimalloc.h" />
<ClInclude Include="..\..\include\mimalloc-new-delete.h" />
<ClInclude Include="..\..\include\mimalloc-override.h" />
<ClInclude Include="..\..\include\mimalloc-stats.h" />
<ClInclude Include="..\..\include\mimalloc\atomic.h" />
<ClInclude Include="..\..\include\mimalloc\bits.h" />
<ClInclude Include="..\..\include\mimalloc\internal.h" />

View file

@@ -93,6 +93,9 @@
<ClInclude Include="..\..\include\mimalloc\bits.h">
<Filter>Headers</Filter>
</ClInclude>
<ClInclude Include="..\..\include\mimalloc-stats.h">
<Filter>Headers</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Headers">

View file

@@ -49,6 +49,7 @@ typedef struct mi_stat_counter_s {
\
/* internal statistics */ \
MI_STAT_COUNTER(arena_rollback_count) \
MI_STAT_COUNTER(arena_purges) \
MI_STAT_COUNTER(pages_extended) /* number of page extensions */ \
MI_STAT_COUNTER(pages_retire) /* number of pages that are retired */ \
MI_STAT_COUNTER(page_searches) /* searches for a fresh page */ \
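The counter list above is an X-macro: each MI_STAT_COUNTER(name) entry is expanded once per use site, so a single list can generate struct fields, initializers, and printing code. A minimal sketch of the pattern, using hypothetical names rather than mimalloc's actual expansion:

#include <stdio.h>

// illustrative X-macro list in the style of the MI_STAT_COUNTER entries
#define MY_COUNTERS(C) \
  C(arena_purges)      \
  C(pages_extended)    \
  C(page_searches)

typedef struct my_stats_s {
  #define C_FIELD(name) long name;   // each entry becomes a field
  MY_COUNTERS(C_FIELD)
  #undef C_FIELD
} my_stats_t;

static void my_stats_print(const my_stats_t* s) {
  #define C_PRINT(name) printf("%-16s %ld\n", #name, s->name);
  MY_COUNTERS(C_PRINT)
  #undef C_PRINT
}

int main(void) {
  my_stats_t stats = { 0 };
  stats.arena_purges = 1;
  my_stats_print(&stats);
  return 0;
}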

View file

@@ -400,6 +400,7 @@ typedef enum mi_option_e {
mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
mi_option_generic_collect, // collect heaps every N (=10000) generic allocation calls
mi_option_page_reclaim_on_free, // reclaim abandoned pages on a free (=0). -1 = always disallow, 0 = allow only if the page originated from the current heap, 1 = always allow
mi_option_page_full_retain, // retain N full (small) pages per size class (=2)
mi_option_page_max_candidates, // max candidate pages to consider for allocation (=4)
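The new mi_option_generic_collect can be tuned like any other mimalloc option, either programmatically or (by mimalloc's usual naming convention) through the MIMALLOC_GENERIC_COLLECT environment variable. A minimal sketch using the public mi_option_set API:

#include <mimalloc.h>

int main(void) {
  // collect heaps every 50000 generic allocation calls instead of the
  // default 10000 (equivalent to MIMALLOC_GENERIC_COLLECT=50000)
  mi_option_set(mi_option_generic_collect, 50000L);
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}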

View file

@@ -139,11 +139,12 @@ terms of the MIT license. A copy of the license can be found in the file
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE (73U)
#if MI_BIN_HUGE != 73U
#error "mimalloc internal: expecting 73 bins"
#endif
#define MI_BIN_FULL (MI_BIN_HUGE+1)
#define MI_BIN_COUNT (MI_BIN_FULL+1)
// We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX
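As a rough illustration of the "12.5% increments" in the comment above: each power-of-two range is split into eighth-size steps, which is what keeps the total bin count small (73 up to the huge-size threshold). A standalone sketch of the spacing idea, not mimalloc's actual _mi_bin mapping:

#include <stdio.h>

int main(void) {
  size_t size = 8, bin = 0;
  while (size <= 1024) {
    printf("bin %2zu: up to %4zu bytes\n", bin++, size);
    size_t pow2 = 1;
    while (pow2 * 2 <= size) pow2 *= 2;  // largest power of two <= size
    size += (pow2 < 64 ? 8 : pow2 / 8);  // exact small bins, then 1/8 steps
  }
  return 0;
}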
@@ -428,7 +429,8 @@ struct mi_heap_s {
size_t page_count; // total number of pages in the `pages` queues.
size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
size_t page_retired_max; // largest retired index into the `pages` array.
size_t generic_count; // how often is mimalloc_generic invoked?
long generic_count; // how often is `_mi_malloc_generic` called?
long generic_collect_count; // how often is `_mi_malloc_generic` called without collecting?
mi_heap_t* next; // list of heaps per thread
long page_full_retain; // how many full pages can be retained per queue (before abandoning them)
bool allow_page_reclaim; // `true` if this heap can reclaim abandoned pages

View file

@@ -75,8 +75,8 @@ const mi_page_t _mi_page_empty = {
{ 0 }, { 0 }, { 0 }, { 0 }, \
{ 0 }, { 0 }, { 0 }, { 0 }, \
\
{ 0 }, { 0 }, { 0 }, { 0 }, \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
{ 0 }, { 0 }, { 0 }, { 0 }, { 0 }, \
MI_INIT4(MI_STAT_COUNT_NULL), \
{ 0 }, { 0 }, { 0 }, { 0 }, \
\
{ MI_INIT4(MI_STAT_COUNT_NULL) }, \
@@ -122,7 +122,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
{ {0}, {0}, 0, true }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
0, // generic count
0, 0, // generic count
NULL, // next
0, // full page retain
false, // can reclaim
@@ -159,7 +159,7 @@ mi_decl_cache_align mi_heap_t heap_main = {
{ {0x846ca68b}, {0}, 0, true }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
0, // generic count
0, 0, // generic count
NULL, // next heap
2, // full page retain
true, // allow page reclaim
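The MI_INIT4 helper used in these initializers is a repetition macro that expands its argument a fixed number of times, avoiding long runs of identical initializers. A sketch of the technique with illustrative names (mimalloc defines its own MI_INIT4/MI_INIT8/... family):

#include <stdio.h>

#define MY_INIT4(x)  x(), x(), x(), x()
#define MY_INIT8(x)  MY_INIT4(x), MY_INIT4(x)
#define ZERO_STAT()  { 0, 0 }            // a zeroed two-field initializer

typedef struct { long cur, peak; } stat_t;
static stat_t stats[8] = { MY_INIT8(ZERO_STAT) };

int main(void) {
  printf("stats[7] = { %ld, %ld }\n", stats[7].cur, stats[7].peak);
  return 0;
}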

View file

@@ -168,6 +168,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ MI_DEFAULT_GUARDED_SAMPLE_RATE, UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
{ 0, UNINIT, MI_OPTION(guarded_sample_seed)},
{ 10000, UNINIT, MI_OPTION(generic_collect) }, // collect heaps every N (=10000) generic allocation calls
{ 0, UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },// reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
{ 2, UNINIT, MI_OPTION(page_full_retain) }, // number of (small) pages to retain in the free page queues
{ 4, UNINIT, MI_OPTION(page_max_candidates) }, // max search to find a best page candidate
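The effective values of these defaults (and any environment overrides) can be read back through the public mi_option_get API; a quick check for the options touched in this commit:

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  printf("generic_collect:      %ld\n", mi_option_get(mi_option_generic_collect));
  printf("page_reclaim_on_free: %ld\n", mi_option_get(mi_option_page_reclaim_on_free));
  printf("page_full_retain:     %ld\n", mi_option_get(mi_option_page_full_retain));
  return 0;
}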

View file

@@ -713,9 +713,7 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
// search through the pages in "next fit" order
#if MI_STAT
size_t count = 0;
#endif
long candidate_limit = 0; // we reset this on the first candidate to limit the search
long page_full_retain = (pq->block_size > MI_SMALL_MAX_OBJ_SIZE ? 0 : heap->page_full_retain); // only retain small pages
mi_page_t* page_candidate = NULL; // a page with free space
@@ -724,9 +722,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
while (page != NULL)
{
mi_page_t* next = page->next; // remember next (as this page can move to another queue)
#if MI_STAT
count++;
#endif
candidate_limit--;
// search up to N pages for a best candidate
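With the #if MI_STAT counting gone, candidate_limit alone bounds how far the next-fit scan looks once a first usable page is found. A simplified standalone sketch of this bounded best-candidate scan (field and constant names are illustrative, not mimalloc's):

#include <stddef.h>

#define MAX_CANDIDATES 4   // cf. mi_option_page_max_candidates (=4)

typedef struct page_s {
  struct page_s* next;
  size_t free_blocks;
} page_t;

static page_t* find_page(page_t* head) {
  page_t* candidate = NULL;
  long limit = 0;
  for (page_t* p = head; p != NULL; p = p->next) {
    if (p->free_blocks == 0) continue;   // full page: keep scanning
    if (candidate == NULL) {
      candidate = p;
      limit = MAX_CANDIDATES;            // first hit: start bounding the search
    }
    else if (p->free_blocks > candidate->free_blocks) {
      candidate = p;                     // remember the better candidate
    }
    if (--limit <= 0) break;             // examined enough candidates
  }
  return candidate;
}

int main(void) {
  page_t pages[3] = { { &pages[1], 0 }, { &pages[2], 5 }, { NULL, 9 } };
  page_t* best = find_page(&pages[0]);
  return (best != NULL && best->free_blocks == 9) ? 0 : 1;
}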
@@ -944,11 +940,20 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
}
mi_assert_internal(mi_heap_is_initialized(heap));
// collect every N generic mallocs
if mi_unlikely(heap->generic_count++ > 10000) {
// do administrative tasks every N generic mallocs
if mi_unlikely(++heap->generic_count >= 1000) {
heap->generic_collect_count += heap->generic_count;
heap->generic_count = 0;
// call potential deferred free routines
_mi_deferred_free(heap, false);
// collect every once in a while (10000 by default)
const long generic_collect = mi_option_get_clamp(mi_option_generic_collect, 1, 1000000L);
if (heap->generic_collect_count >= generic_collect) {
heap->generic_collect_count = 0;
mi_heap_collect(heap, false /* force? */);
}
}
// find (or allocate) a page of the right size
mi_page_t* page = mi_find_page(heap, size, huge_alignment);
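The net effect of the rewritten block is a two-level amortization: every 1000 generic calls the heap does cheap maintenance (deferred frees), and only when the accumulated call count reaches mi_option_generic_collect (10000 by default) does it pay for a full mi_heap_collect. A standalone sketch of the counting scheme, with illustrative names rather than mimalloc internals:

#include <stdio.h>

typedef struct {
  long count;           // calls since the last maintenance step
  long collect_count;   // calls accumulated toward the next full collect
} amortizer_t;

static void on_call(amortizer_t* a, long maintain_every, long collect_every) {
  if (++a->count >= maintain_every) {
    a->collect_count += a->count;
    a->count = 0;
    printf("maintenance (cheap, frequent)\n");
    if (a->collect_count >= collect_every) {
      a->collect_count = 0;
      printf("full collect (expensive, rare)\n");
    }
  }
}

int main(void) {
  amortizer_t a = { 0, 0 };
  for (long i = 0; i < 25000; i++) on_call(&a, 1000, 10000);  // 25 maintenance steps, 2 collects
  return 0;
}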