mirror of
https://github.com/microsoft/mimalloc.git
synced 2025-05-09 00:39:32 +03:00
Merge branch 'dev3' into dev3-bin
This commit is contained in:
commit
8788ffb14e
5 changed files with 18 additions and 17 deletions
|
@ -426,7 +426,7 @@ struct mi_heap_s {
|
|||
size_t page_retired_max; // largest retired index into the `pages` array.
|
||||
size_t generic_count; // how often is mimalloc_generic invoked?
|
||||
mi_heap_t* next; // list of heaps per thread
|
||||
long full_page_retain; // how many full pages can be retained per queue (before abandoning them)
|
||||
long page_full_retain; // how many full pages can be retained per queue (before abandoning them)
|
||||
bool allow_page_reclaim; // `true` if this heap may reclaim abandoned pages (set from `mi_option_reclaim_on_free`; cleared when the heap allows destroy)
|
||||
bool allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint
|
||||
uint8_t tag; // custom tag, can be used for separating heaps based on the object types
|
||||
|
|
|
@ -250,8 +250,8 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) mi_attr_noe
|
|||
|
||||
// 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
|
||||
if (!mi_page_is_used_at_frac(page,8) && // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
|
||||
!mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
|
||||
_mi_arenas_page_try_reabandon_to_mapped(page))
|
||||
!mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
|
||||
_mi_arenas_page_try_reabandon_to_mapped(page))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
|
20
src/heap.c
20
src/heap.c
|
@ -167,7 +167,7 @@ mi_heap_t* mi_heap_get_backing(void) {
|
|||
}
|
||||
|
||||
// todo: make order of parameters consistent (but would that break compat with CPython?)
|
||||
void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint8_t heap_tag, mi_tld_t* tld)
|
||||
void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy, uint8_t heap_tag, mi_tld_t* tld)
|
||||
{
|
||||
mi_assert_internal(heap!=NULL);
|
||||
mi_memid_t memid = heap->memid;
|
||||
|
@ -175,17 +175,17 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint
|
|||
heap->memid = memid;
|
||||
heap->tld = tld; // avoid reading the thread-local tld during initialization
|
||||
heap->exclusive_arena = _mi_arena_from_id(arena_id);
|
||||
heap->allow_page_reclaim = !noreclaim;
|
||||
heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_page_full_retain) >= 0);
|
||||
heap->full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
|
||||
heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_reclaim_on_free));
|
||||
heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0);
|
||||
heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
|
||||
heap->tag = heap_tag;
|
||||
if (heap->tld->is_in_threadpool) {
|
||||
// if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
|
||||
// (but abandoning is good in this case)
|
||||
heap->allow_page_reclaim = false;
|
||||
// and halve the full page retain (possibly to 0)
|
||||
if (heap->full_page_retain >= 0) {
|
||||
heap->full_page_retain = heap->full_page_retain / 4;
|
||||
// .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
|
||||
// (so blocked threads do not hold on to too much memory)
|
||||
if (heap->page_full_retain >= 0) {
|
||||
heap->page_full_retain = heap->page_full_retain / 4;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -236,12 +236,12 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi
|
|||
}
|
||||
|
||||
mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
|
||||
return mi_heap_new_ex(0 /* default heap tag */, false /* don't allow `mi_heap_destroy` */, arena_id);
|
||||
return mi_heap_new_ex(0 /* default heap tag */, false /* allow destroy? */, arena_id);
|
||||
}
|
||||
|
||||
mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
|
||||
// don't reclaim abandoned memory or otherwise destroy is unsafe
|
||||
return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
|
||||
return mi_heap_new_ex(0 /* default heap tag */, true /* allow destroy? */, _mi_arena_id_none());
|
||||
}
|
||||
|
||||
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
|
||||
|
|
|
@ -259,8 +259,9 @@ static void mi_heap_main_init(void) {
|
|||
//heap_main.keys[0] = _mi_heap_random_next(&heap_main);
|
||||
//heap_main.keys[1] = _mi_heap_random_next(&heap_main);
|
||||
_mi_heap_guarded_init(&heap_main);
|
||||
heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_reclaim_on_free);
|
||||
heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0);
|
||||
heap_main.full_page_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
|
||||
heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -680,7 +680,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
|
|||
size_t count = 0;
|
||||
#endif
|
||||
long candidate_limit = 0; // we reset this on the first candidate to limit the search
|
||||
long full_page_retain = heap->full_page_retain;
|
||||
long page_full_retain = heap->page_full_retain;
|
||||
mi_page_t* page_candidate = NULL; // a page with free space
|
||||
mi_page_t* page = pq->first;
|
||||
|
||||
|
@ -703,8 +703,8 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
|
|||
// if the page is completely full, move it to the `mi_pages_full`
|
||||
// queue so we don't visit long-lived pages too often.
|
||||
if (!immediate_available && !mi_page_is_expandable(page)) {
|
||||
full_page_retain--;
|
||||
if (full_page_retain < 0) {
|
||||
page_full_retain--;
|
||||
if (page_full_retain < 0) {
|
||||
mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
|
||||
mi_page_to_full(page, pq);
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue