mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-01 01:04:37 +03:00)
merge from dev3
commit dddf732c60
6 changed files with 48 additions and 47 deletions
src/arena.c (19 changed lines)

@@ -160,12 +160,15 @@ static mi_arena_t* mi_page_arena(mi_page_t* page, size_t* slice_index, size_t* s
   return mi_arena_from_memid(page->memid, slice_index, slice_count);
 }
 
-static size_t mi_memid_size(mi_memid_t memid) {
-  if (memid.memkind == MI_MEM_ARENA) {
-    return memid.mem.arena.slice_count * MI_ARENA_SLICE_SIZE;
+static size_t mi_page_full_size(mi_page_t* page) {
+  if (page->memid.memkind == MI_MEM_ARENA) {
+    return page->memid.mem.arena.slice_count * MI_ARENA_SLICE_SIZE;
   }
-  else if (mi_memid_is_os(memid) || memid.memkind == MI_MEM_EXTERNAL) {
-    return memid.mem.os.size;
+  else if (mi_memid_is_os(page->memid) || page->memid.memkind == MI_MEM_EXTERNAL) {
+    mi_assert_internal((uint8_t*)page->memid.mem.os.base <= (uint8_t*)page);
+    const ptrdiff_t presize = (uint8_t*)page - (uint8_t*)page->memid.mem.os.base;
+    mi_assert_internal((ptrdiff_t)page->memid.mem.os.size >= presize);
+    return (presize > (ptrdiff_t)page->memid.mem.os.size ? 0 : page->memid.mem.os.size - presize);
   }
   else {
     return 0;

@@ -820,7 +823,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
   // we must do this since we may later allocate large spans over this page and cannot have a guard page in between
   #if MI_SECURE >= 2
   if (!page->memid.is_pinned) {
-    _mi_os_secure_guard_page_reset_before((uint8_t*)page + mi_memid_size(page->memid));
+    _mi_os_secure_guard_page_reset_before((uint8_t*)page + mi_page_full_size(page));
   }
   #endif
 
@@ -831,7 +834,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
     mi_bitmap_clear(arena->pages, page->memid.mem.arena.slice_index);
     if (page->slice_committed > 0) {
       // if committed on-demand, set the commit bits to account commit properly
-      mi_assert_internal(mi_memid_size(page->memid) >= page->slice_committed);
+      mi_assert_internal(mi_page_full_size(page) >= page->slice_committed);
       const size_t total_slices = page->slice_committed / MI_ARENA_SLICE_SIZE;  // conservative
       //mi_assert_internal(mi_bitmap_is_clearN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices));
       mi_assert_internal(page->memid.mem.arena.slice_count >= total_slices);

@@ -849,7 +852,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
       mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, page->memid.mem.arena.slice_index, page->memid.mem.arena.slice_count));
     }
   }
-  _mi_arenas_free(page, mi_memid_size(page->memid), page->memid);
+  _mi_arenas_free(page, mi_page_full_size(page), page->memid);
 }
 
 /* -----------------------------------------------------------
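The renamed helper changes how the size of an OS-backed page is reported: instead of returning the raw OS allocation size, it subtracts the prefix between the OS base address and the page object itself. Below is a minimal, self-contained C sketch of that accounting; `os_memid_t`, `memid_size`, and `page_full_size` are simplified stand-ins for illustration, not mimalloc's actual types or functions.

// Hedged sketch: the size accounting done by the new mi_page_full_size
// (vs. the old mi_memid_size) for an OS-backed page. Simplified stand-in types.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t* base; size_t size; } os_memid_t;   // stand-in for memid.mem.os

// old behavior: report the full OS allocation size, ignoring where the page starts
static size_t memid_size(os_memid_t m) { return m.size; }

// new behavior: subtract the prefix in front of the page object itself, so callers
// (guard-page reset, _mi_arenas_free) only see the span owned by the page
static size_t page_full_size(os_memid_t m, uint8_t* page_start) {
  assert(m.base <= page_start);
  const ptrdiff_t presize = page_start - m.base;
  return (presize > (ptrdiff_t)m.size ? 0 : m.size - (size_t)presize);
}

int main(void) {
  uint8_t buf[4096];
  os_memid_t m = { buf, sizeof(buf) };
  uint8_t* page_start = buf + 128;   // page object placed 128 bytes into the OS block
  printf("old: %zu, new: %zu\n", memid_size(m), page_full_size(m, page_start));  // 4096 vs 3968
  return 0;
}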
src/free.c (47 changed lines)

@@ -217,43 +217,40 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
     return;
   }
 
-  const bool too_full = mi_page_is_used_at_frac(page, 8);   // more than 7/8th of the page is in use?
 
   // 2. if the page is not too full, we can try to reclaim it for ourselves
-  // note: this seems a bad idea but it speeds up some benchmarks (like `larson`) quite a bit.
-  if (!too_full &&
-      _mi_option_get_fast(mi_option_page_reclaim_on_free) != 0 &&
-      page->block_size <= MI_SMALL_MAX_OBJ_SIZE   // only for small sized blocks
-     )
+  // note:
+  // we only reclaim if the page originated from our heap (the heap field is preserved on abandonment)
+  // to avoid claiming arbitrary object sizes and limit indefinite expansion.
+  // this helps benchmarks like `larson`
+  const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
+  if (reclaim_on_free >= 0 && page->block_size <= MI_SMALL_MAX_OBJ_SIZE)  // only for small sized blocks
   {
-    // the page has still some blocks in use (but not too many)
-    // reclaim in our heap if compatible, or otherwise abandon again
-    // todo: optimize this check further?
     // note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should
     // not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944)
-    mi_heap_t* const heap = mi_prim_get_default_heap();
-    if (mi_heap_is_initialized(heap))  // we did not already terminate our thread
-    {
-      mi_heap_t* const tagheap = _mi_heap_by_tag(heap, page->heap_tag);
-      if ((tagheap != NULL) &&                 // don't reclaim across heap object types
-          (tagheap->allow_page_reclaim) &&     // and we are allowed to reclaim abandoned pages
-          // (page->subproc == tagheap->tld->subproc) &&  // don't reclaim across sub-processes; todo: make this check faster (integrate with _mi_heap_by_tag ? )
-          (_mi_arena_memid_is_suitable(page->memid, tagheap->exclusive_arena))  // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
-         )
-      {
-        if (mi_page_queue(tagheap, page->block_size)->first != NULL) { // don't reclaim for a block_size we don't use
-          // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
-          _mi_arenas_page_unabandon(page);
-          _mi_heap_page_reclaim(tagheap, page);
-          mi_heap_stat_counter_increase(tagheap, pages_reclaim_on_free, 1);
-          return;
-        }
+    mi_heap_t* heap = mi_prim_get_default_heap();
+    if (heap != page->heap) {
+      if (mi_heap_is_initialized(heap)) {
+        heap = _mi_heap_by_tag(heap, page->heap_tag);
+      }
+    }
+    if (heap != NULL && heap->allow_page_reclaim &&
+        (heap == page->heap || (reclaim_on_free == 1 && !mi_page_is_used_at_frac(page, 8))) &&  // only reclaim if we were the originating heap, or if reclaim_on_free == 1 and the pages is not too full
+        _mi_arena_memid_is_suitable(page->memid,heap->exclusive_arena)  // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
+       )
+    {
+      // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
+      _mi_arenas_page_unabandon(page);
+      _mi_heap_page_reclaim(heap, page);
+      mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1);
+      return;
     }
   }
 
   // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
-  if (!too_full &&                          // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
+  if (!mi_page_is_used_at_frac(page, 8) &&  // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
      !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
      _mi_arenas_page_try_reabandon_to_mapped(page))
   {
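The rewritten check replaces the single `page_reclaim_on_free != 0` test with a signed setting. Below is a small, self-contained C sketch of the resulting decision; the plain structs stand in for mimalloc's heap and page types, and only the branch logic mirrors the diff above.

// Hedged sketch of the new reclaim-on-free decision in mi_free_try_collect_mt.
// heap_t and page_t are stand-ins, not mimalloc's real types.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { int id; bool allow_page_reclaim; } heap_t;         // stand-in for mi_heap_t
typedef struct { int origin_heap_id; bool used_over_7_8; } page_t;  // stand-in for mi_page_t

// reclaim_on_free: -1 = never reclaim on free, 0 = only into the originating heap,
//                   1 = also across heaps when the page is not too full
static bool should_reclaim(long reclaim_on_free, const heap_t* heap, const page_t* page) {
  if (reclaim_on_free < 0) return false;                  // option disabled entirely
  if (heap == NULL || !heap->allow_page_reclaim) return false;
  const bool same_heap = (heap->id == page->origin_heap_id);
  return same_heap || (reclaim_on_free == 1 && !page->used_over_7_8);
}

int main(void) {
  heap_t me = { .id = 1, .allow_page_reclaim = true };
  page_t mine  = { .origin_heap_id = 1, .used_over_7_8 = true };
  page_t other = { .origin_heap_id = 2, .used_over_7_8 = false };
  printf("%d %d %d\n",
         should_reclaim(0, &me, &mine),    // 1: originating heap, always eligible
         should_reclaim(0, &me, &other),   // 0: foreign page stays abandoned at setting 0
         should_reclaim(1, &me, &other));  // 1: setting 1 reclaims foreign pages if not too full
  return 0;
}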
@@ -175,7 +175,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   heap->memid = memid;
   heap->tld = tld;  // avoid reading the thread-local tld during initialization
   heap->exclusive_arena = _mi_arena_from_id(arena_id);
-  heap->allow_page_reclaim = (!allow_destroy && mi_option_is_enabled(mi_option_page_reclaim_on_free));
+  heap->allow_page_reclaim = (!allow_destroy && mi_option_get(mi_option_page_reclaim_on_free) >= 0);
   heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0);
   heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
   heap->tag = heap_tag;

@@ -259,7 +259,7 @@ static void mi_heap_main_init(void) {
     //heap_main.keys[0] = _mi_heap_random_next(&heap_main);
     //heap_main.keys[1] = _mi_heap_random_next(&heap_main);
     _mi_heap_guarded_init(&heap_main);
-    heap_main.allow_page_reclaim = mi_option_is_enabled(mi_option_page_reclaim_on_free);
+    heap_main.allow_page_reclaim = (mi_option_get(mi_option_page_reclaim_on_free) >= 0);
     heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0);
     heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32);
   }
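Both heap-initialization sites switch from a boolean `mi_option_is_enabled` check to a signed comparison, so a setting of 0 still enables the flag and only -1 turns reclaim off. A hedged sketch using mimalloc's public option API (the helper name is illustrative, and it assumes a mimalloc version that exposes `mi_option_page_reclaim_on_free`):

// Hedged sketch: deriving allow_page_reclaim from the signed option value.
#include <stdbool.h>
#include <stdio.h>
#include <mimalloc.h>

// old: !allow_destroy && mi_option_is_enabled(mi_option_page_reclaim_on_free)
// new: !allow_destroy && mi_option_get(mi_option_page_reclaim_on_free) >= 0
//      -> 0 ("reclaim into originating heap only") still enables the flag; only -1 disables it
static bool derive_allow_page_reclaim(bool allow_destroy) {
  return (!allow_destroy && mi_option_get(mi_option_page_reclaim_on_free) >= 0);
}

int main(void) {
  printf("allow_page_reclaim = %d\n", derive_allow_page_reclaim(false));
  return 0;
}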
@@ -144,7 +144,7 @@ static mi_option_desc_t options[_mi_option_last] =
 #else
   { 1,    UNINIT, MI_OPTION(eager_commit_delay) },   // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
 #endif
-  { 100,  UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) },  // purge delay in milli-seconds
+  { 1000, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) },  // purge delay in milli-seconds
   { 0,    UNINIT, MI_OPTION(use_numa_nodes) },       // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0,    UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) },  // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100,  UNINIT, MI_OPTION(os_tag) },               // only apple specific for now but might serve more or less related purpose

@@ -168,13 +168,13 @@ static mi_option_desc_t options[_mi_option_last] =
   { MI_DEFAULT_GUARDED_SAMPLE_RATE,
          UNINIT, MI_OPTION(guarded_sample_rate)},    // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,   UNINIT, MI_OPTION(guarded_sample_seed)},
-  { 1,   UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim an abandoned segment on a free
-  { 2,   UNINIT, MI_OPTION(page_full_retain) },
-  { 4,   UNINIT, MI_OPTION(page_max_candidates) },
-  { 0,   UNINIT, MI_OPTION(max_vabits) },
-  { MI_DEFAULT_PAGEMAP_COMMIT,
+  { 0,   UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim an abandoned segment on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
+  { 2,   UNINIT, MI_OPTION(page_full_retain) },       // number of (small) pages to retain in the free page queues
+  { 4,   UNINIT, MI_OPTION(page_max_candidates) },    // max search to find a best page candidate
+  { 0,   UNINIT, MI_OPTION(max_vabits) },             // max virtual address space bits
+  { MI_DEFAULT_PAGEMAP_COMMIT,
          UNINIT, MI_OPTION(pagemap_commit) },         // commit the full pagemap upfront?
-  { 2,   UNINIT, MI_OPTION(page_commit_on_demand) },
+  { 2,   UNINIT, MI_OPTION(page_commit_on_demand) },  // commit pages on-demand (2 disables this on overcommit systems (like Linux))
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
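Two defaults change here: the purge delay moves from 100 to 1000 milliseconds, and `page_reclaim_on_free` now defaults to 0 with the -1/0/1 semantics spelled out in the comment. The sketch below shows how an application could override these through the public option API; it assumes a mimalloc build whose header exposes these option enums, and the values are only examples.

// Hedged usage sketch: overriding the new defaults from application code.
#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // purge delay default moved from 100 to 1000 ms in this merge; set it back explicitly if desired
  mi_option_set(mi_option_purge_delay, 100);

  // page_reclaim_on_free now defaults to 0 (reclaim only into the originating heap);
  // 1 re-enables cross-heap reclaim, -1 disables reclaim on free entirely
  mi_option_set(mi_option_page_reclaim_on_free, 1);

  printf("purge_delay=%ld reclaim_on_free=%ld\n",
         mi_option_get(mi_option_purge_delay),
         mi_option_get(mi_option_page_reclaim_on_free));
  return 0;
}

The same options can usually also be set from the environment following mimalloc's `MIMALLOC_<OPTION>` naming convention, e.g. `MIMALLOC_PURGE_DELAY=100` or `MIMALLOC_PAGE_RECLAIM_ON_FREE=1`.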
src/page.c (11 changed lines)

@@ -278,10 +278,11 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   }
   else {
     mi_page_queue_remove(pq, page);
-    mi_tld_t* tld = page->heap->tld;
+    mi_heap_t* heap = page->heap;
     mi_page_set_heap(page, NULL);
-    _mi_arenas_page_abandon(page,tld);
-    _mi_arenas_collect(false, false, tld);  // allow purging
+    page->heap = heap;  // dont set heap to NULL so we can reclaim_on_free within the same heap
+    _mi_arenas_page_abandon(page, heap->tld);
+    _mi_arenas_collect(false, false, heap->tld);  // allow purging
   }
 }
 
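The abandon path now remembers the owning heap: `mi_page_set_heap(page, NULL)` still runs its usual detach bookkeeping, but `page->heap` is restored right afterwards so a later free from another thread can recognize the originating heap, which the reclaim-on-free check above relies on. A minimal sketch of that save/clear/restore pattern with stand-in types:

// Hedged sketch: preserving the originating heap across page abandonment.
// heap_t, page_t, and the helpers are stand-ins, not mimalloc's API.
#include <assert.h>
#include <stddef.h>

typedef struct { int id; } heap_t;         // stand-in for mi_heap_t
typedef struct { heap_t* heap; } page_t;   // stand-in for mi_page_t

static void page_set_heap(page_t* page, heap_t* heap) {
  // stand-in for mi_page_set_heap: runs the normal "detach from heap" bookkeeping
  page->heap = heap;
}

static void page_abandon(page_t* page) {
  heap_t* heap = page->heap;   // remember the originating heap
  page_set_heap(page, NULL);   // clear the association as before
  page->heap = heap;           // ...but keep the origin recorded for reclaim_on_free
  // ... hand the page over to the arena's abandoned-page set ...
}

int main(void) {
  heap_t h = { 1 };
  page_t p = { &h };
  page_abandon(&p);
  assert(p.heap == &h);        // origin preserved across abandonment
  return 0;
}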
@@ -717,7 +718,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
     count++;
     #endif
     candidate_limit--;
 
     // search up to N pages for a best candidate
 
     // is the local free list non-empty?

@@ -744,7 +745,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
       page_candidate = page;
       candidate_limit = _mi_option_get_fast(mi_option_page_max_candidates);
     }
-    else if (mi_page_all_free(page_candidate)) {
+    else if (mi_page_all_free(page_candidate)) {
       _mi_page_free(page_candidate, pq);
       page_candidate = page;
     }