Mirror of https://github.com/microsoft/mimalloc.git, synced 2025-05-04 22:49:32 +03:00
allow abandoned segment reclaim on a free
commit 64edbc92dd (parent bdda13b880)
6 changed files with 35 additions and 10 deletions
include/mimalloc.h
@@ -342,6 +342,7 @@ typedef enum mi_option_e {
   mi_option_arena_reserve,               // initial memory size in KiB for arena reservation (1GiB on 64-bit)
   mi_option_arena_purge_mult,
   mi_option_purge_extend_delay,
+  mi_option_abandoned_reclaim_on_free,   // reclaim abandoned segments on a free
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
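As a usage note (not part of this commit): like other mimalloc options, the new option should be controllable through the public option API in mimalloc.h, or from the environment following the usual MIMALLOC_<OPTION> naming, i.e. MIMALLOC_ABANDONED_RECLAIM_ON_FREE. A minimal sketch, assuming only the standard option functions:

// sketch: toggling reclaim-on-free at startup (it defaults to enabled, see src/options.c below)
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_option_disable(mi_option_abandoned_reclaim_on_free);   // opt out
  mi_option_enable(mi_option_abandoned_reclaim_on_free);    // opt back in
  printf("reclaim on free: %s\n",
         mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) ? "on" : "off");
  return 0;
}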
include/mimalloc/internal.h
@@ -147,6 +147,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, m
 void _mi_segment_thread_collect(mi_segments_tld_t* tld);
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
 void _mi_abandoned_await_readers(void);
+bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);

 // "page.c"
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
src/alloc.c (15 changes)
@@ -406,12 +406,24 @@ static void mi_stat_huge_free(const mi_page_t* page) {
 // multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
 static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
+  // first see if the segment was abandoned and we can reclaim it
+  mi_segment_t* const segment = _mi_page_segment(page);
+  if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
+      mi_atomic_load_relaxed(&segment->thread_id) == 0)
+  {
+    // the segment is abandoned, try to reclaim it into our heap
+    if (_mi_segment_attempt_reclaim(mi_prim_get_default_heap(), segment)) {
+      mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
+      mi_free(block);  // recursively free as now it will be a local free in our heap
+      return;
+    }
+  }
+
   // The padding check may access the non-thread-owned page for the key values.
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
   _mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection

-  mi_segment_t* const segment = _mi_page_segment(page);
   if (segment->page_kind == MI_PAGE_HUGE) {
   #if MI_HUGE_PAGE_ABANDON
     // huge page segments are always abandoned and can be freed immediately
@@ -426,7 +438,6 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   #endif
   }
-

   #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
   memset(block, MI_DEBUG_FREED, mi_usable_size(block));
   #endif
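The new fast path above targets the cross-thread free pattern sketched below (illustrative code, not from this commit): one thread allocates and terminates, which abandons its segments; another thread later frees the block. With the option enabled, the freeing thread reclaims the abandoned segment into its own heap and the recursive mi_free then takes the cheap local-free path. The recursion terminates because a successful reclaim sets segment->thread_id to the calling thread, as the assertion checks.

// sketch: the producer exits before the consumer frees, abandoning its segments
#include <mimalloc.h>
#include <pthread.h>

static void* producer(void* arg) {
  (void)arg;
  return mi_malloc(128);      // allocated from the producer thread's heap
}

int main(void) {
  pthread_t t;
  void* p = NULL;
  pthread_create(&t, NULL, producer, NULL);
  pthread_join(t, &p);        // producer exited: its segments are now abandoned
  mi_free(p);                 // may reclaim the abandoned segment, then free locally
  return 0;
}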
src/arena.c
@@ -749,9 +749,9 @@ bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
   bool was_abandoned = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
-  mi_assert_internal(was_abandoned);
-  mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
-  mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
+  // mi_assert_internal(was_abandoned);
+  mi_assert_internal(!was_abandoned || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
+  //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
   return was_abandoned;
 }

@@ -766,6 +766,7 @@ void _mi_arena_segment_mark_abandoned(mi_memid_t memid)
   mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
   const bool was_unset = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
+  MI_UNUSED_RELEASE(was_unset);
   mi_assert_internal(was_unset);
   mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
 }
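The weakened assertions reflect a race that is new with reclaim-on-free: two threads can both observe thread_id == 0 and call _mi_arena_segment_clear_abandoned concurrently, so was_abandoned is no longer guaranteed to be true; only the thread that actually clears the bit proceeds to reclaim. A minimal sketch of that arbitration, simplified to a single bitmap word (hypothetical names, not the mimalloc bitmap API):

#include <stdatomic.h>
#include <stdbool.h>

// exactly one racing thread observes the 1 -> 0 transition of the bit,
// which is what makes the unclaim a safe ownership arbitration point
static bool bitmap_unclaim(_Atomic(unsigned long)* field, unsigned long mask) {
  unsigned long prev = atomic_fetch_and(field, ~mask);  // clear the bit atomically
  return (prev & mask) != 0;                            // true only for the winner
}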
src/options.c
@@ -81,7 +81,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 100, UNINIT, MI_OPTION(os_tag) },              // only apple specific for now but might serve more or less related purpose
   { 16,  UNINIT, MI_OPTION(max_errors) },          // maximum errors that are output
   { 16,  UNINIT, MI_OPTION(max_warnings) },        // maximum warnings that are output
-  { 8,   UNINIT, MI_OPTION(max_segment_reclaim)},  // max. number of segment reclaims from the abandoned segments per try.
+  { 16,  UNINIT, MI_OPTION(max_segment_reclaim)},  // max. number of segment reclaims from the abandoned segments per try.
   { 0,   UNINIT, MI_OPTION(destroy_on_exit)},      // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
 #if (MI_INTPTR_SIZE>4)
   { 1024L * 1024L, UNINIT, MI_OPTION(arena_reserve) },  // reserve memory N KiB at a time

@@ -91,6 +91,7 @@ static mi_option_desc_t options[_mi_option_last] =

   { 10,  UNINIT, MI_OPTION(arena_purge_mult) },    // purge delay multiplier for arena's
   { 1,   UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
+  { 1,   UNINIT, MI_OPTION(abandoned_reclaim_on_free) },  // reclaim an abandoned segment on a free
 };

 static void mi_option_init(mi_option_desc_t* desc);
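Besides the new entry, note that the default for max_segment_reclaim doubles from 8 to 16 reclaims per try. As a hedged sketch (illustrative values, using only the public option setters), both knobs can be tuned before the first allocation:

#include <mimalloc.h>

int main(void) {
  mi_option_set(mi_option_max_segment_reclaim, 32);                  // illustrative value
  mi_option_set_enabled(mi_option_abandoned_reclaim_on_free, true);  // explicit opt-in
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}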
src/segment.c
@@ -336,7 +336,7 @@ static void mi_segment_remove_all_purges(mi_segment_t* segment, bool force_purge
     mi_page_t* page = &segment->pages[i];
     if (!page->segment_in_use) {
       mi_page_purge_remove(page, tld);
-      if (force_purge) {
+      if (force_purge && page->is_committed) {
         mi_page_purge(segment, page, tld);
       }
     }

@@ -902,6 +902,16 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
   }
 }

+// attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`)
+bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
+  if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false;  // it is not abandoned
+  if (_mi_arena_segment_clear_abandoned(segment->memid)) {  // atomically unabandon
+    mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
+    mi_assert_internal(res != NULL);
+    return (res != NULL);
+  }
+  return false;
+}

 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   mi_segment_t* segment;
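_mi_segment_attempt_reclaim is a double-checked pattern: a cheap relaxed load of thread_id rejects segments that are not abandoned, and the atomic bitmap unclaim inside _mi_arena_segment_clear_abandoned picks the single winner among racing frees. A stripped-down sketch of the shape of that pattern (hypothetical simplified types, not mimalloc's actual structures):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct segment_s {
  _Atomic(uintptr_t) thread_id;    // 0 while the segment is abandoned
} segment_t;

// hypothetical stand-in for _mi_arena_segment_clear_abandoned
extern bool clear_abandoned_bit(segment_t* s);

static bool attempt_reclaim(segment_t* s, uintptr_t my_tid) {
  if (atomic_load_explicit(&s->thread_id, memory_order_relaxed) != 0)
    return false;                  // fast reject: not abandoned
  if (!clear_abandoned_bit(s))
    return false;                  // another thread won the race
  atomic_store_explicit(&s->thread_id, my_tid, memory_order_release);
  return true;                     // the caller's heap now owns the segment
}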