merge from dev with the destroy_on_exit option

Author: Daan Leijen
Date:   2022-11-21 10:03:52 -08:00
Commit: 163afcce75

8 changed files with 68 additions and 8 deletions

include/mimalloc-internal.h (View file)

@@ -104,6 +104,7 @@ bool _mi_arena_memid_is_suitable(size_t memid, mi_arena_id_t req_arena_id)
 void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
 void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
+void _mi_segment_cache_free_all(mi_os_tld_t* tld);
 void _mi_segment_map_allocated_at(const mi_segment_t* segment);
 void _mi_segment_map_freed_at(const mi_segment_t* segment);

@@ -149,6 +150,7 @@ void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
 bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
+void _mi_heap_destroy_all(void);

 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
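
Two internal entry points are introduced together: _mi_heap_destroy_all (defined in src/heap.c below) tears down every heap of the calling thread, and _mi_segment_cache_free_all (src/segment-cache.c) drains the segment cache back to the arenas. Both are invoked from mi_process_done in src/init.c when the new option is enabled.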

include/mimalloc.h (View file)

@@ -342,6 +342,7 @@ typedef enum mi_option_e {
   mi_option_allow_decommit,
   mi_option_segment_decommit_delay,
   mi_option_decommit_extend_delay,
+  mi_option_destroy_on_exit,
   _mi_option_last
 } mi_option_t;
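
The new option is off by default (see the src/options.c hunk below) and, like every mimalloc option, can also be switched on through its environment variable, MIMALLOC_DESTROY_ON_EXIT. A minimal sketch of opting in from code; the leaked allocation is contrived, but mi_option_enable and mi_malloc are the regular public API:

#include <mimalloc.h>

int main(void) {
  // same effect as running with MIMALLOC_DESTROY_ON_EXIT=1
  mi_option_enable(mi_option_destroy_on_exit);

  void* p = mi_malloc(128);
  (void)p;  // intentionally not freed: once the option is enabled,
            // mi_process_done() releases all retained memory at exit
  return 0;
}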

src/heap.c (View file)

@@ -347,7 +347,20 @@ void mi_heap_destroy(mi_heap_t* heap) {
   }
 }

+void _mi_heap_destroy_all(void) {         // called from mi_process_done when mi_option_destroy_on_exit is set
+  mi_heap_t* bheap = mi_heap_get_backing();
+  mi_heap_t* curr = bheap->tld->heaps;    // the heap list of the current thread only
+  while (curr != NULL) {
+    mi_heap_t* next = curr->next;         // save `next` before `curr` is destroyed
+    if (curr->no_reclaim) {
+      mi_heap_destroy(curr);              // safe to destroy fully
+    }
+    else {
+      _mi_heap_destroy_pages(curr);       // otherwise free the pages directly
+    }
+    curr = next;
+  }
+}

 /* -----------------------------------------------------------
   Safe Heap delete
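
The no_reclaim test mirrors the safety contract of mi_heap_destroy: only heaps whose pages were never reclaimed by other threads can take the full destroy path, while the remaining heaps (including the backing heap) have their pages torn down directly via _mi_heap_destroy_pages. Note also that only the current thread's heap list is walked; memory held by heaps of other threads is not touched here.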

src/init.c (View file)

@@ -627,7 +627,7 @@ static void mi_cdecl mi_process_done(void) {
   #if defined(_WIN32) && !defined(MI_SHARED_LIB)
   FlsFree(mi_fls_key);  // call thread-done on all threads (except the main thread) to prevent dangling callback pointer if statically linked with a DLL; Issue #208
   #endif

   #ifndef MI_SKIP_COLLECT_ON_EXIT
   #if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
   // free all memory if possible on process exit. This is not needed for a stand-alone process
@@ -637,6 +637,14 @@ static void mi_cdecl mi_process_done(void) {
   #endif
   #endif

+  // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free
+  // since after process_done there might still be other code running that calls `free` (like at_exit routines,
+  // or C-runtime termination code).
+  if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
+    _mi_heap_destroy_all();                                     // forcefully release all memory held by all heaps (of this thread only!)
+    _mi_segment_cache_free_all(&_mi_heap_main_get()->tld->os);  // release all cached segments
+  }
+
   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
     mi_stats_print(NULL);
   }
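
The caveat in the comment is worth making concrete. Below is a contrived sketch of the failure mode, assuming mimalloc overrides the regular malloc/free; the handler name is hypothetical, and whether it actually fires after mi_process_done depends on atexit registration order and the C runtime:

#include <stdlib.h>

static void* g_buf = NULL;

static void late_cleanup(void) {
  free(g_buf);  // if all heaps were already destroyed at this point,
}               // this frees into memory the allocator no longer owns

int main(void) {
  g_buf = malloc(64);
  atexit(late_cleanup);  // may run after mimalloc's own exit handling
  return 0;
}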

src/options.c (View file)

@@ -94,7 +94,8 @@ static mi_option_desc_t options[_mi_option_last] =
   { 8,   UNINIT, MI_OPTION(max_segment_reclaim)},     // max. number of segment reclaims from the abandoned segments per try.
   { 1,   UNINIT, MI_OPTION(allow_decommit) },         // decommit slices when no longer used (after decommit_delay milli-seconds)
   { 500, UNINIT, MI_OPTION(segment_decommit_delay) }, // decommit delay in milli-seconds for freed segments
-  { 2,   UNINIT, MI_OPTION(decommit_extend_delay) }
+  { 2,   UNINIT, MI_OPTION(decommit_extend_delay) },
+  { 0,   UNINIT, MI_OPTION(destroy_on_exit) }         // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
 };

 static void mi_option_init(mi_option_desc_t* desc);
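
As with the other rows of this table, the 0 default means the option stays disabled until mi_option_enable is called or the MIMALLOC_DESTROY_ON_EXIT environment variable is set; UNINIT marks that the environment has not yet been consulted for this entry.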

src/segment-cache.c (View file)

@@ -45,7 +45,11 @@ static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void
   return _mi_arena_memid_is_suitable(slot->memid, req_arena_id);
 }

-mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
+mi_decl_noinline static void* mi_segment_cache_pop_ex(
+  bool all_suitable,
+  size_t size, mi_commit_mask_t* commit_mask,
+  mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero,
+  mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
 #ifdef MI_CACHE_DISABLE
   return NULL;
@@ -66,8 +70,8 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
   mi_bitmap_index_t bitidx = 0;
   bool claimed = false;
   mi_arena_id_t req_arena_id = _req_arena_id;
-  mi_bitmap_pred_fun_t pred_fun = &mi_segment_cache_is_suitable;  // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache?
+  mi_bitmap_pred_fun_t pred_fun = (all_suitable ? NULL : &mi_segment_cache_is_suitable);  // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache?
   if (*large) {  // large allowed?
     claimed = _mi_bitmap_try_find_from_claim_pred(cache_available_large, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
     if (claimed) *large = true;
@@ -97,6 +101,12 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
 #endif
 }

+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
+{
+  return mi_segment_cache_pop_ex(false, size, commit_mask, decommit_mask, large, is_pinned, is_zero, _req_arena_id, memid, tld);
+}
+
 static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats)
 {
   if (mi_commit_mask_is_empty(cmask)) {
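
The refactoring keeps the external contract unchanged: _mi_segment_cache_pop is now a thin wrapper that passes all_suitable=false, so regular pops still filter slots through mi_segment_cache_is_suitable, while the all_suitable=true mode (used only by _mi_segment_cache_free_all below) accepts any slot regardless of the requesting arena.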
@@ -163,6 +173,30 @@ void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld) {
   mi_segment_cache_purge(force, tld );
 }

+void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
+  mi_commit_mask_t commit_mask;
+  mi_commit_mask_t decommit_mask;
+  bool is_pinned;
+  bool is_zero;
+  size_t memid;
+  const size_t size = MI_SEGMENT_SIZE;
+  // iterate twice: first large pages, then regular memory
+  for (int i = 0; i < 2; i++) {
+    void* p;
+    do {
+      // keep popping and freeing the memory
+      bool large = (i == 0);
+      p = mi_segment_cache_pop_ex(true /* all */, size, &commit_mask, &decommit_mask,
+                                  &large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, tld);
+      if (p != NULL) {
+        size_t csize = _mi_commit_mask_committed_size(&commit_mask, size);
+        if (csize > 0 && !is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
+        _mi_arena_free(p, size, MI_SEGMENT_ALIGN, 0, memid, is_pinned /* pretend not committed to not double count decommits */, tld->stats);
+      }
+    } while (p != NULL);
+  }
+}
+
 mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
 {
 #ifdef MI_CACHE_DISABLE
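
Note the ordering in mi_process_done above: _mi_heap_destroy_all runs first, which can push freed segments into this cache, and only afterwards is the cache drained, so nothing is left behind. The two passes follow the cache's split into a large-page bitmap and a regular one, and popping with all_suitable=true bypasses the arena-suitability filter so every slot is released.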

src/segment.c (View file)

@@ -803,7 +803,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, siz
   bool is_pinned = false;
   size_t memid = 0;
   size_t align_offset = 0;
-  size_t alignment = MI_SEGMENT_SIZE;
+  size_t alignment = MI_SEGMENT_ALIGN;
   if (page_alignment > 0) {
     mi_assert_internal(huge_page != NULL);
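
A small alignment fix that rides along with the merge: the segment is now requested at MI_SEGMENT_ALIGN rather than MI_SEGMENT_SIZE, matching the MI_SEGMENT_ALIGN that the free paths pass to _mi_arena_free (see the cache drain above), so allocation and release agree on alignment.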

test/main-override-static.c (View file)

@@ -33,9 +33,10 @@ int main() {
   // invalid_free();
   // test_reserved();
   // negative_stat();
+  // test_heap_walk();
   // alloc_huge();
   // test_heap_walk();
-  test_heap_arena();
+  // test_heap_arena();

   void* p1 = malloc(78);
   void* p2 = malloc(24);
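
To exercise the new path with this test program, run it with MIMALLOC_DESTROY_ON_EXIT=1 and, for confirmation, MIMALLOC_SHOW_STATS=1 (both are consulted by the shutdown code in src/init.c above); with the option enabled the final statistics should show all cached segments returned to the OS at exit.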