Merge branch 'dev3' into dev3-bin

This commit is contained in:
daanx 2024-12-22 22:31:26 -08:00
commit da2ab86e9f
7 changed files with 120 additions and 38 deletions

View file

@ -352,6 +352,7 @@ static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena
mi_arena_t* name_arena; \
if (req_arena != NULL) { \
name_arena = req_arena; /* if there is a specific req_arena, only search that one */\
if (_i > 0) break; /* only once */ \
} \
else { \
size_t _idx; \
@ -369,7 +370,6 @@ static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena
#define mi_forall_arenas_end() \
} \
if (req_arena != NULL) break; \
} \
}
@ -923,7 +923,7 @@ void _mi_arenas_page_unabandon(mi_page_t* page) {
Arena free
----------------------------------------------------------- */
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices);
static void mi_arenas_try_purge(bool force, bool visit_all);
static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld);
void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
if (p==NULL) return;
@ -979,12 +979,12 @@ void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
}
// try to purge expired decommits
mi_arenas_try_purge(false, false);
// mi_arenas_try_purge(false, false, NULL);
}
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
void _mi_arenas_collect(bool force_purge) {
mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
void _mi_arenas_collect(bool force_purge, mi_tld_t* tld) {
mi_arenas_try_purge(force_purge, force_purge /* visit all? */, tld);
}
@ -1038,9 +1038,9 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arenas_unsafe_destroy_all(void) {
void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) {
mi_arenas_unsafe_destroy(_mi_subproc());
_mi_arenas_collect(true /* force purge */); // purge non-owned arenas
_mi_arenas_collect(true /* force purge */, tld); // purge non-owned arenas
}
@ -1584,13 +1584,12 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
}
static void mi_arenas_try_purge(bool force, bool visit_all)
static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld)
{
const long delay = mi_arena_purge_delay();
if (_mi_preloading() || delay <= 0) return; // nothing will be scheduled
// check if any arena needs purging?
mi_tld_t* tld = _mi_tld();
mi_subproc_t* subproc = tld->subproc;
const mi_msecs_t now = _mi_clock_now();
mi_msecs_t arenas_expire = mi_atomic_load_acquire(&subproc->purge_expire);
@ -1628,10 +1627,71 @@ static void mi_arenas_try_purge(bool force, bool visit_all)
}
}
/* -----------------------------------------------------------
Visit abandoned pages
----------------------------------------------------------- */
// Bundles the caller's visitor parameters so they can be threaded through the
// per-slice bitmap callback (`abandoned_page_visit_at`) as a single `void* arg`.
typedef struct mi_abandoned_page_visit_info_s {
  int heap_tag;                 // only pages whose `heap_tag` matches are visited
  mi_block_visit_fun* visitor;  // user callback, invoked once per page area (and per block if requested)
  void* arg;                    // opaque user argument passed through to `visitor`
  bool visit_blocks;            // if true, also visit each block inside the page area
} mi_abandoned_page_visit_info_t;
// Visit a single abandoned page: report the page area to the visitor and,
// if requested, each block within it. Returns false to stop the overall scan.
static bool abandoned_page_visit(mi_page_t* page, mi_abandoned_page_visit_info_t* vinfo) {
  // Pages with a different heap tag are skipped; returning true continues the scan.
  if (page->heap_tag != vinfo->heap_tag) return true;

  mi_heap_area_t area;
  _mi_heap_area_init(&area, page);

  // Report the area itself first (a NULL block signals an area-level visit).
  const bool keep_going = vinfo->visitor(NULL, &area, NULL, area.block_size, vinfo->arg);
  if (!keep_going) return false;

  // Optionally walk the individual blocks of this area as well.
  return (vinfo->visit_blocks
            ? _mi_heap_area_visit_blocks(&area, page, vinfo->visitor, vinfo->arg)
            : true);
}
// Bitmap-iteration callback: the start of an abandoned-page slice is the page
// itself, so recover the page pointer and forward to `abandoned_page_visit`.
static bool abandoned_page_visit_at(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg) {
  MI_UNUSED(slice_count);
  mi_page_t* const page = (mi_page_t*)mi_arena_slice_start(arena, slice_index);
  mi_assert_internal(mi_page_is_abandoned_mapped(page));
  mi_abandoned_page_visit_info_t* const vinfo = (mi_abandoned_page_visit_info_t*)arg;
  return abandoned_page_visit(page, vinfo);
}
// Visit all abandoned pages in this subproc.
// Walks (1) abandoned pages registered in the arenas' per-bin abandoned
// bitmaps, and (2) abandoned pages in OS-allocated memory kept on the
// subproc's `os_abandoned_pages` list. Returns false as soon as the visitor
// asks to stop.
bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
mi_abandoned_page_visit_info_t visit_info = { heap_tag, visitor, arg, visit_blocks };
// NOTE(review): the two lines below are a stub that disables the
// implementation that follows — as written, this function always reports
// EINVAL and returns false, and everything after `return false;` is
// unreachable dead code. Remove the MI_UNUSED list, the error message, and
// the early return once the implementation below is validated.
MI_UNUSED(subproc_id); MI_UNUSED(heap_tag); MI_UNUSED(visit_blocks); MI_UNUSED(visitor); MI_UNUSED(arg);
_mi_error_message(EINVAL, "implement mi_abandoned_visit_blocks\n");
return false;
// --- intended implementation, currently unreachable (see note above) ---
// visit abandoned pages in the arenas
// we don't have to claim because we assume we are the only thread running (in this subproc).
// (but we could atomically claim as well by first doing abandoned_reclaim and afterwards reabandoning).
bool ok = true;
mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id);
mi_forall_arenas(subproc, NULL, 0, arena) {
mi_assert_internal(arena->subproc == subproc);
// scan each size-bin's abandoned-page bitmap for this arena
for (size_t bin = 0; ok && bin < MI_BIN_COUNT; bin++) {
// todo: if we had a single abandoned page map as well, this can be faster.
if (mi_atomic_load_relaxed(&subproc->abandoned_count[bin]) > 0) {
ok = _mi_bitmap_forall_set(arena->pages_abandoned[bin], &abandoned_page_visit_at, arena, &visit_info);
}
}
}
mi_forall_arenas_end();
if (!ok) return false;
// visit abandoned pages in OS allocated memory
// (technically we don't need the lock as we assume we are the only thread running in this subproc)
mi_lock(&subproc->os_abandoned_pages_lock) {
for (mi_page_t* page = subproc->os_abandoned_pages; ok && page != NULL; page = page->next) {
ok = abandoned_page_visit(page, &visit_info);
}
}
return ok;
}
@ -1731,3 +1791,4 @@ mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_arena_id_t* are
return true;
}