bug fixes

This commit is contained in:
daanx 2024-12-01 12:54:16 -08:00
parent 8f2a5864b8
commit 1d7a9f62a5
6 changed files with 34 additions and 32 deletions

View file

@@ -598,7 +598,7 @@ static inline bool mi_page_mostly_used(const mi_page_t* page) {
static inline bool mi_page_is_abandoned(const mi_page_t* page) {
// note: the xheap field of an abandoned heap is set to the subproc (for fast reclaim-on-free)
- return (mi_page_thread_id(page) == 0);
+ return (mi_atomic_load_acquire(&page->xthread_id) == 0);
}
static inline bool mi_page_is_huge(const mi_page_t* page) {
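A note on the hunk above: the abandoned-page check switches from a plain read of the thread id to an acquire load of the atomic xthread_id field, so the check synchronizes with the release store made when a page is abandoned. A minimal standalone sketch of that pattern, using hypothetical demo_* names rather than mimalloc's own types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Hypothetical stand-in for the page header: 0 in the atomic owner-thread id
// means the page has been abandoned.
typedef struct demo_page_s {
  _Atomic(uintptr_t) xthread_id;
} demo_page_t;

// Abandoning thread: the release store publishes all earlier writes to the page.
static inline void demo_page_abandon(demo_page_t* page) {
  atomic_store_explicit(&page->xthread_id, 0, memory_order_release);
}

// Freeing thread: the acquire load pairs with the release store above, so the
// page fields written before abandonment are visible before any reclaim starts.
static inline bool demo_page_is_abandoned(demo_page_t* page) {
  return (atomic_load_explicit(&page->xthread_id, memory_order_acquire) == 0);
}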

View file

@@ -646,11 +646,12 @@ bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
// we only call this when on free (and thus there is still an object alive in the page)
mi_memid_t memid = page->memid;
if (!_mi_arena_memid_is_suitable(memid, heap->arena_id)) return false; // don't reclaim between exclusive and non-exclusive arenas
+ if (mi_atomic_load_acquire(&page->xheap) != (uintptr_t)heap->tld->subproc) return false;
if mi_likely(memid.memkind == MI_MEM_ARENA) {
size_t slice_index;
mi_arena_t* arena = mi_page_arena(page, &slice_index, NULL);
- if (arena->subproc != heap->tld->subproc) return false; // only reclaim within the same subprocess
+ //if (arena->subproc != heap->tld->subproc) return false; // only reclaim within the same subprocess
// don't reclaim more from a `free` call than half the current segments
// this is to prevent a pure free-ing thread from starting to own too many segments
@@ -665,6 +666,11 @@ bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_internal(!mi_page_is_abandoned(page));
return true;
}
+ else {
+   if (mi_page_is_abandoned(page)) {
+     mi_assert(false);
+   }
+ }
}
else {
// A page in OS or external memory
@@ -1089,15 +1095,6 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
/* -----------------------------------------------------------
Abandoned pages
----------------------------------------------------------- */
- void mi_arena_page_abandon(mi_page_t* page) {
-   mi_assert_internal(mi_page_is_abandoned(page));
-   if (mi_page_is_full(page)) {}
- }
/* -----------------------------------------------------------
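For context on the reclaim gate in the hunks above: once a page is abandoned, its heap slot is reused to record the owning sub-process (see the xheap note in the first hunk), so a thread that frees into a foreign page can decide with one acquire load whether reclaiming is even allowed before doing the arena lookup. A hedged sketch of that shape, with hypothetical demo_* types rather than mimalloc's actual layout:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct demo_subproc_s { int id; } demo_subproc_t;

typedef struct demo_page_s {
  _Atomic(uintptr_t) xthread_id;  // 0 once the page is abandoned
  _Atomic(uintptr_t) xheap;       // owning heap, or the subproc after abandonment
} demo_page_t;

typedef struct demo_heap_s {
  demo_subproc_t* subproc;        // sub-process this heap belongs to
} demo_heap_t;

// Cheap gate before the (more expensive) arena lookup and take-over:
// reclaim only pages that were abandoned into our own sub-process.
static bool demo_may_reclaim_on_free(demo_heap_t* heap, demo_page_t* page) {
  if (atomic_load_explicit(&page->xthread_id, memory_order_acquire) != 0)
    return false;  // still owned by some thread
  if (atomic_load_explicit(&page->xheap, memory_order_acquire) != (uintptr_t)heap->subproc)
    return false;  // abandoned into a different sub-process
  return true;     // caller may now locate the arena slice and try to take over the page
}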

View file

@@ -225,11 +225,10 @@ static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// first see if the page was abandoned and if we can reclaim it into our thread
- if (mi_page_is_abandoned(page) &&
-     (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 ||
-      mi_page_is_singleton(page) // only one block, and we are free-ing it
-     ) &&
-     mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
+ if (mi_page_is_abandoned(page)) {
+   if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 ||
+       mi_page_is_singleton(page)) { // only one block, and we are free-ing it
+     if (mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
{
// the page is abandoned, try to reclaim it into our heap
if (_mi_arena_try_reclaim(mi_heap_get_default(), page)) { // TODO: avoid putting it in the full free queue
@@ -239,9 +238,15 @@ static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block
return;
}
else {
+ if (mi_page_is_abandoned(page)) {
+   mi_assert(false);
+ }
mi_assert_internal(!mi_page_is_singleton(page)); // we should have succeeded on singleton pages
}
}
}
}
// The padding check may access the non-thread-owned page for the key values.
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
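The rewritten mi_free_block_mt above separates the reclaim decision into three nested guards: the page must be abandoned, reclaim-on-free must be enabled (or the page holds only this one block), and the calling thread must still have a live default heap so that a fresh heap is not initialized after thread exit (issue #944). A compact sketch of the same decision order written as a pure predicate, with hypothetical parameter names standing in for the real checks:

#include <stdbool.h>

// Hypothetical helper (not mimalloc's API): should a multi-threaded free
// attempt to reclaim the page into the current thread's heap?
static bool demo_should_try_reclaim(bool page_is_abandoned,
                                    bool reclaim_on_free_enabled,
                                    bool page_is_singleton,
                                    bool thread_has_default_heap) {
  if (!page_is_abandoned) return false;                // nothing to reclaim
  if (!reclaim_on_free_enabled && !page_is_singleton)
    return false;                                      // option off, and not the last live block
  if (!thread_has_default_heap) return false;          // thread already exited (issue #944)
  return true;                                         // caller may now try the actual reclaim
}

Writing the guards as early returns keeps the same short-circuit order as the original combined condition while making each failure case visible on its own line.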

View file

@@ -396,8 +396,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
tld->heap_backing = bheap;
tld->heaps = NULL;
tld->subproc = &mi_subproc_default;
- tld->tseq = 0;
- mi_atomic_add_acq_rel(&mi_tcount, 1);
+ tld->tseq = 0; // mi_atomic_add_acq_rel(&mi_tcount, 1);
tld->os.stats = &tld->stats;
}

View file

@@ -219,11 +219,12 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
size = _mi_align_up(size, _mi_os_page_size());
- const bool use_overalloc = (alignment > mi_os_mem_config.alloc_granularity && alignment <= size/8);
+ // try a direct allocation if the alignment is below the default, or if larger than 1/64 fraction of the size (to avoid waste).
+ const bool try_direct_alloc = (alignment <= mi_os_mem_config.alloc_granularity || alignment > size/64);
// try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
void* p = NULL;
- if (!use_overalloc) {
+ if (try_direct_alloc) {
p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
}
@@ -234,7 +235,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
else {
// if not aligned, free it, overallocate, and unmap around it
#if !MI_TRACK_ASAN
- if (!use_overalloc) {
+ if (try_direct_alloc) {
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
}
#endif
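On the os.c hunks above: the use_overalloc flag is replaced by its complement, try_direct_alloc, and the size threshold changes from 1/8 to 1/64. A direct (alignment-hinted) allocation is now attempted when the alignment is at most the allocator granularity, or when it exceeds 1/64 of the requested size, since over-allocating size plus alignment in that case would waste too much of the mapping. A small sketch of just that decision, with a hypothetical granularity constant standing in for mi_os_mem_config.alloc_granularity:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical value for illustration; the real code reads the configured
// allocation granularity at runtime.
#define DEMO_ALLOC_GRANULARITY ((size_t)(64 * 1024))

// Mirror of the try_direct_alloc rule: small alignments are usually honored
// by the OS directly, and alignments larger than size/64 are also tried
// directly because over-allocation would waste more than ~1/64 of the mapping.
static bool demo_try_direct_alloc(size_t size, size_t alignment) {
  return (alignment <= DEMO_ALLOC_GRANULARITY || alignment > size / 64);
}

For example, a 64 MiB request with 256 KiB alignment falls back to over-allocation (256 KiB is above the assumed granularity but not above 1 MiB), while the same request with 2 MiB alignment is tried directly.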

View file

@@ -36,7 +36,7 @@ static int ITER = 400;
static int THREADS = 8;
static int SCALE = 25;
static int ITER = 20;
- #elif defined(xMI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
+ #elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
static int THREADS = 8;
static int SCALE = 10;
static int ITER = 10;