wip: cannot compile

daanx 2024-12-01 16:26:59 -08:00
parent 1d7a9f62a5
commit 2f789aae9a
5 changed files with 181 additions and 77 deletions


@@ -693,3 +693,48 @@ mi_decl_nodiscard bool mi_bitmap_try_find_and_clearN(mi_bitmap_t* bitmap, size_t
  mi_bitmap_forall_set_chunks_end();
  return false;
}
mi_decl_nodiscard bool mi_pairmap_xset(mi_pair_t set, mi_pairmap_t* pairmap, size_t idx);
mi_decl_nodiscard bool mi_pairmap_xset_while_not_busy(mi_pair_t set, mi_pairmap_t* pairmap, size_t idx);

mi_decl_nodiscard bool mi_pairmap_try_find_and_set_busy(mi_pairmap_t* pairmap, size_t tseq, size_t* pidx) {
  size_t set_idx;
  size_t start = tseq % MI_BFIELD_BITS;
  size_t epoch = mi_atomic_load_acquire(&pairmap->epoch);
  MI_UNUSED(epoch); // loaded but not yet used in this wip version
  mi_bfield_t any_set = mi_bfield_rotate_right(mi_atomic_load_relaxed(&pairmap->any_set), start);
  while (mi_bfield_find_least_bit(any_set, &set_idx)) {
    size_t chunk_idx = 2*((set_idx + start) % MI_BFIELD_BITS);
    {
      // look at chunk_idx and chunk_idx+1
      mi_bitmap_chunk_t* chunk1 = &pairmap->chunks[chunk_idx];
      mi_bitmap_chunk_t* chunk2 = &pairmap->chunks[chunk_idx+1];
      size_t cidx;
      if (mi_pairmap_chunk_find_and_set_busy(chunk1, &cidx)) {
        *pidx = (chunk_idx * MI_BITMAP_CHUNK_BITS) + cidx;
        mi_assert_internal(*pidx < MI_PAIRMAP_MAX_BITS);
        return true;
      }
      else if (mi_pairmap_chunk_find_and_set_busy(chunk2, &cidx)) {
        *pidx = ((chunk_idx+1) * MI_BITMAP_CHUNK_BITS) + cidx;
        mi_assert_internal(*pidx < MI_PAIRMAP_MAX_BITS);
        return true;
      }
      else if (mi_bitmap_chunk_all_are_clear(chunk1) && mi_bitmap_chunk_all_are_clear(chunk2)) {
        // both chunks of this pair are fully clear: clear the corresponding `any_set` bit
        mi_bfield_atomic_xset(MI_BIT_CLEAR, &pairmap->any_set, chunk_idx/2);
      }
    }
    start += set_idx+1;   /* so chunk_idx stays valid */
    any_set >>= set_idx;  /* skip scanned bits (and avoid UB with (set_idx+1)) */
    any_set >>= 1;
  }
  return false;
}
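For orientation, here is a hedged usage sketch of the new pairmap API (not part of this commit). A caller claims a set pair and marks it busy with mi_pairmap_try_find_and_set_busy, works on whatever the pair guards, and then moves the pair out of the busy state with mi_pairmap_xset. The helper name, the final state chosen, and the interpretation of the returned index as a bit index (suggested by the assertion against MI_PAIRMAP_MAX_BITS) are all assumptions, since the xset entry points are only declared here.

  // Hypothetical caller, for illustration only.
  static bool mi_pairmap_claim_one(mi_pairmap_t* pairmap, size_t tseq, size_t* pidx) {
    if (!mi_pairmap_try_find_and_set_busy(pairmap, tseq, pidx)) {
      return false;                                // no eligible pair was found
    }
    // ... operate on the resource guarded by the (now busy) pair at *pidx ...
    // assumption: xset transitions the busy pair into its final state;
    // MI_PAIR_CLEAR is used here, but MI_PAIR_SET may be the intended target.
    return mi_pairmap_xset(MI_PAIR_CLEAR, pairmap, *pidx);
  }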


@@ -41,7 +41,7 @@ typedef mi_decl_align(32) struct mi_bitmap_s {
#define MI_BITMAP_MAX_BITS (MI_BFIELD_BITS * MI_BITMAP_CHUNK_BITS) // 16k bits on 64bit, 8k bits on 32bit
/* --------------------------------------------------------------------------------
Bitmap
Atomic bitmap
-------------------------------------------------------------------------------- */
typedef bool mi_bit_t;
@@ -89,4 +89,30 @@ mi_decl_nodiscard bool mi_bitmap_try_find_and_clear8(mi_bitmap_t* bitmap, size_t
// Returns true on success, and in that case sets the index: `0 <= *pidx <= MI_BITMAP_MAX_BITS-n`.
mi_decl_nodiscard bool mi_bitmap_try_find_and_clearN(mi_bitmap_t* bitmap, size_t n, size_t tseq, size_t* pidx );
/* --------------------------------------------------------------------------------
Atomic bitmap for a pair of bits
-------------------------------------------------------------------------------- */
typedef mi_bfield_t mi_pair_t;
#define MI_PAIR_CLEAR (0)
#define MI_PAIR_BUSY (1)
#define MI_PAIR_BUSYX (2)
#define MI_PAIR_SET (3)
typedef mi_decl_align(32) struct mi_pairmap_s {
  mi_bitmap_chunk_t chunks[2*MI_BFIELD_BITS];
  _Atomic(mi_bfield_t) any_set;
  _Atomic(size_t) epoch;
} mi_pairmap_t;
#define MI_PAIRMAP_MAX_PAIRS (MI_BITMAP_MAX_BITS) // 16k pairs on 64bit, 8k pairs on 32bit
#define MI_PAIRMAP_MAX_BITS (2*MI_PAIRMAP_MAX_PAIRS)
mi_decl_nodiscard bool mi_pairmap_xset(mi_pair_t set, mi_pairmap_t* pairmap, size_t idx);
mi_decl_nodiscard bool mi_pairmap_xset_while_not_busy(mi_pair_t set, mi_pairmap_t* pairmap, size_t idx);
mi_decl_nodiscard bool mi_pairmap_try_find_and_set_busy(mi_pairmap_t* pairmap, size_t tseq, size_t* pidx);
#endif // MI_XBITMAP_H
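Illustrative note, not part of the diff: MI_PAIR_CLEAR through MI_PAIR_SET are exactly the four values of two bits, and MI_PAIRMAP_MAX_BITS is twice MI_PAIRMAP_MAX_PAIRS, which suggests each pair occupies two adjacent bits of the underlying chunks. Under that assumption, reading a pair's state out of a single bfield could look like this sketch (the helper name is hypothetical):

  // Sketch: two adjacent bits per pair, pair i at bits 2*i and 2*i+1 of the field.
  static inline mi_pair_t mi_pair_state_of(mi_bfield_t field, size_t pair_idx_in_field) {
    const size_t shift = 2*pair_idx_in_field;       // each pair uses two bits
    return (mi_pair_t)((field >> shift) & 0x03);    // 00=clear, 01=busy, 10=busyx, 11=set
  }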


@@ -147,39 +147,66 @@ void mi_free(void* p) mi_attr_noexcept
}
}
// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  mi_assert_internal(block!=NULL);
  mi_page_t* const page = mi_checked_ptr_page(block,"_mi_free_delayed_block");
  mi_assert_internal(_mi_thread_id() == mi_page_thread_id(page));
  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list, which may cause the page to never be freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */)) {
    return false;
  }
  // collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
  _mi_page_free_collect(page, false);
  // and free the block (possibly freeing the page as well since `used` is updated)
  mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
  return true;
}
// ------------------------------------------------------
// Multi-threaded Free (`_mt`)
// ------------------------------------------------------
// Push a block that is owned by another thread on its page-local thread free
// list or its heap delayed free list. Such blocks are later collected by
// the owning thread in `_mi_free_delayed_block`.
static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_thread_id(page)==0);
  // we own the page now..
  // first remove it from the abandoned pages in the arena
  mi_heap_t* const heap = mi_heap_get_default();
  _mi_arena_page_unabandon(page,heap->tld);
  // collect the thread atomic free list
  _mi_page_free_collect(page, false); // update `used` count
  if (mi_page_is_singleton(page)) mi_assert_internal(mi_page_all_free(page));
  if (mi_page_all_free(page)) {
    // we can free the page directly
    _mi_arena_page_free(page, heap->tld);
  }
  else {
    // the page still has some blocks in use
    // reclaim in our heap if compatible, or otherwise abandon again
    if ((_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0) &&
        (mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) && // we did not already terminate our thread (can this happen? yes, due to thread-local destructors for example (issue #944))
        (page->subproc == heap->tld->subproc) &&                       // don't reclaim across sub-processes
        mi_arena_page_try_reclaim(page)                                // and we can reclaim it from the arena
       )
    {
      // make it part of our heap
      _mi_heap_page_reclaim(heap, page);
    }
    else {
      // abandon again
      _mi_arena_page_abandon(page, heap->tld);
    }
  }
}
// Push a block that is owned by another thread on its page-local thread free list.
static void mi_decl_noinline mi_free_block_delayed_mt(mi_page_t* page, mi_block_t* block)
{
  // push atomically on the page thread free list
  mi_thread_free_t tf_new;
  mi_thread_free_t tf;
  do {
    tf = mi_atomic_load_relaxed(&page->xthread_free);
    mi_block_set_next(page, block, mi_tf_block(tf));
    tf_new = mi_tf_create(block, true /* always owned: try to claim it if abandoned */);
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tf, tf_new));
  // and atomically reclaim the page if it was abandoned
  bool reclaimed = !mi_tf_is_owned(tf);
  if (reclaimed) mi_free_try_reclaim_mt(page);
}
/*
// Try to put the block on either the page-local thread free list,
// or the heap delayed free list (if this is the first non-local free in that page)
mi_thread_free_t tfreex;
@@ -276,7 +303,7 @@ static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block
// thread_delayed free list (or heap delayed free list)
mi_free_block_delayed_mt(page,block);
}
*/
// ------------------------------------------------------
// Usable size
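Side note on the CAS push in mi_free_block_delayed_mt above: the diff does not show mi_thread_free_t or the mi_tf_* helpers, but the `owned` argument to mi_tf_create and the check of mi_tf_is_owned(tf) on the previous value suggest the list head is packed together with an ownership flag, so a single compare-and-swap both pushes the block and claims an abandoned page. A minimal sketch of such an encoding, under that assumption (names are hypothetical):

  #include <stdbool.h>
  #include <stdint.h>

  typedef uintptr_t tf_sketch_t;   // stand-in for mi_thread_free_t

  static inline tf_sketch_t tf_sketch_create(void* block, bool owned) {
    return (tf_sketch_t)((uintptr_t)block | (owned ? 1 : 0));   // low bit = owned flag
  }
  static inline void* tf_sketch_block(tf_sketch_t tf) {
    return (void*)(tf & ~(uintptr_t)1);                         // mask off the flag
  }
  static inline bool tf_sketch_is_owned(tf_sketch_t tf) {
    return ((tf & 1) != 0);
  }

With such an encoding, a previous value whose owned bit is clear means the page was abandoned; the CAS that pushed the block also set the bit (mi_tf_create(block, true)), so the freeing thread now owns the page and calls mi_free_try_reclaim_mt to either reclaim it or abandon it again.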