wip: cannot compile

daanx 2024-12-01 16:26:59 -08:00
parent 1d7a9f62a5
commit 2f789aae9a
5 changed files with 181 additions and 77 deletions


@@ -92,11 +92,13 @@ bool _mi_preloading(void); // true while the C runtime is not in
void _mi_thread_done(mi_heap_t* heap);
void _mi_thread_data_collect(void);
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
size_t _mi_thread_seq_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
void _mi_heap_guarded_init(mi_heap_t* heap);
// os.c
void _mi_os_init(void); // called from process init
@@ -180,8 +182,6 @@ void _mi_heap_delayed_free_all(mi_heap_t* heap);
bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);
@@ -426,6 +426,10 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
  return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
static inline mi_tld_t* _mi_tld(void) {
  return mi_heap_get_default()->tld;
}
/* -----------------------------------------------------------
Pages
----------------------------------------------------------- */
@@ -507,53 +511,53 @@ static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
  return mi_page_block_size(page) - MI_PADDING_SIZE;
}
//static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
//  mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
//  if (heap != NULL) {
//    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
//    page->heap_tag = heap->tag;
//    mi_atomic_store_release(&page->xthread_id, heap->thread_id);
//  }
//  else {
//    mi_atomic_store_release(&page->xheap, (uintptr_t)mi_page_heap(page)->tld->subproc);
//    mi_atomic_store_release(&page->xthread_id,0);
//  }
//}
// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~1);
}
static inline bool mi_tf_is_owned(mi_thread_free_t tf) {
  return ((tf & 1) == 0);
}
static inline mi_thread_free_t mi_tf_create(mi_block_t* block, bool owned) {
  return (mi_thread_free_t)((uintptr_t)block | (owned ? 0 : 1));
}
// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
  return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
  return mi_tf_block(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free));
}
static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
  return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
}
// Heap access
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
  return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
// Owned?
static inline bool mi_page_is_owned(const mi_page_t* page) {
  return mi_tf_is_owned(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free));
}
// Thread id of thread that owns this page
static inline mi_threadid_t mi_page_thread_id(const mi_page_t* page) {
  return mi_atomic_load_relaxed(&page->xthread_id);
}
static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
  if (heap != NULL) {
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    page->heap_tag = heap->tag;
    mi_atomic_store_release(&page->xthread_id, heap->thread_id);
  }
  else {
    mi_atomic_store_release(&page->xheap, (uintptr_t)mi_page_heap(page)->tld->subproc);
    mi_atomic_store_release(&page->xthread_id,0);
  }
}
// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~0x03);
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
  return (mi_delayed_t)(tf & 0x03);
}
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
  return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
}
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
  return mi_tf_make(mi_tf_block(tf),delayed);
}
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
  return mi_tf_make(block, mi_tf_delayed(tf));
}
//static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
//  return mi_tf_make(mi_tf_block(tf),delayed);
//}
//static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
//  return mi_tf_make(block, mi_tf_delayed(tf));
//}
// are all blocks in a page freed?
// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`.
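As a reading aid, here is a minimal standalone sketch of the encoding the new helpers rely on (plain C11; all `demo_` names are hypothetical, and this is not mimalloc source). Because blocks are at least pointer-aligned, the low bit of the `xthread_free` word can carry the owned/abandoned flag while the remaining bits hold the head of the thread-free list, mirroring `mi_tf_create`, `mi_tf_block`, and `mi_tf_is_owned` above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical stand-ins for the real types (sketch only).
typedef struct demo_block_s { struct demo_block_s* next; } demo_block_t;
typedef uintptr_t demo_thread_free_t;   // low bit: 0 = owned, 1 = abandoned

// Pack a block pointer and an ownership flag into one word.
static inline demo_thread_free_t demo_tf_create(demo_block_t* block, bool owned) {
  return (demo_thread_free_t)((uintptr_t)block | (owned ? 0 : 1));
}
// Strip the tag bit to recover the block pointer.
static inline demo_block_t* demo_tf_block(demo_thread_free_t tf) {
  return (demo_block_t*)(tf & ~(uintptr_t)1);
}
// The page is owned exactly when the tag bit is clear.
static inline bool demo_tf_is_owned(demo_thread_free_t tf) {
  return ((tf & 1) == 0);
}

int main(void) {
  demo_block_t b;  // pointer-aligned, so bit 0 is free for the tag
  demo_thread_free_t tf = demo_tf_create(&b, /*owned=*/false);
  assert(demo_tf_block(tf) == &b);
  assert(!demo_tf_is_owned(tf));
  printf("abandoned page, block %p recovered intact\n", (void*)demo_tf_block(tf));
  return 0;
}

Masking with `~1` instead of `~0x03` is exactly the simplification made in the hunk above: the two `mi_delayed_t` bits shrink to a single ownership bit.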


@@ -216,13 +216,14 @@ typedef struct mi_block_s {
#endif
// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky: used for abandoned pages without an owning heap; this only resets on page reclaim
} mi_delayed_t;
// The owned flags are used for efficient multi-threaded free-ing
// When we push on the page thread free queue of an abandoned page,
// we also atomically get to own it. This is needed to atomically
// abandon a page (while other threads could concurrently free blocks in it).
typedef enum mi_owned_e {
  MI_OWNED     = 0, // some heap owns this page
  MI_ABANDONED = 1, // the page is abandoned
} mi_owned_t;
// The `in_full` and `has_aligned` page flags are put in a union to efficiently
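The comment above captures the heart of the new scheme: a thread that pushes a freed block onto the thread-free list of an abandoned page takes ownership of that page in the same atomic step. Below is a hedged, self-contained sketch of the idea (C11 atomics, hypothetical `demo_` names; the real mimalloc code path is more involved). The compare-and-swap installs the new list head with the tag bit cleared, so whichever thread succeeds against a word whose abandoned bit was set becomes the owner:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical stand-ins (sketch only, not the mimalloc types).
typedef struct demo_block_s { struct demo_block_s* next; } demo_block_t;
typedef uintptr_t demo_thread_free_t;                          // low bit: 1 = abandoned
typedef struct demo_page_s { _Atomic(demo_thread_free_t) xthread_free; } demo_page_t;

// Push `block` onto the page's thread-free list; return true when this
// push also claimed ownership of a previously abandoned page.
static bool demo_thread_free_push(demo_page_t* page, demo_block_t* block) {
  demo_thread_free_t old_tf = atomic_load_explicit(&page->xthread_free, memory_order_relaxed);
  demo_thread_free_t new_tf;
  do {
    block->next = (demo_block_t*)(old_tf & ~(uintptr_t)1);     // link to the current head
    new_tf = (demo_thread_free_t)(uintptr_t)block;             // tag bit cleared: owned from here on
  } while (!atomic_compare_exchange_weak_explicit(&page->xthread_free, &old_tf, new_tf,
                                                  memory_order_acq_rel, memory_order_relaxed));
  return (old_tf & 1) != 0;  // the word was tagged abandoned, so this push claimed the page
}

int main(void) {
  demo_page_t page;
  atomic_init(&page.xthread_free, (demo_thread_free_t)1);      // empty list, abandoned
  demo_block_t b1, b2;
  printf("claimed on first push:  %d\n", demo_thread_free_push(&page, &b1));  // prints 1
  printf("claimed on second push: %d\n", demo_thread_free_push(&page, &b2));  // prints 0
  return 0;
}

Keeping the flag and the list head in one word is what lets a page be abandoned atomically while other threads keep freeing blocks into it, as the comment notes.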
@@ -247,7 +248,7 @@ typedef union mi_page_flags_s {
#endif
// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags
// We use the bottom bit of the pointer for `mi_owned_t` flags
typedef uintptr_t mi_thread_free_t;
// Sub processes are used to keep memory separate between them (e.g. multiple interpreters in CPython)
@@ -304,10 +305,11 @@ typedef struct mi_page_s {
#endif
  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t) xheap;                // heap this page belongs to
  // _Atomic(uintptr_t) xheap;             // heap this page belongs to
  struct mi_page_s* next;                  // next page owned by the heap with the same `block_size`
  struct mi_page_s* prev;                  // previous page owned by the heap with the same `block_size`
  mi_subproc_t* subproc;                   // sub-process of this heap
  mi_memid_t memid;                        // provenance of the page memory
} mi_page_t;