Initial local deferred free impl

playX 2021-04-29 06:07:55 +03:00
parent 73c339235c
commit f4d1ad552e
6 changed files with 55 additions and 5 deletions

View file

@@ -730,6 +730,32 @@ bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
/// @see mi_heap_contains_block()
/// @see mi_heap_get_default()
bool mi_check_owned(const void* p);
/// Type of deferred free functions.
/// @param heap The heap.
/// @param force If \a true all outstanding items should be freed.
/// @param heartbeat A monotonically increasing count.
/// @param arg Argument that was passed at registration to hold extra state.
///
/// @see mi_heap_register_deferred_free
typedef void (mi_local_deferred_free_fun)(mi_heap_t* heap, bool force, unsigned long long heartbeat, void* arg);
/// Register a deferred free function.
/// @param heap The heap where the deferred free function should be registered.
/// @param deferred_free Address of a deferred free-ing function or \a NULL to unregister.
/// @param arg Argument that will be passed on to the deferred free function.
///
/// Some runtime systems use deferred free-ing, for example when using
/// reference counting to limit the worst case free time.
/// Such systems can register a (re-entrant) deferred free function
/// to free more memory on demand. When the \a force parameter is
/// \a true all possible memory should be freed.
/// The per-thread \a heartbeat parameter is monotonically increasing
/// and guaranteed to be deterministic if the program allocates
/// deterministically. The \a deferred_free function is guaranteed
/// to be called deterministically after some number of allocations
/// (regardless of freeing or available free memory).
/// At most one \a deferred_free function can be active per heap.
void mi_heap_register_deferred_free(mi_heap_t* heap, mi_local_deferred_free_fun* deferred_free, void* arg);
/// An area of heap space contains blocks of a single size.
/// The bytes in freed blocks are `committed - used`.
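The declarations above define the whole per-heap API: a callback type that receives the owning heap, the `force` flag, the per-thread heartbeat, and the registration argument, plus a registration function that accepts `NULL` to unregister. A minimal usage sketch against this patch follows; the callback `my_deferred_free` and its `pending` counter are illustrative, not part of the commit.

#include <mimalloc.h>
#include <stdbool.h>
#include <stdio.h>

// Illustrative callback matching mi_local_deferred_free_fun.
static void my_deferred_free(mi_heap_t* heap, bool force, unsigned long long heartbeat, void* arg) {
  size_t* pending = (size_t*)arg;           // extra state passed at registration
  (void)heap;
  if (force && *pending > 0) {
    printf("heartbeat %llu: flushing %zu pending items\n", heartbeat, *pending);
    *pending = 0;
  }
}

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  size_t pending = 0;
  mi_heap_register_deferred_free(heap, &my_deferred_free, &pending);  // register
  // ... allocate from `heap`; the callback is invoked periodically from allocation ...
  mi_heap_register_deferred_free(heap, NULL, NULL);                   // unregister
  mi_heap_delete(heap);
  return 0;
}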

View file

@@ -350,6 +350,9 @@ struct mi_heap_s {
  size_t page_retired_max;                    // largest retired index into the `pages` array.
  mi_heap_t* next;                            // list of heaps per thread
  bool no_reclaim;                            // `true` if this heap should not reclaim abandoned pages
  mi_local_deferred_free_fun* deferred_free;  // local deferred free function
  void* deferred_free_arg;                    // argument passed to local deferred free function
};

View file

@@ -180,6 +180,9 @@ mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t new
struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;
typedef void (mi_cdecl mi_local_deferred_free_fun)(mi_heap_t* heap, bool force, unsigned long long heartbeat, void* arg);
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_export void mi_heap_delete(mi_heap_t* heap);
mi_decl_export void mi_heap_destroy(mi_heap_t* heap);
@@ -187,7 +190,7 @@ mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;
mi_decl_export void mi_heap_register_deferred_free(mi_heap_t* heap, mi_local_deferred_free_fun* fun, void* arg);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);

View file

@@ -204,6 +204,8 @@ mi_heap_t* mi_heap_new(void) {
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
  heap->deferred_free = NULL;
  heap->deferred_free_arg = NULL;
  return heap;
}
@@ -564,3 +566,9 @@ bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_vis
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}

void mi_heap_register_deferred_free(mi_heap_t* heap, mi_local_deferred_free_fun* fun, void* arg) {
  heap->deferred_free = fun;
  heap->deferred_free_arg = arg;
}

View file

@@ -99,7 +99,9 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
  0,              // page count
  MI_BIN_FULL, 0, // page retired min/max
  NULL,           // next
- false
+ false,
+ NULL,           // local deferred free function
+ NULL            // argument for local deferred free function
};
// the thread-local default heap for allocation
@@ -130,7 +132,9 @@ mi_heap_t _mi_heap_main = {
  0,              // page count
  MI_BIN_FULL, 0, // page retired min/max
  NULL,           // next heap
- false           // can reclaim
+ false,          // can reclaim
+ NULL,           // local deferred free function
+ NULL            // argument for local deferred free function
};
bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.

View file

@@ -737,9 +737,15 @@ static _Atomic(void*) deferred_arg; // = NULL
void _mi_deferred_free(mi_heap_t* heap, bool force) {
  heap->tld->heartbeat++;
- if (deferred_free != NULL && !heap->tld->recurse) {
+ if (!heap->tld->recurse) {
    heap->tld->recurse = true;
-   deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
+   if (deferred_free != NULL) {
+     deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
+   }
+   if (heap->deferred_free != NULL) {
+     // TODO: Should heap->deferred_free_arg be an atomic load?
+     (heap->deferred_free)(heap, force, heap->tld->heartbeat, heap->deferred_free_arg);
+   }
    heap->tld->recurse = false;
  }
}
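
For context on how this hook might be used: a runtime that defers frees (for example from reference-count drops) can queue blocks and let the registered function drain the queue, releasing everything when `force` is set and only a bounded batch otherwise. Because `_mi_deferred_free` sets `recurse` before invoking the callback, the callback can itself call `mi_free`. The sketch below is illustrative only — the queue, `defer_free`, and `drain_pending` are not part of the commit — and assumes a single-threaded heap (mimalloc heaps are thread-local), which is also why a plain, non-atomic load of `heap->deferred_free_arg` is likely sufficient.

#include <mimalloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative deferred-free queue: each queued block stores the `next` link in
// its first word, so queued blocks must be at least sizeof(void*) bytes large.
typedef struct pending_s { struct pending_s* next; } pending_t;
static pending_t* pending_head = NULL;      // single-threaded sketch, no locking

// Called by the runtime instead of mi_free(), e.g. when a refcount drops to zero.
static void defer_free(void* p) {
  pending_t* node = (pending_t*)p;
  node->next = pending_head;
  pending_head = node;
}

// Runs from _mi_deferred_free via heap->deferred_free.
static void drain_pending(mi_heap_t* heap, bool force, unsigned long long heartbeat, void* arg) {
  (void)heap; (void)heartbeat; (void)arg;
  size_t budget = force ? SIZE_MAX : 64;    // bound the work unless a full flush is forced
  while (pending_head != NULL && budget-- > 0) {
    pending_t* node = pending_head;
    pending_head = node->next;
    mi_free(node);                          // safe: the recursion guard is already set
  }
}

// Registration, e.g. at thread start:
//   mi_heap_register_deferred_free(mi_heap_get_default(), &drain_pending, NULL);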