This commit is contained in:
playX 2023-03-17 14:23:29 +08:00 committed by GitHub
commit 1f6122c401
WARNING! Although there is a key with this ID in the database it does not verify this commit! This commit is SUSPICIOUS.
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 58 additions and 5 deletions

View file

@ -756,6 +756,34 @@ bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
/// @see mi_heap_get_default()
bool mi_check_owned(const void* p);
/// Type of deferred free functions.
/// @param heap The heap for which the deferred frees are being run.
/// @param force If \a true all outstanding items should be freed.
/// @param heartbeat A monotonically increasing count.
/// @param arg Argument that was passed at registration to hold extra state.
///
/// @see mi_heap_register_deferred_free
typedef void (mi_local_deferred_free_fun)(mi_heap_t* heap,bool force,unsigned long long heartbeat,void* arg);
/// Register a deferred free function for a specific heap.
/// @param heap The heap where the deferred free function should be registered.
/// @param deferred_free Address of a deferred free-ing function or \a NULL to unregister.
/// @param arg Argument that will be passed on to the deferred free function.
///
/// Some runtime systems use deferred free-ing, for example when using
/// reference counting to limit the worst case free time (e.g. JSC uses
/// lazy sweeping, and JikesRVM has both lazy sweeping and deferred RC).
/// Such systems can register a (re-entrant) deferred free function
/// to free more memory on demand. When the \a force parameter is
/// \a true all possible memory should be freed.
/// The per-thread \a heartbeat parameter is monotonically increasing
/// and guaranteed to be deterministic if the program allocates
/// deterministically. The \a deferred_free function is guaranteed
/// to be called deterministically after some number of allocations
/// (regardless of freeing or available free memory).
/// At most one \a deferred_free function can be active per heap;
/// registering a new one replaces the previous, and \a NULL unregisters.
void mi_heap_register_deferred_free(mi_heap_t* heap,mi_local_deferred_free_fun* deferred_free, void* arg);
/// An area of heap space contains blocks of a single size.
/// The bytes in freed blocks are `committed - used`.
typedef struct mi_heap_area_s {

View file

@ -406,6 +406,9 @@ struct mi_heap_s {
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
mi_local_deferred_free_fun* deferred_free; // local deferred free function
void* deferred_free_arg; // argument passed to local deferred free function
};

View file

@ -186,6 +186,9 @@ mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t new
struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;
typedef void (mi_cdecl mi_local_deferred_free_fun)(mi_heap_t* heap,bool force,unsigned long long heartbeat,void* arg);
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_export void mi_heap_delete(mi_heap_t* heap);
mi_decl_export void mi_heap_destroy(mi_heap_t* heap);
@ -193,7 +196,7 @@ mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;
mi_decl_export void mi_heap_register_deferred_free(mi_heap_t* heap,mi_local_deferred_free_fun* fun,void* arg);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);

View file

@ -205,6 +205,8 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
// push on the thread local heaps list
heap->next = heap->tld->heaps;
heap->tld->heaps = heap;
heap->deferred_free = NULL;
heap->deferred_free_arg = NULL;
return heap;
}
@ -593,3 +595,9 @@ bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_vis
mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
// Install (or clear, when `fun` is NULL) the deferred free callback for `heap`.
// The `arg` pointer is stored alongside and handed back to the callback on each
// invocation so it can carry extra state.
void mi_heap_register_deferred_free(mi_heap_t* heap, mi_local_deferred_free_fun* fun, void* arg) {
  heap->deferred_free_arg = arg;
  heap->deferred_free     = fun;
}

View file

@ -99,7 +99,9 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next
false
false,
NULL,
NULL
};
@ -131,7 +133,9 @@ mi_heap_t _mi_heap_main = {
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next heap
false // can reclaim
false, // can reclaim
NULL,
NULL
};
bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.

View file

@ -784,9 +784,16 @@ static _Atomic(void*) deferred_arg; // = NULL
// Invoke the registered deferred free callbacks: first the process-wide
// `deferred_free` (with its atomic `deferred_arg`), then the heap-local one.
// `heartbeat` ticks on every call so callbacks observe a deterministic,
// monotonically increasing count; the `recurse` flag guards against
// re-entrancy since a callback may itself allocate or free and end up here
// again. (The scraped diff had left both the pre- and post-image lines in
// place, duplicating the guard and unbalancing the braces — this is the
// merged post-commit form.)
void _mi_deferred_free(mi_heap_t* heap, bool force) {
  heap->tld->heartbeat++;
  if (!heap->tld->recurse) {
    heap->tld->recurse = true;
    if (deferred_free != NULL) {
      // first invoke the global deferred free function (if it is defined)
      deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
    }
    if (heap->deferred_free != NULL) {
      // then invoke the deferred free function registered for this specific heap
      (heap->deferred_free)(heap, force, heap->tld->heartbeat, heap->deferred_free_arg);
    }
    heap->tld->recurse = false;
  }
}