Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)

Commit 3f2b6e6df9: merge from dev

5 changed files with 7 additions and 14 deletions
@@ -295,11 +295,11 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t a
 typedef void* mi_subproc_id_t;
 mi_decl_export mi_subproc_id_t mi_subproc_main(void);
 mi_decl_export mi_subproc_id_t mi_subproc_new(void);
-mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
-mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
+mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
+mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)

 // Experimental: visit abandoned heap areas (from threads that have been terminated)
-mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);

 // Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread
 // to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will
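The hunk above only touches the formatting of the experimental sub-process declarations, but a usage sketch may help. The following is not part of this commit: it is a hypothetical example of how mi_subproc_new, mi_subproc_add_current_thread and mi_abandoned_visit_blocks could be combined. The names g_subproc, worker and count_blocks are invented, the pthread wiring is for illustration, and passing heap_tag 0 is assumed here to select the default heap tag.

// Hypothetical usage sketch (not part of this commit).
#include <mimalloc.h>
#include <pthread.h>
#include <stdio.h>

static mi_subproc_id_t g_subproc;          // sub-process used by the worker thread

// mi_block_visit_fun-compatible callback: count live blocks in abandoned areas.
static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) { (*(size_t*)arg)++; }  // block==NULL marks the per-area call
  return true;                               // keep visiting
}

static void* worker(void* arg) {
  (void)arg;
  mi_subproc_add_current_thread(g_subproc);  // before any allocation in this thread
  void* p = mi_malloc(64);                   // never freed: abandoned when the thread exits
  (void)p;
  return NULL;
}

int main(void) {
  g_subproc = mi_subproc_new();
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  pthread_join(t, NULL);

  size_t count = 0;
  // heap_tag 0 = default heap tag (assumption); true = also visit individual blocks
  mi_abandoned_visit_blocks(g_subproc, 0, true, count_blocks, &count);
  printf("abandoned blocks: %zu\n", count);

  mi_subproc_delete(g_subproc);
  return 0;
}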
@@ -307,7 +307,7 @@ mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int
 mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);

 // deprecated
-mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
+mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;


 // ------------------------------------------------------
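For context only, a hypothetical sketch (not part of this commit) of calling the experimental mi_heap_new_ex declared above. The tag value 42 is arbitrary, and passing 0 as the arena id is an assumption meaning "no specific arena"; the exact mi_heap_destroy behavior is described in the header comment, which is truncated in this hunk.

// Hypothetical usage sketch (not part of this commit).
#include <mimalloc.h>

int main(void) {
  // allow_destroy == false lets the heap reclaim compatible abandoned memory;
  // per the (truncated) comment above, mi_heap_destroy then behaves differently,
  // so mi_heap_delete is used here instead.
  mi_heap_t* heap = mi_heap_new_ex(/*heap_tag*/ 42, /*allow_destroy*/ false, /*arena_id*/ 0);
  if (heap == NULL) return 1;
  void* p = mi_heap_malloc(heap, 128);
  mi_free(p);
  mi_heap_delete(heap);  // remaining blocks migrate to the default heap
  return 0;
}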
@@ -159,7 +159,6 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, m

 uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
-void _mi_abandoned_await_readers(void);
 void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
 bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
@@ -140,10 +140,10 @@ static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bit

 /* -----------------------------------------------------------
   Special static area for mimalloc internal structures
-  to avoid OS calls (for example, for the arena metadata)
+  to avoid OS calls (for example, for the arena metadata (~= 256b))
 ----------------------------------------------------------- */

-#define MI_ARENA_STATIC_MAX (MI_INTPTR_SIZE*MI_KiB)      // 8 KiB on 64-bit
+#define MI_ARENA_STATIC_MAX ((MI_INTPTR_SIZE/2)*MI_KiB)  // 4 KiB on 64-bit

 static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];  // must be cache aligned, see issue #895
 static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top;
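The hunk above halves the static area to 4 KiB and adds a size estimate for the arena metadata to the comment. For readers unfamiliar with the idea, here is a minimal, self-contained sketch of such a static bump area; this is not mimalloc's actual code, and the names, alignment handling and exhaustion behavior are simplified assumptions.

// Simplified sketch of a static bump area (not mimalloc's actual code).
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define STATIC_MAX (4*1024)                 // mirrors the 4 KiB MI_ARENA_STATIC_MAX above

static uint8_t static_area[STATIC_MAX];     // fixed buffer, no OS call needed
static _Atomic(size_t) static_top;          // first free offset (zero-initialized)

// Hand out `size` zeroed bytes aligned to `align` (a power of two), or NULL when exhausted.
static void* static_zalloc(size_t size, size_t align) {
  if (size == 0 || size > STATIC_MAX) return NULL;
  // reserve size+align so the returned pointer can always be aligned up,
  // and so concurrent callers get disjoint regions
  size_t oldtop = atomic_fetch_add(&static_top, size + align);
  size_t start  = (oldtop + align - 1) & ~(align - 1);   // align up
  if (start + size > STATIC_MAX) return NULL;            // area exhausted
  void* p = &static_area[start];
  memset(p, 0, size);
  return p;
}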
@@ -271,7 +271,7 @@ typedef struct mi_thread_data_s {
 // destroy many OS threads, this may causes too much overhead
 // per thread so we maintain a small cache of recently freed metadata.

-#define TD_CACHE_SIZE (16)
+#define TD_CACHE_SIZE (32)
 static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];

 static mi_thread_data_t* mi_thread_data_zalloc(void) {
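The hunk above doubles the cache of recently freed per-thread metadata from 16 to 32 slots. As an illustration only (not mimalloc's code; the thread_data_t payload and slot count are placeholders), a fixed array of atomic slots like td_cache can be used lock-free along these lines:

// Illustrative lock-free slot cache (not mimalloc's actual code).
#include <stdatomic.h>
#include <stdlib.h>

typedef struct thread_data_s { int dummy; } thread_data_t;   // placeholder payload

#define CACHE_SIZE 32
static _Atomic(thread_data_t*) cache[CACHE_SIZE];

static thread_data_t* td_fetch(void) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    thread_data_t* td = atomic_exchange(&cache[i], (thread_data_t*)NULL);  // try to take a slot
    if (td != NULL) return td;                 // reuse cached metadata
  }
  return calloc(1, sizeof(thread_data_t));     // cache empty: allocate fresh
}

static void td_release(thread_data_t* td) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    thread_data_t* expected = NULL;
    if (atomic_compare_exchange_strong(&cache[i], &expected, td)) return;  // parked in a free slot
  }
  free(td);                                    // cache full: really free it
}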
@@ -407,7 +407,6 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
   const size_t size = mi_segment_size(segment);
   const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);

-  _mi_abandoned_await_readers(); // wait until safe to free
   _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats);
 }

@@ -1067,11 +1066,6 @@ by scanning the arena memory
 (segments outside arena memoryare only reclaimed by a free).
 ----------------------------------------------------------- */

-// legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list
-void _mi_abandoned_await_readers(void) {
-  // nothing needed
-}
-
 /* -----------------------------------------------------------
   Abandon segment/page
 ----------------------------------------------------------- */
