Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-03)
wip: use purge throughout for segments and arenas; more aggressive delays
commit f5ab38f87b
parent 94a867869e
10 changed files with 141 additions and 127 deletions
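Editor's note: this commit folds the earlier reset and decommit paths into a single "purge" operation. Depending on mi_option_purge_decommits, a purge either decommits the range (releasing its commit charge, so it must be re-committed before reuse) or merely resets it (the OS may reclaim the pages while the range stays committed). A minimal standalone sketch of that decision follows; all names in it are illustrative stand-ins, not mimalloc internals.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool opt_purge_decommits = false;  // mirrors mi_option_purge_decommits

static void os_decommit(void* p, size_t size) { (void)p; printf("decommit %zu bytes\n", size); }
static void os_reset(void* p, size_t size)    { (void)p; printf("reset %zu bytes\n", size); }

// "purge": returns true if the memory was decommitted (and so needs re-commit before reuse)
static bool purge(void* p, size_t size) {
  if (opt_purge_decommits) { os_decommit(p, size); return true;  }
  else                     { os_reset(p, size);    return false; }
}

int main(void) {
  char buf[4096];
  printf("decommitted: %d\n", purge(buf, sizeof(buf)));
  return 0;
}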
@@ -821,12 +821,12 @@ typedef enum mi_option_e {
   mi_option_eager_region_commit,    ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
   mi_option_segment_reset,          ///< Experimental
   mi_option_reset_delay,            ///< Delay in milli-seconds before resetting a page (100ms by default)
-  mi_option_reset_decommits,        ///< Experimental
+  mi_option_purge_decommits,        ///< Experimental

   // v2.x specific options
-  mi_option_allow_decommit,         ///< Enable decommitting memory (=on)
-  mi_option_decommit_delay,         ///< Decommit page memory after N milli-seconds delay (25ms).
-  mi_option_segment_decommit_delay, ///< Decommit large segment memory after N milli-seconds delay (500ms).
+  mi_option_allow_purge,            ///< Enable purging memory (=on)
+  mi_option_purge_delay,            ///< Purge page memory after N milli-seconds delay (25ms).
+  mi_option_segment_purge_delay,    ///< Purge large segment memory after N milli-seconds delay (500ms).

   _mi_option_last
 } mi_option_t;
@@ -324,27 +324,27 @@ typedef enum mi_option_e {
   // some of the following options are experimental
   // (deprecated options are kept for binary backward compatibility with v1.x versions)
   mi_option_eager_commit,
-  mi_option_deprecated_eager_region_commit,
-  mi_option_reset_decommits,
+  mi_option_eager_arena_commit,
+  mi_option_purge_decommits,
   mi_option_large_os_pages,           // use large (2MiB) OS pages, implies eager commit
   mi_option_reserve_huge_os_pages,    // reserve N huge OS pages (1GiB) at startup
   mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
   mi_option_reserve_os_memory,        // reserve specified amount of OS memory at startup
   mi_option_deprecated_segment_cache,
   mi_option_page_reset,
-  mi_option_abandoned_page_decommit,
+  mi_option_abandoned_page_purge,
   mi_option_deprecated_segment_reset,
   mi_option_eager_commit_delay,
-  mi_option_decommit_delay,
+  mi_option_purge_delay,
   mi_option_use_numa_nodes,           // 0 = use available numa nodes, otherwise use at most N nodes.
   mi_option_limit_os_alloc,           // 1 = do not use OS memory for allocation (but only reserved arenas)
   mi_option_os_tag,
   mi_option_max_errors,
   mi_option_max_warnings,
   mi_option_max_segment_reclaim,
-  mi_option_allow_decommit,
-  mi_option_segment_decommit_delay,
-  mi_option_decommit_extend_delay,
+  mi_option_allow_purge,
+  mi_option_deprecated_segment_decommit_delay,
+  mi_option_purge_extend_delay,
   mi_option_destroy_on_exit,
   mi_option_arena_reserve,
   mi_option_arena_purge_delay,
@@ -93,6 +93,7 @@ size_t _mi_os_page_size(void);
 size_t _mi_os_good_alloc_size(size_t size);
 bool   _mi_os_has_overcommit(void);

+bool   _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
 bool   _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
 bool   _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
 bool   _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
@@ -120,8 +121,8 @@ bool _mi_arena_is_os_allocated(size_t arena_memid);
 void  _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);

 // "segment-cache.c"
-void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-bool  _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
+void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* purge_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+bool  _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* purge_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
 void  _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
 void  _mi_segment_cache_free_all(mi_os_tld_t* tld);
 void  _mi_segment_map_allocated_at(const mi_segment_t* segment);
@@ -350,7 +350,7 @@ typedef enum mi_segment_kind_e {
 // is still tracked in fine-grained MI_COMMIT_SIZE chunks)
 // ------------------------------------------------------

-#define MI_MINIMAL_COMMIT_SIZE     (16*MI_SEGMENT_SLICE_SIZE)           // 1MiB
+#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)            // 64KiB
 #define MI_COMMIT_SIZE             (MI_SEGMENT_SLICE_SIZE)              // 64KiB
 #define MI_COMMIT_MASK_BITS        (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
 #define MI_COMMIT_MASK_FIELD_BITS  MI_SIZE_BITS
@@ -379,9 +379,10 @@ typedef struct mi_segment_s {
   size_t            mem_alignment;    // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
   size_t            mem_align_offset; // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)

-  bool              allow_decommit;
-  mi_msecs_t        decommit_expire;
-  mi_commit_mask_t  decommit_mask;
+  bool              allow_decommit;
+  bool              allow_purge;
+  mi_msecs_t        purge_expire;
+  mi_commit_mask_t  purge_mask;
   mi_commit_mask_t  commit_mask;

   _Atomic(struct mi_segment_s*) abandoned_next;
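Editor's note: the segment metadata now distinguishes allow_decommit (is decommitting this memory possible at all) from allow_purge (is purging enabled), and renames decommit_expire/decommit_mask to purge_expire/purge_mask. The invariant asserted throughout segment.c is that purge_mask is a subset of commit_mask ("can only purge committed blocks"). A hedged sketch of that mask relation, using a single 64-bit field in place of mimalloc's multi-field mi_commit_mask_t:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t mask_t;  // one bit per 64KiB commit slice (simplified)

static bool mask_all_set(const mask_t* commit, const mask_t* sub) {
  return (*commit & *sub) == *sub;  // every bit of sub is also set in commit
}

int main(void) {
  mask_t commit_mask = 0x0F;  // slices 0..3 committed
  mask_t purge_mask  = 0x06;  // slices 1..2 scheduled for a delayed purge
  assert(mask_all_set(&commit_mask, &purge_mask));  // purge only committed slices
  return 0;
}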
23 src/arena.c
@@ -297,7 +297,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
       mi_atomic_load_relaxed(&mi_arena_count) < 3*(MI_MAX_ARENAS/4) )  // not too many arenas already?
   {
     mi_arena_id_t arena_id = 0;
-    const bool arena_commit = _mi_os_has_overcommit();
+    const bool arena_commit = _mi_os_has_overcommit() || mi_option_is_enabled(mi_option_eager_arena_commit);
     if (mi_reserve_os_memory_ex(arena_reserve, arena_commit /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
       p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       if (p != NULL) return p;
@@ -336,20 +336,6 @@ void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
   Arena purge
 ----------------------------------------------------------- */

-// either resets or decommits memory, returns true if the memory was decommitted.
-static bool mi_os_purge(void* p, size_t size, mi_stats_t* stats) {
-  if (mi_option_is_enabled(mi_option_reset_decommits) &&  // should decommit?
-      !_mi_preloading())                                  // don't decommit during preloading (unsafe)
-  {
-    _mi_os_decommit(p, size, stats);
-    return true;  // decommitted
-  }
-  else {
-    _mi_os_reset(p, size, stats);
-    return false; // not decommitted
-  }
-}
-
 // reset or decommit in an arena and update the committed/decommit bitmaps
 static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
   mi_assert_internal(arena->blocks_committed != NULL);
@@ -357,7 +343,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
   mi_assert_internal(arena->allow_decommit);
   const size_t size = blocks * MI_ARENA_BLOCK_SIZE;
   void* const p = arena->start + (mi_bitmap_index_bit(bitmap_idx) * MI_ARENA_BLOCK_SIZE);
-  const bool decommitted = mi_os_purge(p, size, stats);
+  const bool decommitted = _mi_os_purge(p, size, stats);
   // clear the purged blocks
   _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
   // update committed bitmap
@@ -369,6 +355,8 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
 // Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
 static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
   mi_assert_internal(arena->blocks_purge != NULL);
+  if (!mi_option_is_enabled(mi_option_allow_purge)) return;
+
   const long delay = mi_option_get(mi_option_arena_purge_delay);
   if (_mi_preloading() || delay == 0) {
     // decommit directly
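Editor's note: purges of arena blocks are scheduled rather than executed eagerly. With purging disabled nothing is recorded; during preloading or with a zero delay the purge runs directly; otherwise the blocks are only marked so that a later mi_arenas_try_purge pass does the work once the delay expires. A hedged sketch of that pattern (clock_now, purge_now and mark_for_purge are illustrative stand-ins for the arena bookkeeping):

#include <stdbool.h>
#include <stddef.h>

typedef long long msecs_t;

extern msecs_t clock_now(void);
extern void    purge_now(void* p, size_t size);
extern void    mark_for_purge(void* p, size_t size, msecs_t expire);

static void schedule_purge(void* p, size_t size,
                           bool allow_purge, bool preloading, long delay_ms) {
  if (!allow_purge) return;                 // purging disabled entirely
  if (preloading || delay_ms == 0) {
    purge_now(p, size);                     // purge directly
  }
  else {
    mark_for_purge(p, size, clock_now() + delay_ms);  // handled by a later pass
  }
}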
@@ -468,7 +456,8 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi

 static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
   const long delay = mi_option_get(mi_option_arena_purge_delay);
-  if (_mi_preloading() || delay == 0 /* || !mi_option_is_enabled(mi_option_allow_decommit) */) return;  // nothing will be scheduled
+  if (_mi_preloading() || delay == 0 || !mi_option_is_enabled(mi_option_allow_purge)) return;  // nothing will be scheduled

   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
   if (max_arena == 0) return;
@@ -60,15 +60,15 @@ static mi_option_desc_t options[_mi_option_last] =

   // Some of the following options are experimental and not all combinations are valid. Use with care.
   { 1, UNINIT, MI_OPTION(eager_commit) },        // commit per segment directly (8MiB)  (but see also `eager_commit_delay`)
-  { 0, UNINIT, MI_OPTION(deprecated_eager_region_commit) },
-  { 0, UNINIT, MI_OPTION(reset_decommits) },
+  { 0, UNINIT, MI_OPTION_LEGACY(eager_arena_commit,eager_region_commit) },
+  { 0, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) },
   { 0, UNINIT, MI_OPTION(large_os_pages) },      // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },     // per 1GiB huge pages
   { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
   { 0, UNINIT, MI_OPTION(reserve_os_memory) },
   { 0, UNINIT, MI_OPTION(deprecated_segment_cache) },  // cache N segments per thread
   { 0, UNINIT, MI_OPTION(page_reset) },          // reset page memory on free
-  { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_decommit, abandoned_page_reset) },  // decommit free page memory when a thread terminates
+  { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge, abandoned_page_decommit) },  // purge free page memory when a thread terminates
   { 0, UNINIT, MI_OPTION(deprecated_segment_reset) },
 #if defined(__NetBSD__)
   { 0, UNINIT, MI_OPTION(eager_commit_delay) },  // the first N segments per thread are not eagerly committed
@@ -77,23 +77,23 @@ static mi_option_desc_t options[_mi_option_last] =
 #else
   { 1, UNINIT, MI_OPTION(eager_commit_delay) },  // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
 #endif
-  { 25,  UNINIT, MI_OPTION_LEGACY(decommit_delay, reset_delay) },  // page decommit delay in milli-seconds
+  { 10,  UNINIT, MI_OPTION_LEGACY(purge_delay, decommit_delay) },  // page purge delay in milli-seconds
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },      // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0, UNINIT, MI_OPTION(limit_os_alloc) },      // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100, UNINIT, MI_OPTION(os_tag) },            // only apple specific for now but might serve more or less related purpose
   { 16, UNINIT, MI_OPTION(max_errors) },         // maximum errors that are output
   { 16, UNINIT, MI_OPTION(max_warnings) },       // maximum warnings that are output
   { 8, UNINIT, MI_OPTION(max_segment_reclaim)},  // max. number of segment reclaims from the abandoned segments per try.
-  { 1, UNINIT, MI_OPTION(allow_decommit) },      // decommit slices when no longer used (after decommit_delay milli-seconds)
-  { 500, UNINIT, MI_OPTION(segment_decommit_delay) },  // decommit delay in milli-seconds for freed segments
-  { 1, UNINIT, MI_OPTION(decommit_extend_delay) },
+  { 1, UNINIT, MI_OPTION_LEGACY(allow_purge, allow_decommit) },    // purge slices when no longer used (after purge_delay milli-seconds)
+  { 100, UNINIT, MI_OPTION(deprecated_segment_decommit_delay) },   // decommit delay in milli-seconds for freed segments
+  { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
   { 0, UNINIT, MI_OPTION(destroy_on_exit)},      // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
 #if (MI_INTPTR_SIZE>4)
   { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) },  // reserve memory N KiB at a time
 #else
   { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) },
 #endif
-  { 500, UNINIT, MI_OPTION(arena_purge_delay) }  // reset/decommit delay in milli-seconds for arena allocation
+  { 100, UNINIT, MI_OPTION(arena_purge_delay) }  // purge delay in milli-seconds for arena allocation
 };

 static void mi_option_init(mi_option_desc_t* desc);
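Editor's note: the MI_OPTION_LEGACY entries keep the old option slot (and hence the old MIMALLOC_* environment variable name) working while introducing the new name, and the new defaults are noticeably more aggressive: the page purge delay drops from 25ms to 10ms and the arena purge delay from 500ms to 100ms. A hedged usage example with the public API; mi_option_set and mi_option_enable are existing mimalloc calls, while the new option names assume this commit, and the claim that MIMALLOC_DECOMMIT_DELAY keeps working alongside MIMALLOC_PURGE_DELAY is an inference from the legacy mapping:

#include <mimalloc.h>

int main(void) {
  mi_option_set(mi_option_purge_delay, 10);     // page purge delay in ms (new default)
  mi_option_enable(mi_option_purge_decommits);  // purge by decommit instead of reset
  return 0;
}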
15 src/os.c
@@ -436,6 +436,21 @@ bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stat
 }
 */

+// either resets or decommits memory, returns true if the memory was decommitted.
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats) {
+  if (mi_option_is_enabled(mi_option_purge_decommits) &&  // should decommit?
+      !_mi_preloading())                                  // don't decommit during preloading (unsafe)
+  {
+    _mi_os_decommit(p, size, stats);
+    return true;  // decommitted
+  }
+  else {
+    _mi_os_reset(p, size, stats);
+    return false; // not decommitted
+  }
+}
+
+
 // Protect a region in memory to be not accessible.
 static bool mi_os_protectx(void* addr, size_t size, bool protect) {
   // page align conservatively within the range
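Editor's note: the boolean returned by _mi_os_purge is a contract with its callers — true means the range was decommitted, so commit bitmaps and the committed statistic must be updated and the range re-committed before reuse, exactly as mi_arena_purge and mi_segment_commitx do elsewhere in this commit. A hedged caller sketch; stats_t, os_purge and clear_committed are illustrative stand-ins:

#include <stdbool.h>
#include <stddef.h>

typedef struct stats_s { long purged; } stats_t;            // stand-in for mi_stats_t

extern bool os_purge(void* p, size_t size, stats_t* stats); // stand-in for _mi_os_purge
extern void clear_committed(void* p, size_t size);          // hypothetical bitmap update

static void purge_range(void* p, size_t size, stats_t* stats) {
  const bool decommitted = os_purge(p, size, stats);
  if (decommitted) {
    // the range lost its commit charge: forget its commit state and
    // re-commit before handing the memory out again
    clear_committed(p, size);
  }
  // if only reset, the range stays committed and can be reused directly
}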
@@ -307,7 +307,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
     mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
     mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
     _mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
-    if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
+    if (*commit || !mi_option_is_enabled(mi_option_purge_decommits)) { // only if needed
       bool reset_zero = false;
       _mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld);
       if (reset_zero) *is_zero = true;
@@ -415,7 +415,7 @@ void _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, s
     // reset the blocks to reduce the working set.
     if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
        && (mi_option_is_enabled(mi_option_eager_commit) ||
-           mi_option_is_enabled(mi_option_reset_decommits)))  // cannot reset halfway committed segments, use only `option_page_reset` instead
+           mi_option_is_enabled(mi_option_purge_decommits)))  // cannot reset halfway committed segments, use only `option_page_reset` instead
     {
       bool any_unreset;
       _mi_bitmap_claim(&region->reset, 1, blocks, bit_idx, &any_unreset);
@@ -467,7 +467,7 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
 -----------------------------------------------------------------------------*/

 bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld) {
-  if (mi_option_is_enabled(mi_option_reset_decommits)) {
+  if (mi_option_is_enabled(mi_option_purge_decommits)) {
     return _mi_os_decommit(p, size, tld->stats);
   }
   else {
@@ -476,7 +476,7 @@ bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld) {
 }

 bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
-  if (mi_option_is_enabled(mi_option_reset_decommits)) {
+  if (mi_option_is_enabled(mi_option_purge_decommits)) {
     return _mi_os_commit(p, size, is_zero, tld->stats);
   }
   else {
@@ -29,7 +29,7 @@ typedef struct mi_cache_slot_s {
   size_t              memid;
   bool                is_pinned;
   mi_commit_mask_t    commit_mask;
-  mi_commit_mask_t    decommit_mask;
+  mi_commit_mask_t    purge_mask;
   _Atomic(mi_msecs_t) expire;
 } mi_cache_slot_t;
@@ -48,7 +48,7 @@ static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void
 mi_decl_noinline static void* mi_segment_cache_pop_ex(
                               bool all_suitable,
                               size_t size, mi_commit_mask_t* commit_mask,
-                              mi_commit_mask_t* decommit_mask, bool large_allowed,
+                              mi_commit_mask_t* purge_mask, bool large_allowed,
                               bool* large, bool* is_pinned, bool* is_zero,
                               mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
@@ -96,7 +96,7 @@ mi_decl_noinline static void* mi_segment_cache_pop_ex(
   *is_pinned = slot->is_pinned;
   *is_zero = false;
   *commit_mask = slot->commit_mask;
-  *decommit_mask = slot->decommit_mask;
+  *purge_mask = slot->purge_mask;
   slot->p = NULL;
   mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0);
@@ -107,9 +107,9 @@ mi_decl_noinline static void* mi_segment_cache_pop_ex(
 }

-mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* purge_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
-  return mi_segment_cache_pop_ex(false, size, commit_mask, decommit_mask, large_allowed, large, is_pinned, is_zero, _req_arena_id, memid, tld);
+  return mi_segment_cache_pop_ex(false, size, commit_mask, purge_mask, large_allowed, large, is_pinned, is_zero, _req_arena_id, memid, tld);
 }

 static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats)
@@ -142,7 +142,7 @@ static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, vo
 static mi_decl_noinline void mi_segment_cache_purge(bool visit_all, bool force, mi_os_tld_t* tld)
 {
   MI_UNUSED(tld);
-  if (!mi_option_is_enabled(mi_option_allow_decommit)) return;
+  if (!mi_option_is_enabled(mi_option_allow_purge)) return;
   mi_msecs_t now = _mi_clock_now();
   size_t purged = 0;
   const size_t max_visits = (visit_all ? MI_CACHE_MAX /* visit all */ : MI_CACHE_FIELDS /* probe at most N (=16) slots */);
@@ -170,7 +170,7 @@ static mi_decl_noinline void mi_segment_cache_purge(bool visit_all, bool force,
           // decommit committed parts
           // TODO: instead of decommit, we could also free to the OS?
           mi_commit_mask_decommit(&slot->commit_mask, slot->p, MI_SEGMENT_SIZE, tld->stats);
-          mi_commit_mask_create_empty(&slot->decommit_mask);
+          mi_commit_mask_create_empty(&slot->purge_mask);
         }
         _mi_bitmap_unclaim(cache_unavailable, MI_CACHE_FIELDS, 1, bitidx);  // make it available again for a pop
       }
@@ -191,7 +191,7 @@ void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld) {

 void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
   mi_commit_mask_t commit_mask;
-  mi_commit_mask_t decommit_mask;
+  mi_commit_mask_t purge_mask;
   bool is_pinned;
   bool is_zero;
   bool is_large;
@@ -200,7 +200,7 @@ void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
   void* p;
   do {
     // keep popping and freeing the memory
-    p = mi_segment_cache_pop_ex(true /* all */, size, &commit_mask, &decommit_mask,
+    p = mi_segment_cache_pop_ex(true /* all */, size, &commit_mask, &purge_mask,
                                 true /* allow large */, &is_large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, tld);
     if (p != NULL) {
       size_t csize = _mi_commit_mask_committed_size(&commit_mask, size);
@@ -210,7 +210,7 @@ void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
   } while (p != NULL);
 }

-mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
+mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* purge_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
 {
 #ifdef MI_CACHE_DISABLE
   return false;
@@ -257,13 +257,13 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
   slot->is_pinned = is_pinned;
   mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
   slot->commit_mask = *commit_mask;
-  slot->decommit_mask = *decommit_mask;
-  if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) {
-    long delay = mi_option_get(mi_option_segment_decommit_delay);
+  slot->purge_mask = *purge_mask;
+  if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_purge)) {
+    long delay = mi_option_get(mi_option_arena_purge_delay);
     if (delay == 0) {
       _mi_abandoned_await_readers();  // wait until safe to decommit
       mi_commit_mask_decommit(&slot->commit_mask, start, MI_SEGMENT_SIZE, tld->stats);
-      mi_commit_mask_create_empty(&slot->decommit_mask);
+      mi_commit_mask_create_empty(&slot->purge_mask);
     }
     else {
       mi_atomic_storei64_release(&slot->expire, _mi_clock_now() + delay);
142 src/segment.c
@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_USE_SEGMENT_CACHE  0
 #define MI_PAGE_HUGE_ALIGN    (256*1024)

-static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats);
+static void mi_segment_delayed_purge(mi_segment_t* segment, bool force, mi_stats_t* stats);


 // -------------------------------------------------------------------
@@ -258,7 +258,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
   mi_assert_internal(segment->abandoned <= segment->used);
   mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id());
-  mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // can only decommit committed blocks
+  mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));    // can only purge committed blocks
   //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0);
   mi_slice_t* slice = &segment->slices[0];
   const mi_slice_t* end = mi_segment_slices_end(segment);
@@ -390,14 +390,14 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
     _mi_os_unprotect(end, os_pagesize);
   }

-  // purge delayed decommits now? (no, leave it to the cache)
-  // mi_segment_delayed_decommit(segment,true,tld->stats);
+  // purge delayed decommits now? (no, leave it to the arena)
+  // mi_segment_delayed_purge(segment,true,tld->stats);

   // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
   const size_t size = mi_segment_size(segment);
 #if MI_USE_SEGMENT_CACHE
   if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || segment->kind == MI_SEGMENT_HUGE  // only push regular segments on the cache
-     || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
+     || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->purge_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
 #endif
   {
     // if not all committed, an arena may decommit the whole area, but that double counts
@@ -478,7 +478,7 @@ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uin


 static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, size_t size, mi_stats_t* stats) {
-  mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+  mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));

   // commit liberal, but decommit conservative
   uint8_t* start = NULL;
@@ -488,6 +488,7 @@ static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, s
   if (mi_commit_mask_is_empty(&mask) || full_size==0) return true;

   if (commit && !mi_commit_mask_all_set(&segment->commit_mask, &mask)) {
+    // committing
     bool is_zero = false;
     mi_commit_mask_t cmask;
     mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
@@ -496,41 +497,47 @@ static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, s
     mi_commit_mask_set(&segment->commit_mask, &mask);
   }
   else if (!commit && mi_commit_mask_any_set(&segment->commit_mask, &mask)) {
+    // purging
     mi_assert_internal((void*)start != (void*)segment);
-    //mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &mask));
-
-    mi_commit_mask_t cmask;
-    mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
-    _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
-    if (segment->allow_decommit) {
-      _mi_os_decommit(start, full_size, stats); // ok if this fails
-    }
-    mi_commit_mask_clear(&segment->commit_mask, &mask);
+    if (mi_option_is_enabled(mi_option_allow_purge)) {
+      if (segment->allow_decommit) {
+        const bool decommitted = _mi_os_purge(start, full_size, stats);  // reset or decommit
+        if (decommitted) {
+          mi_commit_mask_t cmask;
+          mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+          _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE));  // adjust for double counting
+          mi_commit_mask_clear(&segment->commit_mask, &mask);
+        }
+      }
+      else if (segment->allow_purge) {
+        _mi_os_reset(start, full_size, stats);
+      }
+    }
   }
   // increase expiration of reusing part of the delayed decommit
-  if (commit && mi_commit_mask_any_set(&segment->decommit_mask, &mask)) {
-    segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
+  if (commit && mi_commit_mask_any_set(&segment->purge_mask, &mask)) {
+    segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay);
   }
-  // always undo delayed decommits
-  mi_commit_mask_clear(&segment->decommit_mask, &mask);
+  // always undo delayed purges
+  mi_commit_mask_clear(&segment->purge_mask, &mask);
   return true;
 }

 static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
-  mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+  mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
   // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
-  if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->decommit_mask)) return true; // fully committed
+  if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed
   mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
   return mi_segment_commitx(segment,true,p,size,stats);
 }

-static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
-  if (!segment->allow_decommit) return;
-  if (mi_option_get(mi_option_decommit_delay) == 0) {
+static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+  if (!segment->allow_purge) return;
+  if (mi_option_get(mi_option_purge_delay) == 0) {
     mi_segment_commitx(segment, false, p, size, stats);
   }
   else {
-    // register for future decommit in the decommit mask
+    // register for future purge in the purge mask
     uint8_t* start = NULL;
     size_t full_size = 0;
     mi_commit_mask_t mask;
@@ -538,39 +545,39 @@ static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_
     if (mi_commit_mask_is_empty(&mask) || full_size==0) return;

     // update delayed commit
-    mi_assert_internal(segment->decommit_expire > 0 || mi_commit_mask_is_empty(&segment->decommit_mask));
+    mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask));
     mi_commit_mask_t cmask;
-    mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);  // only decommit what is committed; span_free may try to decommit more
-    mi_commit_mask_set(&segment->decommit_mask, &cmask);
+    mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);  // only purge what is committed; span_free may try to decommit more
+    mi_commit_mask_set(&segment->purge_mask, &cmask);
     mi_msecs_t now = _mi_clock_now();
-    if (segment->decommit_expire == 0) {
+    if (segment->purge_expire == 0) {
       // no previous decommits, initialize now
-      segment->decommit_expire = now + mi_option_get(mi_option_decommit_delay);
+      segment->purge_expire = now + mi_option_get(mi_option_purge_delay);
     }
-    else if (segment->decommit_expire <= now) {
+    else if (segment->purge_expire <= now) {
       // previous decommit mask already expired
-      if (segment->decommit_expire + mi_option_get(mi_option_decommit_extend_delay) <= now) {
-        mi_segment_delayed_decommit(segment, true, stats);
+      if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) {
+        mi_segment_delayed_purge(segment, true, stats);
       }
       else {
-        segment->decommit_expire = now + mi_option_get(mi_option_decommit_extend_delay); // (mi_option_get(mi_option_decommit_delay) / 8); // wait a tiny bit longer in case there is a series of free's
+        segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of free's
       }
     }
     else {
       // previous decommit mask is not yet expired, increase the expiration by a bit.
-      segment->decommit_expire += mi_option_get(mi_option_decommit_extend_delay);
+      segment->purge_expire += mi_option_get(mi_option_purge_extend_delay);
     }
   }
 }

-static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats) {
-  if (!segment->allow_decommit || mi_commit_mask_is_empty(&segment->decommit_mask)) return;
+static void mi_segment_delayed_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) {
+  if (!segment->allow_purge || mi_commit_mask_is_empty(&segment->purge_mask)) return;
   mi_msecs_t now = _mi_clock_now();
-  if (!force && now < segment->decommit_expire) return;
+  if (!force && now < segment->purge_expire) return;

-  mi_commit_mask_t mask = segment->decommit_mask;
-  segment->decommit_expire = 0;
-  mi_commit_mask_create_empty(&segment->decommit_mask);
+  mi_commit_mask_t mask = segment->purge_mask;
+  segment->purge_expire = 0;
+  mi_commit_mask_create_empty(&segment->purge_mask);

   size_t idx;
   size_t count;
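Editor's note: the expiration policy above coalesces bursts of frees — the first schedule sets the deadline a full purge_delay away, and while more frees arrive the deadline is only nudged by the much smaller purge_extend_delay; once the deadline has been missed by more than purge_extend_delay the purge is forced. A hedged sketch of the deadline computation (the real code performs the forced purge in that last case instead of merely moving the deadline):

typedef long long msecs_t;

static msecs_t next_expire(msecs_t expire, msecs_t now,
                           msecs_t purge_delay, msecs_t extend_delay) {
  if (expire == 0)        return now + purge_delay;      // first schedule
  else if (expire <= now) return now + extend_delay;     // expired: purge very soon
  else                    return expire + extend_delay;  // pending: extend a bit
}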
@@ -583,7 +590,7 @@ static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_st
     }
   }
   mi_commit_mask_foreach_end()
-  mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask));
+  mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
 }

@@ -596,7 +603,7 @@ static bool mi_segment_is_abandoned(mi_segment_t* segment) {
 }

 // note: can be called on abandoned segments
-static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_decommit, mi_segments_tld_t* tld) {
+static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_purge, mi_segments_tld_t* tld) {
   mi_assert_internal(slice_index < segment->slice_entries);
   mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment)
                          ? NULL : mi_span_queue_for(slice_count,tld));
@@ -616,8 +623,8 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size
   }

   // perhaps decommit
-  if (allow_decommit) {
-    mi_segment_perhaps_decommit(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats);
+  if (allow_purge) {
+    mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats);
   }

   // and push it on the free page queue (if it was not a huge page)
@@ -794,7 +801,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren

 static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delay, mi_arena_id_t req_arena_id,
                                           size_t* psegment_slices, size_t* ppre_size, size_t* pinfo_slices,
-                                          mi_commit_mask_t* pcommit_mask, mi_commit_mask_t* pdecommit_mask,
+                                          mi_commit_mask_t* pcommit_mask, mi_commit_mask_t* ppurge_mask,
                                           bool* is_zero, bool* pcommit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)

 {
@@ -821,10 +828,10 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
 #if MI_USE_SEGMENT_CACHE
   // get from cache?
   if (page_alignment == 0) {
-    segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, pcommit_mask, pdecommit_mask, mem_large, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
+    segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, pcommit_mask, ppurge_mask, mem_large, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
   }
 #else
-  MI_UNUSED(pdecommit_mask);
+  MI_UNUSED(ppurge_mask);
 #endif

   // get from OS
@@ -886,13 +893,13 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   bool is_zero = false;

   mi_commit_mask_t commit_mask;
-  mi_commit_mask_t decommit_mask;
+  mi_commit_mask_t purge_mask;
   mi_commit_mask_create_empty(&commit_mask);
-  mi_commit_mask_create_empty(&decommit_mask);
+  mi_commit_mask_create_empty(&purge_mask);

   // Allocate the segment from the OS
   mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
-                                              &segment_slices, &pre_size, &info_slices, &commit_mask, &decommit_mask,
+                                              &segment_slices, &pre_size, &info_slices, &commit_mask, &purge_mask,
                                               &is_zero, &commit, tld, os_tld);
   if (segment == NULL) return NULL;

@@ -908,21 +915,22 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   }

   segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
-  segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
-  if (segment->allow_decommit) {
-    segment->decommit_expire = 0; // don't decommit just committed memory // _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
-    segment->decommit_mask = decommit_mask;
-    mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+  segment->allow_decommit = !segment->mem_is_pinned && !segment->mem_is_large;
+  segment->allow_purge = mi_option_is_enabled(mi_option_allow_purge) && segment->allow_decommit;
+  if (segment->allow_purge) {
+    segment->purge_expire = 0; // don't decommit just committed memory // _mi_clock_now() + mi_option_get(mi_option_purge_delay);
+    segment->purge_mask = purge_mask;
+    mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
 #if MI_DEBUG>2
     const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
     mi_commit_mask_t commit_needed_mask;
     mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
-    mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask));
+    mi_assert_internal(!mi_commit_mask_any_set(&segment->purge_mask, &commit_needed_mask));
 #endif
   }
   else {
-    segment->decommit_expire = 0;
-    mi_commit_mask_create_empty( &segment->decommit_mask );
+    segment->purge_expire = 0;
+    mi_commit_mask_create_empty( &segment->purge_mask );
   }

   // initialize segment info
@@ -965,7 +973,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   }
   else {
     mi_assert_internal(huge_page!=NULL);
-    mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask));
+    mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
     mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask));
     *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld);
     mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance
@@ -1269,8 +1277,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
     slice = slice + slice->slice_count;
   }

-  // perform delayed decommits
-  mi_segment_delayed_decommit(segment, mi_option_is_enabled(mi_option_abandoned_page_decommit) /* force? */, tld->stats);
+  // perform delayed decommits (forcing is much slower on mstress)
+  mi_segment_delayed_purge(segment, mi_option_is_enabled(mi_option_abandoned_page_purge) /* force? */, tld->stats);

   // all pages in the segment are abandoned; add it to the abandoned list
   _mi_stat_increase(&tld->stats->segments_abandoned, 1);
@@ -1459,7 +1467,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
     }
     else {
       // otherwise, push on the visited list so it gets not looked at too quickly again
-      mi_segment_delayed_decommit(segment, true /* force? */, tld->stats); // forced decommit if needed as we may not visit soon again
+      mi_segment_delayed_purge(segment, true /* force? */, tld->stats); // force purge if needed as we may not visit soon again
       mi_abandoned_visited_push(segment);
     }
   }
@@ -1483,9 +1491,9 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
     else {
-      // otherwise, decommit if needed and push on the visited list
-      // note: forced decommit can be expensive if many threads are destroyed/created as in mstress.
-      mi_segment_delayed_decommit(segment, force, tld->stats);
+      // otherwise, purge if needed and push on the visited list
+      // note: forced purge can be expensive if many threads are destroyed/created as in mstress.
+      mi_segment_delayed_purge(segment, force, tld->stats);
       mi_abandoned_visited_push(segment);
     }
   }
@@ -1543,7 +1551,7 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki
   }
   mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
   mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id());
-  mi_segment_delayed_decommit(_mi_ptr_segment(page), false, tld->stats);
+  mi_segment_delayed_purge(_mi_ptr_segment(page), false, tld->stats);
   return page;
 }