mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)

commit f4e006fa76: merge from dev-reset
10 changed files with 241 additions and 21 deletions
@@ -325,7 +325,7 @@ typedef enum mi_option_e {
   // (deprecated options are kept for binary backward compatibility with v1.x versions)
   mi_option_eager_commit,
   mi_option_deprecated_eager_region_commit,
-  mi_option_deprecated_reset_decommits,
+  mi_option_reset_decommits,
   mi_option_large_os_pages,            // use large (2MiB) OS pages, implies eager commit
   mi_option_reserve_huge_os_pages,     // reserve N huge OS pages (1GiB) at startup
   mi_option_reserve_huge_os_pages_at,  // reserve huge OS pages at a specific NUMA node
@@ -346,7 +346,8 @@ typedef enum mi_option_e {
   mi_option_segment_decommit_delay,
   mi_option_decommit_extend_delay,
   mi_option_destroy_on_exit,
-  mi_option_eager_reserve,
+  mi_option_arena_reserve,
+  mi_option_arena_purge_delay,
   _mi_option_last
 } mi_option_t;
 
@@ -284,6 +284,17 @@ static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
   return mi_atomic_cas_strong_acq_rel(once, &expected, 1); // try to set to 1
 }
 
+typedef _Atomic(uintptr_t) mi_atomic_guard_t;
+
+// Allows only one thread to execute at a time
+#define mi_atomic_guard(guard) \
+  uintptr_t _mi_guard_expected = 0; \
+  for(bool _mi_guard_once = true; \
+      _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,1); \
+      (mi_atomic_store_release(guard,0), _mi_guard_once = false) )
+
+
 // Yield
 #if defined(__cplusplus)
 #include <thread>
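Note the semantics: `mi_atomic_guard` is not a mutex. A thread that loses the CAS skips the guarded block entirely instead of waiting. A minimal standalone sketch of the same pattern in plain C11 atomics (the `demo_` names are illustrative and not part of this commit):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic(uintptr_t) demo_guard;  // zero-initialized, like `purge_guard` in arena.c below

void demo_guarded(void) {
  uintptr_t expected = 0;
  // expansion of `mi_atomic_guard(&demo_guard) { ... }`: run the block only if
  // the CAS from 0 to 1 succeeds; the loop-increment releases the guard after
  // exactly one iteration, so losing threads skip the block instead of blocking.
  for (bool once = true;
       once && atomic_compare_exchange_strong_explicit(&demo_guard, &expected, 1,
                 memory_order_acq_rel, memory_order_acquire);
       (atomic_store_explicit(&demo_guard, 0, memory_order_release), once = false)) {
    printf("at most one thread runs this at a time\n");
  }
}
```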
@@ -117,6 +117,7 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinn
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);
 bool _mi_arena_is_os_allocated(size_t arena_memid);
+void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
 
 // "segment-cache.c"
 void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
src/arena.c (208 changed lines)
@@ -50,8 +50,10 @@ typedef struct mi_arena_s {
   bool     allow_decommit;              // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   bool     is_large;                    // large- or huge OS pages (always committed)
   _Atomic(size_t) search_idx;           // optimization to start the search for free blocks
+  _Atomic(mi_msecs_t) purge_expire;     // expiration time when blocks should be decommitted from `blocks_decommit`.
   mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
+  mi_bitmap_field_t* blocks_purge;      // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
   mi_bitmap_field_t  blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
 
@@ -153,12 +155,22 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
   mi_bitmap_index_t bitmap_index;
   if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
 
-  // claimed it! set the dirty bits (todo: no need for an atomic op here?)
+  // claimed it!
   void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
   *memid = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index);
-  *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large = arena->is_large;
   *is_pinned = (arena->is_large || !arena->allow_decommit);
 
+  // none of the claimed blocks should be scheduled for a decommit
+  if (arena->blocks_purge != NULL) {
+    // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `in_use`).
+    _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
+  }
+
+  // set the dirty bits (todo: no need for an atomic op here?)
+  *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+
+  // set commit state
   if (arena->blocks_committed == NULL) {
     // always committed
     *commit = true;
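The pointer computation above turns a bitmap index into a block address. A standalone sketch of that arithmetic, with an assumed 64-bit field width and an illustrative block size (the real values come from `MI_BITMAP_FIELD_BITS` and `MI_ARENA_BLOCK_SIZE`):

```c
#include <stddef.h>
#include <stdint.h>

#define DEMO_FIELD_BITS 64                    // assumed MI_BITMAP_FIELD_BITS on a 64-bit build
#define DEMO_BLOCK_SIZE (8u * 1024u * 1024u)  // illustrative stand-in for MI_ARENA_BLOCK_SIZE

// a bitmap index packs (field, bit-in-field) as one flat bit number,
// mirroring mi_bitmap_index_create / mi_bitmap_index_bit in src/bitmap.h
static inline size_t demo_bitmap_index_create(size_t field, size_t bit_in_field) {
  return (field * DEMO_FIELD_BITS) + bit_in_field;
}

// block start = arena start + flat bit number * block size, as in
// `arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE)`
static inline void* demo_block_start(uint8_t* arena_start, size_t bitmap_idx) {
  return arena_start + ((size_t)DEMO_BLOCK_SIZE * bitmap_idx);
}
```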
@@ -276,14 +288,15 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
   if (p != NULL) return p;
 
   // otherwise, try to first eagerly reserve a new arena
-  size_t eager_reserve = mi_option_get_size(mi_option_eager_reserve);
-  eager_reserve = _mi_align_up(eager_reserve, MI_ARENA_BLOCK_SIZE);
-  if (eager_reserve > 0 && eager_reserve >= size &&                  // eager reserve enabled and large enough?
+  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
+  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
+  if (arena_reserve > 0 && arena_reserve >= size &&                  // eager reserve enabled and large enough?
       req_arena_id == _mi_arena_id_none() &&                         // not exclusive?
       mi_atomic_load_relaxed(&mi_arena_count) < 3*(MI_MAX_ARENAS/4) )  // not too many arenas already?
   {
     mi_arena_id_t arena_id = 0;
-    if (mi_reserve_os_memory_ex(eager_reserve, false /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
+    const bool arena_commit = _mi_os_has_overcommit();
+    if (mi_reserve_os_memory_ex(arena_reserve, arena_commit /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
       p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       if (p != NULL) return p;
     }
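Two details worth noting: the reserve size is rounded up to a whole number of arena blocks, and the memory is now committed eagerly only when the OS overcommits (where committing is cheap). A minimal sketch of the power-of-two align-up this relies on (`demo_align_up` is illustrative; `_mi_align_up` is the in-tree helper):

```c
#include <assert.h>
#include <stddef.h>

// round `sz` up to a multiple of `alignment`; assumes a power-of-two
// alignment, which holds for MI_ARENA_BLOCK_SIZE
static inline size_t demo_align_up(size_t sz, size_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  const size_t mask = alignment - 1;
  return (sz + mask) & ~mask;
}
```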
@@ -317,6 +330,162 @@ void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
   return arena->start;
 }
 
+
+/* -----------------------------------------------------------
+  Arena purge
+----------------------------------------------------------- */
+
+// either resets or decommits memory, returns true if the memory was decommitted.
+static bool mi_os_purge(void* p, size_t size, mi_stats_t* stats) {
+  if (mi_option_is_enabled(mi_option_reset_decommits) &&  // should decommit?
+      !_mi_preloading())                                  // don't decommit during preloading (unsafe)
+  {
+    _mi_os_decommit(p, size, stats);
+    return true;  // decommitted
+  }
+  else {
+    _mi_os_reset(p, size, stats);
+    return false;  // not decommitted
+  }
+}
+
+// reset or decommit in an arena and update the committed/decommit bitmaps
+static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+  mi_assert_internal(arena->blocks_committed != NULL);
+  mi_assert_internal(arena->blocks_purge != NULL);
+  mi_assert_internal(arena->allow_decommit);
+  const size_t size = blocks * MI_ARENA_BLOCK_SIZE;
+  void* const p = arena->start + (mi_bitmap_index_bit(bitmap_idx) * MI_ARENA_BLOCK_SIZE);
+  const bool decommitted = mi_os_purge(p, size, stats);
+  // update committed bitmap
+  if (decommitted) {
+    _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+    _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
+  }
+}
+
+// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
+static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+  mi_assert_internal(arena->blocks_purge != NULL);
+  const long delay = mi_option_get(mi_option_arena_purge_delay);
+  if (_mi_preloading() || delay == 0) {
+    // decommit directly
+    mi_arena_purge(arena, bitmap_idx, blocks, stats);
+  }
+  else {
+    // schedule decommit
+    mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
+    if (expire != 0) {
+      mi_atomic_add_acq_rel(&arena->purge_expire, delay/10);  // add smallish extra delay
+    }
+    else {
+      mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
+    }
+    _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
+  }
+}
+
+// return true if the full range was purged.
+static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
+  const size_t endidx = startidx + bitlen;
+  size_t bitidx = startidx;
+  bool all_purged = false;
+  while (bitidx < endidx) {
+    size_t count = 0;
+    while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
+      count++;
+    }
+    if (count > 0) {
+      // found range to be purged
+      const mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(idx, bitidx);
+      mi_arena_purge(arena, bitmap_idx, count, stats);
+      if (count == bitlen) {
+        all_purged = true;
+      }
+    }
+    bitidx += (count+1);  // +1 to skip the zero bit (or end)
+  }
+  return all_purged;
+}
+
+// returns true if anything was purged
+static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
+{
+  if (!arena->allow_decommit || arena->blocks_purge == NULL) return false;
+  mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
+  if (expire == 0) return false;
+  if (!force && expire > now) return false;
+
+  // reset expire (if not already set concurrently)
+  mi_atomic_cas_strong_acq_rel(&arena->purge_expire, &expire, 0);
+
+  // potential purges scheduled, walk through the bitmap
+  bool any_purged = false;
+  bool full_purge = true;
+  for (size_t i = 0; i < arena->field_count; i++) {
+    size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
+    if (purge != 0) {
+      size_t bitidx = 0;
+      while (bitidx < MI_BITMAP_FIELD_BITS) {
+        // find the length of a range of 1 bits
+        size_t bitlen = 0;
+        while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
+          bitlen++;
+        }
+        // try to claim the longest range of corresponding in_use bits
+        const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
+        while( bitlen > 0 ) {
+          if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
+            break;
+          }
+          bitlen--;
+        }
+        // claimed `bitlen` bits at in_use
+        if (bitlen > 0) {
+          // read purge again now that we have the in_use bits
+          purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
+          if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
+            full_purge = false;
+          }
+          any_purged = true;
+          // release claimed in_use bits again
+          _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
+        }
+        bitidx += (bitlen+1);  // +1 to skip the zero bit (or end)
+      } // while bitidx
+    } // purge != 0
+  }
+  // if not fully purged, make sure to purge again in the future
+  if (!full_purge) {
+    const long delay = mi_option_get(mi_option_arena_purge_delay);
+    mi_msecs_t expected = 0;
+    mi_atomic_cas_strong_acq_rel(&arena->purge_expire, &expected, _mi_clock_now() + delay);
+  }
+  return any_purged;
+}
+
+static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
+  const long delay = mi_option_get(mi_option_arena_purge_delay);
+  if (_mi_preloading() || delay == 0 /* || !mi_option_is_enabled(mi_option_allow_decommit) */) return;  // nothing will be scheduled
+  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+  if (max_arena == 0) return;
+
+  // allow only one thread to purge at a time
+  static mi_atomic_guard_t purge_guard;
+  mi_atomic_guard(&purge_guard)
+  {
+    mi_msecs_t now = _mi_clock_now();
+    size_t max_purge_count = (visit_all ? max_arena : 1);
+    for (size_t i = 0; i < max_arena; i++) {
+      mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+      if (mi_arena_try_purge(arena, now, force, stats)) {
+        if (max_purge_count <= 1) break;
+        max_purge_count--;
+      }
+    }
+  }
+}
+
+
 /* -----------------------------------------------------------
   Arena free
 ----------------------------------------------------------- */
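`mi_os_purge` above chooses between two ways of returning memory: decommit (the pages are gone and must be re-committed before use, so the committed bitmap is cleared) and reset (the contents become disposable but the range stays accessible). An illustrative POSIX-only analogy, assuming Linux `madvise`/`mprotect` semantics; mimalloc's real `_mi_os_reset`/`_mi_os_decommit` are platform-specific:

```c
#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

// sketch of the reset/decommit distinction on POSIX; error handling omitted
static bool demo_purge(void* p, size_t size, bool decommit) {
  if (decommit) {
    // decommit: drop the pages and revoke access; any later use must
    // re-commit first (hence mi_arena_purge clears blocks_committed)
    madvise(p, size, MADV_DONTNEED);
    mprotect(p, size, PROT_NONE);
    return true;   // decommitted
  }
  else {
    // reset: tell the OS the contents may be discarded, but keep the
    // range mapped and accessible; no commit bookkeeping changes
    madvise(p, size, MADV_DONTNEED);
    return false;  // still committed
  }
}
```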
@@ -340,6 +509,7 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
   mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
   mi_assert_internal(arena != NULL);
   const size_t blocks = mi_block_count_of_size(size);
+
   // checks
   if (arena == NULL) {
     _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
@@ -350,15 +520,17 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
     _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
     return;
   }
 
   // potentially decommit
   if (!arena->allow_decommit || arena->blocks_committed == NULL) {
     mi_assert_internal(all_committed); // note: may be not true as we may "pretend" to be not committed (in segment.c)
   }
   else {
     mi_assert_internal(arena->blocks_committed != NULL);
-    _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, stats); // ok if this fails
-    _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+    mi_assert_internal(arena->blocks_purge != NULL);
+    mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
   }
 
   // and make it available to others again
   bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
   if (!all_inuse) {
@@ -366,6 +538,15 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
       return;
     };
   }
+
+  // purge expired decommits
+  mi_arenas_try_purge(false, false, stats);
+}
+
+
+void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats) {
+  MI_UNUSED(free_arenas); // todo
+  mi_arenas_try_purge(force_decommit, true, stats);
 }
 
 /* -----------------------------------------------------------
@@ -399,13 +580,17 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
     is_committed = true;
   }
 
+  const bool allow_decommit = !is_large; // && !is_committed; // only allow decommit for initially uncommitted memory
+
   const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
   const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
-  const size_t bitmaps = (is_committed ? 2 : 3);
+  const size_t bitmaps = (allow_decommit ? 4 : 2);
   const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
   mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
   if (arena == NULL) return false;
 
+  // already zero'd due to os_alloc
+  // _mi_memzero(arena, asize);
   arena->id = _mi_arena_id_none();
   arena->exclusive = exclusive;
   arena->block_count = bcount;

@@ -414,11 +599,12 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
   arena->numa_node    = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
   arena->is_large     = is_large;
   arena->is_zero_init = is_zero;
-  arena->allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory
+  arena->allow_decommit = allow_decommit;
+  arena->purge_expire = 0;
   arena->search_idx   = 0;
   arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
   arena->blocks_committed = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
-  // the bitmaps are already zero initialized due to os_alloc
+  arena->blocks_purge = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
   // initialize committed bitmap?
   if (arena->blocks_committed != NULL && is_committed) {
     memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
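With this change an arena carries up to four inline bitmaps (`blocks_inuse`, `blocks_dirty`, `blocks_committed`, and the new `blocks_purge`) laid out back to back after the struct, which is what the `2*fields`/`3*fields` offsets above index into. A hypothetical mirror of the size computation, assuming `mi_bitmap_field_t` is word-sized:

```c
#include <stdbool.h>
#include <stddef.h>

// bitmaps = 4 when decommit is allowed (inuse, dirty, committed, purge),
// else 2 (inuse, dirty); all stored inline after the arena struct
static size_t demo_arena_alloc_size(size_t arena_struct_size, size_t fields, bool allow_decommit) {
  const size_t bitmaps = (allow_decommit ? 4 : 2);
  return arena_struct_size + (bitmaps * fields * sizeof(size_t));
}
```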
src/bitmap.c (14 changed lines)
@@ -172,6 +172,20 @@ static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size
   return ((field & mask) == mask);
 }
 
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Returns `true` if successful when all previous `count` bits were 0.
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+  const size_t idx = mi_bitmap_index_field(bitmap_idx);
+  const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+  const size_t mask = mi_bitmap_mask_(count, bitidx);
+  mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+  size_t expected = 0;
+  if (mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, mask)) return true;
+  if ((expected & mask) != 0) return false;
+  return mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask);
+}
+
+
 bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
   return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
 }
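For intuition: the claim succeeds only if every bit in the mask is currently 0. The in-tree version above gives up after one retry, so it can fail spuriously when unrelated bits in the same field change concurrently; the caller simply treats that as a failed claim. A standalone sketch of the same idea on a single field that instead retries until it actually sees a claimed bit:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

// try to atomically flip `count` bits at `bitidx` in one word-sized field
// from 0 to 1; returns false as soon as any of those bits is already set
static bool demo_try_claim(_Atomic(size_t)* field, size_t count, size_t bitidx) {
  const size_t ones = (count >= sizeof(size_t)*8) ? ~(size_t)0 : (((size_t)1 << count) - 1);
  const size_t mask = ones << bitidx;
  size_t expected = atomic_load_explicit(field, memory_order_relaxed);
  do {
    if ((expected & mask) != 0) return false;   // some bit already claimed
  } while (!atomic_compare_exchange_weak_explicit(field, &expected, expected | mask,
             memory_order_acq_rel, memory_order_acquire));
  return true;                                  // all bits were 0; now ours
}
```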
@@ -80,6 +80,10 @@ bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap
 // Returns `true` if all `count` bits were 1 previously.
 bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
 
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Returns `true` if successful when all previous `count` bits were 0.
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
 // Set `count` bits at `bitmap_idx` to 1 atomically
 // Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
 bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero);
@@ -170,6 +170,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // collect regions on program-exit (or shared library unload)
   if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
     //_mi_mem_collect(&heap->tld->os);
+    _mi_arena_collect(false,true /* force purge */,&heap->tld->stats);
   }
 }
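With this hook in place, a forced collection from user code now also purges the arenas. A hypothetical usage snippet (`mi_collect` is the existing public API):

```c
#include <mimalloc.h>

// after freeing large amounts of memory, ask mimalloc to return it to the
// OS; with this commit, the forced path also force-purges arena blocks
void demo_release_memory(void) {
  mi_collect(true /* force */);
}
```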
@@ -620,6 +620,7 @@ static void mi_cdecl mi_process_done(void) {
   if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
     _mi_heap_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
     _mi_segment_cache_free_all(&_mi_heap_main_get()->tld->os);  // release all cached segments
+    _mi_arena_collect(true /* free arenas */,true,&_mi_heap_main_get()->tld->stats);
   }
 
   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
@@ -61,7 +61,7 @@ static mi_option_desc_t options[_mi_option_last] =
   // Some of the following options are experimental and not all combinations are valid. Use with care.
   { 1, UNINIT, MI_OPTION(eager_commit) },  // commit per segment directly (8MiB) (but see also `eager_commit_delay`)
   { 0, UNINIT, MI_OPTION(deprecated_eager_region_commit) },
-  { 0, UNINIT, MI_OPTION(deprecated_reset_decommits) },
+  { 0, UNINIT, MI_OPTION(reset_decommits) },
   { 0, UNINIT, MI_OPTION(large_os_pages) },  // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },  // per 1GiB huge pages
   { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) },  // reserve huge pages at node N
@@ -89,10 +89,11 @@ static mi_option_desc_t options[_mi_option_last] =
   { 1, UNINIT, MI_OPTION(decommit_extend_delay) },
   { 0, UNINIT, MI_OPTION(destroy_on_exit) },  // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
 #if (MI_INTPTR_SIZE>4)
-  { 1024L*1024L, UNINIT, MI_OPTION(eager_reserve) }   // reserve memory N KiB at a time
+  { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) },  // reserve memory N KiB at a time
 #else
-  { 128L*1024L, UNINIT, MI_OPTION(eager_reserve) }
+  { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) },
 #endif
+  { 500, UNINIT, MI_OPTION(arena_purge_delay) }       // reset/decommit delay in milli-seconds for arena allocation
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
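The renamed and new options can be tuned at run time or via the environment (`MIMALLOC_ARENA_RESERVE`, in KiB, and `MIMALLOC_ARENA_PURGE_DELAY`, in milliseconds). A hypothetical snippet using the public options API:

```c
#include <mimalloc.h>

int main(void) {
  mi_option_set(mi_option_arena_reserve, 256 * 1024);  // reserve 256 MiB arenas (value is in KiB)
  mi_option_set(mi_option_arena_purge_delay, 250);     // delay purges by 250 ms (0 = purge immediately)
  void* p = mi_malloc(1 << 20);
  mi_free(p);
  return 0;
}
```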
@@ -131,7 +132,7 @@ mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long ma
 }
 
 mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) {
-  mi_assert_internal(option == mi_option_reserve_os_memory || option == mi_option_eager_reserve);
+  mi_assert_internal(option == mi_option_reserve_os_memory || option == mi_option_arena_reserve);
   long x = mi_option_get(option);
   return (x < 0 ? 0 : (size_t)x * MI_KiB);
 }
@@ -538,7 +539,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
   else {
     char* end = buf;
     long value = strtol(buf, &end, 10);
-    if (desc->option == mi_option_reserve_os_memory || desc->option == mi_option_eager_reserve) {
+    if (desc->option == mi_option_reserve_os_memory || desc->option == mi_option_arena_reserve) {
       // this option is interpreted in KiB to prevent overflow of `long`
       if (*end == 'K') { end++; }
       else if (*end == 'M') { value *= MI_KiB; end++; }
@@ -289,7 +289,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
     bool commit_zero = false;
     if (!_mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld)) {
       // failed to commit! unclaim and return
-      mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
+      _mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
       return NULL;
     }
     if (commit_zero) *is_zero = true;
@@ -306,7 +306,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
     // some blocks are still reset
     mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
     mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
-    mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
+    _mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
     if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
       bool reset_zero = false;
       _mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld);
@@ -426,7 +426,7 @@ void _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, s
   }
 
   // and unclaim
-  bool all_unclaimed = mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
+  bool all_unclaimed = _mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
   mi_assert_internal(all_unclaimed); MI_UNUSED(all_unclaimed);
   }
 }