Mirror of https://github.com/microsoft/mimalloc.git
wip: purgeable arenas; fix asan warnings
parent fcec09832a
commit 94a867869e
4 changed files with 27 additions and 9 deletions
src/arena.c (18 changes)
@@ -133,7 +133,7 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
 {
   size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
   if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
-    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
+    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
     return true;
   };
   return false;
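For context, `search_idx` is only a hint: it records the bitmap field where the last claim succeeded so the next allocation starts scanning there instead of at field 0, and relaxed atomics suffice because the hint influences where the scan starts, never whether a claim is valid. Below is a minimal standalone sketch of the same idea; the `bitmap_t` layout and `bitmap_try_claim_one` are illustrative names, not mimalloc's bitmap API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define FIELD_COUNT 8

    typedef struct {
      _Atomic(uint64_t) fields[FIELD_COUNT];  // one bit per block: 1 = in use
      _Atomic(size_t)   search_idx;           // field where the last claim succeeded
    } bitmap_t;

    // Claim one free bit, scanning from the remembered field and wrapping around.
    static bool bitmap_try_claim_one(bitmap_t* bm, size_t* field, size_t* bit_idx) {
      const size_t start = atomic_load_explicit(&bm->search_idx, memory_order_relaxed);
      for (size_t visited = 0; visited < FIELD_COUNT; visited++) {
        const size_t i = (start + visited) % FIELD_COUNT;
        uint64_t map = atomic_load_explicit(&bm->fields[i], memory_order_relaxed);
        while (map != UINT64_MAX) {               // some bit in this field is still free
          const uint64_t bit = ~map & (map + 1);  // lowest zero bit of map
          if (atomic_compare_exchange_weak(&bm->fields[i], &map, map | bit)) {
            // remember where we found space; relaxed is fine, it is only a hint
            atomic_store_explicit(&bm->search_idx, i, memory_order_relaxed);
            *field = i;
            *bit_idx = (size_t)__builtin_ctzll(bit);  // GCC/Clang builtin
            return true;
          }
          // CAS failure reloaded `map`; retry within the same field
        }
      }
      return false;  // all fields full
    }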
@@ -189,6 +189,8 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
     // no need to commit, but check if already fully committed
     *commit = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
   }
+
+  mi_track_mem_undefined(p,needed_bcount*MI_ARENA_BLOCK_SIZE);
   return p;
 }
 
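The added `mi_track_mem_undefined` call is the ASan-facing part of this change: memory handed out from an arena should be reported to the memory tracker as allocated but uninitialized. As a rough sketch (assuming AddressSanitizer's public interface; the `my_track_*` macros are illustrative, not mimalloc's track.h), such hooks can be wired up like this. ASan only tracks addressability, so an "undefined" range is simply unpoisoned; under Valgrind the same hook would map to VALGRIND_MAKE_MEM_UNDEFINED instead.

    #include <stddef.h>

    #ifndef __has_feature
    #define __has_feature(x) 0
    #endif

    #if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)
    #include <sanitizer/asan_interface.h>
    // mark a range as accessible (contents unspecified)
    #define my_track_mem_undefined(p,size)  ASAN_UNPOISON_MEMORY_REGION(p,size)
    // mark a range as inaccessible (e.g. returned to an arena)
    #define my_track_mem_noaccess(p,size)   ASAN_POISON_MEMORY_REGION(p,size)
    #else
    // no tracker active: the hooks compile away
    #define my_track_mem_undefined(p,size)  ((void)(p), (void)(size))
    #define my_track_mem_noaccess(p,size)   ((void)(p), (void)(size))
    #endif

Code that recycles memory then calls my_track_mem_undefined(p, size) just before returning p, which is essentially what the line added in this hunk does.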
@@ -300,7 +302,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
           p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
           if (p != NULL) return p;
         }
       }
     }
   }
 
   // finally, fall back to the OS
@@ -356,10 +358,11 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
   const size_t size = blocks * MI_ARENA_BLOCK_SIZE;
   void* const p = arena->start + (mi_bitmap_index_bit(bitmap_idx) * MI_ARENA_BLOCK_SIZE);
   const bool decommitted = mi_os_purge(p, size, stats);
+  // clear the purged blocks
+  _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
   // update committed bitmap
   if (decommitted) {
     _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
-    _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
   }
 }
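mi_os_purge returns the physical pages behind the block range to the OS while keeping the address range reserved; whether that also decommits the range (so the committed bitmap must be cleared, as the `if (decommitted)` branch does) depends on the mechanism used. A standalone sketch of the two usual POSIX options, under the assumption that "decommit" means dropping access with mprotect; this is not mimalloc's mi_os_purge itself:

    #define _DEFAULT_SOURCE
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    // Release the physical pages of [p, p+size) but keep the virtual range reserved.
    // Returns true when the range ends up decommitted (must be re-committed before
    // reuse), false when it stays committed and merely reads back as zero.
    static bool os_purge(void* p, size_t size, bool allow_decommit) {
      // MADV_DONTNEED drops the pages immediately; the range remains accessible
      madvise(p, size, MADV_DONTNEED);
      if (allow_decommit) {
        // additionally remove access so stale pointers fault; reuse now requires
        // an explicit re-commit (mprotect back to PROT_READ | PROT_WRITE)
        if (mprotect(p, size, PROT_NONE) == 0) return true;
      }
      return false;
    }

That boolean is exactly what drives the bitmap updates in the hunk: blocks_purge is cleared unconditionally once the purge ran, while blocks_committed is cleared only when the purge really decommitted.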
@@ -520,14 +523,19 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
     _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
     return;
   }
 
   // potentially decommit
   if (!arena->allow_decommit || arena->blocks_committed == NULL) {
-    mi_assert_internal(all_committed); // note: may be not true as we may "pretend" to be not committed (in segment.c)
+    mi_assert_internal(all_committed);
   }
   else {
     mi_assert_internal(arena->blocks_committed != NULL);
+    mi_assert_internal(arena->blocks_purge != NULL);
     if (!all_committed) {
       // assume the entire range as no longer committed
       _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
     }
+    // (delay) purge the entire range
+    mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
   }
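mi_arena_schedule_purge is the "purgeable arenas" piece: instead of decommitting on every free, freed blocks are only marked in blocks_purge and reclaimed a little later, so memory that is reused right away never leaves the process. The scheduling code itself is not part of this hunk, so the sketch below is an assumption about its shape; the purge_expire deadline and purge_delay_ms are illustrative names.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef int64_t msecs_t;

    typedef struct {
      _Atomic(msecs_t) purge_expire;  // 0 = nothing pending, otherwise the deadline
      // the per-block purge bitmap would live here in a real arena
    } purge_state_t;

    static const msecs_t purge_delay_ms = 10;

    // on free: mark the range and arm a deadline if none is pending yet
    static void schedule_purge(purge_state_t* st, msecs_t now) {
      msecs_t expected = 0;
      atomic_compare_exchange_strong(&st->purge_expire, &expected, now + purge_delay_ms);
      // ...also set the bits for the freed range in the purge bitmap...
    }

    // called opportunistically (allocation, collection): purge once the deadline passed
    static bool maybe_purge(purge_state_t* st, msecs_t now, bool force) {
      msecs_t expire = atomic_load_explicit(&st->purge_expire, memory_order_relaxed);
      if (expire == 0) return false;              // nothing scheduled
      if (!force && now < expire) return false;   // not due yet
      if (!atomic_compare_exchange_strong(&st->purge_expire, &expire, 0)) return false;
      // ...walk the purge bitmap and decommit/reset the marked ranges here...
      return true;
    }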
src/os.c (3 changes)
@@ -411,6 +411,9 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   if (err != 0) {
     _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }
+  else {
+    mi_track_mem_undefined(start, csize);
+  }
   return (err == 0);
 }
 
src/prim/unix/prim.c

@@ -380,7 +380,8 @@ int _mi_prim_commit(void* start, size_t size, bool commit) {
 }
 
 int _mi_prim_reset(void* start, size_t size) {
-  #if defined(MADV_FREE)
+  // note: disable the use of MADV_FREE since it leads to confusing stats :-(
+  #if 0 // defined(MADV_FREE)
   static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
   int oadvice = (int)mi_atomic_load_relaxed(&advice);
   int err;
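Disabling MADV_FREE here is a statistics trade-off: MADV_FREE only marks pages as reclaimable, so the resident-set size stays high until the kernel is under memory pressure, which makes reset/purge accounting look wrong, whereas MADV_DONTNEED drops the pages immediately. For reference, a standalone sketch of the fallback pattern the now-disabled block implements (start with MADV_FREE, downgrade once to MADV_DONTNEED on kernels that reject it); this is a sketch, not the exact prim.c code:

    #define _DEFAULT_SOURCE
    #include <errno.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <sys/mman.h>

    // Reset a range: tell the kernel the contents are no longer needed.
    static int os_reset(void* start, size_t size) {
    #if defined(MADV_FREE)
      static _Atomic(int) advice = MADV_FREE;  // switches to MADV_DONTNEED if unsupported
      const int oadvice = atomic_load_explicit(&advice, memory_order_relaxed);
      int err;
      while ((err = madvise(start, size, oadvice)) != 0 && errno == EAGAIN) {
        errno = 0;  // transient: retry
      }
      if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
        // kernel predates MADV_FREE: remember that and retry with MADV_DONTNEED
        atomic_store_explicit(&advice, MADV_DONTNEED, memory_order_relaxed);
        err = madvise(start, size, MADV_DONTNEED);
      }
    #else
      int err = madvise(start, size, MADV_DONTNEED);
    #endif
      return (err == 0 ? 0 : errno);
    }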
src/segment.c

@@ -400,12 +400,18 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
       || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
   #endif
   {
     if (!segment->mem_is_pinned) {
       // if not all committed, an arena may decommit the whole area, but that double counts
       // the already decommitted parts; adjust for that in the stats.
       if (!mi_commit_mask_is_full(&segment->commit_mask)) {
         const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
         if (csize > 0) { _mi_stat_decrease(&_mi_stats_main.committed, csize); }
+        mi_assert_internal(size > csize);
+        if (size > csize) { _mi_stat_increase(&_mi_stats_main.committed, size - csize); }
       }
     }
     _mi_abandoned_await_readers(); // wait until safe to free
-    _mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment, segment->mem_align_offset, segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->stats);
+    _mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment, segment->mem_align_offset, segment->memid,
+                   mi_commit_mask_is_full(&segment->commit_mask) /* all committed? */, tld->stats);
   }
 }
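The stats adjustment above depends on knowing how many bytes of the segment are really committed according to its commit mask. A small sketch of that computation over a plain fixed-size bitmask; the field layout and names are assumptions for illustration, not the actual mi_commit_mask_t:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define COMMIT_MASK_FIELDS 8
    #define COMMIT_MASK_BITS   (COMMIT_MASK_FIELDS * 64)

    typedef struct {
      uint64_t mask[COMMIT_MASK_FIELDS];  // one bit per commit chunk of the segment
    } commit_mask_t;

    static bool commit_mask_is_full(const commit_mask_t* cm) {
      for (size_t i = 0; i < COMMIT_MASK_FIELDS; i++) {
        if (cm->mask[i] != UINT64_MAX) return false;
      }
      return true;
    }

    // bytes of a `total`-byte segment that are committed according to the mask
    static size_t commit_mask_committed_size(const commit_mask_t* cm, size_t total) {
      const size_t chunk = total / COMMIT_MASK_BITS;  // bytes covered by one bit
      size_t bits = 0;
      for (size_t i = 0; i < COMMIT_MASK_FIELDS; i++) {
        bits += (size_t)__builtin_popcountll(cm->mask[i]);  // GCC/Clang builtin
      }
      return bits * chunk;
    }

With csize computed this way, decreasing the committed stat by csize and re-increasing it by size - csize before handing the whole range to _mi_arena_free is the adjustment that the hunk's comment about double counting refers to.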