mirror of https://github.com/microsoft/mimalloc.git
synced 2025-08-24 00:04:48 +03:00
Compare commits: 3 commits, 8b2f774656 ... f5ce881fdf
Commits: f5ce881fdf, 088e32e4ef, b95268d46c
4 changed files with 74 additions and 18 deletions
src/arena.c (35 changes)
@@ -200,29 +200,26 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
   }

   // set commit state
   if (commit) {
-    memid->initially_committed = true;
-
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
     if (!mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)) {
       // not fully committed: commit the full range and set the commit bits
-      // we set the bits first since we own these slices (they are no longer free)
-      size_t already_committed_count = 0;
-      mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count);
-      // adjust the stats so we don't double count the commits
-      //if (already_committed_count > 0) {
-      //  mi_subproc_stat_adjust_decrease(arena->subproc, committed, mi_size_of_slices(already_committed_count), true /* on alloc */);
-      //}
-      // now actually commit
-      bool commit_zero = false;
-      if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed_count))) {
-        // if the commit fails, roll back and return NULL
-        _mi_arenas_free(p, mi_size_of_slices(slice_count), *memid); // this will decommit as well (if partially committed)
-        return NULL;
-      }
-      if (commit_zero) {
-        memid->initially_zero = true;
-      }
+      const size_t already_committed = mi_bitmap_popcountN(arena->slices_committed, slice_index, slice_count);
+      if (already_committed < slice_count) {
+        // not all committed, try to commit now
+        bool commit_zero = false;
+        if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed))) {
+          // if the commit fails, release ownership, and return NULL;
+          // note: this does not roll back dirty bits but that is ok.
+          mi_bbitmap_setN(arena->slices_free, slice_index, slice_count);
+          return NULL;
+        }
+        if (commit_zero) { memid->initially_zero = true; }
+      }
+      // set the commit bits
+      mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, NULL);
+
+      // committed
       #if MI_DEBUG > 1
       if (memid->initially_zero) {
         if (!mi_mem_is_zero(p, mi_size_of_slices(slice_count))) {
@@ -240,6 +237,10 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
         mi_subproc_stat_increase( arena->subproc, committed, mi_size_of_slices(touched_slices));
       }
     }
+
+    mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
+    memid->initially_committed = true;
+
     // tool support
     if (memid->initially_zero) {
       mi_track_mem_defined(p, slice_count * MI_ARENA_SLICE_SIZE);
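The arena change above replaces the old "set the commit bits first, then commit" sequence with a count-first flow: mi_bitmap_popcountN reports how many slices in the range are already committed, the OS commit is charged to the stats only for the uncommitted remainder, and the commit bits are set after the commit succeeds (on failure the slices go back to slices_free instead of a full _mi_arenas_free rollback). A minimal standalone sketch of this accounting pattern, assuming hypothetical popcount_range/os_commit/commit_slices helpers and GCC/Clang's __builtin_popcountll (not mimalloc code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLICE_SIZE  (64*1024)   // hypothetical slice size (bytes)

static uint64_t commit_bits;    // bit i set = slice i is committed (64 slices here)
static size_t stat_committed;   // bytes accounted as committed

// count set bits over [idx, idx+n): the analogue of mi_bitmap_popcountN
static size_t popcount_range(uint64_t bits, size_t idx, size_t n) {
  const uint64_t ones = (n >= 64 ? ~UINT64_C(0) : (UINT64_C(1) << n) - 1);
  return (size_t)__builtin_popcountll(bits & (ones << idx));
}

// stand-in for _mi_os_commit_ex: commit the whole range, but charge only
// `stat_size` bytes to the stats (the part that was not yet committed)
static bool os_commit(size_t stat_size) {
  stat_committed += stat_size;
  return true;  // pretend the OS call succeeded
}

// commit slices [idx, idx+n) on demand, mirroring the new arena.c flow (n < 64)
static bool commit_slices(size_t idx, size_t n) {
  const size_t already = popcount_range(commit_bits, idx, n);
  if (already < n) {
    if (!os_commit((n - already) * SLICE_SIZE)) {
      return false;  // arena.c releases the slices back to slices_free here
    }
  }
  commit_bits |= ((UINT64_C(1) << n) - 1) << idx;  // set bits only after success
  return true;
}

int main(void) {
  commit_slices(0, 8);  // charges 8 * SLICE_SIZE
  commit_slices(4, 8);  // overlaps 4 committed slices: charges only 4 * SLICE_SIZE
  printf("committed: %zu bytes (expect %d)\n", stat_committed, 12 * SLICE_SIZE);
  return 0;
}

This keeps the `committed` statistic exact without the commented-out stat adjustment the old code needed when the bits were set before the commit call.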
src/bitmap.c (51 changes)
@@ -256,6 +256,11 @@ static inline bool mi_bfield_atomic_is_xset_mask(mi_xset_t set, _Atomic(mi_bfiel
   else return mi_bfield_atomic_is_clear_mask(b, mask);
 }

+// Count bits in a mask
+static inline size_t mi_bfield_atomic_popcount_mask(_Atomic(mi_bfield_t)*b, mi_bfield_t mask) {
+  const mi_bfield_t x = mi_atomic_load_relaxed(b);
+  return mi_bfield_popcount(x & mask);
+}
+

 /* --------------------------------------------------------------------------------
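The helper added above is just a relaxed atomic load followed by a popcount of the masked word; a racy snapshot is acceptable because the caller (the arena commit path) only needs a best-effort count over slices it already owns. A self-contained C11 equivalent, using __builtin_popcountll as a stand-in for mi_bfield_popcount (hypothetical names, not the mimalloc API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t bfield_t;  // stand-in for mi_bfield_t (one machine word)

// count the set bits of *b that fall under `mask`
static inline size_t atomic_popcount_mask(_Atomic(bfield_t)* b, bfield_t mask) {
  const bfield_t x = atomic_load_explicit(b, memory_order_relaxed);
  return (size_t)__builtin_popcountll(x & mask);
}

int main(void) {
  _Atomic(bfield_t) field = 0xF0F0;  // bits 4..7 and 12..15 set
  printf("%zu\n", atomic_popcount_mask(&field, 0x00FF));  // low byte only: prints 4
  return 0;
}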
@@ -366,6 +371,38 @@ static inline bool mi_bchunk_clearN(mi_bchunk_t* chunk, size_t cidx, size_t n, b
   return mi_bchunk_xsetN_(MI_BIT_CLEAR, chunk, cidx, n, NULL, maybe_all_clear);
 }

+// Count set bits in a sequence of `n` bits within a chunk.
+// This can cross bfields.
+mi_decl_noinline static size_t mi_bchunk_popcountN_(mi_bchunk_t* chunk, size_t field_idx, size_t idx, size_t n) {
+  mi_assert_internal((field_idx*MI_BFIELD_BITS) + idx + n <= MI_BCHUNK_BITS);
+  size_t count = 0;
+  while (n > 0) {
+    size_t m = MI_BFIELD_BITS - idx;  // m is the number of bits to count in this field
+    if (m > n) { m = n; }
+    mi_assert_internal(idx + m <= MI_BFIELD_BITS);
+    mi_assert_internal(field_idx < MI_BCHUNK_FIELDS);
+    const size_t mask = mi_bfield_mask(m, idx);
+    count += mi_bfield_atomic_popcount_mask(&chunk->bfields[field_idx], mask);
+    // next field
+    field_idx++;
+    idx = 0;
+    n -= m;
+  }
+  return count;
+}
+
+// Count set bits in a sequence of `n` bits.
+static inline size_t mi_bchunk_popcountN(mi_bchunk_t* chunk, size_t cidx, size_t n) {
+  mi_assert_internal(cidx + n <= MI_BCHUNK_BITS);
+  mi_assert_internal(n>0);
+  if (n==0) return 0;
+  const size_t i = cidx / MI_BFIELD_BITS;
+  const size_t idx = cidx % MI_BFIELD_BITS;
+  if (n==1) { return (mi_bfield_atomic_is_set(&chunk->bfields[i], idx) ? 1 : 0); }
+  if (idx + n <= MI_BFIELD_BITS) { return mi_bfield_atomic_popcount_mask(&chunk->bfields[i], mi_bfield_mask(n, idx)); }
+  return mi_bchunk_popcountN_(chunk, i, idx, n);
+}
+
+
 // ------- mi_bchunk_is_xset ---------------------------------------
@@ -1100,6 +1137,20 @@ bool mi_bitmap_clearN(mi_bitmap_t* bitmap, size_t idx, size_t n) {
   return were_allset;
 }

+// Count bits set in a range of `n` bits.
+// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)!
+size_t mi_bitmap_popcountN( mi_bitmap_t* bitmap, size_t idx, size_t n) {
+  mi_assert_internal(n>0);
+  mi_assert_internal(n<=MI_BCHUNK_BITS);
+
+  const size_t chunk_idx = idx / MI_BCHUNK_BITS;
+  const size_t cidx = idx % MI_BCHUNK_BITS;
+  mi_assert_internal(cidx + n <= MI_BCHUNK_BITS);  // don't cross chunks (for now)
+  mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap));
+  if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; }  // paranoia
+  return mi_bchunk_popcountN(&bitmap->chunks[chunk_idx], cidx, n);
+}
+

 // Set/clear a bit in the bitmap; returns `true` if atomically transitioned from 0 to 1 (or 1 to 0)
 bool mi_bitmap_set(mi_bitmap_t* bitmap, size_t idx) {
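mi_bchunk_popcountN_ splits the requested bit range at word boundaries: each iteration clamps the span to the current bfield (m = min(MI_BFIELD_BITS - idx, n)), popcounts that field under a mask, then continues at bit 0 of the next field; the wrapper mi_bchunk_popcountN fast-paths single bits and ranges that fit in one field. A non-atomic sketch of the same splitting logic over a plain uint64_t array (hypothetical field_mask/popcount_range helpers, not the mimalloc API):

#include <stdint.h>
#include <stdio.h>

#define FIELD_BITS 64

// mask of `m` consecutive bits starting at `idx` (analogue of mi_bfield_mask)
static uint64_t field_mask(size_t m, size_t idx) {
  const uint64_t ones = (m >= FIELD_BITS ? ~UINT64_C(0) : (UINT64_C(1) << m) - 1);
  return ones << idx;
}

// count set bits of fields[] over the bit range [start, start+n)
static size_t popcount_range(const uint64_t* fields, size_t start, size_t n) {
  size_t field_idx = start / FIELD_BITS;
  size_t idx = start % FIELD_BITS;
  size_t count = 0;
  while (n > 0) {
    size_t m = FIELD_BITS - idx;  // bits left in this field
    if (m > n) { m = n; }         // clamp to the requested range
    count += (size_t)__builtin_popcountll(fields[field_idx] & field_mask(m, idx));
    field_idx++;                  // continue at bit 0 of the next field
    idx = 0;
    n -= m;
  }
  return count;
}

int main(void) {
  uint64_t fields[2] = { ~UINT64_C(0), 0x1 };      // 64 set bits, then bit 0 set
  printf("%zu\n", popcount_range(fields, 60, 8));  // bits 60..67: prints 5
  return 0;
}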
src/bitmap.h (3 changes)

@@ -206,6 +206,9 @@ bool _mi_bitmap_forall_set(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visit, mi_a
 bool _mi_bitmap_forall_setc_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visit, mi_arena_t* arena, void* arg);


+// Count all set bits in a given range in the bitmap. (cannot cross chunks)
+size_t mi_bitmap_popcountN( mi_bitmap_t* bitmap, size_t idx, size_t n);
+
 /* ----------------------------------------------------------------------------
   Binned concurrent bitmap
   Assigns a size class to each chunk such that small blocks don't cause too
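Note that the new primitive is declared with the same single-chunk restriction as the other rangewise bitmap operations (n <= MI_BCHUNK_BITS, no chunk crossing), enforced by the assertions in bitmap.c; the arena caller presumably satisfies this because a slice range allocated from the bitmap does not span chunks.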
src/os.c (3 changes)
@@ -431,7 +431,6 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*

 bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size) {
   if (is_zero != NULL) { *is_zero = false; }
-  mi_os_stat_increase(committed, stat_size);  // use size for precise commit vs. decommit
   mi_os_stat_counter_increase(commit_calls, 1);

   // page align range
@@ -455,6 +454,8 @@ bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size)
   if (os_is_zero) { mi_track_mem_defined(start,csize); }
   else { mi_track_mem_undefined(start,csize); }
   #endif
+
+  mi_os_stat_increase(committed, stat_size);  // use size for precise commit vs. decommit
   return true;
 }
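The os.c change moves the `committed` stat increase from the top of _mi_os_commit_ex to just before the successful return, so a failed OS commit no longer inflates the statistic and needs no compensating decrease. A minimal sketch of the resulting ordering, with a hypothetical os_commit_pages stand-in for the real OS primitive:

#include <stdbool.h>
#include <stddef.h>

static size_t stat_committed;     // running "committed" statistic (bytes)
static size_t stat_commit_calls;  // number of commit calls

static bool os_commit_pages(void* addr, size_t size) {
  (void)addr; (void)size;
  return true;  // stand-in for mmap/VirtualAlloc-based commit
}

static bool commit_ex(void* addr, size_t size, size_t stat_size) {
  stat_commit_calls++;               // counting the call is fine up front
  if (!os_commit_pages(addr, size)) {
    return false;                    // stats stay accurate on failure
  }
  stat_committed += stat_size;       // count only after the commit succeeded
  return true;
}

int main(void) {
  static char buf[4096];
  return commit_ex(buf, sizeof buf, sizeof buf) ? 0 : 1;
}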