rename bbin to chunkbin

Daan 2025-03-14 09:57:52 -07:00
parent 265af0c90e
commit a0a22d954b
4 changed files with 45 additions and 45 deletions

View file

@@ -66,14 +66,14 @@ typedef struct mi_stat_counter_s {

 // Size bins for chunks
-typedef enum mi_bbin_e {
-  MI_BBIN_SMALL,   // slice_count == 1
-  MI_BBIN_OTHER,   // slice_count: any count not in another bin, with 1 <= slice_count <= MI_BCHUNK_BITS
-  MI_BBIN_MEDIUM,  // slice_count == 8
-  MI_BBIN_LARGE,   // slice_count == MI_SIZE_BITS (only used if MI_ENABLE_LARGE_PAGES is 1)
-  MI_BBIN_NONE,    // no bin assigned yet (the chunk is completely free)
-  MI_BBIN_COUNT
-} mi_bbin_t;
+typedef enum mi_chunkbin_e {
+  MI_CBIN_SMALL,   // slice_count == 1
+  MI_CBIN_OTHER,   // slice_count: any count not in another bin, with 1 <= slice_count <= MI_BCHUNK_BITS
+  MI_CBIN_MEDIUM,  // slice_count == 8
+  MI_CBIN_LARGE,   // slice_count == MI_SIZE_BITS (only used if MI_ENABLE_LARGE_PAGES is 1)
+  MI_CBIN_NONE,    // no bin assigned yet (the chunk is completely free)
+  MI_CBIN_COUNT
+} mi_chunkbin_t;

 // Define the statistics structure
@@ -94,7 +94,7 @@ typedef struct mi_stats_s

   // size segregated statistics
   mi_stat_count_t malloc_bins[MI_BIN_HUGE+1];  // allocation per size bin
   mi_stat_count_t page_bins[MI_BIN_HUGE+1];    // pages allocated per size bin
-  mi_stat_count_t chunk_bins[MI_BBIN_COUNT];   // chunks per size bin
+  mi_stat_count_t chunk_bins[MI_CBIN_COUNT];   // chunks per size bin
 } mi_stats_t;
 #undef MI_STAT_COUNT

View file

@@ -1541,12 +1541,12 @@ static size_t mi_debug_show_chunks(const char* header1, const char* header2, con
       char chunk_kind = ' ';
       if (chunk_bins != NULL) {
         switch (mi_bbitmap_debug_get_bin(chunk_bins,i)) {
-          case MI_BBIN_SMALL:  chunk_kind = 'S'; break;
-          case MI_BBIN_MEDIUM: chunk_kind = 'M'; break;
-          case MI_BBIN_LARGE:  chunk_kind = 'L'; break;
-          case MI_BBIN_OTHER:  chunk_kind = 'X'; break;
+          case MI_CBIN_SMALL:  chunk_kind = 'S'; break;
+          case MI_CBIN_MEDIUM: chunk_kind = 'M'; break;
+          case MI_CBIN_LARGE:  chunk_kind = 'L'; break;
+          case MI_CBIN_OTHER:  chunk_kind = 'X'; break;
           default:             chunk_kind = ' '; break; // suppress warning
-          // case MI_BBIN_NONE: chunk_kind = 'N'; break;
+          // case MI_CBIN_NONE: chunk_kind = 'N'; break;
         }
       }
       buf[k++] = chunk_kind;

View file

@@ -1440,9 +1440,9 @@ void mi_bbitmap_unsafe_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) {
 -------------------------------------------------------------------------------- */

 // Assign a specific size bin to a chunk
-static void mi_bbitmap_set_chunk_bin(mi_bbitmap_t* bbitmap, size_t chunk_idx, mi_bbin_t bin) {
+static void mi_bbitmap_set_chunk_bin(mi_bbitmap_t* bbitmap, size_t chunk_idx, mi_chunkbin_t bin) {
   mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap));
-  for (mi_bbin_t ibin = MI_BBIN_SMALL; ibin < MI_BBIN_NONE; ibin = mi_bbin_inc(ibin)) {
+  for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) {
     if (ibin == bin) {
       const bool was_clear = mi_bchunk_set(& bbitmap->chunkmap_bins[ibin], chunk_idx, NULL);
       if (was_clear) { mi_os_stat_increase(chunk_bins[ibin],1); }
@@ -1454,13 +1454,13 @@ static void mi_bbitmap_set_chunk_bin(mi_bbitmap_t* bbitmap, size_t chunk_idx, mi
   }
 }

-mi_bbin_t mi_bbitmap_debug_get_bin(const mi_bchunkmap_t* chunkmap_bins, size_t chunk_idx) {
-  for (mi_bbin_t ibin = MI_BBIN_SMALL; ibin < MI_BBIN_NONE; ibin = mi_bbin_inc(ibin)) {
+mi_chunkbin_t mi_bbitmap_debug_get_bin(const mi_bchunkmap_t* chunkmap_bins, size_t chunk_idx) {
+  for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) {
     if (mi_bchunk_is_xsetN(MI_BIT_SET, &chunkmap_bins[ibin], chunk_idx, 1)) {
       return ibin;
     }
   }
-  return MI_BBIN_NONE;
+  return MI_CBIN_NONE;
 }

 // Track the index of the highest chunk that is accessed.
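The two functions above are meant to round-trip: mi_bbitmap_set_chunk_bin sets the bit in the target bin's chunkmap (the hunk elides the branch for the other bins, which is assumed here to clear them), so mi_bbitmap_debug_get_bin can return the first bin whose bit is set. A minimal sketch of that invariant as a hypothetical check, not part of this commit:

// Hypothetical check (illustration only): assigning a bin and then
// querying it should round-trip, since at most one per-bin bit is set.
static void check_chunk_bin_roundtrip(mi_bbitmap_t* bbitmap, size_t chunk_idx) {
  mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_CBIN_MEDIUM);
  mi_assert_internal(mi_bbitmap_debug_get_bin(bbitmap->chunkmap_bins, chunk_idx) == MI_CBIN_MEDIUM);
  // returning the chunk to MI_CBIN_NONE is assumed to clear all per-bin bits
  mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_CBIN_NONE);
  mi_assert_internal(mi_bbitmap_debug_get_bin(bbitmap->chunkmap_bins, chunk_idx) == MI_CBIN_NONE);
}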
@@ -1477,7 +1477,7 @@ static void mi_bbitmap_chunkmap_set(mi_bbitmap_t* bbitmap, size_t chunk_idx, boo
   if (check_all_set) {
     if (mi_bchunk_all_are_set_relaxed(&bbitmap->chunks[chunk_idx])) {
       // all slices are free in this chunk: return it to the NONE bin
-      mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_BBIN_NONE);
+      mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_CBIN_NONE);
     }
   }
   mi_bchunk_set(&bbitmap->chunkmap, chunk_idx, NULL);
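Taken together with the on_find path further below, this gives each chunk a simple bin lifecycle. A sketch using the names introduced by this commit (the transition labels are illustrative, not code from the diff):

// Sketch of the chunk bin lifecycle implied by this commit:
//
//   MI_CBIN_NONE -----(first range of n slices allocated at cidx == 0)----> mi_chunkbin_of(n)
//   mi_chunkbin_of(n) --(all slices in the chunk become free again)-------> MI_CBIN_NONE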
@@ -1588,7 +1588,7 @@ static inline bool mi_bbitmap_try_find_and_clear_generic(mi_bbitmap_t* bbitmap,
   mi_assert_internal(MI_BFIELD_BITS >= MI_BCHUNK_FIELDS);
   const mi_bfield_t cmap_mask  = mi_bfield_mask(cmap_max_count,0);
   const size_t      cmap_cycle = cmap_acc+1;
-  const mi_bbin_t     bbin = mi_bbin_of(n);
+  const mi_chunkbin_t bbin = mi_chunkbin_of(n);

   // visit each cmap entry
   size_t cmap_idx = 0;
   mi_bfield_cycle_iterate(cmap_mask, tseq, cmap_cycle, cmap_idx, X)
@@ -1599,29 +1599,29 @@ static inline bool mi_bbitmap_try_find_and_clear_generic(mi_bbitmap_t* bbitmap,
     if (cmap_entry == 0) continue;

     // get size bin masks
-    mi_bfield_t cmap_bins[MI_BBIN_COUNT] = { 0 };
-    cmap_bins[MI_BBIN_NONE] = cmap_entry;
-    for (mi_bbin_t ibin = MI_BBIN_SMALL; ibin < MI_BBIN_NONE; ibin = mi_bbin_inc(ibin)) {
+    mi_bfield_t cmap_bins[MI_CBIN_COUNT] = { 0 };
+    cmap_bins[MI_CBIN_NONE] = cmap_entry;
+    for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) {
       const mi_bfield_t cmap_bin = mi_atomic_load_relaxed(&bbitmap->chunkmap_bins[ibin].bfields[cmap_idx]);
       cmap_bins[ibin] = cmap_bin & cmap_entry;
-      cmap_bins[MI_BBIN_NONE] &= ~cmap_bin;  // clear bits that are in an assigned size bin
+      cmap_bins[MI_CBIN_NONE] &= ~cmap_bin;  // clear bits that are in an assigned size bin
     }

     // consider only chunks for a particular size bin at a time
     // this picks the best bin only within a cmap entry (~ 1GiB address space), but avoids multiple
     // iterations through all entries.
-    mi_assert_internal(bbin < MI_BBIN_NONE);
-    for (mi_bbin_t ibin = MI_BBIN_SMALL; ibin <= MI_BBIN_NONE;
+    mi_assert_internal(bbin < MI_CBIN_NONE);
+    for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin <= MI_CBIN_NONE;
          // skip from bbin to NONE (so, say, a SMALL will never be placed in an OTHER, MEDIUM, or LARGE chunk, to reduce fragmentation)
-         ibin = (ibin == bbin ? MI_BBIN_NONE : mi_bbin_inc(ibin)))
+         ibin = (ibin == bbin ? MI_CBIN_NONE : mi_chunkbin_inc(ibin)))
     {
-      mi_assert_internal(ibin < MI_BBIN_COUNT);
+      mi_assert_internal(ibin < MI_CBIN_COUNT);
       const mi_bfield_t cmap_bin = cmap_bins[ibin];
       size_t eidx = 0;
       mi_bfield_cycle_iterate(cmap_bin, tseq, cmap_entry_cycle, eidx, Y)
       {
         // assertion doesn't quite hold as the max_accessed may be out-of-date
-        // mi_assert_internal(cmap_entry_cycle > eidx || ibin == MI_BBIN_NONE);
+        // mi_assert_internal(cmap_entry_cycle > eidx || ibin == MI_CBIN_NONE);

         // get the chunk
         const size_t chunk_idx = cmap_idx*MI_BFIELD_BITS + eidx;
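The mask computation in the first loop of this hunk partitions the free chunks of one cmap entry by size bin; a chunk lands in the MI_CBIN_NONE mask only if no per-bin chunkmap claims it. A worked example with hypothetical 8-bit masks for brevity (real bfields are MI_BFIELD_BITS wide, and all values here are invented):

// Hypothetical 8-bit example of the bin-mask computation:
//   cmap_entry                    = 0b10110110  // chunks that have free slices
//   chunkmap_bins[MI_CBIN_SMALL]  = 0b00000011  // chunks reserved for SMALL
//   chunkmap_bins[MI_CBIN_MEDIUM] = 0b00010100  // chunks reserved for MEDIUM
// yields:
//   cmap_bins[MI_CBIN_SMALL]      = 0b00000010  // free and reserved for SMALL
//   cmap_bins[MI_CBIN_MEDIUM]     = 0b00010100  // free and reserved for MEDIUM
//   cmap_bins[MI_CBIN_NONE]       = 0b10100000  // free and in no bin at all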
@@ -1629,7 +1629,7 @@ static inline bool mi_bbitmap_try_find_and_clear_generic(mi_bbitmap_t* bbitmap,
         size_t cidx;
         if ((*on_find)(chunk, n, &cidx)) {
-          if (cidx==0 && ibin == MI_BBIN_NONE) { // only the first block determines the size bin
+          if (cidx==0 && ibin == MI_CBIN_NONE) { // only the first block determines the size bin
             // this chunk is now reserved for the `bbin` size class
             mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, bbin);
           }
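The loop header in the previous hunk encodes the search policy: a request visits the bins in enum order up to and including its own bin, then jumps straight to the unreserved MI_CBIN_NONE chunks, never touching chunks reserved for a larger bin. A sketch of the resulting visit orders (illustration only, restating the loop from the diff):

// Illustration only: visit orders produced by the loop header above,
// using the enum order SMALL, OTHER, MEDIUM, LARGE, NONE.
for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin <= MI_CBIN_NONE;
     ibin = (ibin == bbin ? MI_CBIN_NONE : mi_chunkbin_inc(ibin))) {
  // bbin == MI_CBIN_SMALL  visits: SMALL, NONE
  // bbin == MI_CBIN_MEDIUM visits: SMALL, OTHER, MEDIUM, NONE
  // bbin == MI_CBIN_LARGE  visits: SMALL, OTHER, MEDIUM, LARGE, NONE
}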

View file

@@ -225,25 +225,25 @@ bool _mi_bitmap_forall_setc_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* vis
    much fragmentation since we keep chunks for larger blocks separate.
 ---------------------------------------------------------------------------- */

-// mi_bbin_t is defined in mimalloc-stats.h
+// mi_chunkbin_t is defined in mimalloc-stats.h

-static inline mi_bbin_t mi_bbin_inc(mi_bbin_t bbin) {
-  mi_assert_internal(bbin < MI_BBIN_COUNT);
-  return (mi_bbin_t)((int)bbin + 1);
+static inline mi_chunkbin_t mi_chunkbin_inc(mi_chunkbin_t bbin) {
+  mi_assert_internal(bbin < MI_CBIN_COUNT);
+  return (mi_chunkbin_t)((int)bbin + 1);
 }

-static inline mi_bbin_t mi_bbin_dec(mi_bbin_t bbin) {
-  mi_assert_internal(bbin > MI_BBIN_NONE);
-  return (mi_bbin_t)((int)bbin - 1);
+static inline mi_chunkbin_t mi_chunkbin_dec(mi_chunkbin_t bbin) {
+  mi_assert_internal(bbin > MI_CBIN_NONE);
+  return (mi_chunkbin_t)((int)bbin - 1);
 }

-static inline mi_bbin_t mi_bbin_of(size_t slice_count) {
-  if (slice_count==1) return MI_BBIN_SMALL;
-  if (slice_count==8) return MI_BBIN_MEDIUM;
+static inline mi_chunkbin_t mi_chunkbin_of(size_t slice_count) {
+  if (slice_count==1) return MI_CBIN_SMALL;
+  if (slice_count==8) return MI_CBIN_MEDIUM;
   #if MI_ENABLE_LARGE_PAGES
-  if (slice_count==MI_BFIELD_BITS) return MI_BBIN_LARGE;
+  if (slice_count==MI_BFIELD_BITS) return MI_CBIN_LARGE;
   #endif
-  return MI_BBIN_OTHER;
+  return MI_CBIN_OTHER;
 }

 // An atomic "binned" bitmap for the free slices where we keep chunks reserved for particular size classes
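A quick sanity check of the mi_chunkbin_of mapping, as a hypothetical test (not in the commit; on a typical 64-bit build MI_BFIELD_BITS is 64, and the MI_CBIN_LARGE case only exists when MI_ENABLE_LARGE_PAGES is set, otherwise such counts fall into MI_CBIN_OTHER):

// Hypothetical usage, not part of the diff:
mi_assert_internal(mi_chunkbin_of(1) == MI_CBIN_SMALL);   // one-slice pages
mi_assert_internal(mi_chunkbin_of(8) == MI_CBIN_MEDIUM);  // eight-slice pages
mi_assert_internal(mi_chunkbin_of(3) == MI_CBIN_OTHER);   // any other count
#if MI_ENABLE_LARGE_PAGES
mi_assert_internal(mi_chunkbin_of(MI_BFIELD_BITS) == MI_CBIN_LARGE);
#endif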
@@ -254,7 +254,7 @@ typedef mi_decl_bchunk_align struct mi_bbitmap_s {
   size_t _padding[MI_BCHUNK_SIZE/MI_SIZE_SIZE - 2];  // suppress warning on msvc by aligning manually
   #endif
   mi_bchunkmap_t chunkmap;
-  mi_bchunkmap_t chunkmap_bins[MI_BBIN_COUNT - 1];   // chunkmaps with bit set if the chunk is in that size class (excluding MI_BBIN_NONE)
+  mi_bchunkmap_t chunkmap_bins[MI_CBIN_COUNT - 1];   // chunkmaps with bit set if the chunk is in that size class (excluding MI_CBIN_NONE)
   mi_bchunk_t    chunks[MI_BITMAP_DEFAULT_CHUNK_COUNT];  // usually dynamic MI_BITMAP_MAX_CHUNK_COUNT
 } mi_bbitmap_t;
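Note the array bound: only the assigned bins (SMALL, OTHER, MEDIUM, LARGE) get a chunkmap. MI_CBIN_NONE is represented implicitly, a chunk is unbinned exactly when its bit is clear in all MI_CBIN_COUNT - 1 maps, which is what mi_bbitmap_debug_get_bin relies on. A minimal sketch of that implicit representation, using a hypothetical helper not present in the commit:

// Hypothetical helper (illustration only): a chunk is in MI_CBIN_NONE
// iff no per-bin chunkmap has its bit set.
static inline bool mi_chunk_is_unbinned(const mi_bbitmap_t* bbitmap, size_t chunk_idx) {
  for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) {
    if (mi_bchunk_is_xsetN(MI_BIT_SET, &bbitmap->chunkmap_bins[ibin], chunk_idx, 1)) return false;
  }
  return true;  // no per-bin bit set: the chunk is in MI_CBIN_NONE
}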
@@ -267,7 +267,7 @@ static inline size_t mi_bbitmap_max_bits(const mi_bbitmap_t* bbitmap) {
   return (mi_bbitmap_chunk_count(bbitmap) * MI_BCHUNK_BITS);
 }

-mi_bbin_t     mi_bbitmap_debug_get_bin(const mi_bchunk_t* chunkmap_bins, size_t chunk_idx);
+mi_chunkbin_t mi_bbitmap_debug_get_bin(const mi_bchunk_t* chunkmap_bins, size_t chunk_idx);

 size_t mi_bbitmap_size(size_t bit_count, size_t* chunk_count);