merge from dev

This commit is contained in:
daanx 2024-12-08 18:27:05 -08:00
commit 88990cec2d
7 changed files with 59 additions and 14 deletions

View file

@@ -38,7 +38,8 @@ static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, si
static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) {
const size_t rate = heap->guarded_sample_rate;
if (rate != 0) { heap->guarded_sample_rate = 0; } // don't write to constant heap_empty
// only write if `rate!=0` so we don't write to the constant `_mi_heap_empty`
if (rate != 0) { heap->guarded_sample_rate = 0; }
void* p = _mi_heap_malloc_zero(heap, size, zero);
if (rate != 0) { heap->guarded_sample_rate = rate; }
return p;
@@ -59,7 +60,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
size_t oversize;
if mi_unlikely(alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) {
// use OS allocation for large alignments and allocate inside a singleton page (not in an arena)
// This can support alignments >= MI_PAGE_ALIGN by ensuring the object can be aligned
// This can support alignments >= MI_PAGE_ALIGN by ensuring the object can be aligned
// in the first (and single) page such that the page info is `MI_PAGE_ALIGN` bytes before it (and can be found in the _mi_page_map).
if mi_unlikely(offset != 0) {
// todo: cannot support offset alignment for very large alignments yet

View file

@@ -202,7 +202,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
// set the dirty bits
if (arena->memid.initially_zero) {
memid->initially_zero = mi_bitmap_setN(arena->slices_dirty, slice_index, slice_count, NULL);
memid->initially_zero = mi_bitmap_setN(arena->slices_dirty, slice_index, slice_count, NULL);
}
// set commit state
@@ -584,7 +584,7 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
mi_assert_internal(!os_align || _mi_is_aligned((uint8_t*)page + page_alignment, block_alignment));
// claimed free slices: initialize the page partly
// claimed free slices: initialize the page partly
if (!memid.initially_zero) {
mi_track_mem_undefined(page, slice_count * MI_ARENA_SLICE_SIZE);
_mi_memzero_aligned(page, sizeof(*page));

View file

@@ -920,7 +920,7 @@ bool mi_bitmap_xset(mi_xset_t set, mi_bitmap_t* bitmap, size_t idx) {
}
}
// Set/clear aligned 8-bits in the bitmap (with `(idx%8)==0`).
// Set/clear aligned 8-bits in the bitmap (with `(idx%8)==0`).
// Returns `true` if atomically transitioned from 0 to 1 (or 1 to 0)
static bool mi_bitmap_xset8(mi_xset_t set, mi_bitmap_t* bitmap, size_t idx) {
mi_assert_internal(idx < mi_bitmap_max_bits(bitmap));

View file

@@ -134,7 +134,7 @@ extern mi_heap_t _mi_heap_main;
static mi_decl_cache_align mi_subproc_t mi_subproc_default;
static mi_decl_cache_align mi_tld_t tld_main = {
0,
0,
&_mi_heap_main, &_mi_heap_main,
&mi_subproc_default, // subproc
0, // tseq
@@ -241,7 +241,7 @@ mi_heap_t* _mi_heap_main_get(void) {
// Thread sequence number
static _Atomic(size_t) mi_tcount;
// The mimalloc thread local data
// The mimalloc thread local data
mi_decl_thread mi_tld_t* mi_tld;
// Allocate fresh tld