fix full SEGMENT_SIZE internal alignment by adding one more slice entry

daan 2022-11-06 20:36:51 -08:00
parent 4814a649be
commit 562efed54d
4 changed files with 14 additions and 10 deletions
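
For context on the fix: with a huge block placed at a large alignment, a valid pointer can sit exactly at the end of the segment, so the slice index computed in `_mi_segment_page_of` can equal the number of slice entries. The extra entry added to `slices[]` turns that case into a valid sentinel lookup instead of an out-of-bounds access. A minimal sketch of the index arithmetic follows; the names echo the mimalloc identifiers in the diff, but the concrete constants are assumed example values, not the library's actual configuration.

#include <assert.h>
#include <stddef.h>

// Sketch only: assumed example values, not mimalloc's real configuration.
#define SEGMENT_SLICE_SHIFT  16     // assume 64 KiB slices
#define SLICES_PER_SEGMENT   512    // assume 512 slices per segment
#define SEGMENT_SIZE  ((size_t)SLICES_PER_SEGMENT << SEGMENT_SLICE_SHIFT)

int main(void) {
  // A pointer at the very end of a segment (possible for huge blocks with
  // large alignment) yields diff == SEGMENT_SIZE ...
  size_t diff = SEGMENT_SIZE;
  size_t idx  = diff >> SEGMENT_SLICE_SHIFT;
  // ... so idx == SLICES_PER_SEGMENT, one past the last index of an array
  // declared as slices[SLICES_PER_SEGMENT]; declaring it with one extra
  // entry makes slices[idx] a readable sentinel instead of an overflow.
  assert(idx == SLICES_PER_SEGMENT);
  return 0;
}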


@@ -481,11 +481,11 @@ static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
 // Get the page containing the pointer
 static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
   mi_assert_internal(p > segment);
   ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
-  mi_assert_internal(diff >= 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE /* can be equal for large alignment */);
-  if (diff == MI_SEGMENT_SIZE) diff--;
+  mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
   size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
-  mi_assert_internal(idx < segment->slice_entries);
+  mi_assert_internal(idx <= segment->slice_entries);
   mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
   mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
   mi_assert_internal(slice->slice_offset == 0);


@@ -381,7 +381,7 @@ typedef struct mi_segment_s {
   mi_segment_kind_t kind;
   _Atomic(mi_threadid_t) thread_id;   // unique id of the thread owning this segment
   size_t slice_entries;               // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
-  mi_slice_t slices[MI_SLICES_PER_SEGMENT];
+  mi_slice_t slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
 } mi_segment_t;


@@ -63,7 +63,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
   if (aligned_p != p) { mi_page_set_has_aligned(_mi_ptr_page(p), true); }
-  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
+  // mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   #if MI_TRACK_ENABLED


@@ -277,7 +277,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
     }
     // and the last entry as well (for coalescing)
     const mi_slice_t* last = slice + slice->slice_count - 1;
-    if (last > slice && last < mi_segment_slices_end(segment)) {
+    if (last > slice && last <= mi_segment_slices_end(segment)) {
       mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
       mi_assert_internal(last->slice_count == 0);
       mi_assert_internal(last->xblock_size == 1);
@@ -709,9 +709,13 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   // and also for the last one (if not set already) (the last one is needed for coalescing)
   // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543)
-  mi_slice_t* last = &((mi_slice_t*)segment->slices)[slice_index + slice_count - 1];
-  if (last < mi_segment_slices_end(segment) && last >= slice) {
-    last->slice_offset = (uint32_t)(sizeof(mi_slice_t)*(slice_count-1));
+  size_t slice_last_index = slice_index + slice_count - 1;
+  if (slice_last_index >= segment->slice_entries) {
+    slice_last_index = segment->slice_entries;
+  }
+  mi_slice_t* last = &((mi_slice_t*)segment->slices)[slice_last_index];
+  if (last <= mi_segment_slices_end(segment) && last >= slice) {
+    last->slice_offset = (uint32_t)(sizeof(mi_slice_t)*(slice_last_index - slice_index));
     last->slice_count = 0;
     last->xblock_size = 1;
   }
@@ -853,7 +857,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, siz
   if (!is_zero) {
     ptrdiff_t ofs = offsetof(mi_segment_t, next);
     size_t prefix = offsetof(mi_segment_t, slices) - ofs;
-    memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*segment_slices);
+    memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*(segment_slices+1));  // one more
   }
   if (!commit_info_still_good) {