diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index 4620fb72..53c0340a 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -425,6 +425,9 @@ static inline mi_page_t* _mi_get_free_small_page(size_t size) {
 }
 
 // Segment that contains the pointer
+// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
+// and we need to align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
+// therefore we align one byte before `p`.
 static inline mi_segment_t* _mi_ptr_segment(const void* p) {
   mi_assert_internal(p != NULL);
   return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 86b0112b..db80baee 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -34,6 +34,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   size_t oversize;
   if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
     // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
+    // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
+    // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down).
     if mi_unlikely(offset != 0) {
       // todo: cannot support offset alignment for very large alignments yet
       #if MI_DEBUG > 0
diff --git a/src/page.c b/src/page.c
index e359d5bb..7c3a30a8 100644
--- a/src/page.c
+++ b/src/page.c
@@ -795,6 +795,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
 // Because huge pages contain just one block, and the segment contains
 // just that page, we always treat them as abandoned and any thread
 // that frees the block can free the whole page and segment directly.
+// Huge pages are also used if the requested alignment is very large (> MI_ALIGNMENT_MAX).
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
@@ -844,6 +845,8 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
 
 // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
+// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
+// very large requested alignments, in which case we use a huge segment.
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
 {
   mi_assert_internal(heap != NULL);
diff --git a/src/region.c b/src/region.c
index ea376aa4..f069502f 100644
--- a/src/region.c
+++ b/src/region.c
@@ -65,8 +65,6 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offs
 #error "define the maximum heap space allowed for regions on this platform"
 #endif
 
-#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
-
 #define MI_REGION_MAX_BLOCKS  MI_BITMAP_FIELD_BITS
 #define MI_REGION_SIZE    (MI_SEGMENT_SIZE * MI_BITMAP_FIELD_BITS)    // 256MiB  (64MiB on 32 bits)
 #define MI_REGION_MAX     (MI_HEAP_REGION_MAX_SIZE / MI_REGION_SIZE)  // 1024  (48 on 32 bits)
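
For illustration, here is a minimal standalone sketch of the align-down-by-one trick in `_mi_ptr_segment`. The constants and the `ptr_segment` helper are stand-ins, not mimalloc code. The point: a block aligned exactly at N*MI_SEGMENT_SIZE would mask to its own address, which is the block start rather than the segment info; subtracting one byte first makes the mask land on the previous segment boundary, where the info lives.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MI_SEGMENT_SIZE  ((uintptr_t)1 << 22)   // 4 MiB, illustrative value only
#define MI_SEGMENT_MASK  (MI_SEGMENT_SIZE - 1)

// Same expression as `_mi_ptr_segment`: align one byte before `p`
// down to a segment boundary.
static uintptr_t ptr_segment(uintptr_t p) {
  return (p - 1) & ~MI_SEGMENT_MASK;
}

int main(void) {
  uintptr_t segment = 16 * MI_SEGMENT_SIZE;       // hypothetical segment start
  uintptr_t block   = segment + MI_SEGMENT_SIZE;  // block aligned at N*MI_SEGMENT_SIZE

  // A plain mask would yield `block` itself, which holds no segment info:
  assert((block & ~MI_SEGMENT_MASK) == block);

  // Aligning one byte before `p` finds the segment info just before it:
  assert(ptr_segment(block) == segment);

  // Interior pointers within an ordinary segment still resolve correctly:
  assert(ptr_segment(segment + 123) == segment);

  printf("segment found at %p\n", (void*)ptr_segment(block));
  return 0;
}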
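Usage-wise, the effect of this change is that very large alignments (greater than MI_ALIGNMENT_MAX) now succeed through the regular aligned-allocation API, `mi_malloc_aligned`, by falling back to a dedicated huge segment. A hypothetical caller might look as follows; the 16 MiB alignment is an arbitrary value chosen for illustration:

#include <mimalloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // An alignment well above MI_ALIGNMENT_MAX, served via a huge segment.
  const size_t alignment = (size_t)16 << 20;  // 16 MiB
  void* p = mi_malloc_aligned(1024, alignment);
  if (p != NULL) {
    printf("aligned: %d\n", ((uintptr_t)p % alignment) == 0);
    mi_free(p);
  }
  return 0;
}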