Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-05 23:19:31 +03:00)
add more comments

commit 2daec6c72f (parent 1f12c3dd12)
4 changed files with 8 additions and 2 deletions
@@ -425,6 +425,9 @@ static inline mi_page_t* _mi_get_free_small_page(size_t size) {
 }
 
 // Segment that contains the pointer
+// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
+// and we need to align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
+// therefore we align one byte before `p`.
 static inline mi_segment_t* _mi_ptr_segment(const void* p) {
   mi_assert_internal(p != NULL);
   return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
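The comment added above motivates the `(uintptr_t)p - 1` in `_mi_ptr_segment`: a block sitting exactly on an `N*MI_SEGMENT_SIZE` boundary inside a huge segment must still map to the segment info that lies `MI_SEGMENT_SIZE` bytes before it. Below is a minimal standalone sketch of the same mask arithmetic, not mimalloc code, assuming an illustrative 4 MiB segment size (the real `MI_SEGMENT_SIZE` depends on the build configuration).

// Standalone illustration of the "align one byte before p" trick (not mimalloc code).
// Assumes a hypothetical 4 MiB segment size purely for the example.
#include <stdint.h>
#include <stdio.h>

#define SEG_SIZE  ((uintptr_t)4 * 1024 * 1024)
#define SEG_MASK  (SEG_SIZE - 1)

static uintptr_t ptr_segment(uintptr_t p) {
  return (p - 1) & ~SEG_MASK;            // align down, one byte before p
}

int main(void) {
  uintptr_t seg   = 16 * SEG_SIZE;       // segment info starts here
  uintptr_t block = seg + SEG_SIZE;      // large aligned block at the next N*SEG_SIZE boundary
  // A plain align-down of `block` would return `block` itself and miss the segment info;
  // aligning one byte before `block` lands on `seg` as intended.
  printf("%d\n", ptr_segment(block) == seg);       // prints 1
  printf("%d\n", ptr_segment(seg + 123) == seg);   // interior pointers still map to seg
  return 0;
}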
@@ -34,6 +34,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   size_t oversize;
   if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
     // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
+    // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
+    // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
     if mi_unlikely(offset != 0) {
       // todo: cannot support offset alignment for very large alignments yet
       #if MI_DEBUG > 0
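For reference, a hedged caller-side sketch of the path this new comment describes: a request whose alignment exceeds `MI_ALIGNMENT_MAX` takes the fallback above and is served from a dedicated huge-page segment. The 16 MiB alignment is only an assumption chosen to exceed `MI_ALIGNMENT_MAX` on a typical build; `mi_malloc_aligned` and `mi_free` are the public mimalloc API.

// Sketch only: assumes 16 MiB is larger than MI_ALIGNMENT_MAX for this build of mimalloc.
#include <mimalloc.h>
#include <stdint.h>
#include <assert.h>

int main(void) {
  const size_t alignment = (size_t)16 * 1024 * 1024;   // assumed > MI_ALIGNMENT_MAX
  void* p = mi_malloc_aligned(100, alignment);          // served via the huge-page fallback above
  assert(p != NULL && ((uintptr_t)p & (alignment - 1)) == 0);
  mi_free(p);
  return 0;
}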
@@ -795,6 +795,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
 // Because huge pages contain just one block, and the segment contains
 // just that page, we always treat them as abandoned and any thread
 // that frees the block can free the whole page and segment directly.
+// Huge pages are also used if the requested alignment is very large (> MI_ALIGNMENT_MAX).
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
@@ -844,6 +845,8 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
 
 // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
+// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
+// very large requested alignments in which case we use a huge segment.
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
 {
   mi_assert_internal(heap != NULL);
@@ -65,8 +65,6 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offs
 #error "define the maximum heap space allowed for regions on this platform"
 #endif
 
-#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
-
 #define MI_REGION_MAX_BLOCKS MI_BITMAP_FIELD_BITS
 #define MI_REGION_SIZE (MI_SEGMENT_SIZE * MI_BITMAP_FIELD_BITS) // 256MiB (64MiB on 32 bits)
 #define MI_REGION_MAX (MI_HEAP_REGION_MAX_SIZE / MI_REGION_SIZE) // 1024 (48 on 32 bits)