mirror of https://github.com/microsoft/mimalloc.git
synced 2025-08-25 00:34:48 +03:00
remap: refactoring and start on expandable
parent 54dee434a3
commit ac21489ce7
8 changed files with 40 additions and 24 deletions
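For orientation: the remappable entry points this commit touches (mi_heap_malloc_remappable and mi_remap) are experimental, and only their signatures are visible in the hunks below. A minimal usage sketch, assuming they behave like their mi_malloc/mi_realloc counterparts (an assumption, not documented behavior):

#include <mimalloc.h>

// Hedged sketch: allocate a remappable block, then grow it with mi_remap.
// Based only on the signatures visible in this diff (experimental API).
int main(void) {
  mi_heap_t* heap = mi_heap_get_default();
  void* p = mi_heap_malloc_remappable(heap, 16 * 1024 * 1024);
  if (p == NULL) return 1;
  void* q = mi_remap(p, 64 * 1024 * 1024);   // may relocate the backing segment
  mi_free(q != NULL ? q : p);
  return 0;
}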
@@ -33,7 +33,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   void* p;
   size_t oversize;
-  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
+  if mi_unlikely(alignment >= MI_ALIGN_HUGE) {
     // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
     // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
     // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
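The comment above relies on mimalloc's segment lookup: an interior pointer is mapped back to its segment header by aligning it down to the segment size. A minimal sketch of that align-down step, using an illustrative 32 MiB segment size rather than the real MI_SEGMENT_SIZE:

#include <stdint.h>

// Illustrative only: 32 MiB stands in for MI_SEGMENT_SIZE.
#define SEGMENT_SIZE  ((uintptr_t)32 * 1024 * 1024)

// Find the segment header of any interior pointer by aligning down.
const void* segment_of(const void* p) {
  return (const void*)((uintptr_t)p & ~(SEGMENT_SIZE - 1));
}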
@@ -75,8 +75,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);

   // now zero the block if needed
-  if (alignment > MI_ALIGNMENT_MAX) {
-    // for the tracker, on huge aligned allocations only from the start of the large block is defined
+  if (alignment >= MI_ALIGN_HUGE) {
+    // for the tracker, on huge aligned allocations only the memory from the start of the large block is defined
     mi_track_mem_undefined(aligned_p, size);
     if (zero) {
       _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
@@ -791,7 +791,7 @@ mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_
 // ------------------------------------------------------

 static void* mi_heap_malloc_zero_remappable(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-  return _mi_heap_malloc_zero_ex(heap, size, zero, MI_PAGE_ALIGN_REMAPPABLE);
+  return _mi_heap_malloc_zero_ex(heap, size, zero, MI_ALIGN_REMAP);
 }

 mi_decl_nodiscard void* mi_heap_malloc_remappable(mi_heap_t* heap, size_t size) mi_attr_noexcept {
@@ -836,19 +836,16 @@ mi_decl_nodiscard void* mi_remap(void* p, size_t newsize) mi_attr_noexcept {
     mi_heap_t* heap = mi_prim_get_default_heap();
     mi_assert_internal((void*)block == p);
     mi_assert_internal(heap->thread_id == tid);
     _mi_heap_huge_page_detach(heap, page);
     block = _mi_segment_huge_page_remap(segment, page, block, padsize, &heap->tld->segments);
     if (block != NULL) {
       // success! re-establish the pointers to the potentially relocated memory
       segment = mi_checked_ptr_segment(block, "mi_remap");
       page = _mi_segment_page_of(segment, block);
       mi_padding_init(page, block, newsize);
       _mi_heap_huge_page_attach(heap, page);
       return block;
     }
     else {
       _mi_verbose_message("unable to remap memory, huge remap (address: %p, from %zu bytes to %zu bytes)\n", p, mi_usable_size(p), newsize);
       _mi_heap_huge_page_attach(heap, page);
     }
   }
   else {
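The rollback path above implies a simple caller-side contract: when the remap fails the huge page is re-attached and the original block stays valid, so mi_remap can return NULL without losing data. A hedged sketch of how a caller might rely on that, falling back to allocate-copy-free (mi_malloc, mi_usable_size, and mi_free are public API; the NULL-on-failure contract is inferred from this hunk):

#include <mimalloc.h>
#include <string.h>

void* remap_or_copy(void* p, size_t newsize) {
  void* q = mi_remap(p, newsize);
  if (q != NULL) return q;                 // remapped in place or relocated
  q = mi_malloc(newsize);                  // fallback: allocate-copy-free
  if (q == NULL) return NULL;
  size_t tocopy = mi_usable_size(p);
  if (tocopy > newsize) tocopy = newsize;
  memcpy(q, p, tocopy);
  mi_free(p);
  return q;
}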
@@ -172,7 +172,7 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* st
   *memid = _mi_memid_none();

   // try static
-  void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
+  void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid);
   if (p != NULL) return p;

   // or fall back to the OS
@@ -874,7 +874,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 // Remappable memory
 //----------------------------------------------------------------

-#if defined(xMREMAP_MAYMOVE) && defined(MREMAP_FIXED)
+#if defined(MREMAP_MAYMOVE) && defined(MREMAP_FIXED)
 int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
   mi_assert_internal((size%_mi_os_page_size()) == 0);
   *remap_info = NULL;
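Removing the stray 'x' re-enables the Linux path guarded by MREMAP_MAYMOVE and MREMAP_FIXED. For reference, a minimal sketch of the underlying mremap(2) mechanism this primitive builds on; it only illustrates the OS call and is not mimalloc's actual prim implementation:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stddef.h>

// Grow a mapping in place if possible; with MREMAP_MAYMOVE the kernel is
// allowed to relocate it to a new address instead of failing.
void* os_remap(void* base, size_t oldsize, size_t newsize) {
  void* p = mremap(base, oldsize, newsize, MREMAP_MAYMOVE);
  return (p == MAP_FAILED ? NULL : p);
}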
@@ -7,6 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc.h"
 #include "mimalloc/internal.h"
 #include "mimalloc/atomic.h"
 #include "mimalloc/prim.h"

 #include <string.h> // memset
+#include <stdio.h>
@@ -517,19 +518,26 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
   bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
   size_t align_offset = 0;
   size_t alignment = MI_SEGMENT_SIZE;
-  if (page_alignment > MI_PAGE_ALIGN_REMAPPABLE) {
+  if (page_alignment >= MI_ALIGN_HUGE) {
     alignment = page_alignment;
     align_offset = _mi_align_up(pre_size, MI_SEGMENT_SIZE);
     segment_size = segment_size + (align_offset - pre_size); // adjust the segment size
   }

   mi_segment_t* segment = NULL;
-  if (page_alignment == MI_PAGE_ALIGN_REMAPPABLE) {
+  if (page_alignment == MI_ALIGN_REMAP) {
     segment = (mi_segment_t*)_mi_os_alloc_remappable(segment_size, alignment, &memid, tld_os->stats);
   }
+  else if (page_alignment >= MI_ALIGN_EXPAND_MIN && page_alignment <= MI_ALIGN_EXPAND_MAX) {
+    size_t future_reserve = (page_alignment - MI_ALIGN_EXPAND_MIN + 1) * MI_EXPAND_INCREMENT;
+    if (future_reserve < 2 * segment_size) { future_reserve = 2 * segment_size; }
+    segment = (mi_segment_t*)_mi_os_alloc_expandable(segment_size, alignment, future_reserve, &memid, tld_os->stats);
+  }
   else {
+    mi_assert_internal(page_alignment == 0 || page_alignment >= MI_ALIGN_HUGE);
     segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, tld_os);
   }

   if (segment == NULL) {
     return NULL; // failed to allocate
   }
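The new expandable branch encodes a reservation hint in the page_alignment sentinel: the offset above MI_ALIGN_EXPAND_MIN selects how many MI_EXPAND_INCREMENT units of address space to reserve for future growth, floored at twice the current segment size. A worked sketch of that arithmetic with assumed constant values (the real sentinels and increment are defined elsewhere in this branch, not in this diff):

#include <stddef.h>
#include <stdio.h>

// Illustrative values only; not the constants used by mimalloc.
#define ALIGN_EXPAND_MIN   3
#define EXPAND_INCREMENT   (64u * 1024 * 1024)   // assume 64 MiB per step

int main(void) {
  size_t page_alignment = ALIGN_EXPAND_MIN + 3;  // sentinel: ask for 4 increments
  size_t segment_size   = 32u * 1024 * 1024;
  size_t future_reserve = (page_alignment - ALIGN_EXPAND_MIN + 1) * EXPAND_INCREMENT;
  if (future_reserve < 2 * segment_size) { future_reserve = 2 * segment_size; }
  printf("reserve %zu MiB of address space\n", future_reserve / (1024 * 1024));  // 256 MiB
  return 0;
}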
@@ -1230,7 +1238,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);

   // reset the part of the page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
-  if (page_alignment > MI_PAGE_ALIGN_REMAPPABLE && segment->allow_decommit && page->is_committed) {
+  if (page_alignment >= MI_ALIGN_HUGE && segment->allow_decommit && page->is_committed) {
     uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
     mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
     mi_assert_internal(psize - (aligned_p - start) >= size);
@@ -1299,15 +1307,21 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
   mi_assert_internal(segment->next == NULL && segment->prev == NULL);
   mi_assert_internal(page->next == NULL && page->prev == NULL);

   mi_heap_t* heap = mi_page_heap(page);
   mi_assert_internal(heap->thread_id == _mi_prim_thread_id());

   const size_t bsize = mi_page_block_size(page);
   const size_t newssize = _mi_align_up(_mi_align_up(newsize, _mi_os_page_size()) + (mi_segment_size(segment) - bsize), MI_SEGMENT_SIZE);
   mi_memid_t memid = segment->memid;
   const ptrdiff_t block_ofs = (uint8_t*)block - (uint8_t*)segment;
   const uintptr_t cookie = segment->cookie;
   _mi_heap_huge_page_detach(heap, page);
   mi_segment_protect(segment, false, tld->os);
   mi_segment_t* newsegment = (mi_segment_t*)_mi_os_remap(segment, mi_segment_size(segment), newssize, &memid, tld->stats);
   if (newsegment == NULL) {
     // failed to remap: roll back
     mi_segment_protect(segment, true, tld->os);
     _mi_heap_huge_page_attach(heap, page);
     return NULL;
   }
   mi_assert_internal(cookie == newsegment->cookie);
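The newssize computation above rounds the requested size up to an OS page, adds back the segment overhead (current segment size minus the current block size), and then rounds up to a whole number of MI_SEGMENT_SIZE units. A small worked example with assumed sizes (4 KiB pages, 32 MiB segments; the real constants come from mimalloc's configuration):

#include <stddef.h>
#include <stdio.h>

static size_t align_up(size_t n, size_t a) { return (n + a - 1) / a * a; }

int main(void) {
  size_t os_page      = 4096;
  size_t segment_sz   = 32u * 1024 * 1024;   // stand-in for MI_SEGMENT_SIZE
  size_t old_seg_size = 64u * 1024 * 1024;   // current huge segment
  size_t bsize        = 60u * 1024 * 1024;   // current block size in that segment
  size_t newsize      = 100u * 1024 * 1024;  // requested new block size
  size_t newssize = align_up(align_up(newsize, os_page) + (old_seg_size - bsize), segment_sz);
  printf("new segment size: %zu MiB\n", newssize / (1024 * 1024));  // prints 128 MiB
  return 0;
}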
@@ -1321,7 +1335,8 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
   mi_block_t* newblock = (mi_block_t*)((uint8_t*)newsegment + block_ofs);
   mi_assert_internal(_mi_ptr_segment(newblock) == newsegment);
   mi_page_t* newpage = _mi_ptr_page(newblock);
-  mi_assert_internal(mi_page_block_size(newpage) >= newsize); MI_UNUSED(newpage);
+  mi_assert_internal(mi_page_block_size(newpage) >= newsize);
+  _mi_heap_huge_page_attach(heap, newpage);
   return newblock;
 }
@@ -1332,8 +1347,8 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
 mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
   mi_page_t* page;
   if mi_unlikely(page_alignment > 0) {
-    mi_assert_internal(page_alignment == MI_PAGE_ALIGN_REMAPPABLE || (_mi_is_power_of_two(page_alignment) && page_alignment >= MI_SEGMENT_SIZE));
-    if (page_alignment != MI_PAGE_ALIGN_REMAPPABLE && page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
+    mi_assert_internal(page_alignment <= MI_ALIGN_REMAP || (_mi_is_power_of_two(page_alignment) && page_alignment >= MI_ALIGN_HUGE));
+    if (page_alignment >= MI_ALIGN_HUGE && page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
     page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld);
   }
   else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
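After this rename, page_alignment plays a dual role: small sentinel values (MI_ALIGN_REMAP and the MI_ALIGN_EXPAND_MIN..MI_ALIGN_EXPAND_MAX range) select a segment kind, while values at or above MI_ALIGN_HUGE are genuine power-of-two alignment requests. A hedged sketch of that encoding with purely illustrative values (the actual constants live in this branch's headers and are not shown in the diff):

#include <stdbool.h>
#include <stddef.h>

// Illustrative sentinel layout only, not mimalloc's real values.
enum {
  ALIGN_NONE       = 0,          // ordinary page allocation
  ALIGN_REMAP      = 1,          // sentinel: remappable segment
  ALIGN_EXPAND_MIN = 2,          // sentinels: expandable segment, N growth steps
  ALIGN_EXPAND_MAX = 9,
  ALIGN_HUGE       = 64 * 1024   // everything >= this is a real alignment request
};

bool is_sentinel(size_t page_alignment) {
  return (page_alignment > ALIGN_NONE && page_alignment < ALIGN_HUGE);
}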