wip: adding mi_remap; first working version

daanx 2023-04-26 16:06:36 -07:00
parent be2f35641a
commit 0c0a753aa0
6 changed files with 74 additions and 28 deletions


@@ -174,6 +174,9 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback fro
 size_t _mi_bin_size(uint8_t bin);           // for stats
 uint8_t _mi_bin(size_t size);               // for stats
+void _mi_heap_huge_page_attach(mi_heap_t* heap, mi_page_t* page);
+void _mi_heap_huge_page_detach(mi_heap_t* heap, mi_page_t* page);

 // "heap.c"
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);


@@ -24,6 +24,30 @@ terms of the MIT license. A copy of the license can be found in the file
 // Allocation
 // ------------------------------------------------------

+#if MI_PADDING
+static void mi_padding_init(mi_page_t* page, mi_block_t* block, size_t size /* block size minus MI_PADDING_SIZE */) {
+  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
+  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - size);
+  #if (MI_DEBUG>=2)
+  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size + delta));
+  #endif
+  mi_track_mem_defined(padding, sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
+  padding->canary = (uint32_t)(mi_ptr_encode(page, block, page->keys));
+  padding->delta  = (uint32_t)(delta);
+  #if MI_PADDING_CHECK
+  if (!mi_page_is_huge(page)) {
+    uint8_t* fill = (uint8_t*)padding - delta;
+    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // set at most N initial padding bytes
+    for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
+  }
+  #endif
+}
+#else
+static void mi_padding_init(mi_page_t* page, mi_block_t* block, size_t size) {
+  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(size);
+}
+#endif
+
 // Fast allocation in a page: just pop from the free list.
 // Fall back to generic allocation only if the list is empty.
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
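Note: `mi_padding_init` is factored out of `_mi_page_malloc` (next hunk) so that `mi_remap` can re-initialize the padding when a block is kept or moved at a new requested size. The sketch below, assuming the mimalloc internal headers, shows how the stored fields relate to the requested size; `mi_padding_requested_size_sketch` is a hypothetical helper and not part of this commit.

#include "mimalloc/internal.h"   // assumed include path for mi_page_t, mi_block_t, mi_padding_t, mi_ptr_encode

#if MI_PADDING
// Hypothetical helper (sketch): recover the size that was passed to mi_padding_init.
// mi_padding_init places the padding struct at block + mi_page_usable_block_size(page)
// and stores delta = usable_block_size - requested_size, so the requested size is the
// usable block size minus delta (after checking the canary).
static size_t mi_padding_requested_size_sketch(mi_page_t* page, mi_block_t* block) {
  const size_t bsize = mi_page_usable_block_size(page);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  mi_assert_internal(padding->canary == (uint32_t)(mi_ptr_encode(page, block, page->keys)));
  return (bsize - padding->delta);   // the size originally passed to mi_padding_init
}
#endif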
@@ -81,24 +105,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   }
   #endif
-  #if MI_PADDING // && !MI_TRACK_ENABLED
-  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
-  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
-  #if (MI_DEBUG>=2)
-  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
-  #endif
-  mi_track_mem_defined(padding, sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
-  padding->canary = (uint32_t)(mi_ptr_encode(page, block, page->keys));
-  padding->delta  = (uint32_t)(delta);
-  #if MI_PADDING_CHECK
-  if (!mi_page_is_huge(page)) {
-    uint8_t* fill = (uint8_t*)padding - delta;
-    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // set at most N initial padding bytes
-    for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
-  }
-  #endif
-  #endif
+  mi_padding_init(page, block, size - MI_PADDING_SIZE);
   return block;
 }
@@ -807,12 +814,14 @@ mi_decl_nodiscard void* mi_zalloc_remappable(size_t size) mi_attr_noexcept {
 mi_decl_nodiscard void* mi_remap(void* p, size_t newsize) mi_attr_noexcept {
   if (p == NULL) return mi_malloc(newsize);
+  const size_t padsize = newsize + MI_PADDING_SIZE;
   mi_segment_t* segment = mi_checked_ptr_segment(p, "mi_remap");
   mi_assert_internal(segment != NULL);
-  mi_page_t* const page = _mi_segment_page_of(segment, p);
+  mi_page_t* page = _mi_segment_page_of(segment, p);
+  mi_block_t* block = _mi_page_ptr_unalign(segment, page, p);
   const size_t bsize = mi_page_usable_block_size(page);
-  if (bsize >= newsize) {
-    // TODO: adjust padding
+  if (bsize >= padsize) {
+    mi_padding_init(page, block, newsize);
     return p;
   }
@@ -820,14 +829,22 @@ mi_decl_nodiscard void* mi_remap(void* p, size_t newsize) mi_attr_noexcept {
   if (segment->thread_id == heap->thread_id &&
       segment->memid.memkind == MI_MEM_OS_REMAP)
   {
-    mi_heap_t* heap = mi_prim_get_default_heap();
-    mi_block_t* block = _mi_segment_huge_page_remap(segment, page, (mi_block_t*)p, newsize, &heap->tld->segments);
+    mi_assert_internal((void*)block == p);
+    _mi_heap_huge_page_detach(heap, page);
+    block = _mi_segment_huge_page_remap(segment, page, block, padsize, &heap->tld->segments);
     if (block != NULL) {
-      // TODO: adjust padding?
+      segment = mi_checked_ptr_segment(block, "mi_remap");
+      page = _mi_segment_page_of(segment, block);
+      mi_padding_init(page, block, newsize);
+      _mi_heap_huge_page_attach(heap, page);
       return block;
     }
+    else {
+      _mi_heap_huge_page_attach(heap, page);
+    }
   }
-  _mi_warning_message("unable to remap block, fall back to reallocation (address: %p from %zu bytes to %zu bytes)\n", p, 0, newsize);
+  _mi_warning_message("unable to remap block, fall back to reallocation (address: %p from %zu bytes to %zu bytes)\n", p, mi_usable_size(p), newsize);
   return mi_realloc(p, newsize);
 }
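For context, the new entry points behave like realloc but can grow OS-remappable huge blocks in place. A minimal usage sketch, mirroring `test_remap` further below; it assumes `mi_zalloc_remappable` (defined in this file per the hunk header above) is exposed in mimalloc.h:

#include <string.h>
#include <mimalloc.h>

int main(void) {
  size_t size = 1024 * 1024;
  char* p = (char*)mi_zalloc_remappable(size);     // allocate a remappable (zero-initialized) block
  if (p == NULL) return 1;
  for (int i = 0; i < 8; i++) {
    const size_t inc = 1024 * 1024;
    p = (char*)mi_remap(p, size + inc);            // may move; always use the returned pointer
    if (p == NULL) return 1;
    memset(p + size, i, inc);                      // the newly grown tail is writable
    size += inc;
  }
  mi_free(p);
  return 0;
}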


@@ -403,6 +403,27 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   _mi_segment_page_abandon(page,segments_tld);
 }

+// Detach a huge page (used for remapping)
+void _mi_heap_huge_page_detach(mi_heap_t* heap, mi_page_t* page) {
+  mi_assert_internal(mi_page_heap(page) == heap);
+  #if !MI_HUGE_PAGE_ABANDON
+  mi_page_queue_t* pq = mi_page_queue_of(page);
+  mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_page_queue_remove(pq, page);
+  #endif
+}
+
+// (re)attach a huge page
+void _mi_heap_huge_page_attach(mi_heap_t* heap, mi_page_t* page) {
+  mi_assert_internal(mi_page_heap(page) == heap);
+  #if !MI_HUGE_PAGE_ABANDON
+  mi_page_queue_t* pq = mi_page_queue(heap, MI_HUGE_OBJ_SIZE_MAX);  // not block_size as that can be low if the page_alignment > 0
+  mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_page_queue_push(heap, pq, page);
+  #endif
+}
+
 // Free a page with no more free blocks
 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
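These two helpers are meant to bracket a huge-page remap: the page may end up in a relocated segment and must be unlinked from, and then re-linked into, the heap's huge page queue. A sketch of the intended calling pattern, condensed from the `mi_remap` hunk above; it assumes the internal declarations from this commit, and `remap_with_requeue` is a hypothetical name, not part of the change:

#include "mimalloc/internal.h"

static mi_block_t* remap_with_requeue(mi_heap_t* heap, mi_segment_t* segment,
                                      mi_page_t* page, mi_block_t* block, size_t padsize) {
  _mi_heap_huge_page_detach(heap, page);          // unlink from the huge page queue first
  mi_block_t* newblock = _mi_segment_huge_page_remap(segment, page, block, padsize, &heap->tld->segments);
  if (newblock == NULL) {
    _mi_heap_huge_page_attach(heap, page);        // remap failed: put the original page back
    return NULL;
  }
  mi_segment_t* newsegment = _mi_ptr_segment(newblock);            // the segment may have moved
  mi_page_t* newpage = _mi_segment_page_of(newsegment, newblock);
  _mi_heap_huge_page_attach(heap, newpage);       // re-link the (possibly relocated) page
  return newblock;
}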


@@ -629,7 +629,8 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 int _mi_prim_alloc_remappable(size_t size, size_t future_reserve, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
   MI_UNUSED(size); MI_UNUSED(future_reserve); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
-  return EINVAL;
+  // return EINVAL;
+  return _mi_prim_alloc(size, 1, true, true, is_pinned, is_zero, addr);
 }

 int _mi_prim_realloc_remappable(void* addr, size_t size, size_t newsize, bool* extend_is_zero, void** newaddr, void** remap_info ) {
@@ -639,5 +640,6 @@ int _mi_prim_realloc_remappable(void* addr, size_t size, size_t newsize, bool* e
 int _mi_prim_free_remappable(void* addr, size_t size, void* remap_info ) {
   MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(remap_info);
-  return EINVAL;
+  return _mi_prim_free(addr, size);
+  // return EINVAL;
 }
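The primitives above are still stubs that fall back to a plain OS allocation and free. For illustration only, a remap-capable backend on Linux could be built on mremap(2) with MREMAP_MAYMOVE; the sketch below is an assumption about one possible implementation with the same signature as the stub, not what this commit does:

#define _GNU_SOURCE              // for mremap
#include <sys/mman.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

// Hypothetical sketch of a remap-capable primitive (same parameters as _mi_prim_realloc_remappable).
static int linux_prim_realloc_remappable(void* addr, size_t size, size_t newsize,
                                         bool* extend_is_zero, void** newaddr, void** remap_info) {
  (void)remap_info;                                   // mremap needs no extra bookkeeping
  void* p = mremap(addr, size, newsize, MREMAP_MAYMOVE);
  if (p == MAP_FAILED) return errno;                  // e.g. ENOMEM
  *newaddr = p;
  *extend_is_zero = true;                             // pages added to an anonymous mapping are zero-filled
  return 0;
}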


@@ -1317,6 +1317,8 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
     newsegment->segment_size = newssize;
     newsegment->cookie = _mi_ptr_cookie(newsegment);
     mi_segment_protect(newsegment, true, tld->os);
+    _mi_segment_map_freed_at(segment);
+    _mi_segment_map_allocated_at(newsegment);
   }
   mi_block_t* newblock = (mi_block_t*)((uint8_t*)newsegment + block_ofs);
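The two added calls keep the global segment map in sync when a remap relocates the segment: the old address range is dropped and the new one registered, so pointer validation in `mi_remap` (via `mi_checked_ptr_segment`) and public queries that rely on the segment map, such as `mi_is_in_heap_region`, still recognize the moved block. A small sketch of that invariant, again assuming `mi_zalloc_remappable` as the allocation entry point:

#include <assert.h>
#include <mimalloc.h>

int main(void) {
  size_t size = 8 * 1024 * 1024;                 // large enough to use a huge, remappable segment
  void* p = mi_zalloc_remappable(size);
  if (p == NULL) return 1;
  void* q = mi_remap(p, 2 * size);               // the backing segment may be remapped to a new address
  if (q != NULL) {
    assert(mi_is_in_heap_region(q));             // holds because the segment map was updated
    mi_free(q);
  }
  return 0;
}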


@@ -229,6 +229,7 @@ static void test_remap(void) {
     p = mi_remap(p, size + inc);
     memset(p + size, i, inc);
     size += inc;
+    printf("%3d: increased to size %zu\n", i, size);
   }
   mi_free(p);
 }