diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h
index 0cd685d8..925b9043 100644
--- a/include/mimalloc/prim.h
+++ b/include/mimalloc/prim.h
@@ -70,22 +70,23 @@ int _mi_prim_protect(void* addr, size_t size, bool protect);
 int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);
 
-// Allocate remappable memory that can be used with `_mi_prim_remap`.
+// Reserve a virtual address range (without committing memory) that can be used with `_mi_prim_remap_to`.
 // Return `EINVAL` if this is not supported.
-// The returned memory is always committed and aligned at `alignment`.
-// If `is_pinned` is `true` the memory cannot be decommitted or reset.
-// The `remap_info` argument can be used to store OS specific information that is passed to `_mi_prim_remap` and `_mi_prim_free_remappable`.
-int _mi_prim_alloc_remappable(size_t size, size_t alignment, bool* is_pinned, bool* is_zero, void** addr, void** remap_info );
+// If `is_pinned` is set to `true`, the memory cannot be decommitted or reset.
+// The `remap_info` argument can be used to store OS-specific information that is passed to `_mi_prim_remap_to` and `_mi_prim_remap_free`.
+int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** addr, void** remap_info );
 
-// Remap remappable memory. Return `EINVAL` if this is not supported.
-// If remapped, the alignment should be preserved.
-// pre: `addr != NULL` and previously allocated using `_mi_prim_remap` or `_mi_prim_alloc_remappable`.
-//      `newsize > 0`, `size > 0`, `alignment > 0`, `allow_large != NULL`, `newaddr != NULL`.
-int _mi_prim_remap(void* addr, size_t size, size_t newsize, size_t alignment, bool* extend_is_zero, void** newaddr, void** remap_info );
+// Remap remappable memory from `addr` to `newaddr` with the new size `newsize`. Return `EINVAL` if this is not supported.
+// Both `addr` (if not NULL) and `newaddr` lie inside ranges returned from `_mi_prim_remap_reserve`.
+// The `addr` can be NULL to allocate freshly. The `base` pointer is always `<= addr`, and if `base != addr`
+// then it was the pointer returned from `_mi_prim_remap_reserve`.
+// This is used to ensure we can remap _aligned_ addresses (`addr` and `newaddr`).
+// pre: `newsize > 0`, `size > 0` (if `addr != NULL`), `newaddr != NULL`, `extend_is_zero != NULL`, `remap_info != NULL`.
+int _mi_prim_remap_to(void* base, void* addr, size_t size, void* newaddr, size_t newsize, bool* extend_is_zero, void** remap_info, void** new_remap_info );
 
 // Free remappable memory. Return `EINVAL` if this is not supported.
-// pre: `addr != NULL` and previously allocated using `_mi_prim_remap` or `_mi_prim_alloc_remappable`.
-int _mi_prim_free_remappable(void* addr, size_t size, void* remap_info );
+// pre: `addr != NULL`
+int _mi_prim_remap_free(void* addr, size_t size, void* remap_info );
 
 // Return the current NUMA node
diff --git a/src/os.c b/src/os.c
index 8ac224e1..82834f91 100644
--- a/src/os.c
+++ b/src/os.c
@@ -171,7 +171,7 @@ static void mi_os_prim_free_remappable(void* addr, size_t size, bool still_commi
   MI_UNUSED(tld_stats);
   mi_assert_internal((size % _mi_os_page_size()) == 0);
   if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
-  int err = _mi_prim_free_remappable(addr, size, remap_info);
+  int err = _mi_prim_remap_free(addr, size, remap_info);
   if (err != 0) {
     _mi_warning_message("unable to free remappable OS memory (error: %d (0x%02x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
   }
@@ -392,73 +392,73 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offse
 ----------------------------------------------------------- */
 
 void* _mi_os_alloc_remappable(size_t size, size_t alignment, mi_memid_t* memid, mi_stats_t* stats) {
-  mi_assert_internal(size > 0);
-  mi_assert_internal(memid != NULL);
-  *memid = _mi_memid_none();
-  if (alignment == 0) { alignment = 1; }
-  size = mi_os_get_alloc_size(size);
-  bool os_is_pinned = true;
-  bool os_is_zero = false;
-  void* base = NULL;
-  void* remap_info = NULL;
-  int err = _mi_prim_alloc_remappable(size, alignment, &os_is_pinned, &os_is_zero, &base, &remap_info);
-  if (err != 0 || base == NULL) {
-    // fall back to regular allocation
-    return _mi_os_alloc_aligned(size, alignment, true /* commit */, true /* allow_large */, memid, stats);
+  if (alignment < _mi_os_page_size()) {
+    alignment = _mi_os_page_size();
   }
-  mi_assert_internal(_mi_is_aligned(base, alignment));
-  *memid = _mi_memid_create_os(base, size, alignment, true, os_is_zero, os_is_pinned);
-  memid->memkind = MI_MEM_OS_REMAP;
-  memid->mem.os.prim_info = remap_info;
-  return base;
+  *memid = _mi_memid_none();
+  memid->mem.os.alignment = alignment;
+  return _mi_os_remap(NULL, 0, size, memid, stats);
 }
 
 void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_stats_t* stats) {
-  mi_assert_internal(size > 0);
-  mi_assert_internal(newsize > 0);
-  mi_assert_internal(p != NULL && memid != NULL);
-  mi_assert_internal(mi_memkind_is_os(memid->memkind));
-  if (p == NULL) return NULL;
-  if (!mi_memkind_is_os(memid->memkind)) return NULL;
-
+  mi_assert_internal(memid != NULL);
+  mi_assert_internal((memid->memkind == MI_MEM_NONE && p == NULL && size == 0) || (memid->memkind == MI_MEM_OS_REMAP && p != NULL && size > 0));
   newsize = mi_os_get_alloc_size(newsize);
-
-  if (memid->memkind == MI_MEM_OS_REMAP) {
-    bool extend_is_zero = false;
-    void* newp = NULL;
-    if (!memid->is_pinned || !memid->initially_committed) {
-      // if parts may have been decommitted, ensure it is committed now (or we get EFAULT from mremap)
-      _mi_os_commit(memid->mem.os.base, memid->mem.os.size, NULL, stats);
+
+  // reserve virtual range
+  const size_t alignment = memid->mem.os.alignment;
+  mi_assert_internal(alignment >= _mi_os_page_size());
+  const size_t oversize = mi_os_get_alloc_size(newsize + alignment - 1);
+  bool os_is_pinned = false;
+  void* base = NULL;
+  void* remap_info = NULL;
+  int err = _mi_prim_remap_reserve(oversize, &os_is_pinned, &base, &remap_info);
+  if (err != 0) {
+    // not supported or failed; return NULL so the caller can fall back to a regular allocation
+    if (err != EINVAL) { // EINVAL means not supported
+      _mi_warning_message("failed to reserve remap OS memory (error %d (0x%02x) at %p of %zu bytes to %zu bytes)\n", err, err, p, size, newsize);
+    }
-    const size_t alignment = memid->mem.os.alignment;
-    void* prim_info = memid->mem.os.prim_info;
-    int err = _mi_prim_remap(memid->mem.os.base, memid->mem.os.size, newsize, alignment, &extend_is_zero, &newp, &prim_info);
-    if (err == 0 && newp != NULL) {
-      mi_assert_internal(_mi_is_aligned(newp, alignment));
-      *memid = _mi_memid_create_os(newp, newsize, alignment, true /* committed */, false /* iszero */, memid->is_pinned /* is pinned */);
-      memid->mem.os.prim_info = prim_info;
-      memid->memkind = MI_MEM_OS_REMAP;
-      return newp;
-    }
-    else {
-      _mi_warning_message("failed to remap OS memory (error %d (0x%02x) at %p of %zu bytes to %zu bytes)\n", err, err, p, size, newsize);
-    }
-  }
-
-  // fall back to copy (but in remappable memory if possible)
-  mi_memid_t newmemid = _mi_memid_none();
-  void* newp = _mi_os_alloc_remappable(newsize, memid->mem.os.alignment, &newmemid, stats);
-  if (newp == NULL) {
-    newp = _mi_os_alloc_aligned(newsize, memid->mem.os.alignment, true /* commit */, false /* allow_large */, &newmemid, stats);
-    if (newp == NULL) return NULL;
+    return NULL;
   }
-  size_t csize = (size > newsize ? newsize : size);
-  _mi_memcpy_aligned(newp, p, csize);
-  _mi_os_free(p, size, *memid, stats);
+  // create an aligned pointer within the reserved range
+  mi_memid_t newmemid = _mi_memid_create_os(base, oversize, 1, false /* commit */, false /* iszero */, os_is_pinned);
+  newmemid.memkind = MI_MEM_OS_REMAP;
+  newmemid.mem.os.prim_info = remap_info;
+  void* newp = mi_os_align_within(&newmemid, alignment, newsize, stats);
+
+  // now map the new virtual address range to physical memory
+  // (this also releases the old virtual memory range if there is no error)
+  bool extend_is_zero = false;
+  err = _mi_prim_remap_to(memid->mem.os.base, p, size, newp, newsize, &extend_is_zero, &memid->mem.os.prim_info, &newmemid.mem.os.prim_info);
+  if (err != 0) {
+    _mi_warning_message("failed to remap OS memory (error %d (0x%02x) at %p of %zu bytes to %zu bytes)\n", err, err, p, size, newsize);
+    _mi_prim_remap_free(newmemid.mem.os.base, newmemid.mem.os.size, newmemid.mem.os.prim_info);
+    return NULL;
+  }
+
+  newmemid.initially_committed = true;
+  if (p == NULL && extend_is_zero) {
+    newmemid.initially_zero = true;
+  }
   *memid = newmemid;
   return newp;
 }
+
+// // fall back to copy (but in remappable memory if possible)
+//  mi_memid_t newmemid = _mi_memid_none();
+//  void* newp = _mi_os_alloc_remappable(newsize, memid->mem.os.alignment, &newmemid, stats);
+//  if (newp == NULL) {
+//    newp = _mi_os_alloc_aligned(newsize, memid->mem.os.alignment, true /* commit */, false /* allow_large */, &newmemid, stats);
+//    if (newp == NULL) return NULL;
+//  }
+//
+//  size_t csize = (size > newsize ? newsize : size);
+//  _mi_memcpy_aligned(newp, p, csize);
+//  _mi_os_free(p, size, *memid, stats);
+//  *memid = newmemid;
+//  return newp;
+//}
 
 /* -----------------------------------------------------------
diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c
index 399ed2d1..b7330940 100644
--- a/src/prim/unix/prim.c
+++ b/src/prim/unix/prim.c
@@ -873,74 +873,39 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 // Remappable memory
 //----------------------------------------------------------------
 
-static int mi_unix_alloc_aligned(size_t size, size_t alignment, bool commit, bool* is_pinned, bool* is_zero, void** addr)
-{
-  mi_assert_internal(alignment <= 1 || (alignment >= _mi_os_page_size()));
-  *addr = NULL;
-  void* base = NULL;
-  int err = _mi_prim_alloc(size,alignment,commit,false /*allow large*/, is_pinned, is_zero, &base);
+int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
+  #if !defined(MREMAP_MAYMOVE) || !defined(MREMAP_FIXED)
+  // without mremap(MREMAP_FIXED) the remap primitives are not supported
+  MI_UNUSED(size); MI_UNUSED(is_pinned); MI_UNUSED(base); MI_UNUSED(remap_info);
+  return EINVAL;
+  #else
+  mi_assert_internal((size % _mi_os_page_size()) == 0);
+  *remap_info = NULL;
+  bool is_zero = false;
+  int err = _mi_prim_alloc(size, 1, false /* commit */, false /*allow large*/, is_pinned, &is_zero, base);
   if (err != 0) return err;
-  if (_mi_is_aligned(base,alignment)) {
-    *addr = base;
-    return 0;
-  }
-  _mi_prim_free(base,size);
-  const size_t oversize = _mi_align_up( _mi_align_up(size,alignment), _mi_os_page_size());
-  err = _mi_prim_alloc(oversize,alignment,commit,false,is_pinned,is_zero,&base);
-  if (err != 0) return err;
-  mi_assert_internal(!(*is_pinned));
-  if (!(*is_pinned)) {
-    void* p = _mi_align_up_ptr(base,alignment);
-    *addr = p;
-    size_t pre_size = (uint8_t*)p - (uint8_t*)base;
-    size_t mid_size = _mi_align_up(size,_mi_os_page_size());
-    size_t post_size = oversize - pre_size - mid_size;
-    mi_assert_internal(pre_size < oversize && post_size < oversize && mid_size >= size);
-    if (pre_size > 0)  { _mi_prim_free(base, pre_size); }
-    if (post_size > 0) { _mi_prim_free((uint8_t*)p + mid_size, post_size); }
-    return 0;
+  return 0;
+  #endif
+}
+
+int _mi_prim_remap_to(void* base, void* addr, size_t size, void* newaddr, size_t newsize, bool* extend_is_zero, void** remap_info, void** new_remap_info)
+{
+  mi_assert_internal(base <= addr);
+  mi_assert_internal((size % _mi_os_page_size()) == 0);
+  mi_assert_internal((newsize % _mi_os_page_size()) == 0);
+  *new_remap_info = NULL;
+  *remap_info = NULL;
+  *extend_is_zero = false;  // todo: can we assume zero'd?
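+  // Note: two cases (see `_mi_os_remap`): a NULL `addr` means a fresh allocation, so we
+  // only commit pages in the reserved range; otherwise mremap with MREMAP_FIXED moves
+  // the already mapped pages onto the reserved `newaddr` range (unmapping `addr`).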
+  int err = 0;
+  if (addr == NULL) {
+    err = _mi_prim_commit(newaddr, newsize, extend_is_zero);
   }
   else {
-    _mi_prim_free(base,oversize);
-    return EINVAL;
-  }
-}
-
-int _mi_prim_alloc_remappable(size_t size, size_t alignment, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
-  #if !defined(MREMAP_MAYMOVE)
-  MI_UNUSED(size); MI_UNUSED(alignment); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
-  return EINVAL;
-  #else
-  *remap_info = NULL;
-  return mi_unix_alloc_aligned(size, alignment, true, is_pinned, is_zero, addr);
-  #endif
-}
-
-int _mi_prim_remap(void* addr, size_t size, size_t newsize, size_t alignment, bool* extend_is_zero, void** newaddr, void** remap_info ) {
-  #if !defined(MREMAP_MAYMOVE) || !defined(MREMAP_FIXED)
-  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newsize); MI_UNUSED(alignment); MI_UNUSED(extend_is_zero); MI_UNUSED(newaddr); MI_UNUSED(remap_info);
-  return EINVAL;
-  #else
-  mi_assert_internal(*remap_info == NULL); MI_UNUSED(remap_info);
-  void* p = NULL;
-  bool is_pinned = false;
-  // don't commit yet, mremap will take over the virtual rang completely due to MREMAP_FIXED
-  int err = mi_unix_alloc_aligned(size, alignment, false, &is_pinned, extend_is_zero, &p);
-  if (err != 0) return err;
-  void* res = mremap(addr, size, newsize, MREMAP_MAYMOVE | MREMAP_FIXED, p);
-  if (res == MAP_FAILED || res != p) {
-    err = errno;
-    _mi_prim_free(p,size);
-    return err;
+    #if defined(MREMAP_MAYMOVE) && defined(MREMAP_FIXED)
+    void* p = mremap(addr, size, newsize, (MREMAP_MAYMOVE | MREMAP_FIXED), newaddr);
+    if (p == MAP_FAILED || newaddr != p) {
+      p = NULL;
+      err = errno;
     }
-  *extend_is_zero = true;
-  *newaddr = p;
-  return 0;
-  #endif
+    #else
+    err = EINVAL;  // mremap with a fixed target address is not available on this platform
+    #endif
+  }
+  return err;
 }
 
-int _mi_prim_free_remappable(void* addr, size_t size, void* remap_info ) {
+int _mi_prim_remap_free(void* base, size_t size, void* remap_info) {
   MI_UNUSED(remap_info);
-  mi_assert_internal(remap_info == NULL);
-  return _mi_prim_free(addr,size);
+  mi_assert_internal((size % _mi_os_page_size()) == 0);
+  return _mi_prim_free(base,size);
 }
diff --git a/src/prim/wasi/prim.c b/src/prim/wasi/prim.c
index 208203ff..177ca78f 100644
--- a/src/prim/wasi/prim.c
+++ b/src/prim/wasi/prim.c
@@ -279,17 +279,17 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 // Remappable memory
 //----------------------------------------------------------------
 
-int _mi_prim_alloc_remappable(size_t size, size_t future_reserve, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
-  MI_UNUSED(size); MI_UNUSED(future_reserve); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
+int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
+  MI_UNUSED(size); MI_UNUSED(is_pinned); MI_UNUSED(base); MI_UNUSED(remap_info);
   return EINVAL;
 }
 
-int _mi_prim_remap(void* addr, size_t size, size_t newsize, bool* extend_is_zero, void** newaddr, void** remap_info ) {
-  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newsize); MI_UNUSED(extend_is_zero); MI_UNUSED(newaddr); MI_UNUSED(remap_info);
+int _mi_prim_remap_to(void* base, void* addr, size_t size, void* newaddr, size_t newsize, bool* extend_is_zero, void** remap_info, void** new_remap_info) {
+  MI_UNUSED(base); MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newaddr); MI_UNUSED(newsize); MI_UNUSED(extend_is_zero); MI_UNUSED(remap_info); MI_UNUSED(new_remap_info);
   return EINVAL;
 }
 
-int _mi_prim_free_remappable(void* addr, size_t size, void* remap_info ) {
-  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(remap_info);
+int _mi_prim_remap_free(void* base, size_t size, void* remap_info) {
+  MI_UNUSED(base); MI_UNUSED(size); MI_UNUSED(remap_info);
   return EINVAL;
 }
diff --git a/src/prim/windows/prim.c b/src/prim/windows/prim.c
index 359ad1b3..47796c14 100644
--- a/src/prim/windows/prim.c
+++ b/src/prim/windows/prim.c
@@ -178,7 +178,7 @@ int _mi_prim_free(void* addr, size_t size ) {
 // VirtualAlloc
 //---------------------------------------------
 
-static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+static void* mi_win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
   #if (MI_INTPTR_SIZE >= 8)
   // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
   if (addr == NULL) {
@@ -207,7 +207,7 @@ static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignmen
   return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
 }
 
-static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
+static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
   mi_assert_internal(!(large_only && !allow_large));
   static _Atomic(size_t) large_page_try_ok; // = 0;
   void* p = NULL;
@@ -223,7 +223,7 @@ static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DW
   else {
     // large OS pages must always reserve and commit.
     *is_large = true;
-    p = win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
+    p = mi_win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
     if (large_only) return p;
     // fall back to non-large page allocation on error (`p == NULL`).
     if (p == NULL) {
@@ -234,7 +234,7 @@
   // Fall back to regular page allocation
   if (p == NULL) {
     *is_large = ((flags&MEM_LARGE_PAGES) != 0);
-    p = win_virtual_alloc_prim(addr, size, try_alignment, flags);
+    p = mi_win_virtual_alloc_prim(addr, size, try_alignment, flags);
   }
   //if (p == NULL) { _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); }
   return p;
@@ -247,7 +247,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la
   *is_zero = true;
   int flags = MEM_RESERVE;
   if (commit) { flags |= MEM_COMMIT; }
-  *addr = win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
+  *addr = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
   return (*addr != NULL ? 0 : (int)GetLastError());
 }
 
@@ -627,28 +627,16 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 // Remappable memory
 //----------------------------------------------------------------
 
+// tracks allocated physical memory backing a virtual address range
 typedef struct mi_win_remap_info_s {
-  void*     base;           // base of the virtual address space
-  size_t    base_pages;     // total virtual alloc'd pages from `base`
-  size_t    alignment;      // alignment: _mi_align_up_ptr(mapped_base,alignment) maps to the first physical page (= `mapped_base`)
   size_t    page_count;     // allocated physical pages (with info in page_info)
   size_t    page_reserved;  // available entries in page_info
   ULONG_PTR page_info[1];
 } mi_win_remap_info_t;
 
-static void* mi_win_mapped_base(mi_win_remap_info_t* rinfo) {
-  return _mi_align_up_ptr(rinfo->base, rinfo->alignment);
-}
-
-static size_t mi_win_mapped_pages(mi_win_remap_info_t* rinfo) {
-  const size_t presize = (uint8_t*)mi_win_mapped_base(rinfo) - (uint8_t*)rinfo->base;
-  return (rinfo->base_pages - _mi_divide_up(presize, _mi_os_page_size()));
-}
-
-
 // allocate remap info
-static mi_win_remap_info_t* mi_win_alloc_remap_info(size_t page_count, size_t alignment) {
+static mi_win_remap_info_t* mi_win_alloc_remap_info(size_t page_count) {
   const size_t remap_info_size = _mi_align_up(sizeof(mi_win_remap_info_t) + (page_count * sizeof(ULONG_PTR)), _mi_os_page_size());
   mi_win_remap_info_t* rinfo = NULL;
   bool os_is_zero = false;
@@ -656,29 +644,22 @@ static mi_win_remap_info_t* mi_win_alloc_remap_info(size_t page_count, size_t al
   int err = _mi_prim_alloc(remap_info_size, 1, true, false, &os_is_large, &os_is_zero, (void**)&rinfo);
   if (err != 0) return NULL;
   if (!os_is_zero) { _mi_memzero_aligned(rinfo, remap_info_size); }
-  rinfo->base = NULL;
-  rinfo->base_pages = 0;
-  rinfo->alignment = alignment;
   rinfo->page_count = 0;
   rinfo->page_reserved = (remap_info_size - offsetof(mi_win_remap_info_t, page_info)) / sizeof(ULONG_PTR);
   return rinfo;
 }
 
 // reallocate remap info
-static mi_win_remap_info_t* mi_win_realloc_remap_info(mi_win_remap_info_t* rinfo, size_t newpage_count, size_t alignment) {
+static mi_win_remap_info_t* mi_win_realloc_remap_info(mi_win_remap_info_t* rinfo, size_t newpage_count) {
   if (rinfo == NULL) {
-    return mi_win_alloc_remap_info(newpage_count,alignment);
+    return mi_win_alloc_remap_info(newpage_count);
   }
   else if (rinfo->page_reserved >= newpage_count) {
-    mi_assert_internal(alignment <= rinfo->alignment);
     return rinfo; // still fits
   }
   else {
-    mi_assert_internal(alignment <= rinfo->alignment);
-    mi_win_remap_info_t* newrinfo = mi_win_alloc_remap_info(newpage_count,rinfo->alignment);
+    mi_win_remap_info_t* newrinfo = mi_win_alloc_remap_info(newpage_count);
     if (newrinfo == NULL) return NULL;
-    newrinfo->base = rinfo->base;
-    newrinfo->base_pages = rinfo->base_pages;
     newrinfo->page_count = rinfo->page_count;
     _mi_memcpy(newrinfo->page_info, rinfo->page_info, rinfo->page_count * sizeof(ULONG_PTR));
     _mi_prim_free(rinfo, sizeof(mi_win_remap_info_t) + ((rinfo->page_reserved - 1) * sizeof(ULONG_PTR)));
@@ -686,14 +667,32 @@
   }
 }
 
+// free meta info and the managed physical pages
+static int mi_win_free_remap_info(mi_win_remap_info_t* rinfo) {
+  int err = 0;
+  if (rinfo == NULL) return 0;
+  if (rinfo->page_count > 0) {
+    size_t req_pages = rinfo->page_count;
+    if (!FreeUserPhysicalPages(GetCurrentProcess(), &req_pages, &rinfo->page_info[0])) {
+      err = (int)GetLastError();
+    }
+    rinfo->page_count = 0;
+  }
+  if (!VirtualFree(rinfo, 0, MEM_RELEASE)) {
+    err = (int)GetLastError();
+  }
+  return err;
+}
+
 // ensure enough physical pages are allocated
-static int mi_win_ensure_physical_pages(mi_win_remap_info_t** prinfo, size_t newpage_count, size_t alignment) {
+static int mi_win_ensure_physical_pages(mi_win_remap_info_t** prinfo, size_t newpage_count)
+{
   // ensure meta data is large enough
-  mi_win_remap_info_t* rinfo = mi_win_realloc_remap_info(*prinfo, newpage_count, alignment);
+  mi_win_remap_info_t* rinfo = mi_win_realloc_remap_info(*prinfo, newpage_count);
   if (rinfo == NULL) return ENOMEM;
   *prinfo = rinfo;
 
-  // allocate physical pages
+  // allocate physical pages; todo: allow shrinking?
   if (newpage_count > rinfo->page_count) {
     mi_assert_internal(rinfo->page_reserved >= newpage_count);
     const size_t extra_pages = newpage_count - rinfo->page_count;
@@ -702,133 +701,101 @@ static int mi_win_ensure_physical_pages(mi_win_remap_info_t** prinfo, size_t new
       return (int)GetLastError();
     }
     rinfo->page_count += req_pages;
-    if (req_pages < extra_pages) return ENOMEM;
+    if (req_pages < extra_pages) {
+      return ENOMEM;
+    }
   }
   return 0;
 }
 
-
-static int mi_win_remap_virtual_pages(mi_win_remap_info_t* rinfo, size_t newpage_count) {
+// Remap physical memory to another virtual address range
+static int mi_win_remap_virtual_pages(mi_win_remap_info_t* rinfo, void* oldaddr, size_t oldpage_count, void* newaddr, size_t newpage_count) {
   mi_assert_internal(rinfo != NULL && rinfo->page_count >= newpage_count);
-  if (mi_win_mapped_pages(rinfo) >= newpage_count) return 0; // still good? should we shrink the mapping?
-
-
-  if (rinfo->base != NULL) {
-    printf("remap existing remap\n");
-  }
-
-  // we can now free the original virtual range ?
-  if (rinfo->base != NULL) {
-    if (!VirtualFree(rinfo->base, 0, MEM_RELEASE)) {
-      return (int)GetLastError();
-    }
-  }
-
+  // unmap the old range
-  /*if (rinfo->mapped_base != NULL) {
-    if (!MapUserPhysicalPages(rinfo->mapped_base, rinfo->mapped_pages, NULL)) {
+  if (oldaddr != NULL) {
+    if (!MapUserPhysicalPages(oldaddr, oldpage_count, NULL)) {
       return (int)GetLastError();
     }
-  }*/
-
-  // allocate new virtual address range
-  const size_t newsize = _mi_align_up( newpage_count * _mi_os_page_size() + rinfo->alignment - 1, _mi_os_page_size());
-  void* newbase = VirtualAlloc(NULL, newsize, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);
-  if (newbase == NULL) {
-    // todo: remap old range?
-    return (int)GetLastError();
-  }
-  if (rinfo->base != newbase) {
-    printf("different base %p vs previous %p\n", newbase, rinfo->base);
-  }
-  // find the aligned point where we map to our physical pages
-  rinfo->base = newbase;
-  rinfo->base_pages = _mi_divide_up(newsize, _mi_os_page_size());
-  mi_assert_internal(mi_win_mapped_pages(rinfo) >= newpage_count);
-
-  // and remap it
-  if (!MapUserPhysicalPages(mi_win_mapped_base(rinfo), newpage_count, &rinfo->page_info[0])) {
-    // todo: remap old range?
+  }
+  // and remap the virtual addresses at `newaddr`
+  if (!MapUserPhysicalPages(newaddr, newpage_count, &rinfo->page_info[0])) {
+    // if this fails that would be very bad as we already unmapped the old range..
+    // todo: try to remap old range?
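+    // note: `newaddr` is an aligned pointer inside the reservation and may not be the
+    // base of the VirtualAlloc'd range, in which case this MEM_RELEASE fails; the caller
+    // (`_mi_os_remap`) releases the actual base via `_mi_prim_remap_free` on error.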
     int err = (int)GetLastError();
-    VirtualFree(newbase, 0, MEM_RELEASE);
+    VirtualFree(newaddr, 0, MEM_RELEASE);
     return err;
   }
   return 0;
 }
 
-static int mi_win_remap(mi_win_remap_info_t** prinfo, size_t newsize, size_t alignment) {
-  size_t newpage_count = _mi_divide_up(newsize, _mi_os_page_size());
-  int err = mi_win_ensure_physical_pages(prinfo, newpage_count, alignment);
-  if (err != 0) return err;
-  err = mi_win_remap_virtual_pages(*prinfo, newpage_count);
-  if (err != 0) return err;
+// Reserve a virtual address range to be mapped to physical memory later
+int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
+  if (!mi_win_enable_large_os_pages(NULL)) return EINVAL;
+  mi_assert_internal((size % _mi_os_page_size()) == 0);
+  size = _mi_align_up(size, _mi_os_page_size());
+  mi_win_remap_info_t** prinfo = (mi_win_remap_info_t**)remap_info;
+  *prinfo = NULL;
+  *is_pinned = true;
+  *base = NULL;
+  void* p = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);
+  if (p == NULL) { return (int)GetLastError(); }
+  *base = p;
   return 0;
 }
 
-static int mi_win_free_remap_info(mi_win_remap_info_t* rinfo) {
+// Remap to a new virtual address range
+int _mi_prim_remap_to(void* base, void* addr, size_t size, void* newaddr, size_t newsize, bool* extend_is_zero, void** remap_info, void** new_remap_info)
+{
+  *extend_is_zero = false; // todo: can we assume zeros?
+  mi_win_remap_info_t** prinfo    = (mi_win_remap_info_t**)remap_info;
+  mi_win_remap_info_t** pnewrinfo = (mi_win_remap_info_t**)new_remap_info;
+  mi_assert_internal(base <= addr);
+  mi_assert_internal(*pnewrinfo == NULL);
+  mi_assert_internal((size % _mi_os_page_size()) == 0);
+  mi_assert_internal((newsize % _mi_os_page_size()) == 0);
+  size = _mi_align_up(size, _mi_os_page_size());
+  newsize = _mi_align_up(newsize, _mi_os_page_size());
+
+  size_t oldpage_count = _mi_divide_up(size, _mi_os_page_size());
+  size_t newpage_count = _mi_divide_up(newsize, _mi_os_page_size());
+
+  int err = mi_win_ensure_physical_pages(prinfo, newpage_count);
+  if (err != 0) { return err; }
+
+  err = mi_win_remap_virtual_pages(*prinfo, addr, oldpage_count, newaddr, newpage_count);
+  if (err != 0) { return err; }
+
+  // release old virtual range
+  if (base != NULL) {
+    if (!VirtualFree(base, 0, MEM_RELEASE)) {
+      err = (int)GetLastError();
+      _mi_warning_message("unable to release virtual address range on remap (error %d (0x%02x), address: %p)\n", err, err, base);
+    }
+  }
+
+  *pnewrinfo = *prinfo;
+  *prinfo = NULL;
+  return 0;
+}
+
+// Free remappable memory
+int _mi_prim_remap_free(void* base, size_t size, void* remap_info) {
+  MI_UNUSED(size);
   int err = 0;
-  if (rinfo == NULL) return 0;
-  if (rinfo->base != NULL) {
-    if (!VirtualFree(rinfo->base, 0, MEM_RELEASE)) {
+  // release virtual address range
+  if (base != NULL) {
+    if (!VirtualFree(base, 0, MEM_RELEASE)) {
       err = (int)GetLastError();
     }
-    rinfo->base = NULL;
-    rinfo->base_pages = 0;
   }
-  if (rinfo->page_count > 0) {
-    size_t req_pages = rinfo->page_count;
-    if (!FreeUserPhysicalPages(GetCurrentProcess(), &req_pages, &rinfo->page_info[0])) {
-      err = (int)GetLastError();
-    }
-    rinfo->page_count = 0;
+  // release backing physical pages
+  mi_win_remap_info_t* rinfo = (mi_win_remap_info_t*)remap_info;
+  if (rinfo != NULL) {
+    err = mi_win_free_remap_info(rinfo);
   }
-  VirtualFree(rinfo, 0, MEM_RELEASE);
   return err;
 }
 
-int _mi_prim_alloc_remappable(size_t size, size_t alignment, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
-  // MI_UNUSED(size); MI_UNUSED(alignment); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
-  // return EINVAL;
-  // return _mi_prim_alloc(size, 1, true, true, is_pinned, is_zero, addr);
-
-  if (!mi_win_enable_large_os_pages(NULL)) return EINVAL;
-
-  mi_win_remap_info_t* rinfo = NULL;
-  int err = mi_win_remap(&rinfo, size, alignment);
-  if (err != 0) {
-    if (rinfo != NULL) { mi_win_free_remap_info(rinfo); }
-    return err;
-  }
-  *is_pinned = true;
-  *is_zero = true;
-  *addr = mi_win_mapped_base(rinfo);
-  *remap_info = rinfo;
-  return 0;
-}
-
-int _mi_prim_remap(void* addr, size_t size, size_t newsize, size_t alignment, bool* extend_is_zero, void** newaddr, void** remap_info ) {
-  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newsize); MI_UNUSED(extend_is_zero); MI_UNUSED(newaddr); MI_UNUSED(remap_info);
-  // return EINVAL;
-  mi_win_remap_info_t** prinfo = (mi_win_remap_info_t**)remap_info;
-  mi_assert_internal(*prinfo != NULL);
-  mi_assert_internal(mi_win_mapped_base(*prinfo) == addr);
-  mi_assert_internal((*prinfo)->alignment == alignment);
-  int err = mi_win_remap(prinfo, newsize, alignment);
-  if (err != 0) return err;
-  *extend_is_zero = false;
-  *newaddr = mi_win_mapped_base(*prinfo);
-  return 0;
-}
-
-int _mi_prim_free_remappable(void* addr, size_t size, void* remap_info ) {
-  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(remap_info);
-  // return _mi_prim_free(addr, size);
-  // return EINVAL;
-  mi_win_remap_info_t* rinfo = (mi_win_remap_info_t*)remap_info;
-  mi_assert_internal(rinfo != NULL);
-  mi_assert_internal(mi_win_mapped_base(rinfo) == addr);
-  return mi_win_free_remap_info(rinfo);
-}
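
For reference, the lifecycle of the new primitives as `_mi_os_remap` wires them up is sketched below. This is a minimal illustration only, not part of the patch: it assumes a platform where the primitives are supported (none return `EINVAL`), assumes page-aligned sizes (which `mi_os_get_alloc_size` guarantees in the real caller), and skips the aligned-pointer handling done via `mi_os_align_within`. The `remap_grow` helper and its `old_info` parameter are hypothetical names used here for illustration.

#include <stddef.h>
#include <stdbool.h>
#include "mimalloc/prim.h"

// Sketch: grow a remappable allocation `p` (whose reservation starts at `base`,
// with OS-specific bookkeeping in `*old_info`) from `size` to `newsize` bytes.
static void* remap_grow(void* base, void* p, size_t size, size_t newsize, void** old_info) {
  bool  is_pinned = false;
  void* newbase   = NULL;
  void* new_info  = NULL;
  // 1. reserve an uncommitted virtual address range for the grown allocation
  if (_mi_prim_remap_reserve(newsize, &is_pinned, &newbase, &new_info) != 0) {
    return NULL;  // unsupported (EINVAL) or out of address space
  }
  // 2. move the pages backing `p` into the reservation; passing `p == NULL`
  //    (with size 0) would allocate freshly instead of remapping
  bool extend_is_zero = false;
  if (_mi_prim_remap_to(base, p, size, newbase, newsize, &extend_is_zero, old_info, &new_info) != 0) {
    _mi_prim_remap_free(newbase, newsize, new_info);  // hand the reservation back
    return NULL;
  }
  // 3. on success the old range is already released; `new_info` now owns the
  //    mapping until it is freed with `_mi_prim_remap_free(newbase, newsize, new_info)`
  *old_info = new_info;
  return newbase;
}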