check for remap support

This commit is contained in:
daanx 2023-04-30 13:41:19 -07:00
parent 10fbe6cf0f
commit 48ff5c178e
5 changed files with 77 additions and 44 deletions

View file

@ -28,6 +28,7 @@ typedef struct mi_os_mem_config_s {
bool has_overcommit; // can we reserve more memory than can be actually committed?
bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
bool has_remap; // able to remap memory to different virtual addresses?
} mi_os_mem_config_t;
// Initialize

View file

@ -22,7 +22,8 @@ static mi_os_mem_config_t mi_os_mem_config = {
4096, // allocation granularity
true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
false, // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
true, // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
false // support virtual memory remapping?
};
bool _mi_os_has_overcommit(void) {
@ -400,14 +401,34 @@ void* _mi_os_alloc_remappable(size_t size, size_t alignment, mi_memid_t* memid,
return _mi_os_remap(NULL, 0, size, memid, stats);
}
// Fallback path for `_mi_os_remap`: allocate a fresh aligned OS block, copy the
// old contents over, and release the old block.
// On success `*memid` is updated to describe the new block and the new pointer
// is returned. On allocation failure NULL is returned and `p`/`*memid` are left
// untouched, so the caller still owns the old block.
static void* mi_os_remap_copy(void* p, size_t size, size_t newsize, size_t alignment, mi_memid_t* memid, mi_stats_t* stats) {
  mi_memid_t newmemid = _mi_memid_none();
  void* newp = _mi_os_alloc_aligned(newsize, alignment, true /* commit */, false /* allow_large */, &newmemid, stats);
  if (newp == NULL) return NULL;
  // copy at most the smaller of the two sizes (handles both grow and shrink)
  const size_t csize = (size > newsize ? newsize : size);
  if (csize > 0) {
    _mi_memcpy_aligned(newp, p, csize);
  }
  // always free the old block once the new one is in place; the original code
  // freed it only when `csize > 0`, which leaked the old mapping when a valid
  // `p` was remapped to a zero new size
  if (p != NULL) {
    _mi_os_free(p, size, *memid, stats);
  }
  *memid = newmemid;
  return newp;
}
void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_stats_t* stats) {
mi_assert_internal(memid != NULL);
mi_assert_internal((memid->memkind == MI_MEM_NONE && p == NULL && size == 0) || (memid->memkind == MI_MEM_OS_REMAP && p != NULL && size > 0));
newsize = mi_os_get_alloc_size(newsize);
const size_t alignment = memid->mem.os.alignment;
mi_assert_internal(alignment >= _mi_os_page_size());
// supported?
if (!mi_os_mem_config.has_remap || (p!=NULL && memid->memkind != MI_MEM_OS_REMAP)) {
return mi_os_remap_copy(p, size, newsize, alignment, memid, stats);
}
// reserve virtual range
const size_t alignment = memid->mem.os.alignment;
mi_assert_internal(alignment >= _mi_os_page_size());
const size_t oversize = mi_os_get_alloc_size(newsize + alignment - 1);
bool os_is_pinned = false;
void* base = NULL;
@ -415,10 +436,13 @@ void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_s
int err = _mi_prim_remap_reserve(oversize, &os_is_pinned, &base, &remap_info);
if (err != 0) {
// fall back to regular allocation
if (err != EINVAL) { // EINVAL means not supported
if (err == EINVAL) { // EINVAL means not supported
mi_os_mem_config.has_remap = false;
}
else {
_mi_warning_message("failed to reserve remap OS memory (error %d (0x%02x) at %p of %zu bytes to %zu bytes)\n", err, err, p, size, newsize);
}
return NULL;
return mi_os_remap_copy(p, size, newsize, alignment, memid, stats);
}
// create an aligned pointer within
@ -434,7 +458,7 @@ void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_s
if (err != 0) {
_mi_warning_message("failed to remap OS memory (error %d (0x%02x) at %p of %zu bytes to %zu bytes)\n", err, err, p, size, newsize);
_mi_prim_remap_free(newmemid.mem.os.base, newmemid.mem.os.size, newmemid.mem.os.prim_info);
return NULL;
return mi_os_remap_copy(p, size, newsize, alignment, memid, stats);
}
newmemid.initially_committed = true;
@ -444,21 +468,6 @@ void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_s
*memid = newmemid;
return newp;
}
// // fall back to copy (but in remappable memory if possible)
// mi_memid_t newmemid = _mi_memid_none();
// void* newp = _mi_os_alloc_remappable(newsize, memid->mem.os.alignment, &newmemid, stats);
// if (newp == NULL) {
// newp = _mi_os_alloc_aligned(newsize, memid->mem.os.alignment, true /* commit */, false /* allow_large */, &newmemid, stats);
// if (newp == NULL) return NULL;
// }
//
// size_t csize = (size > newsize ? newsize : size);
// _mi_memcpy_aligned(newp, p, csize);
// _mi_os_free(p, size, *memid, stats);
// *memid = newmemid;
// return newp;
//}
/* -----------------------------------------------------------

View file

@ -139,6 +139,9 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
config->has_overcommit = unix_detect_overcommit();
config->must_free_whole = false; // mmap can free in parts
config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE)
#if defined(MREMAP_MAYMOVE) && defined(MREMAP_FIXED)
config->has_remap = true;
#endif
}
@ -850,7 +853,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
}
}
#else
#else // no pthreads
void _mi_prim_thread_init_auto_done(void) {
// nothing
@ -867,12 +870,11 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
#endif
//----------------------------------------------------------------
// Remappable memory
//----------------------------------------------------------------
#if defined(MREMAP_MAYMOVE) && defined(MREMAP_FIXED)
int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
mi_assert_internal((size%_mi_os_page_size()) == 0);
*remap_info = NULL;
@ -909,3 +911,21 @@ int _mi_prim_remap_free(void* base, size_t size, void* remap_info) {
mi_assert_internal((size % _mi_os_page_size()) == 0);
return _mi_prim_free(base,size);
}
#else // no mremap
// Stub: reserving a remappable range is unsupported without mremap.
int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
  MI_UNUSED(size);
  MI_UNUSED(is_pinned);
  MI_UNUSED(base);
  MI_UNUSED(remap_info);
  // EINVAL tells `_mi_os_remap` that remapping is not supported so it can
  // permanently disable it and fall back to alloc-and-copy
  return EINVAL;
}
// Stub: moving/growing a mapping in place is unsupported without mremap.
int _mi_prim_remap_to(void* base, void* addr, size_t size, void* newaddr, size_t newsize, bool* extend_is_zero, void** remap_info, void** new_remap_info) {
  MI_UNUSED(base);
  MI_UNUSED(addr);
  MI_UNUSED(size);
  MI_UNUSED(newaddr);
  MI_UNUSED(newsize);
  MI_UNUSED(extend_is_zero);
  MI_UNUSED(remap_info);
  MI_UNUSED(new_remap_info);
  return EINVAL;  // "not supported" sentinel, same convention as the other stubs
}
// Stub: nothing to free for remappable memory without mremap support.
int _mi_prim_remap_free(void* base, size_t size, void* remap_info) {
  MI_UNUSED(base);
  MI_UNUSED(size);
  MI_UNUSED(remap_info);
  return EINVAL;  // "not supported" sentinel, same convention as the other stubs
}
#endif

View file

@ -22,6 +22,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
config->has_overcommit = false;
config->must_free_whole = true;
config->has_virtual_reserve = false;
config->has_remap = false;
}
//---------------------------------------------

View file

@ -64,19 +64,20 @@ static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL;
static PGetNumaProcessorNode pGetNumaProcessorNode = NULL;
//---------------------------------------------
// Enable large page support dynamically (if possible)
// Get lock memory permission dynamically (if possible)
// To use large pages on Windows, or remappable memory, we first need access permission
// Set "Lock pages in memory" permission in the group policy editor
// <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
//---------------------------------------------
static bool mi_win_enable_large_os_pages(size_t* large_page_size)
static bool mi_win_get_lock_memory_privilege(void)
{
static bool large_initialized = false;
if (large_initialized) return (_mi_os_large_page_size() > 0);
large_initialized = true;
static bool lock_memory_initialized = false;
static int lock_memory_err = 0;
if (lock_memory_initialized) return (lock_memory_err == 0);
lock_memory_initialized = true;
// Try to see if large OS pages are supported
// To use large pages on Windows, we first need access permission
// Set "Lock pages in memory" permission in the group policy editor
// <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
// Try to see if we have permission can lock memory
unsigned long err = 0;
HANDLE token = NULL;
BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
@ -89,17 +90,15 @@ static bool mi_win_enable_large_os_pages(size_t* large_page_size)
ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
if (ok) {
err = GetLastError();
ok = (err == ERROR_SUCCESS);
if (ok && large_page_size != NULL) {
*large_page_size = GetLargePageMinimum();
}
ok = (err == ERROR_SUCCESS);
}
}
CloseHandle(token);
}
if (!ok) {
if (err == 0) err = GetLastError();
_mi_warning_message("cannot acquire the lock memory privilege (needed for large OS page or remap support), error %lu\n", err);
if (err == 0) { err = GetLastError(); }
lock_memory_err = (int)err;
_mi_warning_message("cannot acquire the lock memory privilege (needed for large OS page or remap support), error %lu (0x%04lx)\n", err, err);
}
return (ok!=0);
}
@ -143,9 +142,12 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode");
FreeLibrary(hDll);
}
if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
mi_win_enable_large_os_pages(&config->large_page_size);
}
// if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
if (mi_win_get_lock_memory_privilege()) {
config->large_page_size = GetLargePageMinimum();
config->has_remap = true;
};
// }
}
@ -308,7 +310,7 @@ static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int num
{
const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;
if (!mi_win_enable_large_os_pages(NULL)) return NULL;
if (!mi_win_get_lock_memory_privilege()) return NULL;
MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
// on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
@ -733,7 +735,7 @@ static int mi_win_remap_virtual_pages(mi_win_remap_info_t* rinfo, void* oldaddr,
// Reserve a virtual address range to be mapped to physical memory later
int _mi_prim_remap_reserve(size_t size, bool* is_pinned, void** base, void** remap_info) {
if (!mi_win_enable_large_os_pages(NULL)) return EINVAL;
if (!mi_win_get_lock_memory_privilege()) return EINVAL;
mi_assert_internal((size % _mi_os_page_size()) == 0);
size = _mi_align_up(size, _mi_os_page_size());
mi_win_remap_info_t** prinfo = (mi_win_remap_info_t**)remap_info;