wip: initial working windows remap

parent e4c914565d
commit 1246f46625

7 changed files with 287 additions and 53 deletions
@@ -116,7 +116,7 @@ size_t _mi_os_large_page_size(void);

 void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);

-void* _mi_os_alloc_remappable(size_t size, size_t future_reserve, size_t alignment, mi_memid_t* memid, mi_stats_t* stats);
+void* _mi_os_alloc_remappable(size_t size, size_t alignment, mi_memid_t* memid, mi_stats_t* stats);
 void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_stats_t* stats);

@@ -304,6 +304,11 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
   }
 }

+// Align upwards for a pointer
+static inline void* _mi_align_up_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_up((uintptr_t)p, alignment);
+}
+
 // Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
 static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
   mi_assert_internal(divider != 0);
@@ -72,15 +72,16 @@ int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bo

 // Allocate remappable memory that can be used with `_mi_prim_remap`.
 // Return `EINVAL` if this is not supported.
-// The returned memory is always committed.
+// The returned memory is always committed and aligned at `alignment`.
 // If `is_pinned` is `true` the memory cannot be decommitted or reset.
 // The `remap_info` argument can be used to store OS specific information that is passed to `_mi_prim_remap` and `_mi_prim_free_remappable`.
-int _mi_prim_alloc_remappable(size_t size, size_t future_reserve, bool* is_pinned, bool* is_zero, void** addr, void** remap_info );
+int _mi_prim_alloc_remappable(size_t size, size_t alignment, bool* is_pinned, bool* is_zero, void** addr, void** remap_info );

 // Remap remappable memory. Return `EINVAL` if this is not supported.
+// If remapped, the alignment should be preserved.
 // pre: `addr != NULL` and previously allocated using `_mi_prim_remap` or `_mi_prim_alloc_remappable`.
 //      `newsize > 0`, `size > 0`, `alignment > 0`, `allow_large != NULL`, `newaddr != NULL`.
-int _mi_prim_remap(void* addr, size_t size, size_t newsize, bool* extend_is_zero, void** newaddr, void** remap_info );
+int _mi_prim_remap(void* addr, size_t size, size_t newsize, size_t alignment, bool* extend_is_zero, void** newaddr, void** remap_info );

 // Free remappable memory. Return `EINVAL` if this is not supported.
 // pre: `addr != NULL` and previously allocated using `_mi_prim_remap` or `_mi_prim_alloc_remappable`.
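The two changed prototypes above move the alignment decision into the primitive layer: `_mi_prim_alloc_remappable` must now return memory aligned at `alignment`, and `_mi_prim_remap` must preserve that alignment when it grows or moves the block. As a hedged sketch (not code from this commit), a caller inside mimalloc would drive the primitives roughly as follows; the helper name `grow_remappable` and the error handling are illustrative only, and the sketch assumes mimalloc's internal headers:

// Illustrative sketch only: the intended call sequence for the remap primitives.
static void* grow_remappable(size_t oldsize, size_t newsize, size_t alignment) {
  bool  is_pinned  = false;
  bool  is_zero    = false;
  void* addr       = NULL;
  void* remap_info = NULL;
  if (_mi_prim_alloc_remappable(oldsize, alignment, &is_pinned, &is_zero, &addr, &remap_info) != 0) {
    return NULL;   // EINVAL: no remap support on this platform; callers fall back to plain allocation
  }
  // ... use the memory at `addr` ...
  bool  extend_is_zero = false;
  void* newaddr        = NULL;
  if (_mi_prim_remap(addr, oldsize, newsize, alignment, &extend_is_zero, &newaddr, &remap_info) != 0) {
    _mi_prim_free_remappable(addr, oldsize, remap_info);
    return NULL;   // callers would instead fall back to an allocate-and-copy path
  }
  return newaddr;  // first `oldsize` bytes preserved, alignment kept
}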
40  src/os.c
@@ -81,10 +81,6 @@ void _mi_os_init(void) {
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);

-static void* mi_align_up_ptr(void* p, size_t alignment) {
-  return (void*)_mi_align_up((uintptr_t)p, alignment);
-}
-
 static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
   mi_assert_internal(alignment != 0);
   uintptr_t mask = alignment - 1;
@@ -256,9 +252,10 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
 // aligns within an already allocated area; may modify `memid` with a new base and size.
 static void* mi_os_align_within(mi_memid_t* memid, size_t alignment, size_t size, mi_stats_t* stats)
 {
-  mi_assert_internal(alignment >= _mi_os_page_size());
+  mi_assert_internal(alignment <= 1 || (alignment >= _mi_os_page_size()));
   mi_assert_internal((size + alignment - 1) <= memid->mem.os.size);
-  void* p = mi_align_up_ptr(memid->mem.os.base, alignment);
+  memid->mem.os.alignment = alignment;
+  void* p = _mi_align_up_ptr(memid->mem.os.base, alignment);
   mi_assert_internal((uintptr_t)p + size <= (uintptr_t)memid->mem.os.base + memid->mem.os.size);
   if (!memid->is_pinned) {
     size_t pre_size = (uint8_t*)p - (uint8_t*)memid->mem.os.base;
@@ -278,7 +275,6 @@ static void* mi_os_align_within(mi_memid_t* memid, size_t alignment, size_t size
     if (post_size > 0) { mi_os_prim_free((uint8_t*)p + mid_size, post_size, memid->initially_committed, stats); }
     memid->mem.os.base = p;
     memid->mem.os.size = mid_size;
-    memid->mem.os.alignment = alignment;
   }
  }
  mi_assert_internal(_mi_is_aligned(p, alignment));
@@ -395,26 +391,26 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offse
   Remappable memory
 ----------------------------------------------------------- */

-void* _mi_os_alloc_remappable(size_t size, size_t future_reserve, size_t alignment, mi_memid_t* memid, mi_stats_t* stats) {
+void* _mi_os_alloc_remappable(size_t size, size_t alignment, mi_memid_t* memid, mi_stats_t* stats) {
   mi_assert_internal(size > 0);
   mi_assert_internal(memid != NULL);
   *memid = _mi_memid_none();
   if (alignment == 0) { alignment = 1; }
-  const size_t oversize = mi_os_get_alloc_size(size + alignment - 1);
-  if (future_reserve < oversize) { future_reserve = oversize; }
+  size = mi_os_get_alloc_size(size);
   bool os_is_pinned = true;
   bool os_is_zero = false;
   void* base = NULL;
   void* remap_info = NULL;
-  int err = _mi_prim_alloc_remappable(oversize, future_reserve, &os_is_pinned, &os_is_zero, &base, &remap_info);
+  int err = _mi_prim_alloc_remappable(size, alignment, &os_is_pinned, &os_is_zero, &base, &remap_info);
   if (err != 0 || base == NULL) {
     // fall back to regular allocation
     return _mi_os_alloc_aligned(size, alignment, true /* commit */, true /* allow_large */, memid, stats);
   }
-  *memid = _mi_memid_create_os(base, oversize, alignment, true, os_is_zero, os_is_pinned);
+  mi_assert_internal(_mi_is_aligned(base, alignment));
+  *memid = _mi_memid_create_os(base, size, alignment, true, os_is_zero, os_is_pinned);
   memid->memkind = MI_MEM_OS_REMAP;
   memid->mem.os.prim_info = remap_info;
-  return mi_os_align_within(memid,alignment,size,stats);
+  return base;
 }

 void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_stats_t* stats) {
@@ -426,7 +422,6 @@ void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_s
   if (!mi_memkind_is_os(memid->memkind)) return NULL;

   newsize = mi_os_get_alloc_size(newsize);
-  const size_t oversize = mi_os_get_alloc_size(newsize + memid->mem.os.alignment - 1);

   if (memid->memkind == MI_MEM_OS_REMAP) {
     bool extend_is_zero = false;
@@ -435,12 +430,15 @@ void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_s
       // if parts may have been decommitted, ensure it is committed now (or we get EFAULT from mremap)
       _mi_os_commit(memid->mem.os.base, memid->mem.os.size, NULL, stats);
     }
-    int err = _mi_prim_remap(memid->mem.os.base, memid->mem.os.size, oversize, &extend_is_zero, &newp, &memid->mem.os.prim_info);
+    const size_t alignment = memid->mem.os.alignment;
+    void* prim_info = memid->mem.os.prim_info;
+    int err = _mi_prim_remap(memid->mem.os.base, memid->mem.os.size, newsize, alignment, &extend_is_zero, &newp, &prim_info);
     if (err == 0 && newp != NULL) {
-      const size_t alignment = memid->mem.os.alignment;
-      *memid = _mi_memid_create_os(newp, oversize, 1, true /* committed */, false /* iszero */, false /* islarge */);
+      mi_assert_internal(_mi_is_aligned(newp, alignment));
+      *memid = _mi_memid_create_os(newp, newsize, alignment, true /* committed */, false /* iszero */, memid->is_pinned /* is pinned */);
+      memid->mem.os.prim_info = prim_info;
       memid->memkind = MI_MEM_OS_REMAP;
-      return mi_os_align_within(memid, alignment, newsize, stats);
+      return newp;
     }
     else {
       _mi_warning_message("failed to remap OS memory (error %d (0x%02x) at %p of %zu bytes to %zu bytes)\n", err, err, p, size, newsize);
@@ -449,7 +447,7 @@ void* _mi_os_remap(void* p, size_t size, size_t newsize, mi_memid_t* memid, mi_s

   // fall back to copy (but in remappable memory if possible)
   mi_memid_t newmemid = _mi_memid_none();
-  void* newp = _mi_os_alloc_remappable(newsize, 0, memid->mem.os.alignment, &newmemid, stats);
+  void* newp = _mi_os_alloc_remappable(newsize, memid->mem.os.alignment, &newmemid, stats);
   if (newp == NULL) {
     newp = _mi_os_alloc_aligned(newsize, memid->mem.os.alignment, true /* commit */, false /* allow_large */, &newmemid, stats);
     if (newp == NULL) return NULL;
@@ -475,10 +473,10 @@ static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size,
   if (size == 0 || addr == NULL) return NULL;

   // page align conservatively within the range
-  void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size())
+  void* start = (conservative ? _mi_align_up_ptr(addr, _mi_os_page_size())
     : mi_align_down_ptr(addr, _mi_os_page_size()));
   void* end = (conservative ? mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size())
-    : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
+    : _mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
   ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start;
   if (diff <= 0) return NULL;

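With the primitives now returning aligned memory, `_mi_os_alloc_remappable` and `_mi_os_remap` no longer over-allocate and trim through `mi_os_align_within` on the remap path; the same pre/mid/post trimming idea survives in the Unix helper below. The following standalone example (illustrative only, with made-up sizes and a power-of-two alignment) works through that split:

// Worked example of the pre/mid/post split used when an over-allocated block is
// trimmed to an aligned sub-range, as mi_os_align_within and mi_unix_alloc_aligned do.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uintptr_t base      = 0x7f0000001000;   // hypothetical OS allocation base
  const size_t    size      = 4 * 4096;         // requested size (16 KiB)
  const size_t    alignment = 4 * 4096;         // requested alignment (16 KiB, power of two)
  const size_t    oversize  = 9 * 4096;         // size + alignment - 1, rounded up to pages

  const uintptr_t p         = (base + alignment - 1) & ~(uintptr_t)(alignment - 1);  // align up
  const size_t    pre_size  = (size_t)(p - base);              // freed in front of the aligned block
  const size_t    mid_size  = size;                            // the aligned block that is kept
  const size_t    post_size = oversize - pre_size - mid_size;  // freed behind the aligned block

  printf("pre=%zu mid=%zu post=%zu aligned=0x%llx\n",
         pre_size, mid_size, post_size, (unsigned long long)p);   // pre=12288 mid=16384 post=8192
  return 0;
}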
@@ -873,25 +873,66 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 // Remappable memory
 //----------------------------------------------------------------

-int _mi_prim_alloc_remappable(size_t size, size_t future_reserve, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
+static int mi_unix_alloc_aligned(size_t size, size_t alignment, bool commit, bool* is_pinned, bool* is_zero, void** addr)
+{
+  mi_assert_internal(alignment <= 1 || (alignment >= _mi_os_page_size()));
+  *addr = NULL;
+  void* base = NULL;
+  int err = _mi_prim_alloc(size, alignment, commit, false /*allow large*/, is_pinned, is_zero, &base);
+  if (err != 0) return err;
+  if (_mi_is_aligned(base, alignment)) {
+    *addr = base;
+    return 0;
+  }
+  _mi_prim_free(base, size);
+  const size_t oversize = _mi_align_up(_mi_align_up(size, alignment), _mi_os_page_size());
+  err = _mi_prim_alloc(oversize, alignment, commit, false, is_pinned, is_zero, &base);
+  if (err != 0) return err;
+  mi_assert_internal(!(*is_pinned));
+  if (!(*is_pinned)) {
+    void* p = _mi_align_up_ptr(base, alignment);
+    *addr = p;
+    size_t pre_size  = (uint8_t*)p - (uint8_t*)base;
+    size_t mid_size  = _mi_align_up(size, _mi_os_page_size());
+    size_t post_size = oversize - pre_size - mid_size;
+    mi_assert_internal(pre_size < oversize && post_size < oversize && mid_size >= size);
+    if (pre_size > 0)  { _mi_prim_free(base, pre_size); }
+    if (post_size > 0) { _mi_prim_free((uint8_t*)p + mid_size, post_size); }
+    return 0;
+  }
+  else {
+    _mi_prim_free(base, oversize);
+    return EINVAL;
+  }
+}
+
+int _mi_prim_alloc_remappable(size_t size, size_t alignment, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
 #if !defined(MREMAP_MAYMOVE)
-  MI_UNUSED(size); MI_UNUSED(future_reserve); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
+  MI_UNUSED(size); MI_UNUSED(alignment); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
   return EINVAL;
 #else
-  MI_UNUSED(future_reserve);
   *remap_info = NULL;
-  return _mi_prim_alloc(size,1,true /* commit? */, true /* allow_large */, is_pinned /* is_large? */, is_zero, addr);
+  return mi_unix_alloc_aligned(size, alignment, true, is_pinned, is_zero, addr);
 #endif
 }

-int _mi_prim_remap(void* addr, size_t size, size_t newsize, bool* extend_is_zero, void** newaddr, void** remap_info ) {
-#if !defined(MREMAP_MAYMOVE)
-  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newsize); MI_UNUSED(extend_is_zero); MI_UNUSED(newaddr); MI_UNUSED(remap_info);
+int _mi_prim_remap(void* addr, size_t size, size_t newsize, size_t alignment, bool* extend_is_zero, void** newaddr, void** remap_info ) {
+#if !defined(MREMAP_MAYMOVE) || !defined(MREMAP_FIXED)
+  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newsize); MI_UNUSED(alignment); MI_UNUSED(extend_is_zero); MI_UNUSED(newaddr); MI_UNUSED(remap_info);
   return EINVAL;
 #else
   mi_assert_internal(*remap_info == NULL); MI_UNUSED(remap_info);
-  void* p = mremap(addr,size,newsize,MREMAP_MAYMOVE);
-  if (p == MAP_FAILED) { return errno; }
+  void* p = NULL;
+  bool is_pinned = false;
+  // don't commit yet, mremap will take over the virtual range completely due to MREMAP_FIXED
+  int err = mi_unix_alloc_aligned(size, alignment, false, &is_pinned, extend_is_zero, &p);
+  if (err != 0) return err;
+  void* res = mremap(addr, size, newsize, MREMAP_MAYMOVE | MREMAP_FIXED, p);
+  if (res == MAP_FAILED || res != p) {
+    err = errno;
+    _mi_prim_free(p, size);
+    return err;
+  }
   *extend_is_zero = true;
   *newaddr = p;
   return 0;
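The Unix path above first reserves a fresh aligned region without committing it, then lets mremap move the old pages onto it with MREMAP_MAYMOVE | MREMAP_FIXED, which preserves the contents and the requested alignment. A minimal standalone illustration of that mechanism (Linux-only, independent of mimalloc, error handling trimmed):

// Moving a mapping onto a reserved target address with mremap(MREMAP_MAYMOVE | MREMAP_FIXED).
#define _GNU_SOURCE
#include <sys/mman.h>
#include <string.h>
#include <stdio.h>

int main(void) {
  const size_t oldsize = 1 << 20, newsize = 4 << 20;

  // original mapping with some content
  char* old = mmap(NULL, oldsize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  // reserve a target range; PROT_NONE so it is only an address reservation
  char* target = mmap(NULL, newsize, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (old == MAP_FAILED || target == MAP_FAILED) { perror("mmap"); return 1; }
  memset(old, 'x', oldsize);

  // move the old pages onto the reserved range; contents are preserved and
  // the extension beyond `oldsize` is zero-initialized by the kernel
  char* p = mremap(old, oldsize, newsize, MREMAP_MAYMOVE | MREMAP_FIXED, target);
  if (p == MAP_FAILED) { perror("mremap"); return 1; }

  printf("moved to %p, first byte: %c\n", (void*)p, p[0]);  // prints 'x'
  munmap(p, newsize);
  return 0;
}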
@@ -67,7 +67,7 @@ static PGetNumaProcessorNode pGetNumaProcessorNode = NULL;
 // Enable large page support dynamically (if possible)
 //---------------------------------------------

-static bool win_enable_large_os_pages(size_t* large_page_size)
+static bool mi_win_enable_large_os_pages(size_t* large_page_size)
 {
   static bool large_initialized = false;
   if (large_initialized) return (_mi_os_large_page_size() > 0);
@@ -99,7 +99,7 @@ static bool win_enable_large_os_pages(size_t* large_page_size)
   }
   if (!ok) {
     if (err == 0) err = GetLastError();
-    _mi_warning_message("cannot enable large OS page support, error %lu\n", err);
+    _mi_warning_message("cannot acquire the lock memory privilege (needed for large OS page or remap support), error %lu\n", err);
   }
   return (ok!=0);
 }
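The reworded warning reflects that the same SeLockMemoryPrivilege ("Lock pages in memory") is needed both for MEM_LARGE_PAGES and for the AllocateUserPhysicalPages-based remap path added below. For reference, enabling it follows the standard Win32 token pattern; this is a sketch of that pattern, not the exact code in this file:

// Sketch of enabling SeLockMemoryPrivilege (link with advapi32).
#include <windows.h>
#include <stdbool.h>

static bool enable_lock_memory_privilege(void) {
  HANDLE token = NULL;
  if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) return false;
  TOKEN_PRIVILEGES tp;
  tp.PrivilegeCount = 1;
  tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  bool ok = LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid)
         && AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL)
         && GetLastError() == ERROR_SUCCESS;  // AdjustTokenPrivileges can "succeed" without granting
  CloseHandle(token);
  return ok;
}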
@@ -144,7 +144,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
     FreeLibrary(hDll);
   }
   if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
-    win_enable_large_os_pages(&config->large_page_size);
+    mi_win_enable_large_os_pages(&config->large_page_size);
   }
 }

@@ -308,7 +308,7 @@ static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int num
 {
   const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;

-  win_enable_large_os_pages(NULL);
+  if (!mi_win_enable_large_os_pages(NULL)) return NULL;

   MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
   // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
@@ -627,19 +627,208 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 // Remappable memory
 //----------------------------------------------------------------

-int _mi_prim_alloc_remappable(size_t size, size_t future_reserve, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
-  MI_UNUSED(size); MI_UNUSED(future_reserve); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
-  // return EINVAL;
-  return _mi_prim_alloc(size, 1, true, true, is_pinned, is_zero, addr);
-}
+typedef struct mi_win_remap_info_s {
+  void*     base;           // base of the virtual address space
+  size_t    base_pages;     // total virtual alloc'd pages from `base`
+  size_t    alignment;      // alignment: _mi_align_up_ptr(mapped_base,alignment) maps to the first physical page (= `mapped_base`)
+  size_t    page_count;     // allocated physical pages (with info in page_info)
+  size_t    page_reserved;  // available entries in page_info
+  ULONG_PTR page_info[1];
+} mi_win_remap_info_t;
+
+static void* mi_win_mapped_base(mi_win_remap_info_t* rinfo) {
+  return _mi_align_up_ptr(rinfo->base, rinfo->alignment);
+}
+
+static size_t mi_win_mapped_pages(mi_win_remap_info_t* rinfo) {
+  const size_t presize = (uint8_t*)mi_win_mapped_base(rinfo) - (uint8_t*)rinfo->base;
+  return (rinfo->base_pages - _mi_divide_up(presize, _mi_os_page_size()));
+}
+
+// allocate remap info
+static mi_win_remap_info_t* mi_win_alloc_remap_info(size_t page_count, size_t alignment) {
+  const size_t remap_info_size = _mi_align_up(sizeof(mi_win_remap_info_t) + (page_count * sizeof(ULONG_PTR)), _mi_os_page_size());
+  mi_win_remap_info_t* rinfo = NULL;
+  bool os_is_zero = false;
+  bool os_is_large = false;
+  int err = _mi_prim_alloc(remap_info_size, 1, true, false, &os_is_large, &os_is_zero, (void**)&rinfo);
+  if (err != 0) return NULL;
+  if (!os_is_zero) { _mi_memzero_aligned(rinfo, remap_info_size); }
+  rinfo->base = NULL;
+  rinfo->base_pages = 0;
+  rinfo->alignment = alignment;
+  rinfo->page_count = 0;
+  rinfo->page_reserved = (remap_info_size - offsetof(mi_win_remap_info_t, page_info)) / sizeof(ULONG_PTR);
+  return rinfo;
+}
+
+// reallocate remap info
+static mi_win_remap_info_t* mi_win_realloc_remap_info(mi_win_remap_info_t* rinfo, size_t newpage_count, size_t alignment) {
+  if (rinfo == NULL) {
+    return mi_win_alloc_remap_info(newpage_count, alignment);
+  }
+  else if (rinfo->page_reserved >= newpage_count) {
+    mi_assert_internal(alignment <= rinfo->alignment);
+    return rinfo; // still fits
+  }
+  else {
+    mi_assert_internal(alignment <= rinfo->alignment);
+    mi_win_remap_info_t* newrinfo = mi_win_alloc_remap_info(newpage_count, rinfo->alignment);
+    if (newrinfo == NULL) return NULL;
+    newrinfo->base = rinfo->base;
+    newrinfo->base_pages = rinfo->base_pages;
+    newrinfo->page_count = rinfo->page_count;
+    _mi_memcpy(newrinfo->page_info, rinfo->page_info, rinfo->page_count * sizeof(ULONG_PTR));
+    _mi_prim_free(rinfo, sizeof(mi_win_remap_info_t) + ((rinfo->page_reserved - 1) * sizeof(ULONG_PTR)));
+    return newrinfo;
+  }
+}
+
+// ensure enough physical pages are allocated
+static int mi_win_ensure_physical_pages(mi_win_remap_info_t** prinfo, size_t newpage_count, size_t alignment) {
+  // ensure meta data is large enough
+  mi_win_remap_info_t* rinfo = mi_win_realloc_remap_info(*prinfo, newpage_count, alignment);
+  if (rinfo == NULL) return ENOMEM;
+  *prinfo = rinfo;
+
+  // allocate physical pages
+  if (newpage_count > rinfo->page_count) {
+    mi_assert_internal(rinfo->page_reserved >= newpage_count);
+    const size_t extra_pages = newpage_count - rinfo->page_count;
+    ULONG_PTR req_pages = extra_pages;
+    if (!AllocateUserPhysicalPages(GetCurrentProcess(), &req_pages, &rinfo->page_info[rinfo->page_count])) {
+      return (int)GetLastError();
+    }
+    rinfo->page_count += req_pages;
+    if (req_pages < extra_pages) return ENOMEM;
+  }
+  return 0;
+}
+
+static int mi_win_remap_virtual_pages(mi_win_remap_info_t* rinfo, size_t newpage_count) {
+  mi_assert_internal(rinfo != NULL && rinfo->page_count >= newpage_count);
+  if (mi_win_mapped_pages(rinfo) >= newpage_count) return 0; // still good? should we shrink the mapping?
+
+  if (rinfo->base != NULL) {
+    printf("remap existing remap\n");
+  }
+
+  // we can now free the original virtual range ?
+  if (rinfo->base != NULL) {
+    if (!VirtualFree(rinfo->base, 0, MEM_RELEASE)) {
+      return (int)GetLastError();
+    }
+  }
+
+  // unmap the old range
+  /*if (rinfo->mapped_base != NULL) {
+    if (!MapUserPhysicalPages(rinfo->mapped_base, rinfo->mapped_pages, NULL)) {
+      return (int)GetLastError();
+    }
+  }*/
+
+  // allocate new virtual address range
+  const size_t newsize = _mi_align_up( newpage_count * _mi_os_page_size() + rinfo->alignment - 1, _mi_os_page_size());
+  void* newbase = VirtualAlloc(NULL, newsize, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);
+  if (newbase == NULL) {
+    // todo: remap old range?
+    return (int)GetLastError();
+  }
+  if (rinfo->base != newbase) {
+    printf("different base %p vs previous %p\n", newbase, rinfo->base);
+  }
+
+  // find the aligned point where we map to our physical pages
+  rinfo->base = newbase;
+  rinfo->base_pages = _mi_divide_up(newsize, _mi_os_page_size());
+  mi_assert_internal(mi_win_mapped_pages(rinfo) >= newpage_count);
+
+  // and remap it
+  if (!MapUserPhysicalPages(mi_win_mapped_base(rinfo), newpage_count, &rinfo->page_info[0])) {
+    // todo: remap old range?
+    int err = (int)GetLastError();
+    VirtualFree(newbase, 0, MEM_RELEASE);
+    return err;
+  }
+
+  return 0;
+}
+
+static int mi_win_remap(mi_win_remap_info_t** prinfo, size_t newsize, size_t alignment) {
+  size_t newpage_count = _mi_divide_up(newsize, _mi_os_page_size());
+  int err = mi_win_ensure_physical_pages(prinfo, newpage_count, alignment);
+  if (err != 0) return err;
+  err = mi_win_remap_virtual_pages(*prinfo, newpage_count);
+  if (err != 0) return err;
+  return 0;
+}
+
+static int mi_win_free_remap_info(mi_win_remap_info_t* rinfo) {
+  int err = 0;
+  if (rinfo == NULL) return 0;
+  if (rinfo->base != NULL) {
+    if (!VirtualFree(rinfo->base, 0, MEM_RELEASE)) {
+      err = (int)GetLastError();
+    }
+    rinfo->base = NULL;
+    rinfo->base_pages = 0;
+  }
+  if (rinfo->page_count > 0) {
+    size_t req_pages = rinfo->page_count;
+    if (!FreeUserPhysicalPages(GetCurrentProcess(), &req_pages, &rinfo->page_info[0])) {
+      err = (int)GetLastError();
+    }
+    rinfo->page_count = 0;
+  }
+  VirtualFree(rinfo, 0, MEM_RELEASE);
+  return err;
+}
+
+int _mi_prim_alloc_remappable(size_t size, size_t alignment, bool* is_pinned, bool* is_zero, void** addr, void** remap_info ) {
+  // MI_UNUSED(size); MI_UNUSED(alignment); MI_UNUSED(is_pinned); MI_UNUSED(is_zero); MI_UNUSED(addr); MI_UNUSED(remap_info);
+  // return EINVAL;
+  // return _mi_prim_alloc(size, 1, true, true, is_pinned, is_zero, addr);
+
+  if (!mi_win_enable_large_os_pages(NULL)) return EINVAL;
+
+  mi_win_remap_info_t* rinfo = NULL;
+  int err = mi_win_remap(&rinfo, size, alignment);
+  if (err != 0) {
+    if (rinfo != NULL) { mi_win_free_remap_info(rinfo); }
+    return err;
+  }
+  *is_pinned = true;
+  *is_zero = true;
+  *addr = mi_win_mapped_base(rinfo);
+  *remap_info = rinfo;
+  return 0;
+}

-int _mi_prim_remap(void* addr, size_t size, size_t newsize, bool* extend_is_zero, void** newaddr, void** remap_info ) {
+int _mi_prim_remap(void* addr, size_t size, size_t newsize, size_t alignment, bool* extend_is_zero, void** newaddr, void** remap_info ) {
   MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(newsize); MI_UNUSED(extend_is_zero); MI_UNUSED(newaddr); MI_UNUSED(remap_info);
-  return EINVAL;
+  // return EINVAL;
+  mi_win_remap_info_t** prinfo = (mi_win_remap_info_t**)remap_info;
+  mi_assert_internal(*prinfo != NULL);
+  mi_assert_internal(mi_win_mapped_base(*prinfo) == addr);
+  mi_assert_internal((*prinfo)->alignment == alignment);
+  int err = mi_win_remap(prinfo, newsize, alignment);
+  if (err != 0) return err;
+  *extend_is_zero = false;
+  *newaddr = mi_win_mapped_base(*prinfo);
+  return 0;
 }

 int _mi_prim_free_remappable(void* addr, size_t size, void* remap_info ) {
   MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(remap_info);
-  return _mi_prim_free(addr, size);
+  // return _mi_prim_free(addr, size);
   // return EINVAL;
+  mi_win_remap_info_t* rinfo = (mi_win_remap_info_t*)remap_info;
+  mi_assert_internal(rinfo != NULL);
+  mi_assert_internal(mi_win_mapped_base(rinfo) == addr);
+  return mi_win_free_remap_info(rinfo);
 }
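The Windows implementation above keeps the physical pages (AllocateUserPhysicalPages) separate from the virtual window (VirtualAlloc with MEM_RESERVE | MEM_PHYSICAL) and rebinds them with MapUserPhysicalPages, so a "remap" re-points the same physical pages at a larger aligned window instead of copying. A minimal standalone illustration of that AWE mechanism (independent of mimalloc, assumes the lock-memory privilege is already enabled, most error handling trimmed):

// Remapping by rebinding physical pages to a new virtual window (AWE).
#include <windows.h>
#include <stdio.h>

int main(void) {
  SYSTEM_INFO si; GetSystemInfo(&si);
  const size_t page_size = si.dwPageSize;
  ULONG_PTR page_count = 4;
  ULONG_PTR pfns[4];

  // allocate physical pages; their frame numbers go into pfns
  if (!AllocateUserPhysicalPages(GetCurrentProcess(), &page_count, pfns)) {
    printf("AllocateUserPhysicalPages failed: %lu (is SeLockMemoryPrivilege enabled?)\n", GetLastError());
    return 1;
  }

  // reserve a virtual window and map the physical pages into it
  void* v1 = VirtualAlloc(NULL, page_count * page_size, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);
  if (v1 == NULL || !MapUserPhysicalPages(v1, page_count, pfns)) return 1;
  ((char*)v1)[0] = 'x';

  // "remap": unmap from the first window, reserve a larger one, map the same pages again
  MapUserPhysicalPages(v1, page_count, NULL);      // unmap (NULL page array)
  VirtualFree(v1, 0, MEM_RELEASE);
  void* v2 = VirtualAlloc(NULL, 2 * page_count * page_size, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);
  if (v2 == NULL || !MapUserPhysicalPages(v2, page_count, pfns)) return 1;
  printf("still there: %c\n", ((char*)v2)[0]);     // prints 'x': contents travel with the pages

  FreeUserPhysicalPages(GetCurrentProcess(), &page_count, pfns);
  VirtualFree(v2, 0, MEM_RELEASE);
  return 0;
}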
@@ -525,7 +525,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme

   mi_segment_t* segment = NULL;
   if (page_alignment == MI_PAGE_ALIGN_REMAPPABLE) {
-    segment = (mi_segment_t*)_mi_os_alloc_remappable(segment_size, 0, alignment, &memid, tld_os->stats);
+    segment = (mi_segment_t*)_mi_os_alloc_remappable(segment_size, alignment, &memid, tld_os->stats);
   }
   else {
     segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, tld_os);
@@ -1303,14 +1303,14 @@ mi_block_t* _mi_segment_huge_page_remap(mi_segment_t* segment, mi_page_t* page,
   const size_t newssize = _mi_align_up(_mi_align_up(newsize, _mi_os_page_size()) + (mi_segment_size(segment) - bsize), MI_SEGMENT_SIZE);
   mi_memid_t memid = segment->memid;
   const ptrdiff_t block_ofs = (uint8_t*)block - (uint8_t*)segment;
-
+  const uintptr_t cookie = segment->cookie;
   mi_segment_protect(segment, false, tld->os);
   mi_segment_t* newsegment = (mi_segment_t*)_mi_os_remap(segment, mi_segment_size(segment), newssize, &memid, tld->stats);
   if (newsegment == NULL) {
     mi_segment_protect(segment, true, tld->os);
     return NULL;
   }
-
+  mi_assert_internal(cookie == newsegment->cookie);
   newsegment->memid = memid;
   newsegment->segment_size = newssize;
   newsegment->cookie = _mi_ptr_cookie(newsegment);
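Since `_mi_os_remap` may move the segment, the block pointer has to be re-derived from the offset captured above; presumably the remainder of `_mi_segment_huge_page_remap` (outside this hunk) rebases it along these lines (illustrative sketch only, not code from this commit):

// Illustrative only: recovering the block after a moving remap using block_ofs.
mi_block_t* newblock = (mi_block_t*)((uint8_t*)newsegment + block_ofs);
mi_assert_internal(_mi_ptr_segment(newblock) == newsegment);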
@@ -61,7 +61,7 @@ int main() {
   //mi_free(p2);

   //mi_collect(true);
-  //mi_stats_print(NULL);
+  mi_stats_print(NULL);

   // test_process_info();
