mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)

commit page on demand

parent 9a7c0d443a
commit ba68810333
7 changed files with 69 additions and 25 deletions
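In short, this change adds a `mi_option_page_commit_on_demand` option (default off, see the `options.c` hunk below): when enabled, a fresh arena page is no longer committed in full up front; only enough to cover the page header and the first block, rounded up to `MI_PAGE_MIN_COMMIT_SIZE`, is committed, and the remainder is committed lazily as the page's free list is extended. Singleton (huge) pages are still always committed in full. A minimal usage sketch, not part of the diff; it assumes the existing `mi_option_enable` API and mimalloc's usual `MIMALLOC_*` environment-variable mapping:

/* A minimal usage sketch (not part of this commit). It assumes the public
   mi_option_enable API and mimalloc's usual MIMALLOC_* environment mapping,
   i.e. MIMALLOC_PAGE_COMMIT_ON_DEMAND=1 should presumably have the same effect. */
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // enable before the first allocation so freshly allocated arena pages use it
  mi_option_enable(mi_option_page_commit_on_demand);

  void* p = mi_malloc(32);   // the backing page is committed on demand
  printf("allocated at %p\n", p);
  mi_free(p);
  return 0;
}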
@@ -190,7 +190,7 @@
 <SDLCheck>true</SDLCheck>
 <ConformanceMode>Default</ConformanceMode>
 <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-<PreprocessorDefinitions>MI_DEBUG=3;MI_GUARDED=0;MI_SECURE=4;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+<PreprocessorDefinitions>MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);</PreprocessorDefinitions>
 <CompileAs>CompileAsCpp</CompileAs>
 <SupportJustMyCode>false</SupportJustMyCode>
 <LanguageStandard>stdcpp20</LanguageStandard>
@@ -400,6 +400,7 @@ typedef enum mi_option_e {
   mi_option_max_page_candidates,      // max candidate pages to consider for allocation (=4)
   mi_option_max_vabits,               // max user space virtual address bits to consider (=48)
   mi_option_pagemap_commit,           // commit the full pagemap (to always catch invalid pointer uses) (=0)
+  mi_option_page_commit_on_demand,    // commit page memory on-demand
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
@@ -139,6 +139,8 @@ terms of the MIT license. A copy of the license can be found in the file
 // We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
 #define MI_MAX_ALLOC_SIZE         PTRDIFF_MAX
 
+#define MI_PAGE_MIN_COMMIT_SIZE   MI_ARENA_SLICE_SIZE
+
 // ------------------------------------------------------
 // Arena's are large reserved areas of memory allocated from
 // the OS that are managed by mimalloc to efficiently
@@ -301,6 +303,7 @@ typedef struct mi_page_s {
   mi_heap_t*         heap;            // the heap owning this page (or NULL for abandoned pages)
   struct mi_page_s*  next;            // next page owned by the heap with the same `block_size`
   struct mi_page_s*  prev;            // previous page owned by the heap with the same `block_size`
+  size_t             page_committed;  // committed size relative to `page_start`.
   mi_memid_t         memid;           // provenance of the page memory
 } mi_page_t;
 
@@ -324,7 +327,7 @@ typedef struct mi_page_s {
 // (Except for large pages since huge objects are allocated in 4MiB chunks)
 #define MI_SMALL_MAX_OBJ_SIZE    ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)   // < 8 KiB
 #define MI_MEDIUM_MAX_OBJ_SIZE   ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)  // < 64 KiB
-#define MI_LARGE_MAX_OBJ_SIZE    ((MI_LARGE_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8)   // < 512 KiB
+#define MI_LARGE_MAX_OBJ_SIZE    ((MI_LARGE_PAGE_SIZE-MI_PAGE_INFO_SIZE)/4)   // < 512 KiB
 #define MI_LARGE_MAX_OBJ_WSIZE   (MI_LARGE_MAX_OBJ_SIZE/MI_SIZE_SIZE)
 
src/arena.c
@@ -562,7 +562,7 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_
 
     _mi_page_free_collect(page, false);  // update `used` count
     mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
-    mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
+    mi_assert_internal(mi_option_is_enabled(mi_option_page_commit_on_demand) || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
     mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count));
     mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
     mi_assert_internal(_mi_ptr_page(page)==page);
@@ -578,16 +578,16 @@ static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_
 
 // Allocate a fresh page
 static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment,
-                                             mi_arena_t* req_arena, size_t tseq)
+                                             mi_arena_t* req_arena, size_t tseq, bool commit)
 {
   const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page
-  const bool commit = true;
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
   const size_t page_alignment = MI_ARENA_SLICE_ALIGN;
 
   // try to allocate from free space in arena's
   mi_memid_t memid = _mi_memid_none();
   mi_page_t* page = NULL;
+  const size_t alloc_size = mi_size_of_slices(slice_count);
   if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) &&  // allowed to allocate from arena's?
       !os_align &&                                              // not large alignment
       slice_count <= MI_ARENA_MAX_OBJ_SLICES)                   // and not too large
@@ -604,10 +604,10 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
     if (os_align) {
       // note: slice_count already includes the page
       mi_assert_internal(slice_count >= mi_slice_count_of_size(block_size) + mi_slice_count_of_size(page_alignment));
-      page = (mi_page_t*)mi_arena_os_alloc_aligned(mi_size_of_slices(slice_count), block_alignment, page_alignment /* align offset */, commit, allow_large, req_arena, &memid);
+      page = (mi_page_t*)mi_arena_os_alloc_aligned(alloc_size, block_alignment, page_alignment /* align offset */, commit, allow_large, req_arena, &memid);
     }
     else {
-      page = (mi_page_t*)mi_arena_os_alloc_aligned(mi_size_of_slices(slice_count), page_alignment, 0 /* align offset */, commit, allow_large, req_arena, &memid);
+      page = (mi_page_t*)mi_arena_os_alloc_aligned(alloc_size, page_alignment, 0 /* align offset */, commit, allow_large, req_arena, &memid);
     }
   }
 
@@ -617,25 +617,25 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
 
   // guard page at the end of mimalloc page?
   #if MI_SECURE < 2
-  const size_t page_noguard_size = mi_size_of_slices(slice_count);
+  const size_t page_noguard_size = alloc_size;
   #else
-  mi_assert(mi_size_of_slices(slice_count) > _mi_os_secure_guard_page_size());
-  const size_t page_noguard_size = mi_size_of_slices(slice_count) - _mi_os_secure_guard_page_size();
+  mi_assert(alloc_size > _mi_os_secure_guard_page_size());
+  const size_t page_noguard_size = alloc_size - _mi_os_secure_guard_page_size();
   if (memid.initially_committed) {
     _mi_os_secure_guard_page_set_at((uint8_t*)page + page_noguard_size, memid.is_pinned);
   }
   #endif
 
   // claimed free slices: initialize the page partly
-  if (!memid.initially_zero) {
+  if (!memid.initially_zero && memid.initially_committed) {
     mi_track_mem_undefined(page, slice_count * MI_ARENA_SLICE_SIZE);
     _mi_memzero_aligned(page, sizeof(*page));
   }
-  else {
+  else if (memid.initially_committed) {
     mi_track_mem_defined(page, slice_count * MI_ARENA_SLICE_SIZE);
   }
   #if MI_DEBUG > 1
-  if (memid.initially_zero) {
+  if (memid.initially_zero && memid.initially_committed) {
     if (!mi_mem_is_zero(page, page_noguard_size)) {
       _mi_error_message(EFAULT, "internal error: page memory was not zero initialized.\n");
       memid.initially_zero = false;
@@ -644,6 +644,7 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
   }
   #endif
   mi_assert(MI_PAGE_INFO_SIZE >= mi_page_info_size());
 
   size_t block_start;
   #if MI_GUARDED
   // in a guarded build, we align pages with blocks a multiple of an OS page size, to the OS page size
@@ -668,9 +669,24 @@ static mi_page_t* mi_arenas_page_alloc_fresh(mi_subproc_t* subproc, size_t slice
   }
   const size_t reserved = (os_align ? 1 : (page_noguard_size - block_start) / block_size);
   mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
 
+  // commit first block?
+  size_t commit_size = 0;
+  if (!memid.initially_committed) {
+    commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
+    if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
+    bool is_zero;
+    _mi_os_commit(page, commit_size, &is_zero);
+    if (!memid.initially_zero && !is_zero) {
+      _mi_memzero_aligned(page, commit_size);
+    }
+  }
+
+  // initialize
   page->reserved = (uint16_t)reserved;
   page->page_start = (uint8_t*)page + block_start;
   page->block_size = block_size;
+  page->page_committed = (commit_size == 0 ? 0 : commit_size - block_start);  mi_assert(commit_size == 0 || commit_size >= block_start + block_size);
   page->memid = memid;
   page->free_is_zero = memid.initially_zero;
   if (block_size > 0 && _mi_is_power_of_two(block_size)) {
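For intuition, the initial commit sizing above can be mirrored with plain arithmetic. The sketch below is illustrative only; `align_up` stands in for `_mi_align_up`, and all constants are hypothetical stand-ins (in the real code `MI_PAGE_MIN_COMMIT_SIZE` equals `MI_ARENA_SLICE_SIZE`):

/* Illustrative only: mirrors the initial commit-size rounding above with
   made-up sizes; align_up stands in for _mi_align_up. */
#include <stdio.h>
#include <stddef.h>

static size_t align_up(size_t n, size_t align) {
  return ((n + align - 1) / align) * align;
}

int main(void) {
  const size_t min_commit   = 64 * 1024;   // hypothetical MI_PAGE_MIN_COMMIT_SIZE (one slice)
  const size_t block_start  = 128;         // hypothetical page info size
  const size_t block_size   = 8 * 1024;    // hypothetical block size
  const size_t noguard_size = 512 * 1024;  // hypothetical usable page size

  size_t commit_size = align_up(block_start + block_size, min_commit);  // 64 KiB
  if (commit_size > noguard_size) commit_size = noguard_size;

  // page->page_committed is stored relative to page_start:
  printf("commit %zu bytes up front, page_committed = %zu\n",
         commit_size, commit_size - block_start);
  return 0;
}

So a page whose usable area is far larger than one slice only pays for the first `MI_PAGE_MIN_COMMIT_SIZE` chunk at allocation time; the rest stays uncommitted until the free list actually grows into it (see the `mi_page_extend_free` change in src/page.c below).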
@@ -704,7 +720,8 @@ static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_cou
   }
 
   // 2. find a free block, potentially allocating a new arena
-  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq);
+  page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, 1, req_arena, tld->thread_seq,
+                                    !mi_option_is_enabled(mi_option_page_commit_on_demand));
   if (page != NULL) {
     mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
     _mi_page_init(heap, page);
@@ -726,7 +743,7 @@ static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_s
   const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, _mi_os_secure_guard_page_size()) + _mi_os_secure_guard_page_size());
   #endif
 
-  mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq);
+  mi_page_t* page = mi_arenas_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq, true /* commit singletons always */);
   if (page == NULL) return NULL;
 
   mi_assert(page->reserved == 1);
@@ -779,7 +796,7 @@ void _mi_arenas_page_free(mi_page_t* page) {
   mi_arena_t* arena = mi_page_arena(page, &slice_index, &slice_count);
 
   mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
-  mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
+  mi_assert_internal(mi_option_is_enabled(mi_option_page_commit_on_demand) || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
   mi_assert_internal(mi_bitmap_is_clearN(arena->pages_abandoned[bin], slice_index, 1));
   mi_assert_internal(mi_bitmap_is_setN(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index, 1));
   // note: we cannot check for `!mi_page_is_abandoned_and_mapped` since that may
@@ -799,7 +816,16 @@ void _mi_arenas_page_free(mi_page_t* page) {
   // unregister page
   _mi_page_map_unregister(page);
   if (page->memid.memkind == MI_MEM_ARENA) {
-    mi_bitmap_clear(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index);
+    mi_arena_t* arena = page->memid.mem.arena.arena;
+    mi_bitmap_clear(arena->pages, page->memid.mem.arena.slice_index);
+    if (page->page_committed > 0) {
+      // if committed on-demand, set the commit bits to account commit properly
+      const size_t total_committed = (page->page_start - (uint8_t*)page) + page->page_committed;
+      mi_assert_internal(mi_memid_size(page->memid) >= total_committed);
+      const size_t total_slices = _mi_divide_up(total_committed, MI_ARENA_SLICE_SIZE);
+      mi_assert_internal(page->memid.mem.arena.slice_count >= total_slices);
+      mi_bitmap_setN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices, NULL);
+    }
   }
   _mi_arenas_free(page, mi_memid_size(page->memid), page->memid);
 }
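The accounting step above converts the page's byte-level `page_committed` back into whole-slice commit bits so the arena knows how much of the returned range is really committed. A small illustrative calculation (made-up sizes; `divide_up` stands in for `_mi_divide_up`):

/* Illustrative only: how a partially committed page is folded back into the
   arena's slices_committed bitmap when it is freed (one slice = 64 KiB here). */
#include <stdio.h>
#include <stddef.h>

static size_t divide_up(size_t n, size_t d) { return (n + d - 1) / d; }

int main(void) {
  const size_t slice_size     = 64 * 1024;   // hypothetical MI_ARENA_SLICE_SIZE
  const size_t page_start_ofs = 128;         // page_start - (uint8_t*)page
  const size_t page_committed = 192 * 1024;  // committed bytes relative to page_start

  const size_t total_committed = page_start_ofs + page_committed;
  const size_t total_slices    = divide_up(total_committed, slice_size);
  printf("mark %zu slices as committed before freeing the page\n", total_slices);  // -> 4
  return 0;
}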
@@ -824,7 +850,7 @@ void _mi_arenas_page_abandon(mi_page_t* page) {
   mi_arena_t* arena = mi_page_arena(page, &slice_index, &slice_count);
   mi_assert_internal(!mi_page_is_singleton(page));
   mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
-  mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
+  mi_assert_internal(mi_option_is_enabled(mi_option_page_commit_on_demand) || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
   mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count));
 
   mi_page_set_abandoned_mapped(page);
@@ -889,7 +915,7 @@ void _mi_arenas_page_unabandon(mi_page_t* page) {
   mi_arena_t* arena = mi_page_arena(page, &slice_index, &slice_count);
 
   mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
-  mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
+  mi_assert_internal(mi_option_is_enabled(mi_option_page_commit_on_demand) || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
 
   // this busy waits until a concurrent reader (from alloc_abandoned) is done
   mi_bitmap_clear_once_set(arena->pages_abandoned[bin], slice_index);
@@ -1430,7 +1456,7 @@ static long mi_arena_purge_delay(void) {
 // returns if the memory is no longer committed (versus reset which keeps the commit)
 static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_count) {
   mi_assert_internal(!arena->memid.is_pinned);
-  mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
+  mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));  // we own it?
 
   const size_t size = mi_size_of_slices(slice_count);
   void* const p = mi_arena_slice_start(arena, slice_index);
@@ -1455,7 +1481,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_
   const long delay = mi_arena_purge_delay();
   if (arena->memid.is_pinned || delay < 0 || _mi_preloading()) return;  // is purging allowed at all?
 
-  mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));
+  mi_assert_internal(mi_bitmap_is_clearN(arena->slices_free, slice_index, slice_count));  // we still own it?
   if (delay == 0) {
     // purge directly
     mi_arena_purge(arena, slice_index, slice_count);
@@ -35,6 +35,7 @@ const mi_page_t _mi_page_empty = {
   #endif
   NULL,                 // xheap
   NULL, NULL,           // next, prev
+  MI_ARENA_SLICE_SIZE,  // page_committed
   MI_MEMID_STATIC       // memid
 };
 
@@ -144,7 +144,7 @@ static mi_option_desc_t options[_mi_option_last] =
   #else
   { 1,   UNINIT, MI_OPTION(eager_commit_delay) },              // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
   #endif
-  { 500, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) },  // purge delay in milli-seconds
+  { 250, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) },  // purge delay in milli-seconds
   { 0,   UNINIT, MI_OPTION(use_numa_nodes) },                  // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0,   UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100, UNINIT, MI_OPTION(os_tag) },                          // only apple specific for now but might serve more or less related purpose
@@ -175,6 +175,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(max_vabits) },
   { MI_DEFAULT_PAGEMAP_COMMIT,
        UNINIT, MI_OPTION(pagemap_commit) },                    // commit the full pagemap upfront?
+  { 0, UNINIT, MI_OPTION(page_commit_on_demand) },
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
src/page.c
@@ -606,6 +606,18 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
   mi_assert_internal(extend < (1UL<<16));
 
+  // commit on demand?
+  if (page->page_committed > 0) {
+    const size_t needed_size = (page->capacity + extend)*bsize;
+    if (needed_size > page->page_committed) {
+      size_t commit_size = _mi_align_up(needed_size, MI_PAGE_MIN_COMMIT_SIZE);
+      const size_t max_size = page->reserved * bsize;
+      if (commit_size > max_size) { commit_size = max_size; }
+      mi_assert(commit_size > page->page_committed);
+      _mi_os_commit(mi_page_start(page) + page->page_committed, commit_size - page->page_committed, NULL);
+    }
+  }
+
   // and append the extend the free list
   if (extend < MI_MIN_SLICES || MI_SECURE<3) { //!mi_option_is_enabled(mi_option_secure)) {
     mi_page_free_list_extend(page, bsize, extend, &heap->tld->stats );
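To see when this extension path has to commit more memory: a `page_committed` of 0 means the page was fully committed up front and the check is skipped entirely; otherwise the needed capacity is compared against the committed prefix and, if it overflows, committed up to the next `MI_PAGE_MIN_COMMIT_SIZE` boundary (clamped to the reserved size). An illustrative calculation with made-up sizes, where `align_up` stands in for `_mi_align_up`:

/* Illustrative only: the incremental commit done by the extend path above,
   with hypothetical sizes. */
#include <stdio.h>
#include <stddef.h>

static size_t align_up(size_t n, size_t align) {
  return ((n + align - 1) / align) * align;
}

int main(void) {
  const size_t min_commit     = 64 * 1024;        // hypothetical MI_PAGE_MIN_COMMIT_SIZE
  const size_t bsize          = 1024;             // block size
  const size_t reserved       = 480;              // blocks reserved in the page
  const size_t page_committed = 64 * 1024 - 128;  // committed so far, relative to page_start

  const size_t capacity = 60, extend = 8;         // current capacity, requested extension
  const size_t needed_size = (capacity + extend) * bsize;        // 68 KiB
  if (needed_size > page_committed) {
    size_t commit_size = align_up(needed_size, min_commit);      // 128 KiB
    const size_t max_size = reserved * bsize;                    // 480 KiB
    if (commit_size > max_size) commit_size = max_size;
    printf("commit %zu more bytes\n", commit_size - page_committed);
  }
  return 0;
}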
@@ -635,8 +647,8 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
   #endif
   #if MI_DEBUG>2
   if (page->memid.initially_zero) {
-    mi_track_mem_defined(page->page_start, page_size);
-    mi_assert_expensive(mi_mem_is_zero(page_start, page_size));
+    mi_track_mem_defined(page->page_start, (page->page_committed == 0 ? page_size : page->page_committed));
+    mi_assert_expensive(mi_mem_is_zero(page_start, (page->page_committed == 0 ? page_size : page->page_committed)));
   }
   #endif
 