mirror of https://github.com/microsoft/mimalloc.git
synced 2025-05-05 23:19:31 +03:00

track pinned memory separately from large os pages

parent c86459afef
commit 14b8d27386

8 changed files with 83 additions and 57 deletions
@@ -64,7 +64,7 @@ void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free th
 size_t _mi_os_good_alloc_size(size_t size);
 
 // memory.c
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* id, mi_os_tld_t* tld);
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* id, mi_os_tld_t* tld);
 void _mi_mem_free(void* p, size_t size, size_t id, bool fully_committed, bool any_reset, mi_os_tld_t* tld);
 
 bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld);
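Note: the extra `is_pinned` out-parameter reports, independently of `large`, whether the returned memory can never be decommitted, reset, or protected. A minimal caller sketch (illustrative only; `os_tld` is an assumed in-scope `mi_os_tld_t*` and the sizes are arbitrary):

    bool commit    = true;    // request committed memory
    bool large     = false;   // do not require large OS pages
    bool is_pinned = false;   // set by the allocator on success
    bool is_zero   = false;
    size_t id      = 0;
    void* p = _mi_mem_alloc_aligned(64*1024, 64*1024, &commit, &large,
                                    &is_pinned, &is_zero, &id, os_tld);
    if (p != NULL && !is_pinned) {
      _mi_mem_reset(p, 64*1024, os_tld);  // reset is only legal on unpinned memory
    }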
@@ -261,7 +261,7 @@ typedef enum mi_page_kind_e {
 typedef struct mi_segment_s {
   // memory fields
   size_t memid;           // id for the os-level memory manager
-  bool   mem_is_fixed;    // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
+  bool   mem_is_pinned;   // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
   bool   mem_is_committed; // `true` if the whole segment is eagerly committed
 
   // segment fields
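Note: the rename widens the old invariant. `mem_is_fixed` read as "uses large OS pages", but the property the code actually relies on is broader: the mapping cannot change. A segment can now be pinned without using large pages, for example when it comes from an eagerly committed arena. A sketch of the predicate the rest of this diff switches over to (hypothetical helper, not part of the commit):

    static inline bool mi_segment_allow_decommit(const mi_segment_t* segment) {
      return !segment->mem_is_pinned;  // decommit/reset/protect only when unpinned
    }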
src/arena.c (66 changed lines)
@@ -44,6 +44,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_sec
 void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
 
 bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
+bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 
 /* -----------------------------------------------------------
   Arena allocation
@@ -61,12 +62,12 @@ typedef struct mi_arena_s {
   size_t field_count;                  // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
   int numa_node;                       // associated NUMA node
   bool is_zero_init;                   // is the arena zero initialized?
-  bool is_committed;                   // is the memory committed
-  bool is_large;                       // large OS page allocated
+  bool is_committed;                   // is the memory fully committed? (if so, block_committed == NULL)
+  bool is_large;                       // large- or huge OS pages (always committed)
   _Atomic(uintptr_t) search_idx;       // optimization to start the search for free blocks
   mi_bitmap_field_t* blocks_dirty;     // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed; // if `!is_committed`, are the blocks committed?
   mi_bitmap_field_t blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
 
 
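Note: the new comments pin down a representation invariant: `blocks_committed` exists only for arenas that commit on demand; a fully committed arena sets `is_committed` and leaves the pointer NULL. A sketch of the per-block query this implies (hypothetical helper; assumes `mi_bitmap_field_t` is one `uintptr_t`-sized word, and a real implementation would load the field atomically):

    static bool mi_arena_block_is_committed(const mi_arena_t* arena, size_t block_idx) {
      if (arena->is_committed) return true;            // whole arena is committed
      mi_assert_internal(arena->blocks_committed != NULL);
      const size_t field = block_idx / MI_BITMAP_FIELD_BITS;
      const size_t bit   = block_idx % MI_BITMAP_FIELD_BITS;
      return (arena->blocks_committed[field] & ((uintptr_t)1 << bit)) != 0;
    }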
@@ -118,16 +119,17 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
 ----------------------------------------------------------- */
 
 static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
-                                 bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+                                 bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
 {
   mi_bitmap_index_t bitmap_index;
   if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
 
   // claimed it! set the dirty bits (todo: no need for an atomic op here?)
   void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
   *memid = mi_arena_id_create(arena_index, bitmap_index);
   *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large = arena->is_large;
+  *is_pinned = (arena->is_large || arena->is_committed);
   if (arena->is_committed) {
     // always committed
     *commit = true;
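Note: within an arena, pinning is derived per-allocation from two arena-wide facts: large/huge OS pages can never be decommitted, and an eagerly committed arena has no `blocks_committed` bitmap to decommit against, so its blocks must also stay mapped. Spelled out (sketch of the assignment above):

    // why the assignment is a disjunction:
    //   arena->is_large     => OS large/huge pages: decommit is impossible
    //   arena->is_committed => no per-block commit bitmap: decommit is untracked
    bool pinned = (arena->is_large || arena->is_committed);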
@@ -149,14 +151,14 @@ static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t n
   return p;
 }
 
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
-                              bool* commit, bool* large, bool* is_zero,
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
                               size_t* memid, mi_os_tld_t* tld)
 {
-  mi_assert_internal(commit != NULL && large != NULL && is_zero != NULL && memid != NULL && tld != NULL);
+  mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
   *memid = MI_MEMID_OS;
   *is_zero = false;
+  *is_pinned = false;
 
   // try to allocate in an arena if the alignment is small enough
   // and the object is not too large or too small.
@@ -175,7 +177,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
       if ((arena->numa_node<0 || arena->numa_node==numa_node) && // numa local?
           (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
       {
-        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_zero, memid, tld);
+        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
         mi_assert_internal((uintptr_t)p % alignment == 0);
         if (p != NULL) return p;
       }
@@ -187,7 +189,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
       if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
           (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
       {
-        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_zero, memid, tld);
+        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
         mi_assert_internal((uintptr_t)p % alignment == 0);
         if (p != NULL) return p;
       }
@@ -196,13 +198,15 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
 
   // finally, fall back to the OS
   *is_zero = true;
   *memid = MI_MEMID_OS;
-  return _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats);
+  void* p = _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats);
+  if (p != NULL) *is_pinned = *large;
+  return p;
 }
 
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_zero, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, memid, tld);
 }
 
 /* -----------------------------------------------------------
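Note: on the OS fallback path, pinning is simply inherited from the large-page flag: `_mi_os_alloc_aligned` updates `*large` to whether large OS pages were actually used, and only then is the memory pinned. A usage sketch of the convenience wrapper (illustrative; `tld` is an assumed in-scope `mi_os_tld_t*`):

    bool commit = true, large = true, is_pinned = false, is_zero = false;
    size_t memid = 0;
    void* p = _mi_arena_alloc(MI_ARENA_BLOCK_SIZE, &commit, &large,
                              &is_pinned, &is_zero, &memid, tld);
    // if no arena could serve the request, p comes from the OS and
    // is_pinned == large: pinned exactly when large pages were granted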
@@ -225,6 +229,8 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
     mi_assert_internal(arena_idx < MI_MAX_ARENAS);
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
     mi_assert_internal(arena != NULL);
+    const size_t blocks = mi_block_count_of_size(size);
+    // checks
     if (arena == NULL) {
       _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
       return;
@@ -234,9 +240,18 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
       _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
       return;
     }
-    const size_t blocks = mi_block_count_of_size(size);
-    bool ones = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
-    if (!ones) {
+    // potentially decommit
+    if (arena->is_committed) {
+      mi_assert_internal(all_committed);
+    }
+    else {
+      mi_assert_internal(arena->blocks_committed != NULL);
+      _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, stats); // ok if this fails
+      _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+    }
+    // and make it available to others again
+    bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
+    if (!all_inuse) {
       _mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size);
       return;
     };
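Note: freeing now decommits on-demand-committed arena memory before releasing the blocks, and `blocks` is hoisted above the checks because both steps need it. `_mi_bitmap_unclaim_across` clears a bit range and returns whether every bit was set beforehand, which doubles as the double-free check. The block arithmetic, assuming the usual round-up definition used in this file:

    static size_t mi_block_count_of_size(size_t size) {
      // assumed definition: blocks needed to cover `size`
      return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
    }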
@@ -263,9 +278,14 @@ static bool mi_arena_add(mi_arena_t* arena) {
 
 bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept
 {
+  if (is_large) {
+    mi_assert_internal(is_committed);
+    is_committed = true;
+  }
+
   const size_t bcount = mi_block_count_of_size(size);
   const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
-  const size_t bitmaps = (is_committed ? 3 : 2);
+  const size_t bitmaps = (is_committed ? 2 : 3);
   const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
   mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
   if (arena == NULL) return false;
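Note: the `(is_committed ? 2 : 3)` swap fixes an inverted count. A fully committed arena needs only the in-use and dirty bitmaps; an on-demand arena needs a third, `blocks_committed`. The old expression allocated the spare bitmap for exactly the arenas that do not use it, and left on-demand arenas one bitmap short. The corrected trailing layout (sketch):

    // asize = header + trailing bitmap fields:
    //   fully committed arena: in-use + dirty                   (2 * fields)
    //   on-demand arena:       in-use + dirty + blocks_committed (3 * fields)
    size_t asize = sizeof(mi_arena_t) + bitmaps * fields * sizeof(mi_bitmap_field_t);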
@@ -301,7 +321,7 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
   bool large = allow_large;
   void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main);
   if (start==NULL) return ENOMEM;
-  if (!mi_manage_os_memory(start, size, commit, large, true, -1)) {
+  if (!mi_manage_os_memory(start, size, (large || commit), large, true, -1)) {
     _mi_os_free_ex(start, size, commit, &_mi_stats_main);
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
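Note: `mi_reserve_os_memory` must describe the returned memory accurately: when the OS grants large pages, the mapping is committed up front even if the caller asked for uncommitted memory, hence `(large || commit)` for the `is_committed` argument. A condensed sketch of the call sequence (the `true` argument is `is_zero`, `-1` means no NUMA affinity):

    bool large = allow_large;
    void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main);
    if (start != NULL) {
      // large implies committed: never register large-page memory as uncommitted
      mi_manage_os_memory(start, size, (large || commit), large, true, -1);
    }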
@@ -388,10 +388,8 @@ bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size
   return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
 }
 
-/*
 bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
   bool any_ones;
   mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
   return any_ones;
 }
-*/
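Note: `_mi_bitmap_is_any_claimed_across` had been kept commented out while unused; removing the comment markers here, together with the declaration added to the header in the next hunk, makes it callable from other translation units. Its contract, as a wrapper sketch (hypothetical helper):

    // true when at least one bit of the `count`-bit range starting at
    // `bitmap_idx` is set, e.g. "is any block in this range still claimed?"
    static bool mi_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields,
                               size_t count, mi_bitmap_index_t bitmap_idx) {
      return _mi_bitmap_is_any_claimed_across(bitmap, bitmap_fields, count, bitmap_idx);
    }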
@@ -97,5 +97,6 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
 bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);
 
 bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
 
 #endif
@@ -478,7 +478,7 @@ void mi_process_init(void) mi_attr_noexcept {
   if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
     size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
     mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
   }
 }
 
 // Called when the process is done (through `at_exit`)
src/region.c (39 changed lines)
@@ -50,8 +50,8 @@ bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
 
 // arena.c
 void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
 
 
 
@@ -77,7 +77,8 @@ typedef union mi_region_info_u {
   uintptr_t value;
   struct {
     bool valid;        // initialized?
-    bool is_large;     // allocated in fixed large/huge OS pages
+    bool is_large:1;   // allocated in fixed large/huge OS pages
+    bool is_pinned:1;  // pinned memory cannot be decommitted
     short numa_node;   // the associated NUMA node (where -1 means no associated node)
   } x;
 } mi_region_info_t;
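Note: `mi_region_info_t` packs region metadata into a single `uintptr_t` so it can be published and read with one atomic word access; turning `is_large` into a 1-bit field makes room for `is_pinned` without growing the union. A guard one could add (sketch, not in the commit):

    _Static_assert(sizeof(mi_region_info_t) == sizeof(uintptr_t),
                   "region info must stay one machine word for atomic access");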
@@ -177,8 +178,9 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   bool region_commit = (commit && mi_option_is_enabled(mi_option_eager_region_commit));
   bool region_large = (commit && allow_large);
   bool is_zero = false;
+  bool is_pinned = false;
   size_t arena_memid = 0;
-  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_zero, &arena_memid, tld);
+  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, &arena_memid, tld);
   if (start == NULL) return false;
   mi_assert_internal(!(region_large && !allow_large));
   mi_assert_internal(!region_large || region_commit);
@@ -208,6 +210,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   info.value = 0;                      // initialize the full union to zero
   info.x.valid = true;
   info.x.is_large = region_large;
+  info.x.is_pinned = is_pinned;
   info.x.numa_node = (short)_mi_os_numa_node(tld);
   mi_atomic_store_release(&r->info, info.value); // now make it available to others
   *region = r;
@@ -259,16 +262,16 @@ static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large,
 }
 
 
-static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(blocks <= MI_BITMAP_FIELD_BITS);
   mem_region_t* region;
   mi_bitmap_index_t bit_idx;
   const int numa_node = (_mi_os_numa_node_count() <= 1 ? -1 : _mi_os_numa_node(tld));
   // try to claim in existing regions
-  if (!mi_region_try_claim(numa_node, blocks, *is_large, &region, &bit_idx, tld)) {
+  if (!mi_region_try_claim(numa_node, blocks, *large, &region, &bit_idx, tld)) {
     // otherwise try to allocate a fresh region and claim in there
-    if (!mi_region_try_alloc_os(blocks, *commit, *is_large, &region, &bit_idx, tld)) {
+    if (!mi_region_try_alloc_os(blocks, *commit, *large, &region, &bit_idx, tld)) {
       // out of regions or memory
       return NULL;
     }
@@ -282,12 +285,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
   mi_region_info_t info;
   info.value = mi_atomic_load_acquire(&region->info);
   uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t,&region->start);
-  mi_assert_internal(!(info.x.is_large && !*is_large));
+  mi_assert_internal(!(info.x.is_large && !*large));
   mi_assert_internal(start != NULL);
 
   *is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
-  *is_large = info.x.is_large;
-  *memid = mi_memid_create(region, bit_idx);
+  *large = info.x.is_large;
+  *is_pinned = info.x.is_pinned;
+  *memid = mi_memid_create(region, bit_idx);
   void* p = start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);
 
   // commit
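Note: a region's `is_large` and `is_pinned` are fixed when the region is created and never change afterwards, so a sub-allocation can take a one-word atomic snapshot of `info` and copy the flags out; no further synchronization on the flags is needed. In sketch form:

    mi_region_info_t info;
    info.value = mi_atomic_load_acquire(&region->info);  // consistent one-word snapshot
    *large     = info.x.is_large;    // creation-time property of the region
    *is_pinned = info.x.is_pinned;   // likewise immutable after creation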
@@ -296,7 +300,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
     bool any_uncommitted;
     _mi_bitmap_claim(&region->commit, 1, blocks, bit_idx, &any_uncommitted);
     if (any_uncommitted) {
-      mi_assert_internal(!info.x.is_large);
+      mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
       bool commit_zero;
       _mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld);
       if (commit_zero) *is_zero = true;
@@ -311,7 +315,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
   // unreset reset blocks
   if (_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
     // some blocks are still reset
-    mi_assert_internal(!info.x.is_large);
+    mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
     mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
     mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
     if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
@@ -338,12 +342,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
 
 // Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
 // (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
   *memid = 0;
   *is_zero = false;
+  *is_pinned = false;
   bool default_large = false;
   if (large==NULL) large = &default_large; // ensure `large != NULL`
   if (size == 0) return NULL;
@@ -354,14 +359,14 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
   size_t arena_memid;
   const size_t blocks = mi_region_block_count(size);
   if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN) {
-    p = mi_region_try_alloc(blocks, commit, large, is_zero, memid, tld);
+    p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
     if (p == NULL) {
       _mi_warning_message("unable to allocate from region: size %zu\n", size);
     }
   }
   if (p == NULL) {
     // and otherwise fall back to the OS
-    p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_zero, &arena_memid, tld);
+    p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, &arena_memid, tld);
     *memid = mi_memid_create_from_arena(arena_memid);
   }
 
@@ -418,7 +423,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
     }
 
     // reset the blocks to reduce the working set.
-    if (!info.x.is_large && mi_option_is_enabled(mi_option_segment_reset)
+    if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
        && (mi_option_is_enabled(mi_option_eager_commit) ||
            mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
     {
@@ -231,7 +231,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
 static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) {
   mi_assert_internal(page->is_committed);
   if (!mi_option_is_enabled(mi_option_page_reset)) return;
-  if (segment->mem_is_fixed || page->segment_in_use || !page->is_committed || page->is_reset) return;
+  if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
   size_t psize;
   void* start = mi_segment_raw_page_start(segment, page, &psize);
   page->is_reset = true;
@@ -244,8 +244,8 @@ static bool mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size,
 {
   mi_assert_internal(page->is_reset);
   mi_assert_internal(page->is_committed);
-  mi_assert_internal(!segment->mem_is_fixed);
-  if (segment->mem_is_fixed || !page->is_committed || !page->is_reset) return true;
+  mi_assert_internal(!segment->mem_is_pinned);
+  if (segment->mem_is_pinned || !page->is_committed || !page->is_reset) return true;
   page->is_reset = false;
   size_t psize;
   uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
@@ -284,7 +284,7 @@ static void mi_pages_reset_add(mi_segment_t* segment, mi_page_t* page, mi_segmen
   mi_assert_expensive(!mi_pages_reset_contains(page, tld));
   mi_assert_internal(_mi_page_segment(page)==segment);
   if (!mi_option_is_enabled(mi_option_page_reset)) return;
-  if (segment->mem_is_fixed || page->segment_in_use || !page->is_committed || page->is_reset) return;
+  if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
 
   if (mi_option_get(mi_option_reset_delay) == 0) {
     // reset immediately?
@@ -324,7 +324,7 @@ static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) {
 }
 
 static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool force_reset, mi_segments_tld_t* tld) {
-  if (segment->mem_is_fixed) return; // never reset in huge OS pages
+  if (segment->mem_is_pinned) return; // never reset in huge OS pages
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
     if (!page->segment_in_use && page->is_committed && !page->is_reset) {
@@ -458,7 +458,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   segment->thread_id = 0;
   mi_segments_track_size(-((long)segment_size),tld);
   if (MI_SECURE != 0) {
-    mi_assert_internal(!segment->mem_is_fixed);
+    mi_assert_internal(!segment->mem_is_pinned);
     mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
   }
 
@@ -587,7 +587,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
     else
     {
       if (MI_SECURE!=0) {
-        mi_assert_internal(!segment->mem_is_fixed);
+        mi_assert_internal(!segment->mem_is_pinned);
         mi_segment_protect(segment, false, tld->os); // reset protection if the page kind differs
       }
       // different page kinds; unreset any reset pages, and unprotect
@@ -615,10 +615,12 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
     // Allocate the segment from the OS
     size_t memid;
     bool mem_large = (!eager_delayed && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
-    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_zero, &memid, os_tld);
+    bool is_pinned = false;
+    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
     if (segment == NULL) return NULL; // failed to allocate
     if (!commit) {
       // ensure the initial info is committed
+      mi_assert_internal(!mem_large && !is_pinned);
       bool commit_zero = false;
       bool ok = _mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
       if (commit_zero) is_zero = true;
|
@ -629,12 +631,12 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
segment->memid = memid;
|
segment->memid = memid;
|
||||||
segment->mem_is_fixed = mem_large;
|
segment->mem_is_pinned = (mem_large || is_pinned);
|
||||||
segment->mem_is_committed = commit;
|
segment->mem_is_committed = commit;
|
||||||
mi_segments_track_size((long)segment_size, tld);
|
mi_segments_track_size((long)segment_size, tld);
|
||||||
}
|
}
|
||||||
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
|
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
|
||||||
mi_assert_internal(segment->mem_is_fixed ? segment->mem_is_committed : true);
|
mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
|
||||||
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
|
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
|
||||||
if (!pages_still_good) {
|
if (!pages_still_good) {
|
||||||
// zero the segment info (but not the `mem` fields)
|
// zero the segment info (but not the `mem` fields)
|
||||||
|
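Note: at segment creation the two pinning sources are independent: `mem_large` says large OS pages were used for this segment itself, while `is_pinned` says the backing region or arena cannot decommit. Either one pins the segment (sketch of the combination as a hypothetical helper):

    static bool mi_segment_compute_pinned(bool mem_large, bool mem_is_pinned) {
      // large OS pages are pinned by the OS; region/arena memory may be
      // pinned for its own reasons (e.g. an eagerly committed arena)
      return (mem_large || mem_is_pinned);
    }

The assertion `segment->mem_is_pinned ? segment->mem_is_committed : true` then records the consequence: pinned memory can never be decommitted, so it must already be fully committed.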
@@ -719,7 +721,7 @@ static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg
   mi_pages_reset_remove(page, tld);
   // check commit
   if (!page->is_committed) {
-    mi_assert_internal(!segment->mem_is_fixed);
+    mi_assert_internal(!segment->mem_is_pinned);
     mi_assert_internal(!page->is_reset);
     size_t psize;
     uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
|
@ -736,7 +738,7 @@ static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg
|
||||||
segment->used++;
|
segment->used++;
|
||||||
// check reset
|
// check reset
|
||||||
if (page->is_reset) {
|
if (page->is_reset) {
|
||||||
mi_assert_internal(!segment->mem_is_fixed);
|
mi_assert_internal(!segment->mem_is_pinned);
|
||||||
bool ok = mi_page_unreset(segment, page, 0, tld);
|
bool ok = mi_page_unreset(segment, page, 0, tld);
|
||||||
if (!ok) {
|
if (!ok) {
|
||||||
page->segment_in_use = false;
|
page->segment_in_use = false;
|
||||||