merge from dev

daanx 2023-04-16 11:01:25 -07:00
commit b0104ef4fd
6 changed files with 207 additions and 204 deletions

View file

@@ -80,10 +80,11 @@ extern mi_decl_cache_align mi_stats_t _mi_stats_main;
 extern mi_decl_cache_align const mi_page_t _mi_page_empty;
 bool          _mi_is_main_thread(void);
 size_t        _mi_current_thread_count(void);
-bool          _mi_preloading(void);             // true while the C runtime is not ready
+bool          _mi_preloading(void);             // true while the C runtime is not initialized yet
 mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 mi_heap_t*    _mi_heap_main_get(void);          // statically allocated main backing heap
 void          _mi_thread_done(mi_heap_t* heap);
+void          _mi_thread_data_collect(void);

 // os.c
 void          _mi_os_init(void);                // called from process init
@@ -116,12 +117,13 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-void  _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void  _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
 bool  _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
-void  _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
 bool  _mi_arena_contains(const void* p);
+void  _mi_arena_collect(bool force_purge, mi_stats_t* stats);
+void  _mi_arena_unsafe_destroy_all(mi_stats_t* stats);

 // "segment-map.c"
 void  _mi_segment_map_allocated_at(const mi_segment_t* segment);
@@ -171,8 +173,8 @@ uint8_t _mi_bin(size_t size); // for stats
 void  _mi_heap_destroy_pages(mi_heap_t* heap);
 void  _mi_heap_collect_abandon(mi_heap_t* heap);
 void  _mi_heap_set_default_direct(mi_heap_t* heap);
-void  _mi_heap_destroy_all(void);
 bool  _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void  _mi_heap_unsafe_destroy_all(void);

 // "stats.c"
 void  _mi_stats_done(mi_stats_t* stats);

View file

@@ -369,16 +369,33 @@ typedef int64_t mi_msecs_t;
 // Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
 typedef enum mi_memkind_e {
-  MI_MEM_NONE,
-  MI_MEM_OS,
-  MI_MEM_STATIC,
-  MI_MEM_ARENA
+  MI_MEM_NONE,      // not allocated
+  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
+  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
+  MI_MEM_OS,        // allocated from the OS
+  MI_MEM_ARENA      // allocated from an arena (the usual case)
 } mi_memkind_t;

+typedef struct mi_memid_os_info {
+  size_t alignment;     // allocated with the given alignment
+  size_t align_offset;  // the offset that was aligned (used only for huge aligned pages)
+} mi_memid_os_info_t;
+
+typedef struct mi_memid_arena_info {
+  size_t        block_index;   // index in the arena
+  mi_arena_id_t id;            // arena id (>= 1)
+  bool          is_exclusive;  // the arena can only be used for specific arena allocations
+} mi_memid_arena_info_t;
+
 typedef struct mi_memid_s {
-  size_t        arena_idx;
-  mi_arena_id_t arena_id;
-  bool          arena_is_exclusive;
+  union {
+    mi_memid_os_info_t    os;     // only used for MI_MEM_OS
+    mi_memid_arena_info_t arena;  // only used for MI_MEM_ARENA
+  } mem;
+  bool is_pinned;       // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
+  bool is_large;        // `true` if the memory is in OS large (2MiB) or huge (1GiB) pages. (`is_pinned` will be true)
+  bool was_committed;   // `true` if the memory was originally allocated as committed
+  bool was_zero;        // `true` if the memory was originally zero initialized
   mi_memkind_t memkind;
 } mi_memid_t;
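Illustrative sketch (not from this commit): how the reworked mi_memid_t above is meant to be read, with the memkind tag selecting which union member of `mem` is meaningful. The concrete values are made up; `_mi_memzero` is the internal zeroing helper used in arena.c below.

  mi_memid_t memid;
  _mi_memzero(&memid, sizeof(memid));    // all-zero state, memkind == MI_MEM_NONE
  memid.memkind = MI_MEM_ARENA;          // tag: the mem.arena member is now the valid one
  memid.mem.arena.id = 1;                // hypothetical arena id (>= 1)
  memid.mem.arena.block_index = 42;      // hypothetical block index inside that arena
  memid.mem.arena.is_exclusive = false;

  if (memid.memkind == MI_MEM_OS) {
    // only in this case are memid.mem.os.alignment and memid.mem.os.align_offset meaningful
  }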
@@ -387,15 +404,13 @@ typedef struct mi_memid_s {
 // the OS. Inside segments we allocated fixed size _pages_ that
 // contain blocks.
 typedef struct mi_segment_s {
+  // constant fields
   mi_memid_t memid;              // memory id for arena allocation
-  bool       mem_is_pinned;      // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
-  bool       mem_is_large;       // in large/huge os pages?
-  bool       mem_is_committed;   // `true` if the whole segment is eagerly committed
-  size_t     mem_alignment;      // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
-  size_t     mem_align_offset;   // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)
   bool       allow_decommit;
   bool       allow_purge;
+  size_t     segment_size;

+  // segment fields
   mi_msecs_t purge_expire;
   mi_commit_mask_t purge_mask;
   mi_commit_mask_t commit_mask;

View file

@@ -46,7 +46,7 @@ typedef struct mi_arena_s {
   _Atomic(uint8_t*) start;   // the start of the memory area
   size_t   block_count;      // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
   size_t   field_count;      // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
-  size_t   meta_size;        // size of the arena structure itself including the bitmaps
+  size_t   meta_size;        // size of the arena structure itself (including its bitmaps)
   mi_memid_t meta_memid;     // memid of the arena structure itself (OS or static allocation)
   int      numa_node;        // associated NUMA node
   bool     is_zero_init;     // is the arena zero initialized?
@@ -96,57 +96,45 @@ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclus
   memory id's
----------------------------------------------------------- */

-static mi_memid_t mi_arena_memid_none(void) {
+static mi_memid_t mi_memid_none(void) {
   mi_memid_t memid;
+  _mi_memzero(&memid, sizeof(memid));
   memid.memkind = MI_MEM_NONE;
-  memid.arena_id = 0;
-  memid.arena_idx = 0;
-  memid.arena_is_exclusive = false;
   return memid;
 }

-static mi_memid_t mi_arena_memid_os(void) {
-  mi_memid_t memid = mi_arena_memid_none();
-  memid.memkind = MI_MEM_OS;
+static mi_memid_t mi_memid_create(mi_memkind_t memkind) {
+  mi_memid_t memid = mi_memid_none();
+  memid.memkind = memkind;
   return memid;
 }

-static mi_memid_t mi_arena_memid_static(void) {
-  mi_memid_t memid = mi_arena_memid_none();
-  memid.memkind = MI_MEM_STATIC;
+static mi_memid_t mi_memid_create_os(bool committed) {
+  mi_memid_t memid = mi_memid_none();
+  memid.memkind = MI_MEM_OS;
+  memid.was_committed = committed;
   return memid;
 }

 bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
-  // note: works also for OS and STATIC memory with a zero arena_id.
-  return mi_arena_id_is_suitable(memid.arena_id, memid.arena_is_exclusive, request_arena_id);
+  if (memid.memkind == MI_MEM_ARENA) {
+    return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
+  }
+  else {
+    return mi_arena_id_is_suitable(0, false, request_arena_id);
+  }
 }

 bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
   return (memid.memkind == MI_MEM_OS);
 }

 /* -----------------------------------------------------------
   Arena allocations get a (currently) 16-bit memory id where the
   lower 8 bits are the arena id, and the upper bits the block index.
----------------------------------------------------------- */

-static mi_memid_t mi_arena_memid_create(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
-  mi_memid_t memid;
-  memid.memkind = MI_MEM_ARENA;
-  memid.arena_id = id;
-  memid.arena_idx = bitmap_index;
-  memid.arena_is_exclusive = is_exclusive;
-  return memid;
-}
-
-static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
-  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
-  *arena_index = mi_arena_id_index(memid.arena_id);
-  *bitmap_index = memid.arena_idx;
-  return memid.arena_is_exclusive;
-}
-
 static size_t mi_block_count_of_size(size_t size) {
   return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
 }
@@ -159,6 +147,22 @@ static size_t mi_arena_size(mi_arena_t* arena) {
   return mi_arena_block_size(arena->block_count);
 }

+static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
+  mi_memid_t memid = mi_memid_create(MI_MEM_ARENA);
+  memid.mem.arena.id = id;
+  memid.mem.arena.block_index = bitmap_index;
+  memid.mem.arena.is_exclusive = is_exclusive;
+  return memid;
+}
+
+static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
+  *arena_index = mi_arena_id_index(memid.mem.arena.id);
+  *bitmap_index = memid.mem.arena.block_index;
+  return memid.mem.arena.is_exclusive;
+}
+
 /* -----------------------------------------------------------
   Special static area for mimalloc internal structures
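A short, hypothetical round trip through the two helpers just added (the id and index values are illustrative only):

  // encode: arena id + bitmap index -> memid
  mi_memid_t memid = mi_memid_create_arena(/*id*/ 2, /*is_exclusive*/ false, /*bitmap_index*/ 17);

  // decode: recover the arena slot and the bitmap index again
  size_t arena_index;
  mi_bitmap_index_t bitmap_index;
  bool is_exclusive = mi_arena_memid_indices(memid, &arena_index, &bitmap_index);
  // arena_index == mi_arena_id_index(2), bitmap_index == 17, is_exclusive == false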
@@ -172,7 +176,7 @@ static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
 static _Atomic(size_t) mi_arena_static_top;

 static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
-  *memid = mi_arena_memid_static();
+  *memid = mi_memid_none();
   if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
   if (mi_atomic_load_relaxed(&mi_arena_static_top) >= MI_ARENA_STATIC_MAX) return NULL;
@@ -189,7 +193,7 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m
   }

   // success
-  *memid = mi_arena_memid_static();
+  *memid = mi_memid_create(MI_MEM_STATIC);
   const size_t start = _mi_align_up(oldtop, alignment);
   uint8_t* const p = &mi_arena_static[start];
   _mi_memzero(p, size);
@@ -197,20 +201,17 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m
 }

 static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
-  *memid = mi_arena_memid_none();
+  *memid = mi_memid_none();

   // try static
   void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
-  if (p != NULL) {
-    *memid = mi_arena_memid_static();
-    return p;
-  }
+  if (p != NULL) return p;

   // or fall back to the OS
   bool is_zero = false;
   p = _mi_os_alloc(size, &is_zero, stats);
   if (p != NULL) {
-    *memid = mi_arena_memid_os();
+    *memid = mi_memid_create_os(true);
     if (!is_zero) { _mi_memzero(p, size); }
     return p;
   }
@@ -218,7 +219,7 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* st
   return NULL;
 }

-static void mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
+static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) {
   if (memid.memkind == MI_MEM_OS) {
     _mi_os_free(p, size, stats);
   }
@@ -246,8 +247,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
----------------------------------------------------------- */

 static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
-                                                bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                                                mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+                                                bool commit, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
   MI_UNUSED(arena_index);
   mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
@@ -258,9 +258,9 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
   // claimed it!
   void* p = arena->start + mi_arena_block_size(mi_bitmap_index_bit(bitmap_index));
-  *memid = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index);
-  *large = arena->is_large;
-  *is_pinned = (arena->is_large || !arena->allow_decommit);
+  *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
+  memid->is_large = arena->is_large;
+  memid->is_pinned = (arena->is_large || !arena->allow_decommit);

   // none of the claimed blocks should be scheduled for a decommit
   if (arena->blocks_purge != NULL) {
@@ -269,35 +269,40 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
   }

   // set the dirty bits (todo: no need for an atomic op here?)
-  *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+  memid->was_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);

   // set commit state
   if (arena->blocks_committed == NULL) {
     // always committed
-    *commit = true;
+    memid->was_committed = true;
   }
-  else if (*commit) {
+  else if (commit) {
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
+    memid->was_committed = true;
     bool any_uncommitted;
     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
     if (any_uncommitted) {
-      bool commit_zero;
-      _mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats);
-      if (commit_zero) { *is_zero = true; }
+      bool commit_zero = false;
+      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+        memid->was_committed = false;
+      }
+      else {
+        if (commit_zero) { memid->was_zero = true; }
+      }
     }
   }
   else {
     // no need to commit, but check if already fully committed
-    *commit = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+    memid->was_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
   }

-  mi_track_mem_undefined(p,needed_bcount*MI_ARENA_BLOCK_SIZE); // todo: should not be needed?
+  mi_track_mem_undefined(p, mi_arena_block_size(needed_bcount)); // todo: should not be needed?

   return p;
 }
 // allocate in a speficic arena
 static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t size, size_t alignment,
-                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+                                  bool commit, bool allow_large,
                                   mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED_RELEASE(alignment);
@@ -312,14 +317,14 @@ static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t
   mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
   if (arena == NULL) return NULL;
   if (arena->numa_node >= 0 && arena->numa_node != numa_node) return NULL;
-  if (!(*large) && arena->is_large) return NULL;
-  return mi_arena_alloc_at(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+  if (!allow_large && arena->is_large) return NULL;
+  return mi_arena_alloc_at(arena, arena_index, bcount, commit, req_arena_id, memid, tld);
 }

 // allocate from an arena with fallback to the OS
-static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
-                                              bool* is_pinned, bool* is_zero,
+static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment,
+                                              bool commit, bool allow_large,
                                               mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED(alignment);
@@ -335,9 +340,9 @@ static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
     if ((arena != NULL) &&
         // (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
-        (*large || !arena->is_large))      // large OS pages allowed, or the arena does not consist of large OS pages
+        (allow_large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_at(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, arena_index, bcount, commit, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -348,9 +353,9 @@ static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
     if (arena != NULL &&
         (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
-        (*large || !arena->is_large))      // large OS pages allowed, or the arena does not consist of large OS pages
+        (allow_large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_at(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, i, bcount, commit, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -361,9 +366,9 @@ static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
     if (arena != NULL &&
         (arena->numa_node >= 0 && arena->numa_node != numa_node) && // not numa local!
-        (*large || !arena->is_large))      // large OS pages allowed, or the arena does not consist of large OS pages
+        (allow_large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_at(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, i, bcount, commit, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -402,48 +407,53 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
 }

-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
                               mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
-  mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
+  mi_assert_internal(memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
-  *memid = mi_arena_memid_none();
-  *is_zero = false;
-  *is_pinned = false;
-
-  bool default_large = false;
-  if (large == NULL) large = &default_large; // ensure `large != NULL`
+  *memid = mi_memid_none();

   const int numa_node = _mi_os_numa_node(tld); // current numa node

   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
-    void* p = mi_arenas_alloc(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+    void* p = mi_arenas_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
     if (p != NULL) return p;

     // otherwise, try to first eagerly reserve a new arena
     mi_arena_id_t arena_id = 0;
-    if (mi_arena_reserve(size,*large,req_arena_id,&arena_id)) {
+    if (mi_arena_reserve(size,allow_large,req_arena_id,&arena_id)) {
       // and try allocate in there
-      p = mi_arena_alloc_at_id(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      p = mi_arena_alloc_at_id(arena_id, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
       if (p != NULL) return p;
     }
   }

-  // finally, fall back to the OS
+  // if we cannot use OS allocation, return NULL
   if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
     errno = ENOMEM;
     return NULL;
   }

-  *memid = mi_arena_memid_os();
-  void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, *commit, large, is_zero, tld->stats);
-  if (p != NULL) { *is_pinned = *large; }
+  // finally, fall back to the OS
+  bool os_large = allow_large;
+  bool os_is_zero = false;
+  void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, commit, &os_large, &os_is_zero, tld->stats);
+  if (p != NULL) {
+    *memid = mi_memid_create_os(commit);
+    memid->is_large = os_large;
+    memid->is_pinned = os_large;
+    memid->was_zero = os_is_zero;
+    memid->mem.os.alignment = alignment;
+    memid->mem.os.align_offset = align_offset;
+  }
   return p;
 }

-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
 }
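For orientation, a hedged sketch of what a caller of the new signature looks like (mirroring the segment allocation further below; `size`, `stats`, and `os_tld` are assumed to be in scope):

  mi_memid_t memid;
  void* p = _mi_arena_alloc_aligned(size, MI_SEGMENT_ALIGN, /*align_offset*/ 0,
                                    /*commit*/ true, /*allow_large*/ false,
                                    _mi_arena_id_none(), &memid, os_tld);
  if (p != NULL) {
    // results that used to come back via out-parameters are now read from the memid:
    //   memid.was_committed, memid.was_zero, memid.is_pinned, memid.is_large
    // and the matching free passes the memid back (still_committed_size is the caller's bookkeeping):
    _mi_arena_free(p, size, (memid.was_committed ? size : 0), memid, stats);
  }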
@@ -623,27 +633,28 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
   Arena free
----------------------------------------------------------- */

-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed_size, mi_stats_t* stats) {
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
   mi_assert_internal(size > 0 && stats != NULL);
   mi_assert_internal(committed_size <= size);
   if (p==NULL) return;
   if (size==0) return;
   const bool all_committed = (committed_size == size);

-  if (memid.memkind == MI_MEM_STATIC) {
-    // nothing to do
-  }
-  else if (memid.memkind == MI_MEM_OS) {
+  if (memid.memkind == MI_MEM_OS) {
     // was a direct OS allocation, pass through
     if (!all_committed && committed_size > 0) {
       // if partially committed, adjust the committed stats
       _mi_stat_decrease(&stats->committed, committed_size);
     }
-    _mi_os_free_aligned(p, size, alignment, align_offset, all_committed, stats);
+    if (memid.mem.os.align_offset != 0) {
+      _mi_os_free_aligned(p, size, memid.mem.os.alignment, memid.mem.os.align_offset, all_committed, stats);
+    }
+    else {
+      _mi_os_free(p, size, stats);
+    }
   }
-  else {
+  else if (memid.memkind == MI_MEM_ARENA) {
     // allocated in an arena
-    mi_assert_internal(align_offset == 0);
     size_t arena_idx;
     size_t bitmap_idx;
     mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
@@ -698,6 +709,10 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
       return;
     };
   }
+  else {
+    // arena was none, external, or static; nothing to do
+    mi_assert_internal(memid.memkind <= MI_MEM_STATIC);
+  }

   // purge expired decommits
   mi_arenas_try_purge(false, false, stats);
@@ -705,7 +720,7 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,

 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
-static void mi_arenas_destroy(void) {
+static void mi_arenas_unsafe_destroy(void) {
   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
   size_t new_max_arena = 0;
   for (size_t i = 0; i < max_arena; i++) {
@@ -719,11 +734,11 @@ static void mi_arenas_destroy(void) {
       else {
        _mi_os_free(arena->start, mi_arena_size(arena), &_mi_stats_main);
       }
-      mi_arena_meta_free(arena, arena->meta_size, arena->meta_memid, &_mi_stats_main);
     }
     else {
       new_max_arena = i;
     }
+    mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main);
   }
 }
@@ -732,15 +747,19 @@ static void mi_arenas_destroy(void) {
   mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
 }

-void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats) {
-  if (free_arenas) {
-    mi_arenas_destroy();
-  }
-  mi_arenas_try_purge(force_decommit, true, stats);
+// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
+void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
+  mi_arenas_try_purge(force_purge, true /* visit all */, stats);
 }

+// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
+// for dynamic libraries that are unloaded and need to release all their allocated memory.
+void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
+  mi_arenas_unsafe_destroy();
+  _mi_arena_collect(true /* force purge */, stats);  // purge non-owned arenas
+}
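Usage note (an assumption, not stated in this commit): this path is only reached when `mi_option_destroy_on_exit` is enabled, either programmatically through the public option API or via the environment, assuming mimalloc's usual MIMALLOC_<OPTION> naming:

  mi_option_enable(mi_option_destroy_on_exit);   // before the library is used
  // or: MIMALLOC_DESTROY_ON_EXIT=1 ./my_program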
+// Is a pointer inside any of our arenas?
 bool _mi_arena_contains(const void* p) {
   const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
   for (size_t i = 0; i < max_arena; i++) {

View file

@@ -154,8 +154,8 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

-  // collect abandoned segments (in particular, decommit expired parts of segments in the abandoned segment list)
-  // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
+  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
   _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

   // collect segment local caches
@@ -165,7 +165,8 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // collect regions on program-exit (or shared library unload)
   if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
-    _mi_arena_collect(false /* destroy arenas */, true /* force purge */, &heap->tld->stats);
+    _mi_thread_data_collect();  // collect thread data cache
+    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
   }
 }
@@ -361,7 +362,8 @@ void mi_heap_destroy(mi_heap_t* heap) {
   }
 }

-void _mi_heap_destroy_all(void) {
+// forcefully destroy all heaps in the current thread
+void _mi_heap_unsafe_destroy_all(void) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* curr = bheap->tld->heaps;
   while (curr != NULL) {
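By contrast with the internal `_mi_heap_unsafe_destroy_all` above, per-heap teardown in user code goes through the public heap API; a small sketch:

  mi_heap_t* heap = mi_heap_new();        // a fresh heap for this thread
  void* p = mi_heap_malloc(heap, 128);    // allocate from it
  mi_heap_destroy(heap);                  // releases p and every other outstanding block of this heap at once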

View file

@@ -203,7 +203,6 @@ mi_heap_t* _mi_heap_main_get(void) {
 typedef struct mi_thread_data_s {
   mi_heap_t  heap;   // must come first due to cast in `_mi_heap_done`
   mi_tld_t   tld;
-  mi_memid_t memid;
 } mi_thread_data_t;
@@ -212,7 +211,7 @@ typedef struct mi_thread_data_s {
 // destroy many OS threads, this may causes too much overhead
 // per thread so we maintain a small cache of recently freed metadata.

-#define TD_CACHE_SIZE (8)
+#define TD_CACHE_SIZE (16)
 static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];

 static mi_thread_data_t* mi_thread_data_zalloc(void) {
@@ -264,7 +263,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
     _mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main);
 }

-static void mi_thread_data_collect(void) {
+void _mi_thread_data_collect(void) {
   // free all thread metadata from the cache
   for (int i = 0; i < TD_CACHE_SIZE; i++) {
     mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
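The `td_cache` above is a fixed array of atomic slots; an illustrative stand-alone analogue of the claim/return pattern using C11 atomics (mimalloc itself uses its own `mi_atomic_*` macros and types):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stddef.h>

  #define CACHE_SIZE 16
  static _Atomic(void*) cache[CACHE_SIZE];

  // pop: atomically swap a non-empty slot with NULL and hand its contents to the caller
  static void* cache_pop(void) {
    for (int i = 0; i < CACHE_SIZE; i++) {
      void* td = atomic_load_explicit(&cache[i], memory_order_relaxed);
      if (td != NULL && atomic_compare_exchange_strong(&cache[i], &td, NULL)) return td;
    }
    return NULL;  // cache empty: caller falls back to a fresh OS allocation
  }

  // push: place the metadata back into an empty slot, or tell the caller to free it
  static bool cache_push(void* td) {
    for (int i = 0; i < CACHE_SIZE; i++) {
      void* expected = NULL;
      if (atomic_compare_exchange_strong(&cache[i], &expected, td)) return true;
    }
    return false;
  }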
@@ -353,7 +352,6 @@ static bool _mi_heap_done(mi_heap_t* heap) {
     mi_thread_data_free((mi_thread_data_t*)heap);
   }
   else {
-    mi_thread_data_collect(); // free cached thread metadata
     #if 0
     // never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
     // there may still be delete/free calls after the mi_fls_done is called. Issue #207
@@ -619,7 +617,7 @@ static void mi_cdecl mi_process_done(void) {
   _mi_prim_thread_done_auto_done();

   #ifndef MI_SKIP_COLLECT_ON_EXIT
-  #if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
+  #if (MI_DEBUG || !defined(MI_SHARED_LIB))
   // free all memory if possible on process exit. This is not needed for a stand-alone process
   // but should be done if mimalloc is statically linked into another shared library which
   // is repeatedly loaded/unloaded, see issue #281.
@@ -631,8 +629,9 @@ static void mi_cdecl mi_process_done(void) {
   // since after process_done there might still be other code running that calls `free` (like at_exit routines,
   // or C-runtime termination code.
   if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
-    _mi_heap_destroy_all();     // forcefully release all memory held by all heaps (of this thread only!)
-    _mi_arena_collect(true /* destroy (owned) arenas */, true /* purge the rest */, &_mi_heap_main_get()->tld->stats);
+    mi_collect(true /* force */);
+    _mi_heap_unsafe_destroy_all();  // forcefully release all memory held by all heaps (of this thread only!)
+    _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
   }

   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {

View file

@@ -396,7 +396,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
   const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);

   _mi_abandoned_await_readers();  // wait until safe to free
-  _mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment, segment->mem_align_offset, segment->memid, csize, tld->stats);
+  _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats);
 }

 // called by threads that are terminating
@@ -797,14 +797,11 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
 static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id,
                                           size_t* psegment_slices, size_t* ppre_size, size_t* pinfo_slices,
-                                          mi_commit_mask_t* pcommit_mask, mi_commit_mask_t* ppurge_mask,
-                                          bool* is_zero, bool* pcommit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+                                          bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
-  MI_UNUSED(ppurge_mask);
   mi_memid_t memid;
-  bool   mem_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
-  bool   is_pinned = false;
+  bool   allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
   size_t align_offset = 0;
   size_t alignment = MI_SEGMENT_ALIGN;
@@ -818,43 +815,41 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
     // recalculate due to potential guard pages
     *psegment_slices = mi_segment_calculate_slices(required + extra, ppre_size, pinfo_slices);
   }

-  const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
-  mi_segment_t* segment = NULL;
-
-  // get from OS
-  if (segment==NULL) {
-    segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, pcommit, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
-    if (segment == NULL) return NULL;  // failed to allocate
-    if (*pcommit) {
-      mi_commit_mask_create_full(pcommit_mask);
-    }
-    else {
-      mi_commit_mask_create_empty(pcommit_mask);
-    }
-  }
-  mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
-
-  const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
-  mi_assert_internal(commit_needed>0);
-  mi_commit_mask_t commit_needed_mask;
-  mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
-  if (!mi_commit_mask_all_set(pcommit_mask, &commit_needed_mask)) {
-    // at least commit the info slices
-    mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
-    bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, is_zero, tld->stats);
-    if (!ok) return NULL;  // failed to commit
-    mi_commit_mask_set(pcommit_mask, &commit_needed_mask);
-  }
-  else if (*is_zero) {
-    // track zero initialization for valgrind
-    mi_track_mem_defined(segment, commit_needed * MI_COMMIT_SIZE);
-  }
-
+  const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
+  mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, os_tld);
+  if (segment == NULL) {
+    return NULL;  // failed to allocate
+  }
+
+  // ensure metadata part of the segment is committed
+  mi_commit_mask_t commit_mask;
+  if (memid.was_committed) {
+    mi_commit_mask_create_full(&commit_mask);
+  }
+  else {
+    // at least commit the info slices
+    const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
+    mi_assert_internal(commit_needed>0);
+    mi_commit_mask_create(0, commit_needed, &commit_mask);
+    mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
+    if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) {
+      _mi_arena_free(segment,segment_size,0,memid,tld->stats);
+      return NULL;
+    }
+  }
+  mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
+
+  mi_track_mem_undefined(segment, (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE); // todo: should not be necessary?
   segment->memid = memid;
-  segment->mem_is_pinned = is_pinned;
-  segment->mem_is_large = mem_large;
-  segment->mem_is_committed = mi_commit_mask_is_full(pcommit_mask);
-  segment->mem_alignment = alignment;
-  segment->mem_align_offset = align_offset;
+  segment->allow_decommit = !memid.is_pinned && !memid.is_large;
+  segment->allow_purge = segment->allow_decommit && mi_option_is_enabled(mi_option_allow_purge);
+  segment->segment_size = segment_size;
+  segment->commit_mask = commit_mask;
+  segment->purge_expire = 0;
+  mi_commit_mask_create_empty(&segment->purge_mask);
+  mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);  // tsan
+
   mi_segments_track_size((long)(segment_size), tld);
   _mi_segment_map_allocated_at(segment);
   return segment;
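A worked example of the commit_needed computation above, under assumed sizes (a 64 KiB slice size and 64 KiB commit granularity are illustrative; the real MI_SEGMENT_SLICE_SIZE and MI_COMMIT_SIZE are defined elsewhere in the sources):

  // _mi_divide_up rounds up, so a partially filled chunk is still committed.
  static size_t divide_up(size_t n, size_t d) { return (n + d - 1) / d; }

  size_t info_slices   = 1;            // assumed number of metadata slices
  size_t slice_size    = 64 * 1024;    // assumed MI_SEGMENT_SLICE_SIZE
  size_t commit_size   = 64 * 1024;    // assumed MI_COMMIT_SIZE
  size_t commit_needed = divide_up(info_slices * slice_size, commit_size);  // == 1 chunk to commit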
@@ -877,50 +872,21 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
                            tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
   const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
   bool commit = eager || (required > 0);
-  bool is_zero = false;
-
-  mi_commit_mask_t commit_mask;
-  mi_commit_mask_t purge_mask;
-  mi_commit_mask_create_empty(&commit_mask);
-  mi_commit_mask_create_empty(&purge_mask);

   // Allocate the segment from the OS
   mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
-                                              &segment_slices, &pre_size, &info_slices, &commit_mask, &purge_mask,
-                                              &is_zero, &commit, tld, os_tld);
+                                              &segment_slices, &pre_size, &info_slices, commit, tld, os_tld);
   if (segment == NULL) return NULL;

   // zero the segment info? -- not always needed as it may be zero initialized from the OS
-  mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);  // tsan
-  {
+  if (!segment->memid.was_zero) {
     ptrdiff_t ofs    = offsetof(mi_segment_t, next);
     size_t    prefix = offsetof(mi_segment_t, slices) - ofs;
     size_t    zsize  = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
-    if (!is_zero) {
-      memset((uint8_t*)segment + ofs, 0, zsize);
-    }
+    _mi_memzero((uint8_t*)segment + ofs, zsize);
   }

-  segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
-  segment->allow_decommit = !segment->mem_is_pinned && !segment->mem_is_large;
-  segment->allow_purge = segment->allow_decommit && mi_option_is_enabled(mi_option_allow_purge);
-  if (segment->allow_purge) {
-    segment->purge_expire = 0; // don't decommit just committed memory // _mi_clock_now() + mi_option_get(mi_option_purge_delay);
-    segment->purge_mask = purge_mask;
-    mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
-    #if MI_DEBUG>2
-    const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
-    mi_commit_mask_t commit_needed_mask;
-    mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
-    mi_assert_internal(!mi_commit_mask_any_set(&segment->purge_mask, &commit_needed_mask));
-    #endif
-  }
-  else {
-    segment->purge_expire = 0;
-    mi_commit_mask_create_empty( &segment->purge_mask );
-  }
-
-  // initialize segment info
+  // initialize the rest of the segment info
   const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
   segment->segment_slices = segment_slices;
   segment->segment_info_slices = info_slices;
@@ -929,7 +895,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   segment->slice_entries = slice_entries;
   segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE);

-  // memset(segment->slices, 0, sizeof(mi_slice_t)*(info_slices+1));
+  // _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1));
   _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment));

   // set up guard pages