remove os_tld and stats parameters from the OS interface

daanx 2024-12-08 17:56:13 -08:00
parent d9a2f76ff7
commit c8607a8d01
12 changed files with 270 additions and 294 deletions
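
In effect the change narrows the OS layer's entry points: statistics are now tracked against the global `_mi_stats_main` inside the OS layer instead of being threaded through every call, and the thread-local `mi_os_tld_t*` handle disappears entirely. A before/after sketch of the affected declarations, reconstructed from the call sites in the hunks below (exact prototypes assumed):

    // before: stats (and OS thread-local state) passed explicitly
    void* _mi_os_alloc (size_t size, mi_memid_t* memid, mi_stats_t* stats);
    void  _mi_os_free  (void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
    bool  _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats);
    bool  _mi_os_purge (void* p, size_t size, mi_stats_t* stats);

    // after: the OS layer updates the global stats itself
    void* _mi_os_alloc (size_t size, mi_memid_t* memid);
    void  _mi_os_free  (void* p, size_t size, mi_memid_t memid);
    bool  _mi_os_commit(void* addr, size_t size, bool* is_zero);
    bool  _mi_os_purge (void* p, size_t size);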

@@ -186,7 +186,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
if (p != NULL) return p;
// or fall back to the OS
- p = _mi_os_alloc(size, memid, &_mi_stats_main);
+ p = _mi_os_alloc(size, memid);
if (p == NULL) return NULL;
// zero the OS memory if needed
@@ -199,7 +199,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
if (mi_memkind_is_os(memid.memkind)) {
- _mi_os_free(p, size, memid, &_mi_stats_main);
+ _mi_os_free(p, size, memid);
}
else {
mi_assert(memid.memkind == MI_MEM_STATIC);
@@ -216,10 +216,10 @@ void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
----------------------------------------------------------- */
// claim the `blocks_inuse` bits
- static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
- if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) {
+ if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
return true;
};
@@ -232,13 +232,13 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
----------------------------------------------------------- */
static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
- bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
+ bool commit, mi_memid_t* memid)
{
MI_UNUSED(arena_index);
mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
mi_bitmap_index_t bitmap_index;
- if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL;
+ if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
// claimed it!
void* p = mi_arena_block_start(arena, bitmap_index);
@@ -268,7 +268,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
_mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
if (any_uncommitted) {
bool commit_zero = false;
- if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+ if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
memid->initially_committed = false;
}
else {
@@ -286,7 +286,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
- bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+ bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid )
{
MI_UNUSED_RELEASE(alignment);
mi_assert(alignment <= MI_SEGMENT_ALIGN);
@@ -307,7 +307,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
}
// try to allocate
- void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
+ void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid);
mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
return p;
}
@@ -316,7 +316,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large,
- mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+ mi_arena_id_t req_arena_id, mi_memid_t* memid )
{
MI_UNUSED(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -326,21 +326,21 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
if (req_arena_id != _mi_arena_id_none()) {
// try a specific arena if requested
if (mi_arena_id_index(req_arena_id) < max_arena) {
- void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
else {
// try numa affine allocation
for (size_t i = 0; i < max_arena; i++) {
- void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
// try from another numa node instead..
if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
for (size_t i = 0; i < max_arena; i++) {
- void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
@@ -385,18 +385,18 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
- mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+ mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
- mi_assert_internal(memid != NULL && tld != NULL);
+ mi_assert_internal(memid != NULL);
mi_assert_internal(size > 0);
*memid = _mi_memid_none();
- const int numa_node = _mi_os_numa_node(tld); // current numa node
+ const int numa_node = _mi_os_numa_node(); // current numa node
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed?
if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
- void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
// otherwise, try to first eagerly reserve a new arena
@@ -405,7 +405,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
// and try allocate in there
mi_assert_internal(req_arena_id == _mi_arena_id_none());
- p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
@@ -420,16 +420,16 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
// finally, fall back to the OS
if (align_offset > 0) {
- return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
+ return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
}
else {
- return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
}
}
- void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+ void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
- return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
+ return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid);
}
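
With the `tld` parameter gone, `_mi_arena_alloc` stays a thin wrapper over `_mi_arena_alloc_aligned` and callers no longer thread OS thread-local state through. A minimal, hypothetical call site under the new signatures shown above:

    // illustrative only: allocate one arena block, use it, free it again
    mi_memid_t memid;
    void* p = _mi_arena_alloc(MI_ARENA_BLOCK_SIZE, true /* commit */, false /* allow_large */,
                              _mi_arena_id_none(), &memid);
    if (p != NULL) {
      // ... use the block ...
      _mi_arena_free(p, MI_ARENA_BLOCK_SIZE, MI_ARENA_BLOCK_SIZE /* committed_size */, memid);
    }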
@@ -455,7 +455,7 @@ static long mi_arena_purge_delay(void) {
// reset or decommit in an arena and update the committed/decommit bitmaps
// assumes we own the area (i.e. blocks_in_use is claimed by us)
- static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
mi_assert_internal(arena->blocks_committed != NULL);
mi_assert_internal(arena->blocks_purge != NULL);
mi_assert_internal(!arena->memid.is_pinned);
@@ -464,7 +464,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
bool needs_recommit;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
// all blocks are committed, we can purge freely
- needs_recommit = _mi_os_purge(p, size, stats);
+ needs_recommit = _mi_os_purge(p, size);
}
else {
// some blocks are not committed -- this can happen when a partially committed block is freed
@@ -472,7 +472,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
// and also undo the decommit stats (as it was already adjusted)
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
- needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
+ needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */);
if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); }
}
@@ -486,14 +486,14 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
// Note: assumes we (still) own the area as we may purge immediately
- static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
mi_assert_internal(arena->blocks_purge != NULL);
const long delay = mi_arena_purge_delay();
if (delay < 0) return; // is purging allowed at all?
if (_mi_preloading() || delay == 0) {
// decommit directly
- mi_arena_purge(arena, bitmap_idx, blocks, stats);
+ mi_arena_purge(arena, bitmap_idx, blocks);
}
else {
// schedule decommit
@@ -511,7 +511,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
// purge a range of blocks
// return true if the full range was purged.
// assumes we own the area (i.e. blocks_in_use is claimed by us)
- static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
+ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) {
const size_t endidx = startidx + bitlen;
size_t bitidx = startidx;
bool all_purged = false;
@@ -524,7 +524,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
if (count > 0) {
// found range to be purged
const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
- mi_arena_purge(arena, range_idx, count, stats);
+ mi_arena_purge(arena, range_idx, count);
if (count == bitlen) {
all_purged = true;
}
@@ -535,7 +535,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
}
// returns true if anything was purged
- static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
+ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
{
if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@@ -571,7 +571,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
if (bitlen > 0) {
// read purge again now that we have the in_use bits
purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
- if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
+ if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) {
full_purge = false;
}
any_purged = true;
@@ -591,7 +591,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
return any_purged;
}
- static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
+ static void mi_arenas_try_purge( bool force, bool visit_all ) {
if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
@@ -606,7 +606,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL) {
- if (mi_arena_try_purge(arena, now, force, stats)) {
+ if (mi_arena_try_purge(arena, now, force)) {
if (max_purge_count <= 1) break;
max_purge_count--;
}
@@ -620,8 +620,8 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
Arena free
----------------------------------------------------------- */
- void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
- mi_assert_internal(size > 0 && stats != NULL);
+ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) {
+ mi_assert_internal(size > 0);
mi_assert_internal(committed_size <= size);
if (p==NULL) return;
if (size==0) return;
@@ -636,7 +636,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
}
- _mi_os_free(p, size, memid, stats);
+ _mi_os_free(p, size, memid);
}
else if (memid.memkind == MI_MEM_ARENA) {
// allocated in an arena
@@ -681,7 +681,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// works (as we should never reset decommitted parts).
}
// (delay) purge the entire range
- mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
+ mi_arena_schedule_purge(arena, bitmap_idx, blocks);
}
// and make it available to others again
@@ -697,7 +697,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
}
// purge expired decommits
- mi_arenas_try_purge(false, false, stats);
+ mi_arenas_try_purge(false, false);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
@@ -711,7 +711,7 @@ static void mi_arenas_unsafe_destroy(void) {
mi_lock_done(&arena->abandoned_visit_lock);
if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
- _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
+ _mi_os_free(arena->start, mi_arena_size(arena), arena->memid);
}
else {
new_max_arena = i;
@@ -726,15 +726,15 @@ static void mi_arenas_unsafe_destroy(void) {
}
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
- void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) {
- mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
+ void _mi_arenas_collect(bool force_purge) {
+ mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
- void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
+ void _mi_arena_unsafe_destroy_all(void) {
mi_arenas_unsafe_destroy();
- _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas
+ _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
}
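
Only the stats threading changes here; the teardown contract is the same. For context, the option named in the comment above can be enabled through the public API; a hypothetical setup for a dynamically unloaded library:

    // release all owned arena memory when the heap is torn down
    mi_option_set_enabled(mi_option_destroy_on_exit, true);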
// Is a pointer inside any of our arenas?
@@ -838,11 +838,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
mi_memid_t memid;
- void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
+ void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid);
if (start == NULL) return ENOMEM;
const bool is_large = memid.is_pinned; // todo: use separate is_large field?
if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
- _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
+ _mi_os_free_ex(start, size, commit, memid);
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
return ENOMEM;
}
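
Call sites of the public reservation API are unaffected; only the internal OS calls lose their stats argument. A usage sketch of `mi_reserve_os_memory_ex` (sizes illustrative, trailing parameters assumed from the truncated signature above):

    // reserve 256 MiB up front as an exclusive arena, without committing it yet
    mi_arena_id_t arena_id;
    int err = mi_reserve_os_memory_ex(256 * 1024 * 1024, false /* commit */,
                                      false /* allow_large */, true /* exclusive */,
                                      &arena_id);
    if (err != 0) { /* ENOMEM: the reservation failed */ }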
@@ -938,7 +938,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m
_mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
- _mi_os_free(p, hsize, memid, &_mi_stats_main);
+ _mi_os_free(p, hsize, memid);
return ENOMEM;
}
return 0;
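
The same holds for huge-page reservation, whose error path is patched above; a sketch of the public call (the full parameter list is assumed from the upstream API, since the hunk header truncates it):

    // try to reserve 4 x 1 GiB huge pages on NUMA node 0, waiting at most 2 seconds
    mi_arena_id_t arena_id;
    int err = mi_reserve_huge_os_pages_at_ex(4 /* pages */, 0 /* numa node */,
                                             2000 /* timeout_msecs */,
                                             false /* exclusive */, &arena_id);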