delete old files

daanx 2024-12-08 09:19:05 -08:00
parent 36bb599873
commit 67cc424ada
8 changed files with 0 additions and 3460 deletions


@@ -1,357 +0,0 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#if !defined(MI_IN_ARENA_C)
#error "this file should be included from 'arena.c' (so mi_arena_t is visible)"
// add includes to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "bitmap.h"
#endif
// Minimal exports for arena-abandoned.
size_t mi_arena_id_index(mi_arena_id_t id);
mi_arena_t* mi_arena_from_index(size_t idx);
size_t mi_arena_get_count(void);
void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex);
bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index);
/* -----------------------------------------------------------
Abandoned blocks/segments:
_mi_arena_segment_clear_abandoned
_mi_arena_segment_mark_abandoned
This is used to atomically abandon/reclaim segments
(and crosses the arena API but it is convenient to have here).
Abandoned segments still have live blocks; they get reclaimed
when a thread frees a block in one, or when a thread needs a fresh
segment.
Abandoned segments are atomically marked in the `blocks_abandoned`
bitmap of arenas. Any segments allocated outside arenas are put
in the sub-process `abandoned_os_list`. This list is accessed
using locks but this should be uncommon and generally uncontended.
Reclaiming and visiting either scan through the `blocks_abandoned`
bitmaps of the arenas, or visit the `abandoned_os_list`.
A potentially nicer design is to use arenas for everything,
and perhaps have virtual arenas to map OS-allocated memory,
but this would lack the "density" of our current arenas. TBC.
----------------------------------------------------------- */
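// Illustrative sketch (not part of the original file): the intended lifecycle of the
// two entry points above, assuming `segment` is fully abandoned (used == abandoned).
#if 0
static void example_abandon_and_reclaim(mi_segment_t* segment) {
  // the owning thread gives the segment up: this clears `thread_id` and either sets
  // the arena `blocks_abandoned` bit or pushes the segment on the `abandoned_os_list`
  _mi_arena_segment_mark_abandoned(segment);
  // ...later, another thread (e.g. on a free into this segment) can try to take
  // ownership back; on success its thread id is installed again
  if (_mi_arena_segment_clear_abandoned(segment)) {
    // the segment is now owned by the current thread
  }
}
#endif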
// reclaim a specific OS abandoned segment; `true` on success.
// sets the thread_id.
static bool mi_arena_segment_os_clear_abandoned(mi_segment_t* segment, bool take_lock) {
mi_assert(segment->memid.memkind != MI_MEM_ARENA);
// not in an arena, remove from list of abandoned os segments
mi_subproc_t* const subproc = segment->subproc;
if (take_lock && !mi_lock_try_acquire(&subproc->abandoned_os_lock)) {
return false; // failed to acquire the lock, we just give up
}
// remove atomically from the abandoned os list (if possible!)
bool reclaimed = false;
mi_segment_t* const next = segment->abandoned_os_next;
mi_segment_t* const prev = segment->abandoned_os_prev;
if (next != NULL || prev != NULL || subproc->abandoned_os_list == segment) {
#if MI_DEBUG>3
// find ourselves in the abandoned list (and check the count)
bool found = false;
size_t count = 0;
for (mi_segment_t* current = subproc->abandoned_os_list; current != NULL; current = current->abandoned_os_next) {
if (current == segment) { found = true; }
count++;
}
mi_assert_internal(found);
mi_assert_internal(count == mi_atomic_load_relaxed(&subproc->abandoned_os_list_count));
#endif
// remove (atomically) from the list and reclaim
if (prev != NULL) { prev->abandoned_os_next = next; }
else { subproc->abandoned_os_list = next; }
if (next != NULL) { next->abandoned_os_prev = prev; }
else { subproc->abandoned_os_list_tail = prev; }
segment->abandoned_os_next = NULL;
segment->abandoned_os_prev = NULL;
mi_atomic_decrement_relaxed(&subproc->abandoned_count);
mi_atomic_decrement_relaxed(&subproc->abandoned_os_list_count);
if (take_lock) { // don't reset the thread_id when iterating
mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
}
reclaimed = true;
}
if (take_lock) { mi_lock_release(&segment->subproc->abandoned_os_lock); }
return reclaimed;
}
// reclaim a specific abandoned segment; `true` on success.
// sets the thread_id.
bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment) {
if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) {
return mi_arena_segment_os_clear_abandoned(segment, true /* take lock */);
}
// arena segment: use the blocks_abandoned bitmap.
size_t arena_idx;
size_t bitmap_idx;
mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx);
mi_arena_t* arena = mi_arena_from_index(arena_idx);
mi_assert_internal(arena != NULL);
// reclaim atomically
bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx);
if (was_marked) {
mi_assert_internal(mi_atomic_load_acquire(&segment->thread_id) == 0);
mi_atomic_decrement_relaxed(&segment->subproc->abandoned_count);
mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
}
// mi_assert_internal(was_marked);
mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
//mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
return was_marked;
}
// mark a specific OS segment as abandoned
static void mi_arena_segment_os_mark_abandoned(mi_segment_t* segment) {
mi_assert(segment->memid.memkind != MI_MEM_ARENA);
// not in an arena; we use a list of abandoned segments
mi_subproc_t* const subproc = segment->subproc;
if (!mi_lock_acquire(&subproc->abandoned_os_lock)) {
_mi_error_message(EFAULT, "internal error: failed to acquire the abandoned (os) segment lock to mark abandonment");
// we can continue but cannot visit/reclaim such blocks..
}
else {
// push on the tail of the list (important for the visitor)
mi_segment_t* prev = subproc->abandoned_os_list_tail;
mi_assert_internal(prev == NULL || prev->abandoned_os_next == NULL);
mi_assert_internal(segment->abandoned_os_prev == NULL);
mi_assert_internal(segment->abandoned_os_next == NULL);
if (prev != NULL) { prev->abandoned_os_next = segment; }
else { subproc->abandoned_os_list = segment; }
subproc->abandoned_os_list_tail = segment;
segment->abandoned_os_prev = prev;
segment->abandoned_os_next = NULL;
mi_atomic_increment_relaxed(&subproc->abandoned_os_list_count);
mi_atomic_increment_relaxed(&subproc->abandoned_count);
// and release the lock
mi_lock_release(&subproc->abandoned_os_lock);
}
return;
}
// mark a specific segment as abandoned
// clears the thread_id.
void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
{
mi_assert_internal(segment->used == segment->abandoned);
mi_atomic_store_release(&segment->thread_id, (uintptr_t)0); // mark as abandoned for multi-thread free's
if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) {
mi_arena_segment_os_mark_abandoned(segment);
return;
}
// segment is in an arena, mark it in the arena `blocks_abandoned` bitmap
size_t arena_idx;
size_t bitmap_idx;
mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx);
mi_arena_t* arena = mi_arena_from_index(arena_idx);
mi_assert_internal(arena != NULL);
// set abandonment atomically
mi_subproc_t* const subproc = segment->subproc; // don't access the segment after setting it abandoned
const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
if (was_unmarked) { mi_atomic_increment_relaxed(&subproc->abandoned_count); }
mi_assert_internal(was_unmarked);
mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
}
/* -----------------------------------------------------------
Iterate through the abandoned blocks/segments using a cursor.
This is used for reclaiming and abandoned block visiting.
----------------------------------------------------------- */
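// Illustrative sketch (not part of the original file): how such a cursor is typically
// driven; `mi_abandoned_visit_blocks` further below follows this exact pattern.
#if 0
static void example_visit_abandoned(mi_subproc_t* subproc) {
  mi_arena_field_cursor_t cursor;
  _mi_arena_field_cursor_init(NULL, subproc, true /* visit all (blocking) */, &cursor);
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&cursor)) != NULL) {
    // ...inspect or reclaim `segment`...
    _mi_arena_segment_mark_abandoned(segment);   // re-abandon when only visiting
  }
  _mi_arena_field_cursor_done(&cursor);
}
#endif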
// start a cursor at a randomized arena
void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, bool visit_all, mi_arena_field_cursor_t* current) {
mi_assert_internal(heap == NULL || heap->tld->segments.subproc == subproc);
current->bitmap_idx = 0;
current->subproc = subproc;
current->visit_all = visit_all;
current->hold_visit_lock = false;
const size_t abandoned_count = mi_atomic_load_relaxed(&subproc->abandoned_count);
const size_t abandoned_list_count = mi_atomic_load_relaxed(&subproc->abandoned_os_list_count);
const size_t max_arena = mi_arena_get_count();
if (heap != NULL && heap->arena_id != _mi_arena_id_none()) {
// for a heap that is bound to one arena, only visit that arena
current->start = mi_arena_id_index(heap->arena_id);
current->end = current->start + 1;
current->os_list_count = 0;
}
else {
// otherwise visit all starting at a random location
if (abandoned_count > abandoned_list_count && max_arena > 0) {
current->start = (heap == NULL || max_arena == 0 ? 0 : (mi_arena_id_t)(_mi_heap_random_next(heap) % max_arena));
current->end = current->start + max_arena;
}
else {
current->start = 0;
current->end = 0;
}
current->os_list_count = abandoned_list_count; // max entries to visit in the os abandoned list
}
mi_assert_internal(current->start <= max_arena);
}
void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current) {
if (current->hold_visit_lock) {
mi_lock_release(&current->subproc->abandoned_os_visit_lock);
current->hold_visit_lock = false;
}
}
static mi_segment_t* mi_arena_segment_clear_abandoned_at(mi_arena_t* arena, mi_subproc_t* subproc, mi_bitmap_index_t bitmap_idx) {
// try to reclaim an abandoned segment in the arena atomically
if (!_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) return NULL;
mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx);
mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0);
// check that the segment belongs to our sub-process
// note: this is the reason we need the `abandoned_visit` lock in case abandoned visiting is enabled.
// without the lock an abandoned visit may otherwise fail to visit all abandoned segments in the sub-process.
// for regular reclaim it is fine to miss one sometimes, so without abandoned visiting we don't need the `abandoned_visit` lock.
if (segment->subproc != subproc) {
// it is from another sub-process, re-mark it and continue searching
const bool was_zero = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
mi_assert_internal(was_zero); MI_UNUSED(was_zero);
return NULL;
}
else {
// success, we unabandoned a segment in our sub-process
mi_atomic_decrement_relaxed(&subproc->abandoned_count);
return segment;
}
}
static mi_segment_t* mi_arena_segment_clear_abandoned_next_field(mi_arena_field_cursor_t* previous) {
const size_t max_arena = mi_arena_get_count();
size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx);
size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx);
// visit arenas (from the previous cursor)
for (; previous->start < previous->end; previous->start++, field_idx = 0, bit_idx = 0) {
// index wraps around
size_t arena_idx = (previous->start >= max_arena ? previous->start % max_arena : previous->start);
mi_arena_t* arena = mi_arena_from_index(arena_idx);
if (arena != NULL) {
bool has_lock = false;
// visit the abandoned fields (starting at previous_idx)
for (; field_idx < arena->field_count; field_idx++, bit_idx = 0) {
size_t field = mi_atomic_load_relaxed(&arena->blocks_abandoned[field_idx]);
if mi_unlikely(field != 0) { // skip zero fields quickly
// we only take the arena lock if there are actually abandoned segments present
if (!has_lock && mi_option_is_enabled(mi_option_visit_abandoned)) {
has_lock = (previous->visit_all ? mi_lock_acquire(&arena->abandoned_visit_lock) : mi_lock_try_acquire(&arena->abandoned_visit_lock));
if (!has_lock) {
if (previous->visit_all) {
_mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the visitor lock");
}
// skip to next arena
break;
}
}
mi_assert_internal(has_lock || !mi_option_is_enabled(mi_option_visit_abandoned));
// visit each set bit in the field (todo: maybe use `ctz` here?)
for (; bit_idx < MI_BITMAP_FIELD_BITS; bit_idx++) {
// pre-check if the bit is set
size_t mask = ((size_t)1 << bit_idx);
if mi_unlikely((field & mask) == mask) {
mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, bitmap_idx);
if (segment != NULL) {
//mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); }
previous->bitmap_idx = mi_bitmap_index_create_ex(field_idx, bit_idx + 1); // start at next one for the next iteration
return segment;
}
}
}
}
}
if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); }
}
}
return NULL;
}
static mi_segment_t* mi_arena_segment_clear_abandoned_next_list(mi_arena_field_cursor_t* previous) {
// go through the abandoned_os_list
// we only allow one thread per sub-process to visit, guarded by the `abandoned_os_visit_lock`.
// The lock is released when the cursor is released.
if (!previous->hold_visit_lock) {
previous->hold_visit_lock = (previous->visit_all ? mi_lock_acquire(&previous->subproc->abandoned_os_visit_lock)
: mi_lock_try_acquire(&previous->subproc->abandoned_os_visit_lock));
if (!previous->hold_visit_lock) {
if (previous->visit_all) {
_mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the OS visitor lock");
}
return NULL; // we cannot get the lock, give up
}
}
// One list entry at a time
while (previous->os_list_count > 0) {
previous->os_list_count--;
const bool has_lock = mi_lock_acquire(&previous->subproc->abandoned_os_lock); // this could contend with concurrent OS block abandonment and reclaim from `free`
if (has_lock) {
mi_segment_t* segment = previous->subproc->abandoned_os_list;
// pop from head of the list, a subsequent mark will push at the end (and thus we iterate through os_list_count entries)
if (segment == NULL || mi_arena_segment_os_clear_abandoned(segment, false /* we already have the lock */)) {
mi_lock_release(&previous->subproc->abandoned_os_lock);
return segment;
}
// already abandoned, try again
mi_lock_release(&previous->subproc->abandoned_os_lock);
}
else {
_mi_error_message(EFAULT, "failed to acquire abandoned OS list lock during abandoned block visit\n");
return NULL;
}
}
// done
mi_assert_internal(previous->os_list_count == 0);
return NULL;
}
// reclaim abandoned segments
// this does not set the thread id (so it appears as still abandoned)
mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous) {
if (previous->start < previous->end) {
// walk the arena
mi_segment_t* segment = mi_arena_segment_clear_abandoned_next_field(previous);
if (segment != NULL) { return segment; }
}
// no entries in the arenas anymore, walk the abandoned OS list
mi_assert_internal(previous->start == previous->end);
return mi_arena_segment_clear_abandoned_next_list(previous);
}
bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
// (unfortunately) the visit_abandoned option must be enabled from the start.
// This is to avoid taking locks if abandoned list visiting is not required (as for most programs)
if (!mi_option_is_enabled(mi_option_visit_abandoned)) {
_mi_error_message(EFAULT, "internal error: can only visit abandoned blocks when MIMALLOC_VISIT_ABANDONED=ON");
return false;
}
mi_arena_field_cursor_t current;
_mi_arena_field_cursor_init(NULL, _mi_subproc_from_id(subproc_id), true /* visit all (blocking) */, &current);
mi_segment_t* segment;
bool ok = true;
while (ok && (segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
ok = _mi_segment_visit_blocks(segment, heap_tag, visit_blocks, visitor, arg);
_mi_arena_segment_mark_abandoned(segment);
}
_mi_arena_field_cursor_done(&current);
return ok;
}


@@ -1,988 +0,0 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
/* ----------------------------------------------------------------------------
"Arenas" are fixed areas of OS memory from which we can allocate
large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
In contrast to the rest of mimalloc, the arenas are shared between
threads and need to be accessed using atomic operations.
Arenas are also used for huge OS page (1GiB) reservations, or for reserving
OS memory upfront, which can improve performance or is sometimes needed
on embedded devices. We can also employ this with WASI or `sbrk` systems
to reserve large arenas upfront and be able to reuse the memory more effectively.
The arena allocation needs to be thread-safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
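// Illustrative sketch (not part of the original file): reserving a fixed arena
// up-front through the public API implemented in this file.
#if 0
static void example_reserve_arena(void) {
  mi_arena_id_t arena_id;
  // reserve 256 MiB of OS memory as an exclusive arena; segment allocations can
  // then be served from this arena instead of going to the OS directly
  if (mi_reserve_os_memory_ex(256 * 1024 * 1024, false /* commit eagerly? */,
                              true /* allow large OS pages */,
                              true /* exclusive */, &arena_id) == 0) {
    // success: `arena_id` identifies the new arena
  }
}
#endif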
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "bitmap.h"
/* -----------------------------------------------------------
Arena allocation
----------------------------------------------------------- */
// A memory arena descriptor
typedef struct mi_arena_s {
mi_arena_id_t id; // arena id; 0 for non-specific
mi_memid_t memid; // memid of the memory area
_Atomic(uint8_t*)start; // the start of the memory area
size_t block_count; // size of the area in arena blocks (of `MI_ARENA_SLICE_SIZE`)
size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
size_t meta_size; // size of the arena structure itself (including its bitmaps)
mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
int numa_node; // associated NUMA node
bool exclusive; // only allow allocations if specifically for this arena
bool is_large; // memory area consists of large- or huge OS pages (always committed)
mi_lock_t abandoned_visit_lock; // lock is only used when abandoned segments are being visited
_Atomic(size_t)search_idx; // optimization to start the search for free blocks
_Atomic(mi_msecs_t)purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`.
mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses APIs but it is convenient to have here)
mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
// do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields.
} mi_arena_t;
#define MI_ARENA_SLICE_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN)
#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_SLICE_SIZE/2) // 32MiB
#define MI_MAX_ARENAS (132) // Limited as the reservation exponentially increases (and takes up .bss)
// The available arenas
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
#define MI_IN_ARENA_C
#include "arena-abandon.c"
#undef MI_IN_ARENA_C
/* -----------------------------------------------------------
Arena id's
id = arena_index + 1
----------------------------------------------------------- */
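// e.g. (illustration): arena index 0 corresponds to id 1, and the "none" id 0
// maps back to the out-of-range index MI_MAX_ARENAS.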
size_t mi_arena_id_index(mi_arena_id_t id) {
return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
}
static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
mi_assert_internal(arena_index < MI_MAX_ARENAS);
return (int)arena_index + 1;
}
mi_arena_id_t _mi_arena_id_none(void) {
return 0;
}
static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
(arena_id == req_arena_id));
}
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
if (memid.memkind == MI_MEM_ARENA) {
return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
}
else {
return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id);
}
}
size_t mi_arena_get_count(void) {
return mi_atomic_load_relaxed(&mi_arena_count);
}
mi_arena_t* mi_arena_from_index(size_t idx) {
mi_assert_internal(idx < mi_arena_get_count());
return mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[idx]);
}
/* -----------------------------------------------------------
Arena allocations get a (currently) 16-bit memory id where the
lower 8 bits are the arena id, and the upper bits the block index.
----------------------------------------------------------- */
static size_t mi_block_count_of_size(size_t size) {
return _mi_divide_up(size, MI_ARENA_SLICE_SIZE);
}
static size_t mi_arena_block_size(size_t bcount) {
return (bcount * MI_ARENA_SLICE_SIZE);
}
static size_t mi_arena_size(mi_arena_t* arena) {
return mi_arena_block_size(arena->block_count);
}
static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
memid.mem.arena.id = id;
memid.mem.arena.block_index = bitmap_index;
memid.mem.arena.is_exclusive = is_exclusive;
return memid;
}
bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
mi_assert_internal(memid.memkind == MI_MEM_ARENA);
*arena_index = mi_arena_id_index(memid.mem.arena.id);
*bitmap_index = memid.mem.arena.block_index;
return memid.mem.arena.is_exclusive;
}
/* -----------------------------------------------------------
Special static area for mimalloc internal structures
to avoid OS calls (for example, for the arena metadata (~= 256b))
----------------------------------------------------------- */
#define MI_ARENA_STATIC_MAX ((MI_INTPTR_SIZE/2)*MI_KiB) // 4 KiB on 64-bit
static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX]; // must be cache aligned, see issue #895
static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top;
static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
*memid = _mi_memid_none();
if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
const size_t toplow = mi_atomic_load_relaxed(&mi_arena_static_top);
if ((toplow + size) > MI_ARENA_STATIC_MAX) return NULL;
// try to claim space
if (alignment < MI_MAX_ALIGN_SIZE) { alignment = MI_MAX_ALIGN_SIZE; }
const size_t oversize = size + alignment - 1;
if (toplow + oversize > MI_ARENA_STATIC_MAX) return NULL;
const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
size_t top = oldtop + oversize;
if (top > MI_ARENA_STATIC_MAX) {
// try to roll back, ok if this fails
mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
return NULL;
}
// success
*memid = _mi_memid_create(MI_MEM_STATIC);
memid->initially_zero = true;
const size_t start = _mi_align_up(oldtop, alignment);
uint8_t* const p = &mi_arena_static[start];
_mi_memzero_aligned(p, size);
return p;
}
void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
*memid = _mi_memid_none();
// try static
void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid);
if (p != NULL) return p;
// or fall back to the OS
p = _mi_os_alloc(size, memid, &_mi_stats_main);
if (p == NULL) return NULL;
// zero the OS memory if needed
if (!memid->initially_zero) {
_mi_memzero_aligned(p, size);
memid->initially_zero = true;
}
return p;
}
void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
if (mi_memkind_is_os(memid.memkind)) {
_mi_os_free(p, size, memid, &_mi_stats_main);
}
else {
mi_assert(memid.memkind == MI_MEM_STATIC);
}
}
void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
}
/* -----------------------------------------------------------
Thread safe allocation in an arena
----------------------------------------------------------- */
// claim the `blocks_inuse` bits
static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
{
size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) {
mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
return true;
};
return false;
}
/* -----------------------------------------------------------
Arena Allocation
----------------------------------------------------------- */
static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
{
MI_UNUSED(arena_index);
mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
mi_bitmap_index_t bitmap_index;
if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL;
// claimed it!
void* p = mi_arena_block_start(arena, bitmap_index);
*memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
memid->is_pinned = arena->memid.is_pinned;
// none of the claimed blocks should be scheduled for a decommit
if (arena->blocks_purge != NULL) {
// this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
_mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
}
// set the dirty bits (todo: no need for an atomic op here?)
if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
}
// set commit state
if (arena->blocks_committed == NULL) {
// always committed
memid->initially_committed = true;
}
else if (commit) {
// commit requested, but the range may not be committed as a whole: ensure it is committed now
memid->initially_committed = true;
bool any_uncommitted;
_mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
if (any_uncommitted) {
bool commit_zero = false;
if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
memid->initially_committed = false;
}
else {
if (commit_zero) { memid->initially_zero = true; }
}
}
}
else {
// no need to commit, but check if already fully committed
memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
}
return p;
}
// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
MI_UNUSED_RELEASE(alignment);
mi_assert(alignment <= MI_SEGMENT_ALIGN);
const size_t bcount = mi_block_count_of_size(size);
const size_t arena_index = mi_arena_id_index(arena_id);
mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
mi_assert_internal(size <= mi_arena_block_size(bcount));
// Check arena suitability
mi_arena_t* arena = mi_arena_from_index(arena_index);
if (arena == NULL) return NULL;
if (!allow_large && arena->is_large) return NULL;
if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
if (req_arena_id == _mi_arena_id_none()) { // if not specific, check numa affinity
const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
if (match_numa_node) { if (!numa_suitable) return NULL; }
else { if (numa_suitable) return NULL; }
}
// try to allocate
void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
return p;
}
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large,
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
MI_UNUSED(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
if mi_likely(max_arena == 0) return NULL;
if (req_arena_id != _mi_arena_id_none()) {
// try a specific arena if requested
if (mi_arena_id_index(req_arena_id) < max_arena) {
void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
}
else {
// try numa affine allocation
for (size_t i = 0; i < max_arena; i++) {
void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
// try from another numa node instead..
if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
for (size_t i = 0; i < max_arena; i++) {
void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
}
}
return NULL;
}
// try to reserve a fresh arena space
static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
{
if (_mi_preloading()) return false; // use the OS only while preloading
if (req_arena_id != _mi_arena_id_none()) return false;
const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
if (arena_count > (MI_MAX_ARENAS - 4)) return false;
size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
if (arena_reserve == 0) return false;
if (!_mi_os_has_virtual_reserve()) {
arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example)
}
arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_SLICE_SIZE);
arena_reserve = _mi_align_up(arena_reserve, MI_SEGMENT_SIZE);
if (arena_count >= 8 && arena_count <= 128) {
// scale up the arena sizes exponentially every 8 entries (128 entries get to 589TiB)
const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/8, 0, 16 );
size_t reserve = 0;
if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) {
arena_reserve = reserve;
}
}
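// e.g. (illustration): at arena_count = 24 the multiplier is 1 << (24/8) = 8, so
// eight times the configured `arena_reserve` is reserved for the next arena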
if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size
// commit eagerly?
bool arena_commit = false;
if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); }
else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id) == 0);
}
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(memid != NULL && tld != NULL);
mi_assert_internal(size > 0);
*memid = _mi_memid_none();
const int numa_node = _mi_os_numa_node(tld); // current numa node
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed?
if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
// otherwise, try to first eagerly reserve a new arena
if (req_arena_id == _mi_arena_id_none()) {
mi_arena_id_t arena_id = 0;
if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
// and try allocate in there
mi_assert_internal(req_arena_id == _mi_arena_id_none());
p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
}
}
}
// if we cannot use OS allocation, return NULL
if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) {
errno = ENOMEM;
return NULL;
}
// finally, fall back to the OS
if (align_offset > 0) {
return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
}
else {
return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
}
}
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
return _mi_arena_alloc_aligned(size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
}
void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
if (size != NULL) *size = 0;
size_t arena_index = mi_arena_id_index(arena_id);
if (arena_index >= MI_MAX_ARENAS) return NULL;
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
if (arena == NULL) return NULL;
if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
return arena->start;
}
/* -----------------------------------------------------------
Arena purge
----------------------------------------------------------- */
static long mi_arena_purge_delay(void) {
// <0 = no purging allowed, 0 = immediate purging, >0 = millisecond delay
return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
}
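// e.g. (illustration): with purge_delay=10 and arena_purge_mult=10 the effective
// arena purge delay is 10*10 = 100 milliseconds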
// reset or decommit in an arena and update the committed/decommit bitmaps
// assumes we own the area (i.e. blocks_in_use is claimed by us)
static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
mi_assert_internal(arena->blocks_committed != NULL);
mi_assert_internal(arena->blocks_purge != NULL);
mi_assert_internal(!arena->memid.is_pinned);
const size_t size = mi_arena_block_size(blocks);
void* const p = mi_arena_block_start(arena, bitmap_idx);
bool needs_recommit;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
// all blocks are committed, we can purge freely
needs_recommit = _mi_os_purge(p, size, stats);
}
else {
// some blocks are not committed -- this can happen when a partially committed block is freed
// in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
// and also undo the decommit stats (as it was already adjusted)
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); }
}
// clear the purged blocks
_mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
// update committed bitmap
if (needs_recommit) {
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
}
}
// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
// Note: assumes we (still) own the area as we may purge immediately
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
mi_assert_internal(arena->blocks_purge != NULL);
const long delay = mi_arena_purge_delay();
if (delay < 0) return; // is purging allowed at all?
if (_mi_preloading() || delay == 0) {
// decommit directly
mi_arena_purge(arena, bitmap_idx, blocks, stats);
}
else {
// schedule decommit
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
if (expire != 0) {
mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10)); // add smallish extra delay
}
else {
mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
}
_mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
}
}
// purge a range of blocks
// return true if the full range was purged.
// assumes we own the area (i.e. blocks_in_use is claimed by us)
static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
const size_t endidx = startidx + bitlen;
size_t bitidx = startidx;
bool all_purged = false;
while (bitidx < endidx) {
// count consecutive ones in the purge mask
size_t count = 0;
while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
count++;
}
if (count > 0) {
// found range to be purged
const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
mi_arena_purge(arena, range_idx, count, stats);
if (count == bitlen) {
all_purged = true;
}
}
bitidx += (count+1); // +1 to skip the zero bit (or end)
}
return all_purged;
}
// returns true if anything was purged
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
{
if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
if (expire == 0) return false;
if (!force && expire > now) return false;
// reset expire (if not already set concurrently)
mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
// potential purges scheduled, walk through the bitmap
bool any_purged = false;
bool full_purge = true;
for (size_t i = 0; i < arena->field_count; i++) {
size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
if (purge != 0) {
size_t bitidx = 0;
while (bitidx < MI_BITMAP_FIELD_BITS) {
// find consecutive range of ones in the purge mask
size_t bitlen = 0;
while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
bitlen++;
}
// temporarily claim the purge range as "in-use" to be thread-safe with allocation
// try to claim the longest range of corresponding in_use bits
const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
while( bitlen > 0 ) {
if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
break;
}
bitlen--;
}
// actual claimed bits at `in_use`
if (bitlen > 0) {
// read purge again now that we have the in_use bits
purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
full_purge = false;
}
any_purged = true;
// release the claimed `in_use` bits again
_mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
}
bitidx += (bitlen+1); // +1 to skip the zero (or end)
} // while bitidx
} // purge != 0
}
// if not fully purged, make sure to purge again in the future
if (!full_purge) {
const long delay = mi_arena_purge_delay();
mi_msecs_t expected = 0;
mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
}
return any_purged;
}
static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
if (max_arena == 0) return;
// allow only one thread to purge at a time
static mi_atomic_guard_t purge_guard;
mi_atomic_guard(&purge_guard)
{
mi_msecs_t now = _mi_clock_now();
size_t max_purge_count = (visit_all ? max_arena : 1);
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL) {
if (mi_arena_try_purge(arena, now, force, stats)) {
if (max_purge_count <= 1) break;
max_purge_count--;
}
}
}
}
}
/* -----------------------------------------------------------
Arena free
----------------------------------------------------------- */
void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
mi_assert_internal(size > 0 && stats != NULL);
mi_assert_internal(committed_size <= size);
if (p==NULL) return;
if (size==0) return;
const bool all_committed = (committed_size == size);
// need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
mi_track_mem_undefined(p,size);
if (mi_memkind_is_os(memid.memkind)) {
// was a direct OS allocation, pass through
if (!all_committed && committed_size > 0) {
// if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
}
_mi_os_free(p, size, memid, stats);
}
else if (memid.memkind == MI_MEM_ARENA) {
// allocated in an arena
size_t arena_idx;
size_t bitmap_idx;
mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
mi_assert_internal(arena_idx < MI_MAX_ARENAS);
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
mi_assert_internal(arena != NULL);
const size_t blocks = mi_block_count_of_size(size);
// checks
if (arena == NULL) {
_mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
return;
}
mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
_mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
return;
}
// potentially decommit
if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
mi_assert_internal(all_committed);
}
else {
mi_assert_internal(arena->blocks_committed != NULL);
mi_assert_internal(arena->blocks_purge != NULL);
if (!all_committed) {
// mark the entire range as no longer committed (so we recommit the full range when re-using)
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
mi_track_mem_noaccess(p,size);
if (committed_size > 0) {
// if partially committed, adjust the committed stats (as it will be recommitted when re-using)
// in the delayed purge, we now need to not count a decommit if the range is not marked as committed.
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
}
// note: if not all committed, it may be that the purge will reset/decommit the entire range
// that contains already decommitted parts. Since purge consistently uses reset or decommit that
// works (as we should never reset decommitted parts).
}
// (delay) purge the entire range
mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
}
// and make it available to others again
bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
if (!all_inuse) {
_mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
return;
};
}
else {
// arena was none, external, or static; nothing to do
mi_assert_internal(memid.memkind < MI_MEM_OS);
}
// purge expired decommits
mi_arenas_try_purge(false, false, stats);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
static void mi_arenas_unsafe_destroy(void) {
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
size_t new_max_arena = 0;
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL) {
mi_lock_done(&arena->abandoned_visit_lock);
if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
_mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
}
else {
new_max_arena = i;
}
_mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size);
}
}
// try to lower the max arena.
size_t expected = max_arena;
mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
}
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) {
mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
mi_arenas_unsafe_destroy();
_mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas
}
// Is a pointer inside any of our arenas?
bool _mi_arena_contains(const void* p) {
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
return true;
}
}
return false;
}
/* -----------------------------------------------------------
Add an arena.
----------------------------------------------------------- */
static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* stats) {
mi_assert_internal(arena != NULL);
mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
mi_assert_internal(arena->block_count > 0);
if (arena_id != NULL) { *arena_id = -1; }
size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
if (i >= MI_MAX_ARENAS) {
mi_atomic_decrement_acq_rel(&mi_arena_count);
return false;
}
_mi_stat_counter_increase(&stats->arena_count,1);
arena->id = mi_arena_id_create(i);
mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
if (arena_id != NULL) { *arena_id = arena->id; }
return true;
}
static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
{
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
if (size < MI_ARENA_SLICE_SIZE) return false;
if (is_large) {
mi_assert_internal(memid.initially_committed && memid.is_pinned);
}
const size_t bcount = size / MI_ARENA_SLICE_SIZE;
const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
const size_t bitmaps = (memid.is_pinned ? 3 : 5);
const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
mi_memid_t meta_memid;
mi_arena_t* arena = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid);
if (arena == NULL) return false;
// already zero'd due to zalloc
// _mi_memzero(arena, asize);
arena->id = _mi_arena_id_none();
arena->memid = memid;
arena->exclusive = exclusive;
arena->meta_size = asize;
arena->meta_memid = meta_memid;
arena->block_count = bcount;
arena->field_count = fields;
arena->start = (uint8_t*)start;
arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
arena->is_large = is_large;
arena->purge_expire = 0;
arena->search_idx = 0;
mi_lock_init(&arena->abandoned_visit_lock);
// consecutive bitmaps
arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap
arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap
arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap
// initialize committed bitmap?
if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
}
// and claim leftover blocks if needed (so we never allocate there)
ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
mi_assert_internal(post >= 0);
if (post > 0) {
// don't use leftover bits at the end
mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
_mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
}
return mi_arena_add(arena, arena_id, &_mi_stats_main);
}
bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
memid.initially_committed = is_committed;
memid.initially_zero = is_zero;
memid.is_pinned = is_large;
return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
}
// Reserve a range of regular OS memory
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
size = _mi_align_up(size, MI_ARENA_SLICE_SIZE); // at least one block
mi_memid_t memid;
void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
if (start == NULL) return ENOMEM;
const bool is_large = memid.is_pinned; // todo: use separate is_large field?
if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
_mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
return ENOMEM;
}
_mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
return 0;
}
// Manage a range of regular OS memory
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
}
// Reserve a range of regular OS memory
int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
}
/* -----------------------------------------------------------
Debugging
----------------------------------------------------------- */
static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count ) {
_mi_verbose_message("%s%s:\n", prefix, header);
size_t bcount = 0;
size_t inuse_count = 0;
for (size_t i = 0; i < field_count; i++) {
char buf[MI_BITMAP_FIELD_BITS + 1];
uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++, bcount++) {
if (bcount < block_count) {
bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
if (inuse) inuse_count++;
buf[bit] = (inuse ? 'x' : '.');
}
else {
buf[bit] = ' ';
}
}
buf[MI_BITMAP_FIELD_BITS] = 0;
_mi_verbose_message("%s %s\n", prefix, buf);
}
_mi_verbose_message("%s total ('x'): %zu\n", prefix, inuse_count);
return inuse_count;
}
void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept {
size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
size_t inuse_total = 0;
size_t abandoned_total = 0;
size_t purge_total = 0;
for (size_t i = 0; i < max_arenas; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
if (arena == NULL) break;
_mi_verbose_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_SLICE_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
if (show_inuse) {
inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
}
if (arena->blocks_committed != NULL) {
mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count);
}
if (show_abandoned) {
abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count);
}
if (show_purge && arena->blocks_purge != NULL) {
purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count);
}
}
if (show_inuse) _mi_verbose_message("total inuse blocks : %zu\n", inuse_total);
if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total);
if (show_purge) _mi_verbose_message("total purgeable blocks: %zu\n", purge_total);
}
/* -----------------------------------------------------------
Reserve a huge page arena.
----------------------------------------------------------- */
// reserve at a specific numa node
int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
if (arena_id != NULL) *arena_id = -1;
if (pages==0) return 0;
if (numa_node < -1) numa_node = -1;
if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
size_t hsize = 0;
size_t pages_reserved = 0;
mi_memid_t memid;
void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
if (p==NULL || pages_reserved==0) {
_mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
return ENOMEM;
}
_mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
_mi_os_free(p, hsize, memid, &_mi_stats_main);
return ENOMEM;
}
return 0;
}
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
}
// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
if (pages == 0) return 0;
// pages per numa node
size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
if (numa_count <= 0) numa_count = 1;
const size_t pages_per = pages / numa_count;
const size_t pages_mod = pages % numa_count;
const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);
// reserve evenly among numa nodes
for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
size_t node_pages = pages_per; // can be 0
if (numa_node < pages_mod) node_pages++;
int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
if (err) return err;
if (pages < node_pages) {
pages = 0;
}
else {
pages -= node_pages;
}
}
return 0;
}
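// e.g. (illustration): reserving 10 pages over 4 numa nodes gives pages_per = 2 and
// pages_mod = 2, so nodes 0 and 1 receive 3 pages each and nodes 2 and 3 receive 2 each.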
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
MI_UNUSED(max_secs);
_mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
if (pages_reserved != NULL) *pages_reserved = 0;
int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
return err;
}


@@ -1,20 +0,0 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
/* ----------------------------------------------------------------------------
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "bitmap.h"
/* -----------------------------------------------------------
Arena allocation
----------------------------------------------------------- */


@@ -1,419 +0,0 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
/* ----------------------------------------------------------------------------
Concurrent bitmap that can set/reset sequences of bits atomically,
represented as an array of fields where each field is a machine word (`size_t`)
There are two APIs: the standard one cannot have sequences that cross
between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
The `_across`-suffixed functions do allow sequences that cross over
between the fields. (This is used in arena allocation.)
---------------------------------------------------------------------------- */
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/bits.h"
#include "bitmap.h"
/* -----------------------------------------------------------
Bitmap definition
----------------------------------------------------------- */
// The bit mask for a given number of bits (blocks) at a specified bit index.
static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS);
mi_assert_internal(count > 0);
if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL;
if (count == 0) return 0;
return ((((size_t)1 << count) - 1) << bitidx);
}
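/* Illustration (not part of the original file): for count = 3 and bitidx = 5 the mask
   is ((1 << 3) - 1) << 5 = 0b111 << 5 = 0xE0, i.e. bits 5, 6 and 7 are set. */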
/* -----------------------------------------------------------
Claim a bit sequence atomically
----------------------------------------------------------- */
// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
{
mi_assert_internal(bitmap_idx != NULL);
mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
mi_bitmap_field_t* field = &bitmap[idx];
size_t map = mi_atomic_load_relaxed(field);
if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
// search for 0-bit sequence of length count
const size_t mask = mi_bitmap_mask_(count, 0);
const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;
#if MI_HAS_FAST_BITSCAN
size_t bitidx = mi_ctz(~map); // quickly find the first zero bit if possible
#else
size_t bitidx = 0; // otherwise start at 0
#endif
size_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx
// scan linearly for a free range of zero bits
while (bitidx <= bitidx_max) {
const size_t mapm = (map & m);
if (mapm == 0) { // are the mask bits free at bitidx?
mi_assert_internal((m >> bitidx) == mask); // no overflow?
const size_t newmap = (map | m);
mi_assert_internal((newmap^map) >> bitidx == mask);
if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { // TODO: use weak cas here?
// no success, another thread claimed concurrently.. keep going (with updated `map`)
continue;
}
else {
// success, we claimed the bits!
*bitmap_idx = mi_bitmap_index_create(idx, bitidx);
return true;
}
}
else {
// on to the next bit range
#if MI_HAS_FAST_BITSCAN
mi_assert_internal(mapm != 0);
const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx));
mi_assert_internal(shift > 0 && shift <= count);
#else
const size_t shift = 1;
#endif
bitidx += shift;
m <<= shift;
}
}
// no bits found
return false;
}
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
size_t idx = start_field_idx;
for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
if (idx >= bitmap_fields) { idx = 0; } // wrap
if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
return true;
}
}
return false;
}
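/* Hedged usage sketch (illustration only, not part of the original file): claim a run
   of 8 bits somewhere in a small 4-field bitmap and release it again. */
static void mi_bitmap_claim_example(void) {
  mi_bitmap_field_t fields[4] = { 0 };     // zero-initialized: all bits are free
  mi_bitmap_t bitmap = &fields[0];
  mi_bitmap_index_t idx;
  if (_mi_bitmap_try_find_from_claim(bitmap, 4, 0, 8, &idx)) {
    mi_assert_internal(_mi_bitmap_is_claimed(bitmap, 4, 8, idx));
    _mi_bitmap_unclaim(bitmap, 4, 8, idx);  // set the 8 bits back to 0
  }
}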
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
// mi_assert_internal((bitmap[idx] & mask) == mask);
const size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask);
return ((prev & mask) == mask);
}
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
//mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0);
size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
if (any_zero != NULL) { *any_zero = ((prev & mask) != mask); }
return ((prev & mask) == 0);
}
// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
const size_t field = mi_atomic_load_relaxed(&bitmap[idx]);
if (any_ones != NULL) { *any_ones = ((field & mask) != 0); }
return ((field & mask) == mask);
}
// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` on success, i.e. when all `count` bits were 0 previously.
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
size_t expected = mi_atomic_load_relaxed(&bitmap[idx]);
do {
if ((expected & mask) != 0) return false;
}
while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask));
mi_assert_internal((expected & mask) == 0);
return true;
}
bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
}
bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
bool any_ones;
mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
return any_ones;
}
//--------------------------------------------------------------------------
// the `_across` functions work on bitmaps where sequences can cross over
// between the fields. This is used in arena allocation
//--------------------------------------------------------------------------
// Try to atomically claim a sequence of `count` bits starting from the field
// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
// Only needs to consider crossing into the next fields (see `_mi_bitmap_try_find_from_claim_across`)
static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
{
mi_assert_internal(bitmap_idx != NULL);
// check initial trailing zeros
mi_bitmap_field_t* field = &bitmap[idx];
size_t map = mi_atomic_load_relaxed(field);
const size_t initial = mi_clz(map); // count of initial zeros starting at idx
mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS);
if (initial == 0) return false;
if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us)
if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries
// scan ahead
size_t found = initial;
size_t mask = 0; // mask bits for the final field
while(found < count) {
field++;
map = mi_atomic_load_relaxed(field);
const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found));
mi_assert_internal(mask_bits > 0 && mask_bits <= MI_BITMAP_FIELD_BITS);
mask = mi_bitmap_mask_(mask_bits, 0);
if ((map & mask) != 0) return false; // some part is already claimed
found += mask_bits;
}
mi_assert_internal(field < &bitmap[bitmap_fields]);
// we found a range of contiguous zeros up to the final field; `mask` now holds the mask for the final field
// now try to claim the range atomically
mi_bitmap_field_t* const final_field = field;
const size_t final_mask = mask;
mi_bitmap_field_t* const initial_field = &bitmap[idx];
const size_t initial_idx = MI_BITMAP_FIELD_BITS - initial;
const size_t initial_mask = mi_bitmap_mask_(initial, initial_idx);
// initial field
size_t newmap;
field = initial_field;
map = mi_atomic_load_relaxed(field);
do {
newmap = (map | initial_mask);
if ((map & initial_mask) != 0) { goto rollback; };
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
// intermediate fields
while (++field < final_field) {
newmap = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
map = 0;
if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
}
// final field
mi_assert_internal(field == final_field);
map = mi_atomic_load_relaxed(field);
do {
newmap = (map | final_mask);
if ((map & final_mask) != 0) { goto rollback; }
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
// claimed!
mi_stat_counter_increase(stats->arena_crossover_count,1);
*bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
return true;
rollback:
// roll back intermediate fields
// (we just failed to claim `field` so decrement first)
while (--field > initial_field) {
newmap = 0;
map = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
mi_assert_internal(mi_atomic_load_relaxed(field) == map);
mi_atomic_store_release(field, newmap);
}
if (field == initial_field) { // (if we failed on the initial field, `field + 1 == initial_field`)
map = mi_atomic_load_relaxed(field);
do {
mi_assert_internal((map & initial_mask) == initial_mask);
newmap = (map & ~initial_mask);
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
}
mi_stat_counter_increase(stats->arena_rollback_count,1);
// retry? (we make a recursive call instead of goto to be able to use const declarations)
if (retries <= 2) {
return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats);
}
else {
return false;
}
}
// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) {
mi_assert_internal(count > 0);
if (count <= 2) {
// we don't bother with crossover fields for small counts
return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx);
}
// visit the fields
size_t idx = start_field_idx;
for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
if (idx >= bitmap_fields) { idx = 0; } // wrap
// first try to claim inside a field
/*
if (count <= MI_BITMAP_FIELD_BITS) {
if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
return true;
}
}
*/
// if that fails, then try to claim across fields
if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) {
return true;
}
}
return false;
}
// Helper for masks across fields; returns the number of full intermediate fields (`mid_count`); `post_mask` may be 0
static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
MI_UNUSED(bitmap_fields);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
*pre_mask = mi_bitmap_mask_(count, bitidx);
*mid_mask = 0;
*post_mask = 0;
mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields);
return 0;
}
else {
const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx;
mi_assert_internal(pre_bits < count);
*pre_mask = mi_bitmap_mask_(pre_bits, bitidx);
count -= pre_bits;
const size_t mid_count = (count / MI_BITMAP_FIELD_BITS);
*mid_mask = MI_BITMAP_FIELD_FULL;
count %= MI_BITMAP_FIELD_BITS;
*post_mask = (count==0 ? 0 : mi_bitmap_mask_(count, 0));
mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (count==0 ? 0 : 1) < bitmap_fields);
return mid_count;
}
}
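/* Illustration (assumes 64-bit size_t; not part of the original file): a run of
   count = 100 bits starting at bitidx = 60 in field 0 splits as
     pre_bits  = 64 - 60 = 4    -> pre_mask covers bits 60..63 of the first field
     mid_count = 96 / 64 = 1    -> one full intermediate field (mid_mask = all ones)
     post bits = 96 % 64 = 32   -> post_mask covers bits 0..31 of the final field */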
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
size_t idx = mi_bitmap_index_field(bitmap_idx);
size_t pre_mask;
size_t mid_mask;
size_t post_mask;
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
bool all_one = true;
mi_bitmap_field_t* field = &bitmap[idx];
size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask); // clear first part
if ((prev & pre_mask) != pre_mask) all_one = false;
while(mid_count-- > 0) {
prev = mi_atomic_and_acq_rel(field++, ~mid_mask); // clear mid part
if ((prev & mid_mask) != mid_mask) all_one = false;
}
if (post_mask!=0) {
prev = mi_atomic_and_acq_rel(field, ~post_mask); // clear end part
if ((prev & post_mask) != post_mask) all_one = false;
}
return all_one;
}
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) {
size_t idx = mi_bitmap_index_field(bitmap_idx);
size_t pre_mask;
size_t mid_mask;
size_t post_mask;
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
bool all_zero = true;
bool any_zero = false;
_Atomic(size_t)*field = &bitmap[idx];
size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
if ((prev & pre_mask) != 0) all_zero = false;
if ((prev & pre_mask) != pre_mask) any_zero = true;
while (mid_count-- > 0) {
prev = mi_atomic_or_acq_rel(field++, mid_mask);
if ((prev & mid_mask) != 0) all_zero = false;
if ((prev & mid_mask) != mid_mask) any_zero = true;
}
if (post_mask!=0) {
prev = mi_atomic_or_acq_rel(field, post_mask);
if ((prev & post_mask) != 0) all_zero = false;
if ((prev & post_mask) != post_mask) any_zero = true;
}
if (pany_zero != NULL) { *pany_zero = any_zero; }
return all_zero;
}
// Returns `true` if all `count` bits were 1.
// `any_ones` is `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
size_t idx = mi_bitmap_index_field(bitmap_idx);
size_t pre_mask;
size_t mid_mask;
size_t post_mask;
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
bool all_ones = true;
bool any_ones = false;
mi_bitmap_field_t* field = &bitmap[idx];
size_t prev = mi_atomic_load_relaxed(field++);
if ((prev & pre_mask) != pre_mask) all_ones = false;
if ((prev & pre_mask) != 0) any_ones = true;
while (mid_count-- > 0) {
prev = mi_atomic_load_relaxed(field++);
if ((prev & mid_mask) != mid_mask) all_ones = false;
if ((prev & mid_mask) != 0) any_ones = true;
}
if (post_mask!=0) {
prev = mi_atomic_load_relaxed(field);
if ((prev & post_mask) != post_mask) all_ones = false;
if ((prev & post_mask) != 0) any_ones = true;
}
if (pany_ones != NULL) { *pany_ones = any_ones; }
return all_ones;
}
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
}
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
bool any_ones;
mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
return any_ones;
}
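/* Hedged usage sketch of the cross-field API (illustration only, not part of the
   original file): claim 100 bits, which necessarily spans fields on 64-bit platforms. */
static void mi_bitmap_claim_across_example(mi_stats_t* stats) {
  mi_bitmap_field_t fields[4] = { 0 };
  mi_bitmap_t bitmap = &fields[0];
  mi_bitmap_index_t idx;
  if (_mi_bitmap_try_find_from_claim_across(bitmap, 4, 0, 100, &idx, stats)) {
    mi_assert_internal(_mi_bitmap_is_claimed_across(bitmap, 4, 100, idx));
    _mi_bitmap_unclaim_across(bitmap, 4, 100, idx);
  }
}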

@@ -1,110 +0,0 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
/* ----------------------------------------------------------------------------
Concurrent bitmap that can set/reset sequences of bits atomically,
represented as an array of fields where each field is a machine word (`size_t`)
There are two APIs: the standard one cannot have sequences that cross
between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS);
this is used in region allocation.
The `_across`-suffixed functions do allow sequences that cross over
between the fields. (This is used in arena allocation.)
---------------------------------------------------------------------------- */
#pragma once
#ifndef MI_BITMAP_H
#define MI_BITMAP_H
/* -----------------------------------------------------------
Bitmap definition
----------------------------------------------------------- */
#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE)
#define MI_BITMAP_FIELD_FULL (~((size_t)0)) // all bits set
// An atomic bitmap of `size_t` fields
typedef _Atomic(size_t) mi_bitmap_field_t;
typedef mi_bitmap_field_t* mi_bitmap_t;
// A bitmap index is the index of the bit in a bitmap.
typedef size_t mi_bitmap_index_t;
// Create a bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create_ex(size_t idx, size_t bitidx) {
mi_assert_internal(bitidx <= MI_BITMAP_FIELD_BITS);
return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
}
static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
return mi_bitmap_index_create_ex(idx,bitidx);
}
// Get the field index from a bit index.
static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
return (bitmap_idx / MI_BITMAP_FIELD_BITS);
}
// Get the bit index in a bitmap field
static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) {
return (bitmap_idx % MI_BITMAP_FIELD_BITS);
}
// Get the full bit index
static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) {
return bitmap_idx;
}
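/* Illustration (assumes a 64-bit size_t, so MI_BITMAP_FIELD_BITS == 64):
   mi_bitmap_index_create(2, 5) == 2*64 + 5 == 133, and conversely
   mi_bitmap_index_field(133) == 2 and mi_bitmap_index_bit_in_field(133) == 5. */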
/* -----------------------------------------------------------
Claim a bit sequence atomically
----------------------------------------------------------- */
// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` on success, i.e. when all `count` bits were 0 previously.
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero);
bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
//--------------------------------------------------------------------------
// the `_across` functions work on bitmaps where sequences can cross over
// between the fields. This is used in arena allocation
//--------------------------------------------------------------------------
// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats);
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
#endif

@@ -339,59 +339,6 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
}
}
/*
// Abandon a page with used blocks at the end of a thread.
// Note: only call if it is ensured that no references exist from
// the `page->heap->thread_delayed_free` into this page.
// Currently only called through `mi_heap_collect_ex` which ensures this.
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
mi_assert_internal(page != NULL);
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(pq == mi_page_queue_of(page));
mi_assert_internal(mi_page_heap(page) != NULL);
mi_heap_t* pheap = mi_page_heap(page);
// remove from our page list
mi_page_queue_remove(pq, page);
// page is no longer associated with our heap
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_page_set_heap(page, NULL);
#if (MI_DEBUG>1) && !MI_TRACK_ENABLED
// check there are no references left..
for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
mi_assert_internal(_mi_ptr_page(block) != page);
}
#endif
// and abandon it
mi_assert_internal(mi_page_is_abandoned(page));
_mi_arena_page_abandon(page, pheap->tld);
}
// force abandon a page
void _mi_page_force_abandon(mi_page_t* page) {
mi_heap_t* heap = mi_page_heap(page);
// mark page as not using delayed free
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
// ensure this page is no longer in the heap delayed free list
_mi_heap_delayed_free_all(heap);
// TODO: can we still access the page meta-info even if it is freed?
if (page->capacity == 0) return; // it may have been freed now
// and now unlink it from the page queue and abandon (or free)
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
if (mi_page_all_free(page)) {
_mi_page_free(page, pq, false);
}
else {
_mi_page_abandon(page, pq);
}
}
*/
// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) {

@@ -1,126 +0,0 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
/* -----------------------------------------------------------
The following functions are to reliably find the segment or
block that encompasses any pointer p (or NULL if it is not
in any of our segments).
We maintain a bitmap of all memory with one bit per MI_SEGMENT_SIZE (64 MiB)
chunk; a bit is set to 1 if that chunk contains the segment meta data.
----------------------------------------------------------- */
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
// Reduce total address space to reduce .bss (due to the `mi_segment_map`)
#if (MI_INTPTR_SIZE > 4) && MI_TRACK_ASAN
#define MI_SEGMENT_MAP_MAX_ADDRESS (128*1024ULL*MI_GiB) // 128 TiB (see issue #881)
#elif (MI_INTPTR_SIZE > 4)
#define MI_SEGMENT_MAP_MAX_ADDRESS (48*1024ULL*MI_GiB) // 48 TiB
#else
#define MI_SEGMENT_MAP_MAX_ADDRESS (UINT32_MAX)
#endif
#define MI_SEGMENT_MAP_PART_SIZE (MI_INTPTR_SIZE*MI_KiB - 128) // 128 > sizeof(mi_memid_t) !
#define MI_SEGMENT_MAP_PART_BITS (8*MI_SEGMENT_MAP_PART_SIZE)
#define MI_SEGMENT_MAP_PART_ENTRIES (MI_SEGMENT_MAP_PART_SIZE / MI_INTPTR_SIZE)
#define MI_SEGMENT_MAP_PART_BIT_SPAN (MI_SEGMENT_ALIGN)
#define MI_SEGMENT_MAP_PART_SPAN (MI_SEGMENT_MAP_PART_BITS * MI_SEGMENT_MAP_PART_BIT_SPAN)
#define MI_SEGMENT_MAP_MAX_PARTS ((MI_SEGMENT_MAP_MAX_ADDRESS / MI_SEGMENT_MAP_PART_SPAN) + 1)
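/* Worked sizing example (illustration; assumes 64-bit pointers and the 64 MiB segment
   alignment mentioned in the comment above):
     MI_SEGMENT_MAP_PART_SIZE = 8*1024 - 128     = 8064 bytes
     MI_SEGMENT_MAP_PART_BITS = 8 * 8064         = 64512 bits
     MI_SEGMENT_MAP_PART_SPAN = 64512 * 64 MiB   ~ 3.94 TiB of address space per part
     MI_SEGMENT_MAP_MAX_PARTS = 48 TiB / 3.94 TiB + 1 = 13
   so the static footprint is just 13 pointers, and each ~8 KiB part is allocated on demand. */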
// A part of the segment map.
typedef struct mi_segmap_part_s {
mi_memid_t memid;
_Atomic(uintptr_t) map[MI_SEGMENT_MAP_PART_ENTRIES];
} mi_segmap_part_t;
// Allocate parts on-demand to reduce .bss footprint
static _Atomic(mi_segmap_part_t*) mi_segment_map[MI_SEGMENT_MAP_MAX_PARTS]; // = { NULL, .. }
static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bool create_on_demand, size_t* idx, size_t* bitidx) {
// note: segment can be invalid or NULL.
mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE?
*idx = 0;
*bitidx = 0;
if ((uintptr_t)segment >= MI_SEGMENT_MAP_MAX_ADDRESS) return NULL;
const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_MAP_PART_SPAN;
if (segindex >= MI_SEGMENT_MAP_MAX_PARTS) return NULL;
mi_segmap_part_t* part = mi_atomic_load_ptr_relaxed(mi_segmap_part_t, &mi_segment_map[segindex]);
// allocate on demand to reduce .bss footprint
if (part == NULL) {
if (!create_on_demand) return NULL;
mi_memid_t memid;
part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid, NULL);
if (part == NULL) return NULL;
mi_segmap_part_t* expected = NULL;
if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) {
_mi_os_free(part, sizeof(mi_segmap_part_t), memid, NULL);
part = expected;
if (part == NULL) return NULL;
}
}
mi_assert(part != NULL);
const uintptr_t offset = ((uintptr_t)segment) % MI_SEGMENT_MAP_PART_SPAN;
const uintptr_t bitofs = offset / MI_SEGMENT_MAP_PART_BIT_SPAN;
*idx = bitofs / MI_INTPTR_BITS;
*bitidx = bitofs % MI_INTPTR_BITS;
return part;
}
void _mi_segment_map_allocated_at(const mi_segment_t* segment) {
if (segment->memid.memkind == MI_MEM_ARENA) return; // we look up segments in the arenas first and don't need the segment map
size_t index;
size_t bitidx;
mi_segmap_part_t* part = mi_segment_map_index_of(segment, true /* alloc map if needed */, &index, &bitidx);
if (part == NULL) return; // outside our address range..
uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]);
uintptr_t newmask;
do {
newmask = (mask | ((uintptr_t)1 << bitidx));
} while (!mi_atomic_cas_weak_release(&part->map[index], &mask, newmask));
}
void _mi_segment_map_freed_at(const mi_segment_t* segment) {
if (segment->memid.memkind == MI_MEM_ARENA) return;
size_t index;
size_t bitidx;
mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* don't alloc if not present */, &index, &bitidx);
if (part == NULL) return; // outside our address range..
uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]);
uintptr_t newmask;
do {
newmask = (mask & ~((uintptr_t)1 << bitidx));
} while (!mi_atomic_cas_weak_release(&part->map[index], &mask, newmask));
}
// Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
static mi_segment_t* _mi_segment_of(const void* p) {
if (p == NULL) return NULL;
mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL
size_t index;
size_t bitidx;
mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* don't alloc if not present */, &index, &bitidx);
if (part == NULL) return NULL;
const uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]);
if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) {
bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(cookie_ok); MI_UNUSED(cookie_ok);
return segment; // yes, allocated by us
}
return NULL;
}
// Is this a valid pointer in our heap?
static bool mi_is_valid_pointer(const void* p) {
// first check if it is in an arena, then check if it is OS allocated
return (_mi_arena_contains(p) || _mi_segment_of(p) != NULL);
}
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
return mi_is_valid_pointer(p);
}
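/* Usage sketch (illustration only, not part of the original file): a block returned by
   mi_malloc should be reported as inside a heap region, while a stack address typically
   is not. */
static bool mi_heap_region_example(void) {
  void* p = mi_malloc(100);
  int local = 0;
  bool ok = mi_is_in_heap_region(p) && !mi_is_in_heap_region(&local);
  mi_free(p);
  return ok;
}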

File diff suppressed because it is too large