mirror of https://github.com/microsoft/mimalloc.git
synced 2025-07-07 03:48:42 +03:00

commit 03071dec0f
merge from dev-atomic with new atomic interface

20 changed files with 410 additions and 415 deletions
@@ -685,6 +685,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
   }
   // zero the segment info? -- not always needed as it is zero initialized from the OS
+  mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
   if (!is_zero) {
     ptrdiff_t ofs = offsetof(mi_segment_t, next);
     size_t prefix = offsetof(mi_segment_t, slices) - ofs;
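
Note: the one-line addition above replaces what used to be a plain `NULL` assignment with an atomic release store, so thread sanitizer no longer reports a race on `abandoned_next`, a field other threads read concurrently through the abandoned list. A minimal sketch of what such a wrapper could expand to in C11 atomics (the helper name is illustrative, not mimalloc's actual macro):

#include <stdatomic.h>

typedef struct mi_segment_s mi_segment_t;

// Hypothetical expansion of mi_atomic_store_ptr_release: a release store
// through an _Atomic pointer field. The release ordering publishes all
// earlier initialization of the segment before the pointer becomes visible.
static inline void store_abandoned_next(_Atomic(mi_segment_t*)* field,
                                        mi_segment_t* value) {
  atomic_store_explicit(field, value, memory_order_release);
}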
@@ -891,77 +892,75 @@ static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_se
 // This is a list of visited abandoned pages that were full at the time.
 // this list migrates to `abandoned` when that becomes NULL. The use of
 // this list reduces contention and the rate at which segments are visited.
-static mi_decl_cache_align volatile _Atomic(mi_segment_t*) abandoned_visited; // = NULL
+static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL

 // The abandoned page list (tagged as it supports pop)
-static mi_decl_cache_align volatile _Atomic(mi_tagged_segment_t) abandoned; // = NULL
+static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL

 // We also maintain a count of current readers of the abandoned list
 // in order to prevent resetting/decommitting segment memory if it might
 // still be read.
-static mi_decl_cache_align volatile _Atomic(uintptr_t) abandoned_readers; // = 0
+static mi_decl_cache_align _Atomic(uintptr_t) abandoned_readers; // = 0

 // Push on the visited list
 static void mi_abandoned_visited_push(mi_segment_t* segment) {
   mi_assert_internal(segment->thread_id == 0);
-  mi_assert_internal(segment->abandoned_next == NULL);
+  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t,&segment->abandoned_next) == NULL);
   mi_assert_internal(segment->next == NULL);
   mi_assert_internal(segment->used > 0);
-  mi_segment_t* anext;
+  mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited);
   do {
-    anext = mi_atomic_read_ptr_relaxed(mi_segment_t, &abandoned_visited);
-    segment->abandoned_next = anext;
-  } while (!mi_atomic_cas_ptr_weak(mi_segment_t, &abandoned_visited, segment, anext));
+    mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, anext);
+  } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &abandoned_visited, &anext, segment));
 }

 // Move the visited list to the abandoned list.
 static bool mi_abandoned_visited_revisit(void)
 {
   // quick check if the visited list is empty
-  if (mi_atomic_read_ptr_relaxed(mi_segment_t,&abandoned_visited)==NULL) return false;
+  if (mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited) == NULL) return false;

   // grab the whole visited list
-  mi_segment_t* first = mi_atomic_exchange_ptr(mi_segment_t, &abandoned_visited, NULL);
+  mi_segment_t* first = mi_atomic_exchange_ptr_acq_rel(mi_segment_t, &abandoned_visited, NULL);
   if (first == NULL) return false;

   // first try to swap directly if the abandoned list happens to be NULL
-  const mi_tagged_segment_t ts = mi_atomic_read_relaxed(&abandoned);
+  mi_tagged_segment_t afirst;
+  mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
   if (mi_tagged_segment_ptr(ts)==NULL) {
     afirst = mi_tagged_segment(first, ts);
-    if (mi_atomic_cas_strong(&abandoned, afirst, ts)) return true;
+    if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) return true;
   }

   // find the last element of the visited list: O(n)
   mi_segment_t* last = first;
-  while (last->abandoned_next != NULL) {
-    last = last->abandoned_next;
+  mi_segment_t* next;
+  while ((next = mi_atomic_load_ptr_relaxed(mi_segment_t, &last->abandoned_next)) != NULL) {
+    last = next;
   }

   // and atomically prepend to the abandoned list
   // (no need to increase the readers as we don't access the abandoned segments)
-  mi_tagged_segment_t anext;
+  mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned);
   do {
-    anext = mi_atomic_read_relaxed(&abandoned);
-    last->abandoned_next = mi_tagged_segment_ptr(anext);
+    mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext));
     afirst = mi_tagged_segment(first, anext);
-  } while (!mi_atomic_cas_weak(&abandoned, afirst, anext));
+  } while (!mi_atomic_cas_weak_release(&abandoned, &anext, afirst));
   return true;
 }

 // Push on the abandoned list.
 static void mi_abandoned_push(mi_segment_t* segment) {
   mi_assert_internal(segment->thread_id == 0);
-  mi_assert_internal(segment->abandoned_next == NULL);
+  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
   mi_assert_internal(segment->next == NULL);
   mi_assert_internal(segment->used > 0);
-  mi_tagged_segment_t ts;
   mi_tagged_segment_t next;
+  mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
   do {
-    ts = mi_atomic_read_relaxed(&abandoned);
-    segment->abandoned_next = mi_tagged_segment_ptr(ts);
+    mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, mi_tagged_segment_ptr(ts));
     next = mi_tagged_segment(segment, ts);
-  } while (!mi_atomic_cas_weak(&abandoned, next, ts));
+  } while (!mi_atomic_cas_weak_release(&abandoned, &ts, next));
 }

 // Wait until there are no more pending reads on segments that used to be in the abandoned list
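
Note: this hunk shows the heart of the new atomic interface. The old wrappers took expected and desired values by value (`mi_atomic_cas_weak(p, desired, expected)`), while the new ones follow C11 and take the expected value by pointer (`mi_atomic_cas_weak_release(p, &expected, desired)`), updating it in place when the exchange fails. That is why the explicit re-read at the top of each retry loop disappears. A hedged sketch of such a wrapper over <stdatomic.h> (written as a function rather than mimalloc's type-generic macro; names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// New-style weak CAS with release ordering on success: on failure the
// current value is stored back into *expected, so a retry loop can use it
// directly in the next iteration instead of re-reading the atomic.
static inline bool cas_weak_release(_Atomic(uintptr_t)* p,
                                    uintptr_t* expected, uintptr_t desired) {
  return atomic_compare_exchange_weak_explicit(
      p, expected, desired,
      memory_order_release,    // ordering when the exchange succeeds
      memory_order_relaxed);   // ordering when it fails and we retry
}

The push loops above also release-store `abandoned_next` before publishing the segment, so a thread that wins a subsequent pop observes a fully initialized link.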
@@ -969,7 +968,7 @@ static void mi_abandoned_push(mi_segment_t* segment) {
 void _mi_abandoned_await_readers(void) {
   uintptr_t n;
   do {
-    n = mi_atomic_read(&abandoned_readers);
+    n = mi_atomic_load_acquire(&abandoned_readers);
     if (n != 0) mi_atomic_yield();
   } while (n != 0);
 }
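
Note: `_mi_abandoned_await_readers` spins until the reader count used by `mi_abandoned_pop` drains to zero, so memory that held abandoned segments is not reset or decommitted while another thread may still be dereferencing an `abandoned_next` link. A sketch of the protocol under C11 atomics (names illustrative; the orderings follow the diff, where enter/exit are relaxed and the waiter uses an acquire load):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic(uintptr_t) readers;  // stands in for abandoned_readers

void reader_enter(void) { atomic_fetch_add_explicit(&readers, 1, memory_order_relaxed); }
void reader_exit(void)  { atomic_fetch_sub_explicit(&readers, 1, memory_order_relaxed); }

// Called before decommitting segment memory (e.g. from the region layer):
// spin until no pop is mid-read.
void await_readers(void) {
  uintptr_t n;
  do {
    n = atomic_load_explicit(&readers, memory_order_acquire);
    // mimalloc yields here via mi_atomic_yield() instead of busy-spinning
  } while (n != 0);
}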
@@ -978,7 +977,7 @@ void _mi_abandoned_await_readers(void) {
 static mi_segment_t* mi_abandoned_pop(void) {
   mi_segment_t* segment;
   // Check efficiently if it is empty (or if the visited list needs to be moved)
-  mi_tagged_segment_t ts = mi_atomic_read_relaxed(&abandoned);
+  mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
   segment = mi_tagged_segment_ptr(ts);
   if (mi_likely(segment == NULL)) {
     if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL
@@ -988,19 +987,21 @@ static mi_segment_t* mi_abandoned_pop(void) {

   // Do a pop. We use a reader count to prevent
   // a segment to be decommitted while a read is still pending,
-  // and a tagged pointer to prevent A-B-A link corruption.
-  mi_atomic_increment(&abandoned_readers); // ensure no segment gets decommitted
+  // and a tagged pointer to prevent A-B-A link corruption.
+  // (this is called from `region.c:_mi_mem_free` for example)
+  mi_atomic_increment_relaxed(&abandoned_readers); // ensure no segment gets decommitted
   mi_tagged_segment_t next = 0;
+  ts = mi_atomic_load_acquire(&abandoned);
   do {
-    ts = mi_atomic_read(&abandoned);
     segment = mi_tagged_segment_ptr(ts);
     if (segment != NULL) {
-      next = mi_tagged_segment(segment->abandoned_next, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted
+      mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next);
+      next = mi_tagged_segment(anext, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted
     }
-  } while (segment != NULL && !mi_atomic_cas_weak(&abandoned, next, ts));
-  mi_atomic_decrement(&abandoned_readers); // release reader lock
+  } while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&abandoned, &ts, next));
+  mi_atomic_decrement_relaxed(&abandoned_readers); // release reader lock
   if (segment != NULL) {
-    segment->abandoned_next = NULL;
+    mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
   }
   return segment;
 }
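
Note: two mechanisms protect this pop. The reader count keeps the segment's memory mapped while `abandoned_next` is read, and the tag packed into `mi_tagged_segment_t` changes on every update of `abandoned`, so the CAS cannot succeed if the list was popped and re-pushed to the same head address in between (the classic A-B-A hazard). A self-contained sketch of the same shape over a generic node type; the pointer layout and tag width here are assumptions, not mimalloc's actual encoding:

#include <stdatomic.h>
#include <stdint.h>

// Illustrative node and tagged-head types (not mimalloc's real ones).
typedef struct node_s { _Atomic(struct node_s*) next; } node_t;
typedef uintptr_t tagged_t;
#define TAG_MASK ((uintptr_t)0xFF)  // assumes nodes are 256-byte aligned

static node_t*  tptr(tagged_t t) { return (node_t*)(t & ~TAG_MASK); }
static tagged_t tmake(node_t* p, tagged_t old) {
  return (uintptr_t)p | (((old & TAG_MASK) + 1) & TAG_MASK);  // bump tag on every update
}

static _Atomic(tagged_t) head;

// ABA-safe pop in the new style: `ts` is loaded once and then refreshed by
// each failed compare-exchange, mirroring mi_abandoned_pop above.
node_t* pop(void) {
  node_t* n;
  tagged_t next = 0;
  tagged_t ts = atomic_load_explicit(&head, memory_order_acquire);
  do {
    n = tptr(ts);
    if (n != NULL) {
      node_t* anext = atomic_load_explicit(&n->next, memory_order_relaxed);
      next = tmake(anext, ts);
    }
  } while (n != NULL &&
           !atomic_compare_exchange_weak_explicit(&head, &ts, next,
                                                  memory_order_acq_rel,
                                                  memory_order_acquire));
  return n;
}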
@@ -1012,7 +1013,7 @@ static mi_segment_t* mi_abandoned_pop(void) {
 static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(segment->used == segment->abandoned);
   mi_assert_internal(segment->used > 0);
-  mi_assert_internal(segment->abandoned_next == NULL);
+  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
   mi_assert_internal(segment->abandoned_visits == 0);
   mi_assert_expensive(mi_segment_is_valid(segment,tld));
@@ -1036,7 +1037,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   _mi_stat_increase(&tld->stats->segments_abandoned, 1);
   mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
   segment->thread_id = 0;
-  segment->abandoned_next = NULL;
+  mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
   segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
   mi_abandoned_push(segment);
 }
@@ -1118,7 +1119,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
 // Reclaim an abandoned segment; returns NULL if the segment was freed
 // set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
 static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
-  mi_assert_internal(segment->abandoned_next == NULL);
+  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
   mi_assert_expensive(mi_segment_is_valid(segment, tld));
   if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
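
Note: hunks like the three above are the mechanical part of the interface change — every remaining plain read or write of `abandoned_next`, including those inside assertions, now goes through an explicit atomic load or store. A relaxed load suffices in an assertion since only the value matters, not any ordering. A sketch of the load side (an illustrative helper, not the real type-generic macro):

#include <stdatomic.h>

typedef struct mi_segment_s mi_segment_t;

// Relaxed atomic load of a pointer field: race-free for tools like thread
// sanitizer, with no ordering constraints imposed on surrounding code.
static inline mi_segment_t* load_abandoned_next(_Atomic(mi_segment_t*)* field) {
  return atomic_load_explicit(field, memory_order_relaxed);
}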
@@ -1306,12 +1307,13 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
   // huge page segments are always abandoned and can be freed immediately by any thread
   mi_assert_internal(segment->kind==MI_SEGMENT_HUGE);
   mi_assert_internal(segment == _mi_page_segment(page));
-  mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0);
+  mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0);

   // claim it and free
   mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
   // paranoia: if this it the last reference, the cas should always succeed
-  if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
+  uintptr_t expected_tid = 0;
+  if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
     mi_block_set_next(page, block, page->free);
     page->free = block;
     page->used--;
@@ -1328,6 +1330,11 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
     // mi_segments_track_size((long)segment->segment_size, tld);
     _mi_segment_page_free(page, true, &tld->segments);
   }
+#if (MI_DEBUG!=0)
+  else {
+    mi_assert_internal(false);
+  }
+#endif
 }

 /* -----------------------------------------------------------
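
Note: in this free path the claiming CAS is strong rather than weak. It runs once instead of in a retry loop, and the new `#if (MI_DEBUG!=0)` branch asserts that the claim cannot fail when this is truly the last reference; a weak CAS may fail spuriously and would trip that assert. A sketch of the claim under C11 (illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Claim an unowned segment by installing our thread id. Strong CAS: no
// retry loop follows, so a spurious failure must be impossible.
static bool claim_segment(_Atomic(uintptr_t)* thread_id, uintptr_t my_tid) {
  uintptr_t expected = 0;  // only an unowned segment (tid 0) may be claimed
  return atomic_compare_exchange_strong_explicit(
      thread_id, &expected, my_tid,
      memory_order_acq_rel, memory_order_acquire);
}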
@@ -1371,7 +1378,7 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segment
 #define MI_SEGMENT_MAP_SIZE  (MI_SEGMENT_MAP_BITS / 8)
 #define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE)

-static volatile _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE]; // 2KiB per TB with 64MiB segments
+static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE]; // 2KiB per TB with 64MiB segments

 static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
   mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on MI_SEGMENT_SIZE?
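
Note: the map spends one bit per possible MI_SEGMENT_SIZE-aligned address, which is where the "2KiB per TB with 64MiB segments" comment comes from: 2^40 / 2^26 = 2^14 bits = 2KiB. A sketch of the index split this implies (the 26-bit shift is an assumption derived from that comment, not a constant taken from the source):

#include <stddef.h>
#include <stdint.h>

#define SEGMENT_SHIFT 26  // assumed: 64MiB segments, per the comment above

// Split a segment address into a word index into the map plus a bit index
// within that word (mirrors what mi_segment_map_index_of computes).
static size_t map_index_of(uintptr_t segment_addr, size_t* bitidx) {
  uintptr_t segindex = segment_addr >> SEGMENT_SHIFT;
  *bitidx = (size_t)(segindex % (8 * sizeof(uintptr_t)));
  return (size_t)(segindex / (8 * sizeof(uintptr_t)));
}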
@@ -1385,12 +1392,11 @@ static void mi_segment_map_allocated_at(const mi_segment_t* segment) {
   size_t index = mi_segment_map_index_of(segment, &bitidx);
   mi_assert_internal(index < MI_SEGMENT_MAP_WSIZE);
   if (index==0) return;
-  uintptr_t mask;
+  uintptr_t mask = mi_segment_map[index];
   uintptr_t newmask;
   do {
-    mask = mi_segment_map[index];
     newmask = (mask | ((uintptr_t)1 << bitidx));
-  } while (!mi_atomic_cas_weak(&mi_segment_map[index], newmask, mask));
+  } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask));
 }

 static void mi_segment_map_freed_at(const mi_segment_t* segment) {
@@ -1398,12 +1404,11 @@ static void mi_segment_map_freed_at(const mi_segment_t* segment) {
   size_t index = mi_segment_map_index_of(segment, &bitidx);
   mi_assert_internal(index < MI_SEGMENT_MAP_WSIZE);
   if (index == 0) return;
-  uintptr_t mask;
+  uintptr_t mask = mi_segment_map[index];
   uintptr_t newmask;
-  do {
-    mask = mi_segment_map[index];
+  do {
     newmask = (mask & ~((uintptr_t)1 << bitidx));
-  } while (!mi_atomic_cas_weak(&mi_segment_map[index], newmask, mask));
+  } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask));
 }

 // Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
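
Note: both map updates use the same retry idiom as the list code above — load once, compute the new mask, and let a failed weak CAS refresh `mask` for the next attempt. In C11 form (a sketch, not mimalloc's macro layer):

#include <stdatomic.h>
#include <stdint.h>

// Set one bit in an atomic word; a failed compare-exchange writes the
// current value back into `mask`, so no explicit re-read is needed.
static void map_set_bit(_Atomic(uintptr_t)* word, size_t bitidx) {
  uintptr_t mask = atomic_load_explicit(word, memory_order_relaxed);
  uintptr_t newmask;
  do {
    newmask = mask | ((uintptr_t)1 << bitidx);
  } while (!atomic_compare_exchange_weak_explicit(
               word, &mask, newmask,
               memory_order_release, memory_order_relaxed));
}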