mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-07 11:58:41 +03:00)
merge from dev-win
commit 25dca38ef9
11 changed files with 234 additions and 199 deletions
@@ -780,8 +780,8 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
 // live blocks (reached through other threads). Such segments
 // are "abandoned" and will be reclaimed by other threads to
 // reuse their pages and/or free them eventually
-static volatile mi_segment_t* abandoned = NULL;
-static volatile uintptr_t abandoned_count = 0;
+static volatile _Atomic(mi_segment_t*) abandoned; // = NULL;
+static volatile _Atomic(uintptr_t) abandoned_count; // = 0;

 static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(segment->used == segment->abandoned);
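
Note: the new declarations only wrap the variable types in _Atomic(...); the initializers move into comments, which is safe because objects with static storage duration are zero-initialized anyway. A minimal stand-alone sketch of the same idea in plain C11 <stdatomic.h> (hypothetical my_* names, not mimalloc's mi_atomic layer):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct my_segment_s {
      struct my_segment_s* abandoned_next;   // intrusive link, like mi_segment_t
    } my_segment_t;

    // Statics are zero-initialized, so no explicit initializer is needed.
    static _Atomic(my_segment_t*) abandoned_head;    // == NULL at start-up
    static _Atomic(uintptr_t)     abandoned_count;   // == 0 at start-up
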
@@ -808,9 +808,9 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   segment->thread_id = 0;
   mi_segment_t* next;
   do {
-    next = (mi_segment_t*)abandoned;
-    mi_atomic_write_ptr((volatile void**)&segment->abandoned_next, next);
-  } while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, next));
+    next = (mi_segment_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*,&abandoned));
+    mi_atomic_write_ptr(mi_atomic_cast(void*,&segment->abandoned_next), next);
+  } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&abandoned), segment, next));
   mi_atomic_increment(&abandoned_count);
 }

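
The loop above is the standard lock-free list push: read the current head, link the new segment in front of it, then CAS the head from the old value to the new segment, retrying on failure. The `_weak` CAS variant fits this shape, since a spurious failure simply causes another trip through the loop. A sketch of the same pattern with C11 atomics, reusing the hypothetical declarations from the sketch above:

    // Push `seg` onto the global abandoned list with a CAS retry loop.
    static void my_abandon_push(my_segment_t* seg) {
      my_segment_t* next = atomic_load_explicit(&abandoned_head, memory_order_relaxed);
      do {
        seg->abandoned_next = next;   // link before publishing
      } while (!atomic_compare_exchange_weak(&abandoned_head, &next, seg));
      // a failed CAS refreshes `next` with the current head before the retry
      atomic_fetch_add_explicit(&abandoned_count, 1, memory_order_relaxed);
    }
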
@@ -844,7 +844,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
     mi_segment_t* segment;
     do {
       segment = (mi_segment_t*)abandoned;
-    } while(segment != NULL && !mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, (mi_segment_t*)segment->abandoned_next, segment));
+    } while(segment != NULL && !mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&abandoned), (mi_segment_t*)segment->abandoned_next, segment));
     if (segment==NULL) break; // stop early if no more segments available

     // got it.
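
The reclaim side pops the head with the mirror-image loop: read the head, then CAS it to head->abandoned_next, retrying while another thread changes the list underneath. A C11 sketch of that shape, continuing the hypothetical my_* declarations (a bare pointer-CAS pop like this is in general exposed to the ABA problem, which the sketch does not address):

    // Pop one segment, or return NULL when the list is empty.
    static my_segment_t* my_abandoned_pop(void) {
      my_segment_t* seg = atomic_load_explicit(&abandoned_head, memory_order_relaxed);
      while (seg != NULL &&
             !atomic_compare_exchange_weak(&abandoned_head, &seg, seg->abandoned_next)) {
        // a failed CAS refreshed `seg` with the current head; retry
      }
      if (seg != NULL) atomic_fetch_sub_explicit(&abandoned_count, 1, memory_order_relaxed);
      return seg;
    }
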
@@ -960,7 +960,7 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_
 #define MI_SEGMENT_MAP_SIZE   (MI_SEGMENT_MAP_BITS / 8)
 #define MI_SEGMENT_MAP_WSIZE  (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE)

-static volatile uintptr_t mi_segment_map[MI_SEGMENT_MAP_WSIZE];  // 2KiB per TB with 64MiB segments
+static volatile _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE];  // 2KiB per TB with 64MiB segments

 static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
   mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on MI_SEGMENT_SIZE?
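
The "2KiB per TB" figure follows from the map granularity: with 64 MiB segments there are 1 TiB / 64 MiB = 16,384 possible segment start addresses per TiB, i.e. 16,384 bits = 2 KiB of map. A hypothetical sketch of how an address turns into a word index plus bit index, continuing the earlier C11 sketch (the my_* names and constant are stand-ins, assuming a 64-bit platform):

    #include <stddef.h>   // size_t

    // Stand-in for MI_SEGMENT_SIZE: 64 MiB segments.
    #define MY_SEGMENT_SIZE  ((uintptr_t)64 * 1024 * 1024)

    // One bit per possible segment start address.
    static size_t my_segment_map_index_of(const void* segment_start, size_t* bitidx) {
      uintptr_t segindex = (uintptr_t)segment_start / MY_SEGMENT_SIZE;  // segment slot number
      *bitidx = segindex % (8 * sizeof(uintptr_t));   // bit position within one map word
      return segindex / (8 * sizeof(uintptr_t));      // index of that word in the map
    }
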
@@ -979,7 +979,7 @@ static void mi_segment_map_allocated_at(const mi_segment_t* segment) {
   do {
     mask = mi_segment_map[index];
     newmask = (mask | ((uintptr_t)1 << bitidx));
-  } while (!mi_atomic_compare_exchange(&mi_segment_map[index], newmask, mask));
+  } while (!mi_atomic_cas_weak(&mi_segment_map[index], newmask, mask));
 }

 static void mi_segment_map_freed_at(const mi_segment_t* segment) {
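
Because each map entry is a single atomic word, the set-bit loop above and the clear-bit loop in the next hunk could equivalently be expressed as one atomic OR or AND. A C11 sketch of that alternative (hypothetical my_* names and a placeholder array size, not the mimalloc API):

    // Hypothetical map with the same layout as mi_segment_map.
    static _Atomic(uintptr_t) my_segment_map[2048 / sizeof(uintptr_t)];

    static void my_map_mark_allocated(size_t index, size_t bitidx) {
      atomic_fetch_or(&my_segment_map[index], (uintptr_t)1 << bitidx);     // set the bit
    }

    static void my_map_mark_freed(size_t index, size_t bitidx) {
      atomic_fetch_and(&my_segment_map[index], ~((uintptr_t)1 << bitidx)); // clear the bit
    }
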
@@ -992,7 +992,7 @@ static void mi_segment_map_freed_at(const mi_segment_t* segment) {
   do {
     mask = mi_segment_map[index];
     newmask = (mask & ~((uintptr_t)1 << bitidx));
-  } while (!mi_atomic_compare_exchange(&mi_segment_map[index], newmask, mask));
+  } while (!mi_atomic_cas_weak(&mi_segment_map[index], newmask, mask));
 }

 // Determine the segment belonging to a pointer or NULL if it is not in a valid segment.