replace atomics with C11/C++ atomics with explicit memory order; passes tsan. Issue #130

This commit is contained in:
daan 2020-07-26 18:00:38 -07:00
parent a468430772
commit ef8e5d18a6
14 changed files with 248 additions and 315 deletions
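
Note: the renamed wrappers encode the memory order in the name. A minimal sketch of how such wrappers could sit on top of C11 <stdatomic.h> (illustrative only; the actual definitions in mimalloc's atomic.h also cover the C++ build via std::atomic):

#include <stdatomic.h>

// Illustrative mapping of the new names onto C11 atomics with explicit order.
#define mi_atomic_load_relaxed(p)      atomic_load_explicit(p, memory_order_relaxed)
#define mi_atomic_load_acquire(p)      atomic_load_explicit(p, memory_order_acquire)
#define mi_atomic_store_release(p,x)   atomic_store_explicit(p, x, memory_order_release)
#define mi_atomic_add_acq_rel(p,x)     atomic_fetch_add_explicit(p, x, memory_order_acq_rel)
#define mi_atomic_cas_strong_acq_rel(p,exp,des) \
  atomic_compare_exchange_strong_explicit(p, exp, des, memory_order_acq_rel, memory_order_acquire)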


@@ -270,11 +270,11 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
void* p = NULL;
if ((large_only || use_large_os_page(size, try_alignment))
&& allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
-uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
+uintptr_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
if (!large_only && try_ok > 0) {
// if a large page allocation fails, subsequent calls to VirtualAlloc seem to get very expensive.
// therefore, once a large page allocation has failed, we don't try again for the next `large_page_try_ok` allocations.
-mi_atomic_cas_strong(&large_page_try_ok, &try_ok, try_ok - 1);
+mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
}
else {
// large OS pages must always reserve and commit.
@@ -283,7 +283,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
if (large_only) return p;
// fall back to non-large page allocation on error (`p == NULL`).
if (p == NULL) {
-mi_atomic_write(&large_page_try_ok,10); // on error, don't try again for the next N allocations
+mi_atomic_store_release(&large_page_try_ok,10); // on error, don't try again for the next N allocations
}
}
}
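
The pattern in this hunk, pulled out for clarity: a shared counter suppresses large-page attempts for a while after a failure. A self-contained sketch with the same memory orders (function names are illustrative, not mimalloc's API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic(uintptr_t) large_page_try_ok; // 0 = ok to try large pages

static bool should_try_large_pages(void) {
  uintptr_t try_ok = atomic_load_explicit(&large_page_try_ok, memory_order_acquire);
  if (try_ok > 0) {
    // Count down one suppressed attempt; losing the CAS race just means
    // another thread decremented first, which is fine for a heuristic.
    atomic_compare_exchange_strong_explicit(&large_page_try_ok, &try_ok, try_ok - 1,
                                            memory_order_acq_rel, memory_order_acquire);
    return false;
  }
  return true;
}

static void on_large_page_failure(void) {
  // Suppress large-page attempts for the next 10 allocations.
  atomic_store_explicit(&large_page_try_ok, 10, memory_order_release);
}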
@@ -361,13 +361,13 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
#endif
if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
static _Atomic(uintptr_t) large_page_try_ok; // = 0;
-uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
+uintptr_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
if (!large_only && try_ok > 0) {
// If the OS is not configured for large OS pages, or the user lacks the required
// permissions, the `mmap` will always fail (though it might also fail for other reasons).
// Therefore, once a large page allocation has failed, we don't try again for the next
// `large_page_try_ok` allocations, to avoid too many failing calls to mmap.
-mi_atomic_cas_strong(&large_page_try_ok, &try_ok, try_ok - 1);
+mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
}
else {
int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
@@ -407,7 +407,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
#endif
if (large_only) return p;
if (p == NULL) {
-mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
+mi_atomic_store_release(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
}
}
}
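
For reference, the shape of the Unix fallback path mentioned in the comments above (a sketch under the assumption of a Linux-style MAP_HUGETLB flag, not mimalloc's actual code): try the huge-page mapping first and retry with normal pages on failure.

#include <sys/mman.h>
#include <stddef.h>

static void* mmap_maybe_large(size_t size) {
  const int prot  = PROT_READ | PROT_WRITE;
  const int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_HUGETLB)
  void* p = mmap(NULL, size, prot, flags | MAP_HUGETLB, -1, 0);
  if (p != MAP_FAILED) return p;
  // Large pages not configured or no permission: fall back to normal pages.
#endif
  void* q = mmap(NULL, size, prot, flags, -1, 0);
  return (q == MAP_FAILED ? NULL : q);
}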
@@ -455,7 +455,7 @@ static mi_decl_cache_align _Atomic(uintptr_t) aligned_base;
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
if (try_alignment == 0 || try_alignment > MI_SEGMENT_SIZE) return NULL;
if ((size%MI_SEGMENT_SIZE) != 0) return NULL;
-uintptr_t hint = mi_atomic_add(&aligned_base, size);
+uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size);
if (hint == 0 || hint > ((intptr_t)30<<40)) { // try to wrap around after 30TiB (area after 32TiB is used for huge OS pages)
uintptr_t init = ((uintptr_t)4 << 40); // start at 4TiB area
#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
@@ -463,8 +463,8 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
init = init + (MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)); // (randomly 20 bits)*4MiB == 0 to 4TiB
#endif
uintptr_t expected = hint + size;
-mi_atomic_cas_strong(&aligned_base, &expected, init);
-hint = mi_atomic_add(&aligned_base, size); // this may still give 0 or > 30TiB but that is ok, it is a hint after all
+mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init);
+hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > 30TiB but that is ok, it is a hint after all
}
if (hint%try_alignment != 0) return NULL;
return (void*)hint;
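
Pulled out of the hunk above: each request bumps a shared cursor by `size`, and past the 30 TiB mark one racing thread resets it back to the 4 TiB start via a strong CAS. A sketch with illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

static _Atomic(uintptr_t) aligned_base;

static void* next_aligned_hint(size_t size) {
  uintptr_t hint = atomic_fetch_add_explicit(&aligned_base, size, memory_order_acq_rel);
  if (hint == 0 || hint > ((uintptr_t)30 << 40)) {
    uintptr_t init = (uintptr_t)4 << 40;   // restart at the 4 TiB area
    uintptr_t expected = hint + size;      // only reset if no other thread raced past us
    atomic_compare_exchange_strong_explicit(&aligned_base, &expected, init,
                                            memory_order_acq_rel, memory_order_acquire);
    // May still yield 0 or > 30 TiB, but that is ok: it is only a hint.
    hint = atomic_fetch_add_explicit(&aligned_base, size, memory_order_acq_rel);
  }
  return (void*)hint;
}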
@@ -760,10 +760,10 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
#else
#if defined(MADV_FREE)
static _Atomic(uintptr_t) advice = ATOMIC_VAR_INIT(MADV_FREE);
-int err = madvise(start, csize, (int)mi_atomic_read_relaxed(&advice));
+int err = madvise(start, csize, (int)mi_atomic_load_relaxed(&advice));
if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
// if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
-mi_atomic_write(&advice, MADV_DONTNEED);
+mi_atomic_store_release(&advice, MADV_DONTNEED);
err = madvise(start, csize, MADV_DONTNEED);
}
#elif defined(__wasi__)
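
The downgrade logic above, as a standalone sketch (illustrative names; assumes a Linux system where MADV_FREE may be rejected with EINVAL on kernels before 4.5):

#include <sys/mman.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

static _Atomic(uintptr_t) advice_kind = MADV_FREE;

static int reset_memory(void* start, size_t size) {
  int adv = (int)atomic_load_explicit(&advice_kind, memory_order_relaxed);
  int err = madvise(start, size, adv);
  if (err != 0 && errno == EINVAL && adv == MADV_FREE) {
    // MADV_FREE unsupported: remember the fallback for all future calls.
    atomic_store_explicit(&advice_kind, MADV_DONTNEED, memory_order_release);
    err = madvise(start, size, MADV_DONTNEED);
  }
  return err;
}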
@@ -970,7 +970,7 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
uintptr_t start = 0;
uintptr_t end = 0;
-uintptr_t huge_start = mi_atomic_read_relaxed(&mi_huge_start);
+uintptr_t huge_start = mi_atomic_load_relaxed(&mi_huge_start);
do {
start = huge_start;
if (start == 0) {
@@ -983,7 +983,7 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
}
end = start + size;
mi_assert_internal(end % MI_SEGMENT_SIZE == 0);
-} while (!mi_atomic_cas_strong(&mi_huge_start, &huge_start, end));
+} while (!mi_atomic_cas_strong_acq_rel(&mi_huge_start, &huge_start, end));
if (total_size != NULL) *total_size = size;
return (uint8_t*)start;
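
The claim loop above, extracted: threads carve disjoint address ranges out of a shared cursor with a strong-CAS retry loop, so each caller gets a private [start, start+size) range for its huge-page mapping. A sketch with illustrative names:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic(uintptr_t) huge_start;

static uintptr_t claim_range(uintptr_t init, uintptr_t size) {
  uintptr_t start;
  uintptr_t cur = atomic_load_explicit(&huge_start, memory_order_relaxed);
  do {
    start = (cur == 0 ? init : cur);  // the first claimer establishes the base address
    // On CAS failure, `cur` is refreshed with the value another thread installed,
    // and the loop recomputes a fresh start from it.
  } while (!atomic_compare_exchange_strong_explicit(&huge_start, &cur, start + size,
                                                    memory_order_acq_rel, memory_order_acquire));
  return start;
}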