fix msvc compilation in C mode

daan 2020-09-05 09:37:09 -07:00
parent 50de0d2358
commit 2e311f341b
4 changed files with 19 additions and 3 deletions

@@ -257,4 +257,4 @@
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
 </Project>

@@ -131,18 +131,23 @@ typedef enum mi_memory_order_e {
 } mi_memory_order;
 
 static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)* p, uintptr_t add, mi_memory_order mo) {
+  (void)(mo);
   return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
 }
 static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
+  (void)(mo);
   return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
 }
 static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)* p, uintptr_t x, mi_memory_order mo) {
+  (void)(mo);
   return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
 }
 static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)* p, uintptr_t x, mi_memory_order mo) {
+  (void)(mo);
   return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
 }
 static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)* p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
+  (void)(mo1); (void)(mo2);
   uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
   if (read == *expected) {
     return true;
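
The (void)(mo) casts added throughout silence MSVC's "unreferenced formal parameter" warning (C4100 at /W4): these wrappers deliberately ignore the memory-order argument, since the interlocked intrinsics they map to are always sequentially consistent. A minimal sketch of the pattern, with a hypothetical function name not taken from mimalloc:

#include <intrin.h>

/* Casting an otherwise-unused parameter to void marks it as intentionally
   ignored, which keeps /W4 builds quiet without changing behavior. */
static inline long fetch_add_seqcst(volatile long* p, long add, int order) {
  (void)(order);  /* ignored: the locked intrinsic is always seq-cst */
  return _InterlockedExchangeAdd(p, add);
}
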
@@ -156,13 +161,16 @@ static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p
   return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
 }
 static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)* p, uintptr_t exchange, mi_memory_order mo) {
+  (void)(mo);
   return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
 }
-static inline mi_atomic_thread_fence(mi_memory_order mo) {
+static inline void mi_atomic_thread_fence(mi_memory_order mo) {
+  (void)(mo);
   _Atomic(uintptr_t)x = 0;
   mi_atomic_exchange_explicit(&x, 1, mo);
 }
 static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
+  (void)(mo);
 #if defined(_M_IX86) || defined(_M_X64)
   return *p;
 #else
@@ -174,6 +182,7 @@ static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_
 #endif
 }
 static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)* p, uintptr_t x, mi_memory_order mo) {
+  (void)(mo);
 #if defined(_M_IX86) || defined(_M_X64)
   *p = x;
 #else
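
One change here is a genuine C-mode problem rather than a warning: the old mi_atomic_thread_fence definition omitted its return type, leaning on the old C "implicit int" rule even though the body returns nothing; spelling out void fixes the declaration. A hedged sketch of the corrected shape, with a hypothetical name and simplified to a plain interlocked exchange:

#include <intrin.h>  /* MSVC interlocked intrinsics */

/* Explicit 'void' return type: nothing is returned, and implicit int was
   removed from the C standard long ago. */
static inline void my_thread_fence(void) {
  volatile long x = 0;
  _InterlockedExchange(&x, 1);  /* a locked RMW doubles as a full memory barrier */
}
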

@@ -12,6 +12,10 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <stdint.h>          // uintptr_t, uint16_t, etc
 #include <mimalloc-atomic.h> // _Atomic
+
+#ifdef _MSC_VER
+#pragma warning(disable:4214) // bitfield is not int
+#endif
 
 // Minimal alignment necessary. On most platforms 16 bytes are needed
 // due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
 #ifndef MI_MAX_ALIGN_SIZE
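
Warning C4214 ("nonstandard extension used: bit field types other than int") fires when MSVC compiles C that declares bit-fields with a type other than int, as mimalloc's packed structs do; the pragma disables it for MSVC only. A hedged illustration of the kind of declaration involved, with made-up struct and field names:

#include <stdint.h>

/* Bit-fields of non-int type are a Microsoft extension in C mode and
   trigger C4214 at /W4 unless the warning is suppressed. */
typedef struct page_flags_s {
  uint8_t in_full     : 1;
  uint8_t has_aligned : 1;
} page_flags_t;
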

@@ -550,6 +550,9 @@ static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t*
 
 // Visit all blocks in a heap
 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
-  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
+  mi_visit_blocks_args_t args = { 0 };
+  args.visit_blocks = visit_blocks;
+  args.visitor = visitor;
+  args.arg = arg;
   return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
 }
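
The heap.c change sidesteps non-constant aggregate initialization: MSVC's C mode flags a brace initializer built from runtime values (warning C4204, "nonstandard extension used: non-constant aggregate initializer"), while { 0 } is a constant initializer it always accepts, so the fields are assigned afterwards. A minimal sketch of the same workaround on a hypothetical struct:

/* point_t and make_point are illustrative names, not mimalloc code. */
typedef struct point_s { int x; int y; } point_t;

static point_t make_point(int x, int y) {
  point_t p = { 0 };  /* constant aggregate initializer: accepted even in C89 mode */
  p.x = x;            /* runtime values assigned field by field instead */
  p.y = y;
  return p;
}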