mirror of
https://github.com/microsoft/mimalloc.git
synced 2025-05-20 14:09:32 +03:00
Implement appropriate atomic_yield for Armv7+
Spin-wait often involves active sleep (better known as "pause"). x86 has a direct assembly instruction named "PAUSE" which has two roles: one is to hint to the operating system that the thread might be ready to be swapped out, and the other is to create a small delay. That delay is useful as backoff from attempting to capture spinlocks, which improves the behavior of the system and allows more efficient lock acquisition. However, the "yield" instruction is not a good fit because it is effectively a nop on most Arm cores and does not cause enough delay to help backoff. The "isb" instruction is a barrier that, especially inside a loop, creates a small delay without consuming ALU resources.
This commit is contained in:
parent
766f1f9345
commit
1966db91b7
1 changed file with 3 additions and 7 deletions
|
@ -293,19 +293,15 @@ static inline void mi_atomic_yield(void) {
|
|||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile ("pause" ::: "memory");
|
||||
}
|
||||
#elif defined(__aarch64__)
|
||||
#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7)
|
||||
static inline void mi_atomic_yield(void) {
|
||||
asm volatile("wfe");
|
||||
}
|
||||
#elif (defined(__arm__) && __ARM_ARCH__ >= 7)
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile("yield" ::: "memory");
|
||||
asm volatile("isb" ::: "memory");
|
||||
}
|
||||
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ __volatile__ ("or 27,27,27" ::: "memory");
|
||||
}
|
||||
#elif defined(__armel__) || defined(__ARMEL__)
|
||||
#elif defined(__arm__) /* Arm cores prior to Armv7-A */
|
||||
static inline void mi_atomic_yield(void) {
|
||||
asm volatile ("nop" ::: "memory");
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue