Commit 6457c442 authored by Ananth Jasty's avatar Ananth Jasty Committed by Sara Golemon

ARM64 assembler fixes for Folly.

Summary: Wrap asm("pause") in an inline so that it becomes
asm("wfe") on aarch64.

Closes #187
Closes #190

Reviewed By: @yfeldblum

Differential Revision: D2152868

Pulled By: @sgolemon
parent 372fad51
......@@ -273,13 +273,11 @@ struct Baton : boost::noncopyable {
// hooray!
return true;
}
#if FOLLY_X64
// The pause instruction is the polite way to spin, but it doesn't
// actually affect correctness to omit it if we don't have it.
// Pausing donates the full capabilities of the current core to
// its other hyperthreads for a dozen cycles or so
asm volatile ("pause");
#endif
asm_volatile_pause();
}
return false;
......
......@@ -120,6 +120,12 @@ struct MaxAlign { char c; } __attribute__((__aligned__));
# define FOLLY_X64 0
#endif
#if defined(__aarch64__)
# define FOLLY_A64 1
#else
# define FOLLY_A64 0
#endif
// packing is very ugly in msvc
#ifdef _MSC_VER
# define FOLLY_PACK_ATTR /**/
......@@ -278,4 +284,23 @@ inline size_t malloc_usable_size(void* ptr) {
# define FOLLY_HAS_RTTI 1
#endif
namespace folly {
// Polite spin-wait hint. Emits the architecture's spin-loop pause
// instruction so a busy-wait donates core resources to sibling
// hyperthreads / other SMT contexts. Compiles to nothing on
// architectures without such a hint — omitting it never affects
// correctness, only spin efficiency.
inline void asm_volatile_pause() {
#if defined(__i386__) || FOLLY_X64
  asm volatile ("pause");
#elif FOLLY_A64
  // Use YIELD, the architecturally-defined spin-loop hint on ARMv8
  // (a NOP on cores that don't implement it). WFE is wrong here: it
  // is the wait half of the SEV/WFE pairing and can stall this
  // thread until an unrelated event or interrupt arrives, making
  // spin latency unbounded when no one executes SEV.
  asm volatile ("yield");
#endif
}
// Non-volatile-qualified variant of the spin-wait hint, matching
// call sites that previously wrote a bare asm("pause"). (Basic asm
// with no operands is implicitly volatile in GCC/Clang, so this
// generates the same code as the volatile form.)
inline void asm_pause() {
#if defined(__i386__) || FOLLY_X64
  asm ("pause");
#elif FOLLY_A64
  // YIELD is the ARMv8 spin-loop hint; WFE would block awaiting an
  // event (SEV/interrupt) and is unsuitable as a pause substitute.
  asm ("yield");
#endif
}
}
#endif // FOLLY_PORTABILITY_H_
......@@ -587,7 +587,7 @@ class RWTicketSpinLockT : boost::noncopyable {
int count = 0;
QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
while (val != load_acquire(&ticket.write)) {
asm volatile("pause");
asm_volatile_pause();
if (UNLIKELY(++count > 1000)) sched_yield();
}
}
......@@ -636,7 +636,7 @@ class RWTicketSpinLockT : boost::noncopyable {
// need to let threads that already have a shared lock complete
int count = 0;
while (!LIKELY(try_lock_shared())) {
asm volatile("pause");
asm_volatile_pause();
if (UNLIKELY((++count & 1023) == 0)) sched_yield();
}
}
......
......@@ -796,9 +796,7 @@ class SharedMutexImpl {
if ((state & goal) == 0) {
return true;
}
#if FOLLY_X64
asm volatile("pause");
#endif
asm_volatile_pause();
++spinCount;
if (UNLIKELY(spinCount >= kMaxSpinCount)) {
return ctx.canBlock() &&
......@@ -956,9 +954,7 @@ class SharedMutexImpl {
return;
}
}
#if FOLLY_X64
asm("pause");
#endif
asm_pause();
if (UNLIKELY(++spinCount >= kMaxSpinCount)) {
applyDeferredReaders(state, ctx, slot);
return;
......
......@@ -47,8 +47,8 @@
#include <glog/logging.h>
#include <folly/Portability.h>
#if !FOLLY_X64
# error "SmallLocks.h is currently x64-only."
#if !FOLLY_X64 && !FOLLY_A64
# error "SmallLocks.h is currently x64 and aarch64 only."
#endif
namespace folly {
......@@ -72,7 +72,7 @@ namespace detail {
void wait() {
if (spinCount < kMaxActiveSpin) {
++spinCount;
asm volatile("pause");
asm_volatile_pause();
} else {
/*
* Always sleep 0.5ms, assuming this will make the kernel put
......@@ -217,6 +217,7 @@ struct PicoSpinLock {
bool try_lock() const {
bool ret = false;
#if FOLLY_X64
#define FB_DOBTS(size) \
asm volatile("lock; bts" #size " %1, (%2); setnc %0" \
: "=r" (ret) \
......@@ -231,6 +232,11 @@ struct PicoSpinLock {
}
#undef FB_DOBTS
#elif FOLLY_A64
ret = __atomic_fetch_or(&lock_, 1 << Bit, __ATOMIC_SEQ_CST);
#else
#error "x86 aarch64 only"
#endif
return ret;
}
......@@ -250,6 +256,7 @@ struct PicoSpinLock {
* integer.
*/
void unlock() const {
#if FOLLY_X64
#define FB_DOBTR(size) \
asm volatile("lock; btr" #size " %0, (%1)" \
: \
......@@ -267,6 +274,11 @@ struct PicoSpinLock {
}
#undef FB_DOBTR
#elif FOLLY_A64
__atomic_fetch_and(&lock_, ~(1 << Bit), __ATOMIC_SEQ_CST);
#else
# error "x64 aarch64 only"
#endif
}
};
......
......@@ -124,9 +124,7 @@ struct TurnSequencer {
// the first effectSpinCutoff tries are spins, after that we will
// record ourself as a waiter and block with futexWait
if (tries < effectiveSpinCutoff) {
#if defined(__i386__) || FOLLY_X64
asm volatile ("pause");
#endif
asm_volatile_pause();
continue;
}
......
......@@ -65,13 +65,11 @@ bool Baton::spinWaitForEarlyPost() {
// hooray!
return true;
}
#if FOLLY_X64
// The pause instruction is the polite way to spin, but it doesn't
// actually affect correctness to omit it if we don't have it.
// Pausing donates the full capabilities of the current core to
// its other hyperthreads for a dozen cycles or so
asm volatile ("pause");
#endif
asm_volatile_pause();
}
return false;
......
......@@ -60,7 +60,7 @@ void splock_test() {
const int max = 1000;
unsigned int seed = (uintptr_t)pthread_self();
for (int i = 0; i < max; i++) {
asm("pause");
folly::asm_pause();
MSLGuard g(v.lock);
int first = v.ar[0];
......@@ -84,7 +84,7 @@ template<class T> struct PslTest {
std::lock_guard<PicoSpinLock<T>> guard(lock);
lock.setData(ourVal);
for (int n = 0; n < 10; ++n) {
asm volatile("pause");
folly::asm_volatile_pause();
EXPECT_EQ(lock.getData(), ourVal);
}
}
......
......@@ -37,7 +37,7 @@ void spinlockTestThread(LockedVal<LOCK>* v) {
const int max = 1000;
unsigned int seed = (uintptr_t)pthread_self();
for (int i = 0; i < max; i++) {
asm("pause");
folly::asm_pause();
SpinLockGuardImpl<LOCK> g(v->lock);
int first = v->ar[0];
......@@ -62,7 +62,7 @@ struct TryLockState {
template <typename LOCK>
void trylockTestThread(TryLockState<LOCK>* state, size_t count) {
while (true) {
asm("pause");
folly::asm_pause();
SpinLockGuardImpl<LOCK> g(state->lock1);
if (state->obtained >= count) {
break;
......@@ -81,7 +81,7 @@ void trylockTestThread(TryLockState<LOCK>* state, size_t count) {
auto oldFailed = state->failed;
while (state->failed == oldFailed && state->obtained < count) {
state->lock1.unlock();
asm("pause");
folly::asm_pause();
state->lock1.lock();
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment