Commit 6457c442 authored by Ananth Jasty, committed by Sara Golemon

ARM64 assembler fixes for Folly.

Summary: Wrap asm("pause") in an inline function so that it becomes
asm("wfe") on aarch64.

Closes #187
Closes #190

Reviewed By: @yfeldblum

Differential Revision: D2152868

Pulled By: @sgolemon
parent 372fad51
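To make the summary concrete, here is a hedged, standalone sketch of the pattern the diff below introduces. The helper name asm_volatile_pause matches this commit; the spin_until_true caller is purely illustrative and not part of the change:

```cpp
#include <atomic>
#include <sched.h>

// Per-architecture CPU "relax" hint; compiles to nothing on other targets.
inline void asm_volatile_pause() {
#if defined(__i386__) || defined(__x86_64__)
  asm volatile ("pause");
#elif defined(__aarch64__)
  asm volatile ("wfe");
#endif
}

// Hypothetical caller: spin politely, then start yielding to the scheduler.
inline void spin_until_true(const std::atomic<bool>& flag) {
  int spins = 0;
  while (!flag.load(std::memory_order_acquire)) {
    asm_volatile_pause();
    if (++spins > 1000) sched_yield();
  }
}
```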
@@ -273,13 +273,11 @@ struct Baton : boost::noncopyable {
       // hooray!
       return true;
     }
-#if FOLLY_X64
     // The pause instruction is the polite way to spin, but it doesn't
     // actually affect correctness to omit it if we don't have it.
     // Pausing donates the full capabilities of the current core to
     // its other hyperthreads for a dozen cycles or so
-    asm volatile ("pause");
-#endif
+    asm_volatile_pause();
   }
   return false;
...
@@ -120,6 +120,12 @@ struct MaxAlign { char c; } __attribute__((__aligned__));
 # define FOLLY_X64 0
 #endif
 
+#if defined(__aarch64__)
+# define FOLLY_A64 1
+#else
+# define FOLLY_A64 0
+#endif
+
 // packing is very ugly in msvc
 #ifdef _MSC_VER
 # define FOLLY_PACK_ATTR /**/
@@ -278,4 +284,23 @@ inline size_t malloc_usable_size(void* ptr) {
 # define FOLLY_HAS_RTTI 1
 #endif
 
+namespace folly {
+
+inline void asm_volatile_pause() {
+#if defined(__i386__) || FOLLY_X64
+  asm volatile ("pause");
+#elif FOLLY_A64
+  asm volatile ("wfe");
+#endif
+}
+
+inline void asm_pause() {
+#if defined(__i386__) || FOLLY_X64
+  asm ("pause");
+#elif FOLLY_A64
+  asm ("wfe");
+#endif
+}
+
+}
+
 #endif // FOLLY_PORTABILITY_H_
@@ -587,7 +587,7 @@ class RWTicketSpinLockT : boost::noncopyable {
     int count = 0;
     QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
     while (val != load_acquire(&ticket.write)) {
-      asm volatile("pause");
+      asm_volatile_pause();
       if (UNLIKELY(++count > 1000)) sched_yield();
     }
   }
@@ -636,7 +636,7 @@ class RWTicketSpinLockT : boost::noncopyable {
     // need to let threads that already have a shared lock complete
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
-      asm volatile("pause");
+      asm_volatile_pause();
       if (UNLIKELY((++count & 1023) == 0)) sched_yield();
     }
   }
...
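For readers unfamiliar with the loop patched above: a ticket lock hands out monotonically increasing ticket numbers, and each thread spins until the "now serving" counter reaches its ticket. A compressed sketch of that acquire path, using std::atomic rather than folly's internals (TicketLock here is a simplified stand-in, not folly's RWTicketSpinLockT):

```cpp
#include <atomic>
#include <cstdint>
#include <sched.h>

#include <folly/Portability.h>  // folly::asm_volatile_pause(), added above

// Simplified stand-in for the write-lock acquire path patched above;
// folly's real RWTicketSpinLockT packs its counters differently.
struct TicketLock {
  std::atomic<uint32_t> users{0};  // next ticket to hand out
  std::atomic<uint32_t> write{0};  // ticket currently being served

  void lock() {
    uint32_t ticket = users.fetch_add(1, std::memory_order_relaxed);
    int count = 0;
    while (write.load(std::memory_order_acquire) != ticket) {
      folly::asm_volatile_pause();        // polite per-arch spin hint
      if (++count > 1000) sched_yield();  // back off if the wait drags on
    }
  }

  void unlock() { write.fetch_add(1, std::memory_order_release); }
};
```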
@@ -796,9 +796,7 @@ class SharedMutexImpl {
       if ((state & goal) == 0) {
         return true;
       }
-#if FOLLY_X64
-      asm volatile("pause");
-#endif
+      asm_volatile_pause();
       ++spinCount;
       if (UNLIKELY(spinCount >= kMaxSpinCount)) {
         return ctx.canBlock() &&
@@ -956,9 +954,7 @@ class SharedMutexImpl {
         return;
       }
     }
-#if FOLLY_X64
-    asm("pause");
-#endif
+    asm_pause();
     if (UNLIKELY(++spinCount >= kMaxSpinCount)) {
       applyDeferredReaders(state, ctx, slot);
       return;
...
@@ -47,8 +47,8 @@
 #include <glog/logging.h>
 #include <folly/Portability.h>
 
-#if !FOLLY_X64
-# error "SmallLocks.h is currently x64-only."
+#if !FOLLY_X64 && !FOLLY_A64
+# error "SmallLocks.h is currently x64 and aarch64 only."
 #endif
 
 namespace folly {
@@ -72,7 +72,7 @@ namespace detail {
   void wait() {
     if (spinCount < kMaxActiveSpin) {
       ++spinCount;
-      asm volatile("pause");
+      asm_volatile_pause();
     } else {
       /*
        * Always sleep 0.5ms, assuming this will make the kernel put
@@ -217,6 +217,7 @@ struct PicoSpinLock {
   bool try_lock() const {
     bool ret = false;
 
+#if FOLLY_X64
 #define FB_DOBTS(size)                                  \
   asm volatile("lock; bts" #size " %1, (%2); setnc %0"  \
                : "=r" (ret)                             \
@@ -231,6 +232,11 @@ struct PicoSpinLock {
   }
 
 #undef FB_DOBTS
+#elif FOLLY_A64
+    // __atomic_fetch_or returns the old word; the lock was acquired
+    // only if the lock bit was previously clear.
+    ret = !(__atomic_fetch_or(&lock_, 1 << Bit, __ATOMIC_SEQ_CST) & (1 << Bit));
+#else
+# error "x64 and aarch64 only"
+#endif
 
     return ret;
   }
@@ -250,6 +256,7 @@ struct PicoSpinLock {
    * integer.
    */
   void unlock() const {
+#if FOLLY_X64
 #define FB_DOBTR(size)                        \
   asm volatile("lock; btr" #size " %0, (%1)"  \
                :                              \
@@ -267,6 +274,11 @@ struct PicoSpinLock {
   }
 
 #undef FB_DOBTR
+#elif FOLLY_A64
+    // Atomically clear just the lock bit; the data bits stored alongside
+    // it are left untouched.
+    __atomic_fetch_and(&lock_, ~(1 << Bit), __ATOMIC_SEQ_CST);
+#else
+# error "x64 and aarch64 only"
+#endif
   }
 };
...
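The aarch64 branches above swap x86's bts/btr instructions for GCC/Clang __atomic builtins. As a sanity check on the semantics, here is a hedged, self-contained sketch of the same idea, a one-bit lock packed into a data word; BitLock, kBit, and the test in main are illustrative names, not folly code:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative one-bit lock living inside a data word (names hypothetical).
struct BitLock {
  static constexpr uint32_t kBit = 31;  // which bit holds the lock
  uint32_t word = 0;                    // lock bit + payload bits

  bool try_lock() {
    // fetch_or returns the *old* word: we own the lock only if the
    // bit was clear before we set it.
    uint32_t old = __atomic_fetch_or(&word, 1u << kBit, __ATOMIC_SEQ_CST);
    return (old & (1u << kBit)) == 0;
  }

  void unlock() {
    // Clear just the lock bit, preserving the payload bits.
    __atomic_fetch_and(&word, ~(1u << kBit), __ATOMIC_SEQ_CST);
  }
};

int main() {
  BitLock l;
  assert(l.try_lock());   // first acquire succeeds
  assert(!l.try_lock());  // second attempt fails while held
  l.unlock();
  assert(l.try_lock());   // can be re-acquired after unlock
}
```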
@@ -124,9 +124,7 @@ struct TurnSequencer {
       // the first effectiveSpinCutoff tries are spins, after that we will
       // record ourself as a waiter and block with futexWait
       if (tries < effectiveSpinCutoff) {
-#if defined(__i386__) || FOLLY_X64
-        asm volatile ("pause");
-#endif
+        asm_volatile_pause();
         continue;
       }
...
@@ -65,13 +65,11 @@ bool Baton::spinWaitForEarlyPost() {
       // hooray!
       return true;
     }
-#if FOLLY_X64
     // The pause instruction is the polite way to spin, but it doesn't
     // actually affect correctness to omit it if we don't have it.
     // Pausing donates the full capabilities of the current core to
     // its other hyperthreads for a dozen cycles or so
-    asm volatile ("pause");
-#endif
+    asm_volatile_pause();
   }
   return false;
...
@@ -60,7 +60,7 @@ void splock_test() {
   const int max = 1000;
   unsigned int seed = (uintptr_t)pthread_self();
   for (int i = 0; i < max; i++) {
-    asm("pause");
+    folly::asm_pause();
     MSLGuard g(v.lock);
     int first = v.ar[0];
@@ -84,7 +84,7 @@ template<class T> struct PslTest {
     std::lock_guard<PicoSpinLock<T>> guard(lock);
     lock.setData(ourVal);
     for (int n = 0; n < 10; ++n) {
-      asm volatile("pause");
+      folly::asm_volatile_pause();
       EXPECT_EQ(lock.getData(), ourVal);
     }
   }
...
@@ -37,7 +37,7 @@ void spinlockTestThread(LockedVal<LOCK>* v) {
   const int max = 1000;
   unsigned int seed = (uintptr_t)pthread_self();
   for (int i = 0; i < max; i++) {
-    asm("pause");
+    folly::asm_pause();
     SpinLockGuardImpl<LOCK> g(v->lock);
     int first = v->ar[0];
@@ -62,7 +62,7 @@ struct TryLockState {
 template <typename LOCK>
 void trylockTestThread(TryLockState<LOCK>* state, size_t count) {
   while (true) {
-    asm("pause");
+    folly::asm_pause();
     SpinLockGuardImpl<LOCK> g(state->lock1);
     if (state->obtained >= count) {
       break;
@@ -81,7 +81,7 @@ void trylockTestThread(TryLockState<LOCK>* state, size_t count) {
   auto oldFailed = state->failed;
   while (state->failed == oldFailed && state->obtained < count) {
     state->lock1.unlock();
-    asm("pause");
+    folly::asm_pause();
     state->lock1.lock();
   }
...