Commit 4856057b authored by Rui Zhang, committed by Facebook GitHub Bot

Fix cacheline padding in folly/test/SharedMutexTest with alignment.

Summary: The cacheline padding in folly/test/SharedMutexTest is not actually functional since the compiler is free to rearrange and optimize away local variables. This diff fixes the cacheline padding by aligning the false-sharing critical values.

Reviewed By: nbronson

Differential Revision: D15523241

fbshipit-source-id: c51b1e4cd78ab33f75fa93faf0a0346ec602f97c
parent 47ef08c0
...@@ -528,12 +528,12 @@ struct PosixMutex { ...@@ -528,12 +528,12 @@ struct PosixMutex {
template <template <typename> class Atom, typename Lock, typename Locker> template <template <typename> class Atom, typename Lock, typename Locker>
static void static void
runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) { runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) {
char padding1[64]; struct alignas(hardware_destructive_interference_size)
(void)padding1; GlobalLockAndProtectedValue {
Lock globalLock; Lock globalLock;
int valueProtectedByLock = 10; int valueProtectedByLock = 10;
char padding2[64]; };
(void)padding2; GlobalLockAndProtectedValue padded;
Atom<bool> go(false); Atom<bool> go(false);
Atom<bool>* goPtr = &go; // workaround for clang bug Atom<bool>* goPtr = &go; // workaround for clang bug
vector<thread> threads(numThreads); vector<thread> threads(numThreads);
...@@ -542,7 +542,7 @@ runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) { ...@@ -542,7 +542,7 @@ runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) {
for (size_t t = 0; t < numThreads; ++t) { for (size_t t = 0; t < numThreads; ++t) {
threads[t] = DSched::thread([&, t, numThreads] { threads[t] = DSched::thread([&, t, numThreads] {
Lock privateLock; Lock privateLock;
Lock* lock = useSeparateLocks ? &privateLock : &globalLock; Lock* lock = useSeparateLocks ? &privateLock : &(padded.globalLock);
Locker locker; Locker locker;
while (!goPtr->load()) { while (!goPtr->load()) {
this_thread::yield(); this_thread::yield();
...@@ -552,7 +552,7 @@ runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) { ...@@ -552,7 +552,7 @@ runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) {
// note: folly::doNotOptimizeAway reads and writes to its arg, // note: folly::doNotOptimizeAway reads and writes to its arg,
// so the following two lines are very different than a call // so the following two lines are very different than a call
// to folly::doNotOptimizeAway(valueProtectedByLock); // to folly::doNotOptimizeAway(valueProtectedByLock);
auto copy = valueProtectedByLock; auto copy = padded.valueProtectedByLock;
folly::doNotOptimizeAway(copy); folly::doNotOptimizeAway(copy);
locker.unlock_shared(lock); locker.unlock_shared(lock);
} }
...@@ -620,12 +620,12 @@ static void runMixed( ...@@ -620,12 +620,12 @@ static void runMixed(
size_t numThreads, size_t numThreads,
double writeFraction, double writeFraction,
bool useSeparateLocks) { bool useSeparateLocks) {
char padding1[64]; struct alignas(hardware_destructive_interference_size)
(void)padding1; GlobalLockAndProtectedValue {
Lock globalLock; Lock globalLock;
int valueProtectedByLock = 0; int valueProtectedByLock = 0;
char padding2[64]; };
(void)padding2; GlobalLockAndProtectedValue padded;
Atom<bool> go(false); Atom<bool> go(false);
Atom<bool>* goPtr = &go; // workaround for clang bug Atom<bool>* goPtr = &go; // workaround for clang bug
vector<thread> threads(numThreads); vector<thread> threads(numThreads);
...@@ -637,7 +637,7 @@ static void runMixed( ...@@ -637,7 +637,7 @@ static void runMixed(
srand48_r(t, &buffer); srand48_r(t, &buffer);
long writeThreshold = writeFraction * 0x7fffffff; long writeThreshold = writeFraction * 0x7fffffff;
Lock privateLock; Lock privateLock;
Lock* lock = useSeparateLocks ? &privateLock : &globalLock; Lock* lock = useSeparateLocks ? &privateLock : &(padded.globalLock);
Locker locker; Locker locker;
while (!goPtr->load()) { while (!goPtr->load()) {
this_thread::yield(); this_thread::yield();
...@@ -649,12 +649,12 @@ static void runMixed( ...@@ -649,12 +649,12 @@ static void runMixed(
if (writeOp) { if (writeOp) {
locker.lock(lock); locker.lock(lock);
if (!useSeparateLocks) { if (!useSeparateLocks) {
++valueProtectedByLock; ++(padded.valueProtectedByLock);
} }
locker.unlock(lock); locker.unlock(lock);
} else { } else {
locker.lock_shared(lock); locker.lock_shared(lock);
auto v = valueProtectedByLock; auto v = padded.valueProtectedByLock;
folly::doNotOptimizeAway(v); folly::doNotOptimizeAway(v);
locker.unlock_shared(lock); locker.unlock_shared(lock);
} }
...@@ -1300,21 +1300,19 @@ static void burn(size_t n) { ...@@ -1300,21 +1300,19 @@ static void burn(size_t n) {
// in turn with reader/writer conflict // in turn with reader/writer conflict
template <typename Lock, template <typename> class Atom = atomic> template <typename Lock, template <typename> class Atom = atomic>
static void runPingPong(size_t numRounds, size_t burnCount) { static void runPingPong(size_t numRounds, size_t burnCount) {
char padding1[56]; struct alignas(hardware_destructive_interference_size) PaddedLock {
(void)padding1; Lock lock_;
pair<Lock, char[56]> locks[3]; };
char padding2[56]; array<PaddedLock, 3> paddedLocks;
(void)padding2;
Atom<int> avail(0); Atom<int> avail(0);
auto availPtr = &avail; // workaround for clang crash auto availPtr = &avail; // workaround for clang crash
Atom<bool> go(false); Atom<bool> go(false);
auto goPtr = &go; // workaround for clang crash auto goPtr = &go; // workaround for clang crash
vector<thread> threads(2); vector<thread> threads(2);
locks[0].first.lock(); paddedLocks[0].lock_.lock();
locks[1].first.lock(); paddedLocks[1].lock_.lock();
locks[2].first.lock_shared(); paddedLocks[2].lock_.lock_shared();
BENCHMARK_SUSPEND { BENCHMARK_SUSPEND {
threads[0] = DSched::thread([&] { threads[0] = DSched::thread([&] {
...@@ -1323,8 +1321,8 @@ static void runPingPong(size_t numRounds, size_t burnCount) { ...@@ -1323,8 +1321,8 @@ static void runPingPong(size_t numRounds, size_t burnCount) {
this_thread::yield(); this_thread::yield();
} }
for (size_t i = 0; i < numRounds; ++i) { for (size_t i = 0; i < numRounds; ++i) {
locks[i % 3].first.unlock(); paddedLocks[i % 3].lock_.unlock();
locks[(i + 2) % 3].first.lock(); paddedLocks[(i + 2) % 3].lock_.lock();
burn(burnCount); burn(burnCount);
} }
}); });
...@@ -1334,9 +1332,9 @@ static void runPingPong(size_t numRounds, size_t burnCount) { ...@@ -1334,9 +1332,9 @@ static void runPingPong(size_t numRounds, size_t burnCount) {
this_thread::yield(); this_thread::yield();
} }
for (size_t i = 0; i < numRounds; ++i) { for (size_t i = 0; i < numRounds; ++i) {
locks[i % 3].first.lock_shared(); paddedLocks[i % 3].lock_.lock_shared();
burn(burnCount); burn(burnCount);
locks[(i + 2) % 3].first.unlock_shared(); paddedLocks[(i + 2) % 3].lock_.unlock_shared();
} }
}); });
...@@ -1349,9 +1347,9 @@ static void runPingPong(size_t numRounds, size_t burnCount) { ...@@ -1349,9 +1347,9 @@ static void runPingPong(size_t numRounds, size_t burnCount) {
for (auto& thr : threads) { for (auto& thr : threads) {
DSched::join(thr); DSched::join(thr);
} }
locks[numRounds % 3].first.unlock(); paddedLocks[numRounds % 3].lock_.unlock();
locks[(numRounds + 1) % 3].first.unlock(); paddedLocks[(numRounds + 1) % 3].lock_.unlock();
locks[(numRounds + 2) % 3].first.unlock_shared(); paddedLocks[(numRounds + 2) % 3].lock_.unlock_shared();
} }
static void folly_rwspin_ping_pong(size_t n, size_t scale, size_t burnCount) { static void folly_rwspin_ping_pong(size_t n, size_t scale, size_t burnCount) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment