Commit 85d3f301 authored by Aaryaman Sagar, committed by Facebook Github Bot

Pessimize DistributedMutex::try_lock, update comments and benchmarks

Summary:
A few changes:

- Pessimize DistributedMutex::try_lock() to do a load on the mutex state
  before attempting the lock bts (bit test-and-set).  This allows concurrent
  try_lock() calls to stay effectively parallel in the case where acquiring
  the mutex is not a hard requirement for the application.  When
  perf-sensitive try_lock()s come up, they tend to fit this model: acquiring
  the mutex allows one thread to make progress, but missing the lock does not
  lead to incorrectness.  The extra load mostly does not hurt performance, so
  it seems like a good thing to have in all cases (see the sketch after this
  list).
- Add more unit tests that verify progress in the presence of many mutexes
- Also update comments to be slightly more clear; in particular, add a small
  comparison with std::atomic in the first few sentences
- Rerun benchmarks on both Broadwell and Skylake
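
For reference, a minimal sketch of the pessimized try_lock() pattern (not the
actual folly implementation: a hypothetical SketchMutex with a single lock bit
stands in for the real centralized state, and std::atomic::exchange stands in
for folly's atomic_fetch_set):

#include <atomic>
#include <cstdint>

class SketchMutex {
  std::atomic<std::uint8_t> state_{0};

 public:
  bool try_lock() noexcept {
    // Cheap relaxed load first: if another thread holds the lock, fail
    // immediately without pulling the cacheline into the current core in
    // exclusive mode, so concurrent try_lock() callers stay parallel
    if (state_.load(std::memory_order_relaxed) != 0) {
      return false;
    }
    // Only now attempt the expensive atomic read-modify-write (exchange
    // here; the real code uses atomic_fetch_set, which can compile down to
    // a lock bts); reading a zero means we took the lock
    return state_.exchange(1, std::memory_order_acquire) == 0;
  }

  void unlock() noexcept {
    state_.store(0, std::memory_order_release);
  }
};

A thread that loses the race here just fails fast and moves on, which is
exactly the model above: one thread makes progress under the mutex, and
missing the lock costs the others nothing.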

Reviewed By: djwatson

Differential Revision: D15378164

fbshipit-source-id: 124db4052dadae102a5c04ad9b9f3bcbefe74458
parent 5b35fe63
@@ -683,7 +683,7 @@ bool spin(Waiter& waiter, std::uint32_t& sig, std::uint32_t mode) {
     // signal or a signal to wake up
     auto skipped = (signal == kSkipped);
     if (skipped || (signal == kWake) || (signal == kCombined)) {
-      sig = signal;
+      sig = static_cast<std::uint32_t>(signal);
       return !skipped;
     }
@@ -896,6 +896,40 @@ DistributedMutex<Atomic, TimePublishing>::try_lock_combine_until(
   return folly::none;
 }
+template <typename Atomic, template <typename> class A, bool T>
+auto tryLockNoLoad(Atomic& atomic, DistributedMutex<A, T>&) {
+  // Try and set the least significant bit of the centralized lock state to 1,
+  // if this succeeds, it must have been the case that we had a kUnlocked (or
+  // 0) in the central storage before, since that is the only case where a 0
+  // can be found in the least significant bit
+  //
+  // If this fails, then it is a no-op
+  using Proxy = typename DistributedMutex<A, T>::DistributedMutexStateProxy;
+  auto previous = atomic_fetch_set(atomic, 0, std::memory_order_acquire);
+  if (!previous) {
+    return Proxy{nullptr, kLocked};
+  }
+  return Proxy{nullptr, 0};
+}
+template <template <typename> class Atomic, bool TimePublishing>
+typename DistributedMutex<Atomic, TimePublishing>::DistributedMutexStateProxy
+DistributedMutex<Atomic, TimePublishing>::try_lock() {
+  // The lock attempt below requires an expensive atomic fetch-and-mutate or
+  // an even more expensive atomic compare-and-swap loop depending on the
+  // platform. These operations require pulling the lock cacheline into the
+  // current core in exclusive mode and are therefore hard to parallelize
+  //
+  // This probabilistically avoids the expense by first checking whether the
+  // mutex is currently locked
+  if (state_.load(std::memory_order_relaxed) != kUnlocked) {
+    return DistributedMutexStateProxy{nullptr, 0};
+  }
+  return tryLockNoLoad(state_, *this);
+}
 template <
     template <typename> class Atomic,
     bool TimePublishing,
@@ -916,7 +950,7 @@ lockImplementation(
   // that case as it causes an extra cacheline bounce
   constexpr auto combineRequested = !std::is_same<Request, std::nullptr_t>{};
   if (!combineRequested) {
-    if (auto state = mutex.try_lock()) {
+    if (auto state = tryLockNoLoad(atomic, mutex)) {
       return state;
     }
   }
@@ -1358,25 +1392,6 @@ void DistributedMutex<Atomic, Publish>::unlock(
   }
 }
-template <template <typename> class Atomic, bool TimePublishing>
-typename DistributedMutex<Atomic, TimePublishing>::DistributedMutexStateProxy
-DistributedMutex<Atomic, TimePublishing>::try_lock() {
-  // Try and set the least significant bit of the centralized lock state to 1,
-  // indicating locked.
-  //
-  // If this succeeds, it must have been the case that we had a kUnlocked (or
-  // 0) in the centralized storage before, since that is the only case where a
-  // 0 can be found. So we assert that in debug mode
-  //
-  // If this fails, then it is a no-op
-  auto previous = atomic_fetch_set(state_, 0, std::memory_order_acquire);
-  if (!previous) {
-    return {nullptr, kLocked};
-  }
-  return {nullptr, 0};
-}
 template <typename Atomic, typename Deadline, typename MakeProxy>
 auto timedLock(Atomic& state, Deadline deadline, MakeProxy proxy) {
   while (true) {
@@ -1648,4 +1648,111 @@ TEST(DistributedMutex, TestAppropriateDestructionAndConstructionWithCombine) {
   thread.join();
 }
+namespace {
+template <template <typename> class Atom = std::atomic>
+void concurrentLocksManyMutexes(int numThreads, std::chrono::seconds duration) {
+  using DMutex = detail::distributed_mutex::DistributedMutex<Atom>;
+  const auto&& kNumMutexes = 10;
+  auto&& threads = std::vector<std::thread>{};
+  auto&& mutexes = std::vector<DMutex>(kNumMutexes);
+  auto&& barriers = std::vector<std::atomic<std::uint64_t>>(kNumMutexes);
+  auto&& stop = std::atomic<bool>{false};
+  for (auto i = 0; i < numThreads; ++i) {
+    threads.push_back(DSched::thread([&] {
+      auto&& total = std::atomic<std::uint64_t>{0};
+      auto&& expected = std::uint64_t{0};
+      for (auto j = 0; !stop.load(std::memory_order_relaxed); ++j) {
+        auto& mutex = mutexes[j % kNumMutexes];
+        auto& barrier = barriers[j % kNumMutexes];
+        ++expected;
+        auto result = mutex.lock_combine([&]() {
+          EXPECT_EQ(barrier.fetch_add(1, std::memory_order_relaxed), 0);
+          std::this_thread::yield();
+          EXPECT_EQ(barrier.fetch_sub(1, std::memory_order_relaxed), 1);
+          return total.fetch_add(1, std::memory_order_relaxed);
+        });
+        EXPECT_EQ(result, expected - 1);
+      }
+      EXPECT_EQ(total.load(), expected);
+    }));
+  }
+  /* sleep override */
+  std::this_thread::sleep_for(duration);
+  stop.store(true);
+  for (auto& thread : threads) {
+    DSched::join(thread);
+  }
+}
+} // namespace
+TEST(DistributedMutex, StressWithManyMutexesAlternatingTwoThreads) {
+  concurrentLocksManyMutexes(
+      2, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, StressWithManyMutexesAlternatingFourThreads) {
+  concurrentLocksManyMutexes(
+      4, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, StressWithManyMutexesAlternatingEightThreads) {
+  concurrentLocksManyMutexes(
+      8, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, StressWithManyMutexesAlternatingSixteenThreads) {
+  concurrentLocksManyMutexes(
+      16, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, StressWithManyMutexesAlternatingThirtyTwoThreads) {
+  concurrentLocksManyMutexes(
+      32, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, StressWithManyMutexesAlternatingSixtyFourThreads) {
+  concurrentLocksManyMutexes(
+      64, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+namespace {
+void concurrentLocksManyMutexesDeterministic(
+    int threads,
+    std::chrono::seconds t) {
+  const auto kNumPasses = 3.0;
+  const auto seconds = std::ceil(static_cast<double>(t.count()) / kNumPasses);
+  const auto time = std::chrono::seconds{static_cast<std::uint64_t>(seconds)};
+  for (auto pass = 0; pass < kNumPasses; ++pass) {
+    auto&& schedule = DSched{DSched::uniform(pass)};
+    concurrentLocksManyMutexes<test::DeterministicAtomic>(threads, time);
+    static_cast<void>(schedule);
+  }
+}
+} // namespace
+TEST(DistributedMutex, DeterministicWithManyMutexesAlternatingTwoThreads) {
+  concurrentLocksManyMutexesDeterministic(
+      2, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, DeterministicWithManyMutexesAlternatingFourThreads) {
+  concurrentLocksManyMutexesDeterministic(
+      4, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, DeterministicWithManyMutexesAlternatingEightThreads) {
+  concurrentLocksManyMutexesDeterministic(
+      8, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, DeterministicWithManyMutexesAlternatingSixteenThreads) {
+  concurrentLocksManyMutexesDeterministic(
+      16, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, DeterministicWithManyMtxAlternatingThirtyTwoThreads) {
+  concurrentLocksManyMutexesDeterministic(
+      32, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
+TEST(DistributedMutex, DeterministicWithManyMtxAlternatingSixtyFourThreads) {
+  concurrentLocksManyMutexesDeterministic(
+      64, std::chrono::seconds{FLAGS_stress_test_seconds});
+}
 } // namespace folly