Commit 712b8b8d authored by Yedidya Feldblum, committed by Facebook Github Bot

Remove multi-poster support from Baton

Summary:
[Folly] Remove multi-poster support from `Baton`.

Code which needs multi-poster support may use `SaturatingSemaphore` instead.
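
As a rough migration sketch (editor's illustration, not part of this commit; it assumes folly::SaturatingSemaphore from <folly/synchronization/SaturatingSemaphore.h> with a MayBlock template parameter and post()/wait()/try_wait()/reset() members):

  #include <folly/synchronization/SaturatingSemaphore.h>

  // Previously: folly::Baton<std::atomic, /* SinglePoster = */ false> done;
  // A saturating semaphore tolerates any number of post() calls: the first
  // one completes the handoff and the rest are no-ops.
  folly::SaturatingSemaphore<true /* MayBlock */> done;

  void producer() { done.post(); }  // safe to call from many threads
  void consumer() { done.wait(); }  // returns once any post() has happened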

Reviewed By: magedm

Differential Revision: D6529661

fbshipit-source-id: d9dc053ca984ef3a404e9361910b0044817d4905
parent 90ce64f9
@@ -29,22 +29,16 @@
 namespace folly {

-/// A Baton allows a thread to block once and be awoken. The single
-/// poster version (with SinglePoster == true) captures a single
-/// handoff, and during its lifecycle (from construction/reset to
-/// destruction/reset) a baton must either be post()ed and wait()ed
+/// A Baton allows a thread to block once and be awoken. Captures a
+/// single handoff, and during its lifecycle (from construction/reset
+/// to destruction/reset) a baton must either be post()ed and wait()ed
 /// exactly once each, or not at all.
 ///
-/// The multi-poster version (SinglePoster == false) allows multiple
-/// concurrent handoff attempts, the first of which completes the
-/// handoff and the rest if any are idempotent.
-///
 /// Baton includes no internal padding, and is only 4 bytes in size.
 /// Any alignment or padding to avoid false sharing is up to the user.
 ///
-/// This is basically a stripped-down semaphore that supports (only a
-/// single call to sem_post, when SinglePoster == true) and a single
-/// call to sem_wait.
+/// This is basically a stripped-down semaphore that supports only a
+/// single call to sem_post and a single call to sem_wait.
 ///
 /// The non-blocking version (Blocking == false) provides more speed
 /// by using only load acquire and store release operations in the
@@ -58,7 +52,6 @@ namespace folly {
 /// catch race conditions ahead of time.
 template <
     template <typename> class Atom = std::atomic,
-    bool SinglePoster = true, // single vs multiple posters
     bool Blocking = true> // blocking vs spinning
 struct Baton {
   constexpr Baton() : state_(INIT) {}
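
For orientation, a minimal usage sketch against the post-change parameters (Atom, Blocking). This is an editor's illustration of the rules stated in the comment above, not code from this commit; the include path may differ by folly version (<folly/Baton.h> is assumed here):

  #include <thread>
  #include <folly/Baton.h>

  void handoff_example() {
    folly::Baton<> ready;  // blocking, std::atomic-based (the defaults)

    std::thread producer([&ready] {
      // ... produce something ...
      ready.post();        // exactly one post() per Baton lifecycle
    });

    ready.wait();          // exactly one wait() per lifecycle
    producer.join();

    ready.reset();         // starts a new lifecycle; post()/wait() once each again

    // Spin-only variant: waits using load-acquire/store-release only, no futex.
    folly::Baton<std::atomic, /* Blocking = */ false> spinner;
    spinner.post();
    spinner.wait();
  }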
@@ -130,65 +123,24 @@ struct Baton {
     /// Blocking versions
     ///

-    if (SinglePoster) {
-      /// Single poster version
-      ///
-      uint32_t before = state_.load(std::memory_order_acquire);
-
-      assert(before == INIT || before == WAITING || before == TIMED_OUT);
-
-      if (before == INIT &&
-          state_.compare_exchange_strong(before, EARLY_DELIVERY)) {
-        return;
-      }
-
-      assert(before == WAITING || before == TIMED_OUT);
-
-      if (before == TIMED_OUT) {
-        return;
-      }
-
-      assert(before == WAITING);
-      state_.store(LATE_DELIVERY, std::memory_order_release);
-      state_.futexWake(1);
-    } else {
-      /// Multi-poster version
-      ///
-      while (true) {
-        uint32_t before = state_.load(std::memory_order_acquire);
-
-        if (before == INIT &&
-            state_.compare_exchange_strong(before, EARLY_DELIVERY)) {
-          return;
-        }
-
-        if (before == TIMED_OUT) {
-          return;
-        }
-
-        if (before == EARLY_DELIVERY || before == LATE_DELIVERY) {
-          // The reason for not simply returning (without the following
-          // atomic operation) is to avoid the following case:
-          //
-          //   T1:             T2:             T3:
-          //   local1.post();  local2.post();  global.wait();
-          //   global.post();  global.post();  local1.try_wait() == true;
-          //                                   local2.try_wait() == false;
-          //
-          if (state_.fetch_add(0) != before) {
-            continue;
-          }
-          return;
-        }
-
-        assert(before == WAITING);
-        if (!state_.compare_exchange_weak(before, LATE_DELIVERY)) {
-          continue;
-        }
-        state_.futexWake(1);
-        return;
-      }
-    }
+    uint32_t before = state_.load(std::memory_order_acquire);
+
+    assert(before == INIT || before == WAITING || before == TIMED_OUT);
+
+    if (before == INIT &&
+        state_.compare_exchange_strong(before, EARLY_DELIVERY)) {
+      return;
+    }
+
+    assert(before == WAITING || before == TIMED_OUT);
+
+    if (before == TIMED_OUT) {
+      return;
+    }
+
+    assert(before == WAITING);
+    state_.store(LATE_DELIVERY, std::memory_order_release);
+    state_.futexWake(1);
   }

   /// Waits until post() has been called in the current Baton lifetime.
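
To make the branches above concrete, a small editor's sketch of the two non-waking paths of post(); it assumes this revision's timed_wait(deadline) API, as used by the test helpers further down:

  #include <cassert>
  #include <chrono>

  void post_paths_example() {
    folly::Baton<> early;
    early.post();   // INIT -> EARLY_DELIVERY: the waiter has not arrived yet
    early.wait();   // observes EARLY_DELIVERY and returns without blocking

    folly::Baton<> late;
    bool woke = late.timed_wait(std::chrono::steady_clock::now());
    assert(!woke);  // deadline already passed, so the waiter records TIMED_OUT
    late.post();    // post() sees TIMED_OUT and returns without futexWake(1)
  }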
@@ -19,8 +19,6 @@
 #include <thread>

 #include <folly/Benchmark.h>
-#include <folly/portability/GFlags.h>
-#include <folly/portability/GTest.h>
 #include <folly/portability/Semaphore.h>
 #include <folly/synchronization/test/BatonTestHelpers.h>
 #include <folly/test/DeterministicSchedule.h>
@@ -31,38 +29,22 @@ using folly::detail::EmulatedFutexAtomic;
 typedef DeterministicSchedule DSched;

-BENCHMARK(baton_pingpong_single_poster_blocking, iters) {
-  run_pingpong_test<std::atomic, true, true>(iters);
-}
-
-BENCHMARK(baton_pingpong_multi_poster_blocking, iters) {
-  run_pingpong_test<std::atomic, false, true>(iters);
-}
-
-BENCHMARK(baton_pingpong_single_poster_nonblocking, iters) {
-  run_pingpong_test<std::atomic, true, false>(iters);
-}
-
-BENCHMARK(baton_pingpong_multi_poster_nonblocking, iters) {
-  run_pingpong_test<std::atomic, false, false>(iters);
-}
+BENCHMARK(baton_pingpong_blocking, iters) {
+  run_pingpong_test<std::atomic, true>(iters);
+}
+
+BENCHMARK(baton_pingpong_nonblocking, iters) {
+  run_pingpong_test<std::atomic, false>(iters);
+}

 BENCHMARK_DRAW_LINE()

-BENCHMARK(baton_pingpong_emulated_futex_single_poster_blocking, iters) {
-  run_pingpong_test<EmulatedFutexAtomic, true, true>(iters);
-}
-
-BENCHMARK(baton_pingpong_emulated_futex_multi_poster_blocking, iters) {
-  run_pingpong_test<EmulatedFutexAtomic, false, true>(iters);
-}
-
-BENCHMARK(baton_pingpong_emulated_futex_single_poster_nonblocking, iters) {
-  run_pingpong_test<EmulatedFutexAtomic, true, false>(iters);
-}
-
-BENCHMARK(baton_pingpong_emulated_futex_multi_poster_nonblocking, iters) {
-  run_pingpong_test<EmulatedFutexAtomic, false, false>(iters);
-}
+BENCHMARK(baton_pingpong_emulated_futex_blocking, iters) {
+  run_pingpong_test<EmulatedFutexAtomic, true>(iters);
+}
+
+BENCHMARK(baton_pingpong_emulated_futex_nonblocking, iters) {
+  run_pingpong_test<EmulatedFutexAtomic, false>(iters);
+}

 BENCHMARK_DRAW_LINE()
@@ -93,12 +75,7 @@ BENCHMARK(posix_sem_pingpong, iters) {
 // to the required futex calls for the blocking case

 int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
   gflags::ParseCommandLineFlags(&argc, &argv, true);
-
-  auto rv = RUN_ALL_TESTS();
-  if (!rv && FLAGS_benchmark) {
-    folly::runBenchmarks();
-  }
-  return rv;
+  folly::runBenchmarks();
+  return 0;
 }
[The diff for one additional file in this commit is collapsed and not shown here.]
@@ -25,16 +25,16 @@ namespace test {
 typedef DeterministicSchedule DSched;

-template <template <typename> class Atom, bool SinglePoster, bool Blocking>
+template <template <typename> class Atom, bool Blocking>
 void run_basic_test() {
-  Baton<Atom, SinglePoster, Blocking> b;
+  Baton<Atom, Blocking> b;
   b.post();
   b.wait();
 }

-template <template <typename> class Atom, bool SinglePoster, bool Blocking>
+template <template <typename> class Atom, bool Blocking>
 void run_pingpong_test(int numRounds) {
-  using B = Baton<Atom, SinglePoster, Blocking>;
+  using B = Baton<Atom, Blocking>;
   B batons[17];
   B& a = batons[0];
   B& b = batons[16]; // to get it on a different cache line
@@ -53,17 +53,17 @@ void run_pingpong_test(int numRounds) {
   DSched::join(thr);
 }

-template <template <typename> class Atom, typename Clock, bool SinglePoster>
+template <template <typename> class Atom, typename Clock>
 void run_basic_timed_wait_tests() {
-  Baton<Atom, SinglePoster> b;
+  Baton<Atom> b;
   b.post();

   // tests if early delivery works fine
   EXPECT_TRUE(b.timed_wait(Clock::now()));
 }

-template <template <typename> class Atom, typename Clock, bool SinglePoster>
+template <template <typename> class Atom, typename Clock>
 void run_timed_wait_tmo_tests() {
-  Baton<Atom, SinglePoster> b;
+  Baton<Atom> b;

   auto thr = DSched::thread([&] {
     bool rv = b.timed_wait(Clock::now() + std::chrono::milliseconds(1));
@@ -73,9 +73,9 @@ void run_timed_wait_tmo_tests() {
   DSched::join(thr);
 }

-template <template <typename> class Atom, typename Clock, bool SinglePoster>
+template <template <typename> class Atom, typename Clock>
 void run_timed_wait_regular_test() {
-  Baton<Atom, SinglePoster> b;
+  Baton<Atom> b;

   auto thr = DSched::thread([&] {
     // To wait forever we'd like to use time_point<Clock>::max, but
@@ -104,68 +104,13 @@ void run_timed_wait_regular_test() {
   DSched::join(thr);
 }

-template <template <typename> class Atom, bool SinglePoster, bool Blocking>
+template <template <typename> class Atom, bool Blocking>
 void run_try_wait_tests() {
-  Baton<Atom, SinglePoster, Blocking> b;
+  Baton<Atom, Blocking> b;
   EXPECT_FALSE(b.try_wait());
   b.post();
   EXPECT_TRUE(b.try_wait());
 }

-template <template <typename> class Atom, bool SinglePoster, bool Blocking>
-void run_multi_producer_tests() {
-  constexpr int NPROD = 5;
-  Baton<Atom, SinglePoster, Blocking> local_ping[NPROD];
-  Baton<Atom, SinglePoster, Blocking> local_pong[NPROD];
-  Baton<Atom, /* SingleProducer = */ false, Blocking> global;
-  Baton<Atom, SinglePoster, Blocking> shutdown;
-
-  std::thread prod[NPROD];
-  for (int i = 0; i < NPROD; ++i) {
-    prod[i] = DSched::thread([&, i] {
-      if (!std::is_same<Atom<int>, DeterministicAtomic<int>>::value) {
-        // If we are using std::atomic (or EmulatedFutexAtomic) then
-        // a variable sleep here will make it more likely that
-        // global.post()-s will span more than one global.wait() by
-        // the consumer thread and for the latter to block (if the
-        // global baton is blocking). For DeterministicAtomic, we just
-        // rely on DeterministicSchedule to do the scheduling. The
-        // test won't fail if we lose the race, we just don't get
-        // coverage.
-        for (int j = 0; j < i; ++j) {
-          std::this_thread::sleep_for(std::chrono::microseconds(1));
-        }
-      }
-      local_ping[i].post();
-      global.post();
-      local_pong[i].wait();
-    });
-  }
-
-  auto cons = DSched::thread([&] {
-    while (true) {
-      global.wait();
-      global.reset();
-      if (shutdown.try_wait()) {
-        return;
-      }
-      for (int i = 0; i < NPROD; ++i) {
-        if (local_ping[i].try_wait()) {
-          local_ping[i].reset();
-          local_pong[i].post();
-        }
-      }
-    }
-  });
-
-  for (auto& t : prod) {
-    DSched::join(t);
-  }
-  global.post();
-  shutdown.post();
-  DSched::join(cons);
-}
-
 } // namespace test
 } // namespace folly
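
The removed run_multi_producer_tests above was the main in-tree user of the multi-poster mode. A simplified, single-round port of that pattern (editor's sketch, not part of this commit) keeps the per-producer Batons single-posted and moves the shared "global" signal to a SaturatingSemaphore, whose post() may be called concurrently by every producer; the DeterministicSchedule plumbing and the original wait()/reset() round loop are omitted, and the include paths are assumed:

  #include <thread>
  #include <folly/Baton.h>  // path may differ by folly version
  #include <folly/synchronization/SaturatingSemaphore.h>

  void multi_producer_handoff() {
    constexpr int NPROD = 5;
    folly::Baton<> local_ping[NPROD];  // one poster each
    folly::Baton<> local_pong[NPROD];  // one poster each
    folly::SaturatingSemaphore<true /* MayBlock */> global;  // many posters

    std::thread prod[NPROD];
    for (int i = 0; i < NPROD; ++i) {
      prod[i] = std::thread([&, i] {
        local_ping[i].post();
        global.post();          // idempotent: concurrent posts are all fine
        local_pong[i].wait();
      });
    }

    global.wait();              // satisfied by whichever producer posts first
    for (int i = 0; i < NPROD; ++i) {
      local_ping[i].wait();     // each local baton is posted exactly once
      local_pong[i].post();     // release producer i
    }
    for (auto& t : prod) {
      t.join();
    }
  }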