Commit c8f7015f authored by Yedidya Feldblum, committed by Facebook Github Bot

Use the portable test semaphore in DeterministicSchedule

Summary: [Folly] Use the portable test semaphore in `DeterministicSchedule` in place of `sem_t`, which is not available on all supported platforms. The test semaphore is implemented purely in terms of `std::mutex` and `std::condition_variable`, both of which are available everywhere, and is therefore portable. A minimal sketch of that construction follows the commit metadata below.

Reviewed By: nbronson

Differential Revision: D15909982

fbshipit-source-id: 63b3e5d16864ffee9fad20bd2ad46730fba220dd
parent 082df133
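
For readers unfamiliar with the test semaphore, here is a minimal sketch of the construction the summary describes, built only from `std::mutex` and `std::condition_variable`. The class name is illustrative and this is not folly's implementation; the real `folly/synchronization/test/Semaphore.h` may differ in interface and details (the diff below shows its constructor taking an initial count, with `bool` arguments such as `Sem(true)` converting to counts of 1 and 0):

```cpp
#include <condition_variable>
#include <cstddef>
#include <mutex>

// Illustrative only -- not folly's implementation.
class SketchSemaphore {
 public:
  explicit SketchSemaphore(std::size_t value = 0) : value_(value) {}

  // Increment the count and wake one waiter, if any.
  void post() {
    std::unique_lock<std::mutex> lock(mutex_);
    ++value_;
    cv_.notify_one();
  }

  // Block until the count is positive, then decrement it.
  void wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return value_ > 0; });
    --value_;
  }

  // Decrement the count if positive; otherwise fail without blocking.
  bool try_wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (value_ == 0) {
      return false;
    }
    --value_;
    return true;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  std::size_t value_;
};
```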
--- a/folly/test/DeterministicSchedule.cpp
+++ b/folly/test/DeterministicSchedule.cpp
@@ -30,6 +30,8 @@
 namespace folly {
 namespace test {
 
+using Sem = DeterministicSchedule::Sem;
+
 AuxChk DeterministicSchedule::aux_chk;
 
 // access is protected by futexLock
@@ -123,8 +125,7 @@ DeterministicSchedule::DeterministicSchedule(
   assert(tls.aux_act == nullptr);
 
   tls.exiting = false;
-  tls.sem = new sem_t;
-  sem_init(tls.sem, 0, 1);
+  tls.sem = new Sem(true);
   sems_.push_back(tls.sem);
   tls.threadId = nextThreadId_++;
@@ -207,7 +208,7 @@ DeterministicSchedule::uniformSubset(uint64_t seed, size_t n, size_t m) {
 
 void DeterministicSchedule::beforeSharedAccess() {
   auto& tls = TLState::get();
   if (tls.sem) {
-    sem_wait(tls.sem);
+    tls.sem->wait();
   }
 }
@@ -217,7 +218,7 @@ void DeterministicSchedule::afterSharedAccess() {
   if (!sched) {
     return;
   }
-  sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
+  sched->sems_[sched->scheduler_(sched->sems_.size())]->post();
 }
 
 void DeterministicSchedule::afterSharedAccess(bool success) {
@@ -227,7 +228,7 @@ void DeterministicSchedule::afterSharedAccess(bool success) {
     return;
   }
   sched->callAux(success);
-  sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
+  sched->sems_[sched->scheduler_(sched->sems_.size())]->post();
 }
 
 size_t DeterministicSchedule::getRandNumber(size_t n) {
@@ -265,7 +266,7 @@ void DeterministicSchedule::clearAuxChk() {
   aux_chk = nullptr;
 }
 
-void DeterministicSchedule::reschedule(sem_t* sem) {
+void DeterministicSchedule::reschedule(Sem* sem) {
   auto& tls = TLState::get();
   auto sched = tls.sched;
   if (sched) {
@@ -273,7 +274,7 @@ void DeterministicSchedule::reschedule(sem_t* sem) {
   }
 }
 
-sem_t* DeterministicSchedule::descheduleCurrentThread() {
+Sem* DeterministicSchedule::descheduleCurrentThread() {
   auto& tls = TLState::get();
   auto sched = tls.sched;
   if (sched) {
@@ -283,16 +284,15 @@ sem_t* DeterministicSchedule::descheduleCurrentThread() {
   return tls.sem;
 }
 
-sem_t* DeterministicSchedule::beforeThreadCreate() {
-  sem_t* s = new sem_t;
-  sem_init(s, 0, 0);
+Sem* DeterministicSchedule::beforeThreadCreate() {
+  Sem* s = new Sem(false);
   beforeSharedAccess();
   sems_.push_back(s);
   afterSharedAccess();
   return s;
 }
 
-void DeterministicSchedule::afterThreadCreate(sem_t* sem) {
+void DeterministicSchedule::afterThreadCreate(Sem* sem) {
   auto& tls = TLState::get();
   assert(tls.sem == nullptr);
   assert(tls.sched == nullptr);
@@ -332,12 +332,11 @@ void DeterministicSchedule::beforeThreadExit() {
      * enters the thread local destructors. */
     exitingSems_[std::this_thread::get_id()] = tls.sem;
     afterSharedAccess();
-    sem_wait(tls.sem);
+    tls.sem->wait();
   }
   tls.sched = nullptr;
   tls.aux_act = nullptr;
   tls.exiting = true;
-  sem_destroy(tls.sem);
   delete tls.sem;
   tls.sem = nullptr;
 }
@@ -348,7 +347,7 @@ void DeterministicSchedule::waitForBeforeThreadExit(std::thread& child) {
   beforeSharedAccess();
   assert(tls.sched->joins_.count(child.get_id()) == 0);
   if (tls.sched->active_.count(child.get_id())) {
-    sem_t* sem = descheduleCurrentThread();
+    Sem* sem = descheduleCurrentThread();
     tls.sched->joins_.insert({child.get_id(), sem});
     afterSharedAccess();
     // Wait to be scheduled by exiting child thread
@@ -372,7 +371,7 @@ void DeterministicSchedule::joinAll(std::vector<std::thread>& children) {
    * shared access during thread local destructors.*/
   for (auto& child : children) {
     if (sched) {
-      sem_post(sched->exitingSems_[child.get_id()]);
+      sched->exitingSems_[child.get_id()]->post();
     }
     child.join();
   }
@@ -387,7 +386,7 @@ void DeterministicSchedule::join(std::thread& child) {
   atomic_thread_fence(std::memory_order_seq_cst);
   FOLLY_TEST_DSCHED_VLOG("joined " << std::hex << child.get_id());
   if (sched) {
-    sem_post(sched->exitingSems_[child.get_id()]);
+    sched->exitingSems_[child.get_id()]->post();
   }
   child.join();
 }
@@ -404,45 +403,40 @@ void DeterministicSchedule::callAux(bool success) {
   }
 }
 
-static std::unordered_map<sem_t*, std::unique_ptr<ThreadSyncVar>> semSyncVar;
+static std::unordered_map<Sem*, std::unique_ptr<ThreadSyncVar>> semSyncVar;
 
-void DeterministicSchedule::post(sem_t* sem) {
+void DeterministicSchedule::post(Sem* sem) {
   beforeSharedAccess();
   if (semSyncVar.count(sem) == 0) {
     semSyncVar[sem] = std::make_unique<ThreadSyncVar>();
   }
   semSyncVar[sem]->release();
-  sem_post(sem);
-  FOLLY_TEST_DSCHED_VLOG("sem_post(" << sem << ")");
+  sem->post();
+  FOLLY_TEST_DSCHED_VLOG("sem->post() [sem=" << sem << "]");
   afterSharedAccess();
 }
 
-bool DeterministicSchedule::tryWait(sem_t* sem) {
+bool DeterministicSchedule::tryWait(Sem* sem) {
   beforeSharedAccess();
   if (semSyncVar.count(sem) == 0) {
     semSyncVar[sem] = std::make_unique<ThreadSyncVar>();
   }
-  int rv = sem_trywait(sem);
-  int e = rv == 0 ? 0 : errno;
+  bool acquired = sem->try_wait();
+  const char* acquired_s = acquired ? "true" : "false";
   FOLLY_TEST_DSCHED_VLOG(
-      "sem_trywait(" << sem << ") = " << rv << " errno=" << e);
-  if (rv == 0) {
+      "sem->try_wait() [sem=" << sem << "] -> " << acquired_s);
+  if (acquired) {
     semSyncVar[sem]->acq_rel();
   } else {
     semSyncVar[sem]->acquire();
   }
   afterSharedAccess();
-  if (rv == 0) {
-    return true;
-  } else {
-    assert(e == EAGAIN);
-    return false;
-  }
+  return acquired;
 }
 
-void DeterministicSchedule::wait(sem_t* sem) {
+void DeterministicSchedule::wait(Sem* sem) {
   while (!tryWait(sem)) {
     // we're not busy waiting because this is a deterministic schedule
   }
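
The .cpp changes above are mechanical, but they rest on the invariant that makes the schedule deterministic: every participating thread owns one semaphore in `sems_`, exactly one semaphore holds the "token" at any moment, and `afterSharedAccess()` hands the token to a thread chosen by `scheduler_`. A standalone sketch of that token-passing pattern, reusing the `SketchSemaphore` from the sketch above (names are illustrative; folly's `scheduler_` is a seeded, replayable function rather than `rand()`):

```cpp
#include <cstdlib>
#include <vector>

// Illustrative token-passing core: a thread may touch shared state only
// while it holds the single token, i.e. after its semaphore is posted.
struct SketchScheduler {
  std::vector<SketchSemaphore*> sems; // one semaphore per thread

  // Block the calling thread until the scheduler posts its semaphore.
  void beforeSharedAccess(SketchSemaphore* mySem) { mySem->wait(); }

  // Pass the token to some thread; picking the index is the entire
  // scheduling policy (folly's scheduler_ makes this seeded and replayable).
  void afterSharedAccess() {
    sems[static_cast<std::size_t>(std::rand()) % sems.size()]->post();
  }
};
```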
--- a/folly/test/DeterministicSchedule.h
+++ b/folly/test/DeterministicSchedule.h
@@ -32,8 +32,8 @@
 #include <folly/SingletonThreadLocal.h>
 #include <folly/concurrency/CacheLocality.h>
 #include <folly/detail/Futex.h>
-#include <folly/portability/Semaphore.h>
 #include <folly/synchronization/detail/AtomicUtils.h>
+#include <folly/synchronization/test/Semaphore.h>
 
 namespace folly {
 namespace test {
@@ -136,7 +136,7 @@ class ThreadSyncVar {
  * constructor, DeterministicSchedule::join(thr) instead of thr.join(),
  * and access semaphores via the helper functions in DeterministicSchedule.
  * Locks are not yet supported, although they would be easy to add with
- * the same strategy as the mapping of sem_wait.
+ * the same strategy as the mapping of Sem::wait.
  *
  * The actual schedule is defined by a function from n -> [0,n). At
  * each step, the function will be given the number of active threads
@@ -146,6 +146,8 @@ class ThreadSyncVar {
  */
 class DeterministicSchedule {
  public:
+  using Sem = Semaphore;
+
  /**
   * Arranges for the current thread (and all threads created by
   * DeterministicSchedule::thread on a thread participating in this
@@ -238,15 +240,15 @@ class DeterministicSchedule {
   */
  static void joinAll(std::vector<std::thread>& children);
 
-  /** Calls sem_post(sem) as part of a deterministic schedule. */
-  static void post(sem_t* sem);
+  /** Calls sem->post() as part of a deterministic schedule. */
+  static void post(Sem* sem);
 
-  /** Calls sem_trywait(sem) as part of a deterministic schedule, returning
+  /** Calls sem->try_wait() as part of a deterministic schedule, returning
    * true on success and false on transient failure. */
-  static bool tryWait(sem_t* sem);
+  static bool tryWait(Sem* sem);
 
-  /** Calls sem_wait(sem) as part of a deterministic schedule. */
-  static void wait(sem_t* sem);
+  /** Calls sem->wait() as part of a deterministic schedule. */
+  static void wait(Sem* sem);
 
  /** Used scheduler_ to get a random number b/w [0, n). If tls_sched is
   * not set-up it falls back to std::rand() */
@@ -272,7 +274,7 @@ class DeterministicSchedule {
  static void clearAuxChk();
 
  /** Remove the current thread's semaphore from sems_ */
-  static sem_t* descheduleCurrentThread();
+  static Sem* descheduleCurrentThread();
 
  /** Returns true if the current thread has already completed
   * the thread function, for example if the thread is executing
@@ -283,7 +285,7 @@ class DeterministicSchedule {
  }
 
  /** Add sem back into sems_ */
-  static void reschedule(sem_t* sem);
+  static void reschedule(Sem* sem);
 
  static bool isActive() {
    auto& tls = TLState::get();
@@ -313,7 +315,7 @@ class DeterministicSchedule {
    PerThreadState& operator=(PerThreadState&&) = default;
    PerThreadState() = default;
 
-    sem_t* sem{nullptr};
+    Sem* sem{nullptr};
    DeterministicSchedule* sched{nullptr};
    bool exiting{false};
    DSchedThreadId threadId{};
@@ -323,10 +325,10 @@ class DeterministicSchedule {
  static AuxChk aux_chk;
 
  std::function<size_t(size_t)> scheduler_;
-  std::vector<sem_t*> sems_;
+  std::vector<Sem*> sems_;
  std::unordered_set<std::thread::id> active_;
-  std::unordered_map<std::thread::id, sem_t*> joins_;
-  std::unordered_map<std::thread::id, sem_t*> exitingSems_;
+  std::unordered_map<std::thread::id, Sem*> joins_;
+  std::unordered_map<std::thread::id, Sem*> exitingSems_;
  std::vector<ThreadInfo> threadInfoMap_;
  ThreadTimestamps seqCstFenceOrder_;
@@ -340,8 +342,8 @@ class DeterministicSchedule {
   * functions for some shared accesses. */
  uint64_t step_;
 
-  sem_t* beforeThreadCreate();
-  void afterThreadCreate(sem_t*);
+  Sem* beforeThreadCreate();
+  void afterThreadCreate(Sem*);
  void beforeThreadExit();
  void waitForBeforeThreadExit(std::thread& child);
  /** Calls user-defined auxiliary function (if any) */
@@ -744,8 +746,10 @@ void atomic_notify_all(const DeterministicAtomic<Integer>*) {}
  * cooperates with DeterministicSchedule.
  */
 struct DeterministicMutex {
+  using Sem = DeterministicSchedule::Sem;
+
  std::mutex m;
-  std::queue<sem_t*> waiters_;
+  std::queue<Sem*> waiters_;
  ThreadSyncVar syncVar_;
 
  DeterministicMutex() = default;
@@ -757,7 +761,7 @@ struct DeterministicMutex {
    FOLLY_TEST_DSCHED_VLOG(this << ".lock()");
    DeterministicSchedule::beforeSharedAccess();
    while (!m.try_lock()) {
-      sem_t* sem = DeterministicSchedule::descheduleCurrentThread();
+      Sem* sem = DeterministicSchedule::descheduleCurrentThread();
      if (sem) {
        waiters_.push(sem);
      }
@@ -790,7 +794,7 @@ struct DeterministicMutex {
      syncVar_.release();
    }
    if (!waiters_.empty()) {
-      sem_t* sem = waiters_.front();
+      Sem* sem = waiters_.front();
      DeterministicSchedule::reschedule(sem);
      waiters_.pop();
    }
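
To see how the header's pieces fit together, here is a hedged usage sketch of a test. It follows the conventions the header documents (create threads via `DeterministicSchedule::thread`, join via `joinAll`, touch semaphores only through the static helpers) and assumes a `DeterministicSchedule::uniform(seed)` factory for the scheduler function; treat it as a sketch, not a verbatim folly test:

```cpp
#include <thread>
#include <vector>

#include <folly/test/DeterministicSchedule.h>

using folly::test::DeterministicSchedule;

void sketchSemaphoreTest() {
  // Installing the schedule on this thread makes it, and every child
  // created via DeterministicSchedule::thread, run under the scheduler.
  DeterministicSchedule sched(DeterministicSchedule::uniform(/*seed=*/42));

  DeterministicSchedule::Sem sem(0);
  std::vector<std::thread> children;
  for (int i = 0; i < 4; ++i) {
    children.push_back(DeterministicSchedule::thread(
        // post() must go through the static helper so the scheduler can
        // interleave it deterministically (and replay it from the seed).
        [&] { DeterministicSchedule::post(&sem); }));
  }
  for (int i = 0; i < 4; ++i) {
    DeterministicSchedule::wait(&sem);
  }
  DeterministicSchedule::joinAll(children);
}
```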
--- a/folly/test/IndexedMemPoolTest.cpp
+++ b/folly/test/IndexedMemPoolTest.cpp
@@ -65,13 +65,11 @@ TEST(IndexedMemPool, no_starvation) {
   EXPECT_EQ(pipe(fd), 0);
 
   // makes sure we wait for available nodes, rather than fail allocIndex
-  sem_t allocSem;
-  sem_init(&allocSem, 0, poolSize);
+  DeterministicSchedule::Sem allocSem(poolSize);
 
   // this semaphore is only needed for deterministic replay, so that we
   // always block in an Sched:: operation rather than in a read() syscall
-  sem_t readSem;
-  sem_init(&readSem, 0, 0);
+  DeterministicSchedule::Sem readSem(0);
 
   std::thread produce = Sched::thread([&]() {
     for (auto i = 0; i < count; ++i) {
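
A note on the test-file change: because `Sem` takes its initial count in the constructor and releases its resources in its destructor, each `sem_init`/`sem_destroy` pair collapses into a single declaration. The same RAII design is why the explicit `sem_destroy(tls.sem)` call is deleted in `beforeThreadExit` above, leaving just `delete tls.sem`.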