Commit c8f7015f authored by Yedidya Feldblum's avatar Yedidya Feldblum Committed by Facebook Github Bot

Use the portable test semaphore in DeterministicSchedule

Summary: [Folly] Use the portable test semaphore in `DeterministicSchedule`, vs. `sem_t`, which is not portable everywhere. The portable test semaphore is implemented purely in terms of `std::mutex` and `std::condition_variable`, and is therefore portable.

Reviewed By: nbronson

Differential Revision: D15909982

fbshipit-source-id: 63b3e5d16864ffee9fad20bd2ad46730fba220dd
parent 082df133
......@@ -30,6 +30,8 @@
namespace folly {
namespace test {
using Sem = DeterministicSchedule::Sem;
AuxChk DeterministicSchedule::aux_chk;
// access is protected by futexLock
......@@ -123,8 +125,7 @@ DeterministicSchedule::DeterministicSchedule(
assert(tls.aux_act == nullptr);
tls.exiting = false;
tls.sem = new sem_t;
sem_init(tls.sem, 0, 1);
tls.sem = new Sem(true);
sems_.push_back(tls.sem);
tls.threadId = nextThreadId_++;
......@@ -207,7 +208,7 @@ DeterministicSchedule::uniformSubset(uint64_t seed, size_t n, size_t m) {
// Blocks the calling thread on its per-thread semaphore until the
// deterministic scheduler grants it the (single) shared-access token.
// Threads not managed by a schedule have tls.sem == nullptr and pass
// straight through.
void DeterministicSchedule::beforeSharedAccess() {
  auto& tls = TLState::get();
  if (tls.sem) {
    // NOTE: the stripped diff left both the old sem_wait(tls.sem) and the
    // new call here; only the portable-Sem call must remain — waiting twice
    // on a semaphore initialized with one permit would deadlock.
    tls.sem->wait();
  }
}
......@@ -217,7 +218,7 @@ void DeterministicSchedule::afterSharedAccess() {
if (!sched) {
return;
}
sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
sched->sems_[sched->scheduler_(sched->sems_.size())]->post();
}
void DeterministicSchedule::afterSharedAccess(bool success) {
......@@ -227,7 +228,7 @@ void DeterministicSchedule::afterSharedAccess(bool success) {
return;
}
sched->callAux(success);
sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
sched->sems_[sched->scheduler_(sched->sems_.size())]->post();
}
size_t DeterministicSchedule::getRandNumber(size_t n) {
......@@ -265,7 +266,7 @@ void DeterministicSchedule::clearAuxChk() {
aux_chk = nullptr;
}
void DeterministicSchedule::reschedule(sem_t* sem) {
void DeterministicSchedule::reschedule(Sem* sem) {
auto& tls = TLState::get();
auto sched = tls.sched;
if (sched) {
......@@ -273,7 +274,7 @@ void DeterministicSchedule::reschedule(sem_t* sem) {
}
}
sem_t* DeterministicSchedule::descheduleCurrentThread() {
Sem* DeterministicSchedule::descheduleCurrentThread() {
auto& tls = TLState::get();
auto sched = tls.sched;
if (sched) {
......@@ -283,16 +284,15 @@ sem_t* DeterministicSchedule::descheduleCurrentThread() {
return tls.sem;
}
sem_t* DeterministicSchedule::beforeThreadCreate() {
sem_t* s = new sem_t;
sem_init(s, 0, 0);
Sem* DeterministicSchedule::beforeThreadCreate() {
Sem* s = new Sem(false);
beforeSharedAccess();
sems_.push_back(s);
afterSharedAccess();
return s;
}
void DeterministicSchedule::afterThreadCreate(sem_t* sem) {
void DeterministicSchedule::afterThreadCreate(Sem* sem) {
auto& tls = TLState::get();
assert(tls.sem == nullptr);
assert(tls.sched == nullptr);
......@@ -332,12 +332,11 @@ void DeterministicSchedule::beforeThreadExit() {
* enters the thread local destructors. */
exitingSems_[std::this_thread::get_id()] = tls.sem;
afterSharedAccess();
sem_wait(tls.sem);
tls.sem->wait();
}
tls.sched = nullptr;
tls.aux_act = nullptr;
tls.exiting = true;
sem_destroy(tls.sem);
delete tls.sem;
tls.sem = nullptr;
}
......@@ -348,7 +347,7 @@ void DeterministicSchedule::waitForBeforeThreadExit(std::thread& child) {
beforeSharedAccess();
assert(tls.sched->joins_.count(child.get_id()) == 0);
if (tls.sched->active_.count(child.get_id())) {
sem_t* sem = descheduleCurrentThread();
Sem* sem = descheduleCurrentThread();
tls.sched->joins_.insert({child.get_id(), sem});
afterSharedAccess();
// Wait to be scheduled by exiting child thread
......@@ -372,7 +371,7 @@ void DeterministicSchedule::joinAll(std::vector<std::thread>& children) {
* shared access during thread local destructors.*/
for (auto& child : children) {
if (sched) {
sem_post(sched->exitingSems_[child.get_id()]);
sched->exitingSems_[child.get_id()]->post();
}
child.join();
}
......@@ -387,7 +386,7 @@ void DeterministicSchedule::join(std::thread& child) {
atomic_thread_fence(std::memory_order_seq_cst);
FOLLY_TEST_DSCHED_VLOG("joined " << std::hex << child.get_id());
if (sched) {
sem_post(sched->exitingSems_[child.get_id()]);
sched->exitingSems_[child.get_id()]->post();
}
child.join();
}
......@@ -404,45 +403,40 @@ void DeterministicSchedule::callAux(bool success) {
}
}
// Per-semaphore synchronization-variable map used to model happens-before
// edges for post()/tryWait(); access is serialized by the shared-access
// token (before/afterSharedAccess), so no extra locking is needed.
// (The stripped diff duplicated the old sem_t-keyed declaration; keeping
// both would be a redefinition error.)
static std::unordered_map<Sem*, std::unique_ptr<ThreadSyncVar>> semSyncVar;
void DeterministicSchedule::post(sem_t* sem) {
void DeterministicSchedule::post(Sem* sem) {
beforeSharedAccess();
if (semSyncVar.count(sem) == 0) {
semSyncVar[sem] = std::make_unique<ThreadSyncVar>();
}
semSyncVar[sem]->release();
sem_post(sem);
FOLLY_TEST_DSCHED_VLOG("sem_post(" << sem << ")");
sem->post();
FOLLY_TEST_DSCHED_VLOG("sem->post() [sem=" << sem << "]");
afterSharedAccess();
}
bool DeterministicSchedule::tryWait(sem_t* sem) {
bool DeterministicSchedule::tryWait(Sem* sem) {
beforeSharedAccess();
if (semSyncVar.count(sem) == 0) {
semSyncVar[sem] = std::make_unique<ThreadSyncVar>();
}
int rv = sem_trywait(sem);
int e = rv == 0 ? 0 : errno;
bool acquired = sem->try_wait();
bool acquired_s = acquired ? "true" : "false";
FOLLY_TEST_DSCHED_VLOG(
"sem_trywait(" << sem << ") = " << rv << " errno=" << e);
if (rv == 0) {
"sem->try_wait() [sem=" << sem << "] -> " << acquired_s);
if (acquired) {
semSyncVar[sem]->acq_rel();
} else {
semSyncVar[sem]->acquire();
}
afterSharedAccess();
if (rv == 0) {
return true;
} else {
assert(e == EAGAIN);
return false;
}
return acquired;
}
void DeterministicSchedule::wait(sem_t* sem) {
void DeterministicSchedule::wait(Sem* sem) {
while (!tryWait(sem)) {
// we're not busy waiting because this is a deterministic schedule
}
......
......@@ -32,8 +32,8 @@
#include <folly/SingletonThreadLocal.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/detail/Futex.h>
#include <folly/portability/Semaphore.h>
#include <folly/synchronization/detail/AtomicUtils.h>
#include <folly/synchronization/test/Semaphore.h>
namespace folly {
namespace test {
......@@ -136,7 +136,7 @@ class ThreadSyncVar {
* constructor, DeterministicSchedule::join(thr) instead of thr.join(),
* and access semaphores via the helper functions in DeterministicSchedule.
* Locks are not yet supported, although they would be easy to add with
* the same strategy as the mapping of sem_wait.
* the same strategy as the mapping of Sem::wait.
*
* The actual schedule is defined by a function from n -> [0,n). At
* each step, the function will be given the number of active threads
......@@ -146,6 +146,8 @@ class ThreadSyncVar {
*/
class DeterministicSchedule {
public:
using Sem = Semaphore;
/**
* Arranges for the current thread (and all threads created by
* DeterministicSchedule::thread on a thread participating in this
......@@ -238,15 +240,15 @@ class DeterministicSchedule {
*/
static void joinAll(std::vector<std::thread>& children);
/** Calls sem_post(sem) as part of a deterministic schedule. */
static void post(sem_t* sem);
/** Calls sem->post() as part of a deterministic schedule. */
static void post(Sem* sem);
/** Calls sem_trywait(sem) as part of a deterministic schedule, returning
/** Calls sem->try_wait() as part of a deterministic schedule, returning
* true on success and false on transient failure. */
static bool tryWait(sem_t* sem);
static bool tryWait(Sem* sem);
/** Calls sem_wait(sem) as part of a deterministic schedule. */
static void wait(sem_t* sem);
/** Calls sem->wait() as part of a deterministic schedule. */
static void wait(Sem* sem);
/** Used scheduler_ to get a random number b/w [0, n). If tls_sched is
* not set-up it falls back to std::rand() */
......@@ -272,7 +274,7 @@ class DeterministicSchedule {
static void clearAuxChk();
/** Remove the current thread's semaphore from sems_ */
static sem_t* descheduleCurrentThread();
static Sem* descheduleCurrentThread();
/** Returns true if the current thread has already completed
* the thread function, for example if the thread is executing
......@@ -283,7 +285,7 @@ class DeterministicSchedule {
}
/** Add sem back into sems_ */
static void reschedule(sem_t* sem);
static void reschedule(Sem* sem);
static bool isActive() {
auto& tls = TLState::get();
......@@ -313,7 +315,7 @@ class DeterministicSchedule {
PerThreadState& operator=(PerThreadState&&) = default;
PerThreadState() = default;
sem_t* sem{nullptr};
Sem* sem{nullptr};
DeterministicSchedule* sched{nullptr};
bool exiting{false};
DSchedThreadId threadId{};
......@@ -323,10 +325,10 @@ class DeterministicSchedule {
static AuxChk aux_chk;
std::function<size_t(size_t)> scheduler_;
std::vector<sem_t*> sems_;
std::vector<Sem*> sems_;
std::unordered_set<std::thread::id> active_;
std::unordered_map<std::thread::id, sem_t*> joins_;
std::unordered_map<std::thread::id, sem_t*> exitingSems_;
std::unordered_map<std::thread::id, Sem*> joins_;
std::unordered_map<std::thread::id, Sem*> exitingSems_;
std::vector<ThreadInfo> threadInfoMap_;
ThreadTimestamps seqCstFenceOrder_;
......@@ -340,8 +342,8 @@ class DeterministicSchedule {
* functions for some shared accesses. */
uint64_t step_;
sem_t* beforeThreadCreate();
void afterThreadCreate(sem_t*);
Sem* beforeThreadCreate();
void afterThreadCreate(Sem*);
void beforeThreadExit();
void waitForBeforeThreadExit(std::thread& child);
/** Calls user-defined auxiliary function (if any) */
......@@ -744,8 +746,10 @@ void atomic_notify_all(const DeterministicAtomic<Integer>*) {}
* cooperates with DeterministicSchedule.
*/
struct DeterministicMutex {
using Sem = DeterministicSchedule::Sem;
std::mutex m;
std::queue<sem_t*> waiters_;
std::queue<Sem*> waiters_;
ThreadSyncVar syncVar_;
DeterministicMutex() = default;
......@@ -757,7 +761,7 @@ struct DeterministicMutex {
FOLLY_TEST_DSCHED_VLOG(this << ".lock()");
DeterministicSchedule::beforeSharedAccess();
while (!m.try_lock()) {
sem_t* sem = DeterministicSchedule::descheduleCurrentThread();
Sem* sem = DeterministicSchedule::descheduleCurrentThread();
if (sem) {
waiters_.push(sem);
}
......@@ -790,7 +794,7 @@ struct DeterministicMutex {
syncVar_.release();
}
if (!waiters_.empty()) {
sem_t* sem = waiters_.front();
Sem* sem = waiters_.front();
DeterministicSchedule::reschedule(sem);
waiters_.pop();
}
......
......@@ -65,13 +65,11 @@ TEST(IndexedMemPool, no_starvation) {
EXPECT_EQ(pipe(fd), 0);
// makes sure we wait for available nodes, rather than fail allocIndex
sem_t allocSem;
sem_init(&allocSem, 0, poolSize);
DeterministicSchedule::Sem allocSem(poolSize);
// this semaphore is only needed for deterministic replay, so that we
// always block in an Sched:: operation rather than in a read() syscall
sem_t readSem;
sem_init(&readSem, 0, 0);
DeterministicSchedule::Sem readSem(0);
std::thread produce = Sched::thread([&]() {
for (auto i = 0; i < count; ++i) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment