Commit 140f6df7 authored by Doron Roberts-Kedes, committed by Facebook GitHub Bot

DeterministicSchedule: Introduce BufferedAtomic

Summary:
Mimic the atomic history buffers in relacy. This diff allows atomic loads to return older values, as long as the C++ memory model is respected. The memory model is enforced with vector clocks (ThreadTimestamps).
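As an illustration only (a hypothetical sketch, not part of this diff; it reuses the DeterministicAtomicImpl/BufferedAtomic combination exercised by the new tests), a relaxed load may now legally observe a stale value:

  #include <folly/test/BufferedAtomic.h>

  #include <thread>

  using namespace folly::test;
  using DSched = DeterministicSchedule;

  int main() {
    DSched sched(DSched::uniform(0));
    DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> x;
    x.store(0, std::memory_order_relaxed);
    std::thread t = DSched::thread([&]() {
      x.store(1, std::memory_order_relaxed);
    });
    // Nothing orders t's store before these loads, so the history buffer may
    // keep serving the stale 0 for several iterations; the memory model only
    // bounds which records are eligible, not which one is chosen.
    while (x.load(std::memory_order_relaxed) == 0) {
    }
    DSched::join(t);
    return 0;
  }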

Depends on D10418223

Reviewed By: djwatson

Differential Revision: D10418450

fbshipit-source-id: 0556259e96d512030ff9924bbd5aa0f79a38dfff
parent e1766e5e
@@ -32,5 +32,22 @@ inline std::memory_order default_failure_memory_order(
return successMode;
}
}
inline char const* memory_order_to_str(std::memory_order mo) {
switch (mo) {
case std::memory_order_relaxed:
return "relaxed";
case std::memory_order_consume:
return "consume";
case std::memory_order_acquire:
return "acquire";
case std::memory_order_release:
return "release";
case std::memory_order_acq_rel:
return "acq_rel";
case std::memory_order_seq_cst:
return "seq_cst";
}
}
} // namespace detail
} // namespace folly
/*
* Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sstream>
#include <folly/synchronization/detail/AtomicUtils.h>
#include <folly/test/DeterministicSchedule.h>
namespace folly {
namespace test {
template <typename T>
class RecordBuffer {
private:
struct Record {
Record(DSchedTimestamp ts, DSchedThreadId tid, bool sc, T val)
: acqRelTimestamp_(ts), storingThread_(tid), seqCst_(sc), val_(val) {}
explicit Record(T val) : val_(val) {}
Record() = delete;
DSchedTimestamp acqRelTimestamp_;
DSchedThreadId storingThread_;
bool seqCst_;
T val_;
ThreadTimestamps acqRelOrder_;
ThreadTimestamps firstObservedOrder_;
};
public:
RecordBuffer() = default;
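// Perform a load on behalf of the calling thread. Every record from the
// newest down to getOldestAllowed() is eligible; the schedule picks one
// pseudo-randomly so runs explore legal stale reads as well as fresh ones.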
T load(ThreadInfo& threadInfo, std::memory_order mo, bool rmw) {
DSchedThreadId tid = DeterministicSchedule::getThreadId();
return load(tid, threadInfo, mo, rmw);
}
T load(
DSchedThreadId tid,
ThreadInfo& threadInfo,
std::memory_order mo,
bool rmw = false) {
if (!rmw) {
assert(mo != std::memory_order_release);
assert(mo != std::memory_order_acq_rel);
}
if (!isInitialized()) {
return 0;
}
size_t oldestAllowed =
rmw ? 0 : getOldestAllowed(mo, threadInfo.acqRelOrder_);
size_t selected = DeterministicSchedule::getRandNumber(oldestAllowed + 1);
FOLLY_TEST_DSCHED_VLOG(
"buffered load, mo: " << folly::detail::memory_order_to_str(mo)
<< " index " << selected << "/" << oldestAllowed
<< " allowed."
<< " current value: " << loadDirect()
<< " return value: " << history_[selected].val_);
Record& rec = history_[selected];
DSchedTimestamp ts = threadInfo.acqRelOrder_.advance(tid);
rec.firstObservedOrder_.setIfNotPresent(tid, ts);
bool synch =
(mo == std::memory_order_acquire || mo == std::memory_order_acq_rel ||
mo == std::memory_order_seq_cst);
ThreadTimestamps& dst =
synch ? threadInfo.acqRelOrder_ : threadInfo.acqFenceOrder_;
dst.sync(rec.acqRelOrder_);
return rec.val_;
}
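// loadDirect/storeDirect bypass the memory model and touch only the newest
// record; they serve unguarded accesses outside an active DSched run and
// the comparison step of compare_exchange.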
T loadDirect() const {
if (!isInitialized()) {
return 0;
}
return history_[0].val_;
}
void storeDirect(T val) {
if (isInitialized()) {
history_[0].val_ = val;
} else {
history_.emplace_front(val);
}
}
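// Perform a store on behalf of the calling thread: advance its clock and
// push a new record tagged with the resulting timestamp onto the history.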
void store(ThreadInfo& threadInfo, T v, std::memory_order mo, bool rmw) {
DSchedThreadId tid = DeterministicSchedule::getThreadId();
store(tid, threadInfo, v, mo, rmw);
}
void store(
DSchedThreadId tid,
ThreadInfo& threadInfo,
T v,
std::memory_order mo,
bool rmw = false) {
if (!rmw) {
assert(mo != std::memory_order_acquire);
assert(mo != std::memory_order_acq_rel);
assert(mo != std::memory_order_consume);
}
DSchedTimestamp ts = threadInfo.acqRelOrder_.advance(tid);
bool preserve = isInitialized() &&
(rmw || tid.val == history_.front().storingThread_.val);
bool sc = (mo == std::memory_order_seq_cst);
history_.emplace_front(ts, tid, sc, v);
Record& rec = history_.front();
rec.firstObservedOrder_.setIfNotPresent(tid, ts);
bool synch =
(mo == std::memory_order_release || mo == std::memory_order_acq_rel ||
mo == std::memory_order_seq_cst);
ThreadTimestamps& src =
synch ? threadInfo.acqRelOrder_ : threadInfo.relFenceOrder_;
if (preserve) {
// Continue the release sequence: carry over the synchronization order of
// the previous front record, which sits at index 1 after the emplace_front
// above (assigning from history_.front() here would be a self-assignment).
rec.acqRelOrder_ = history_[1].acqRelOrder_;
}
rec.acqRelOrder_.sync(src);
if (history_.size() > kMaxRecordBufferSize) {
history_.pop_back();
}
}
protected:
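// Walk from the newest record toward the oldest, returning the index of the
// oldest record this load may still observe. The walk stops early when a
// record must be visible: a seq_cst load may not skip past a seq_cst store,
// a load may not skip a store its thread has synchronized with, and it may
// not skip a record that a thread it has synchronized with already observed.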
size_t getOldestAllowed(
std::memory_order mo,
const ThreadTimestamps& acqRelOrder) {
assert(isInitialized());
for (size_t i = 0; i < history_.size() - 1; i++) {
Record& rec = history_[i];
if (rec.seqCst_ && (mo == std::memory_order_seq_cst)) {
return i;
}
if (acqRelOrder.atLeastAsRecentAs(
rec.storingThread_, rec.acqRelTimestamp_)) {
return i;
}
if (acqRelOrder.atLeastAsRecentAsAny(rec.firstObservedOrder_)) {
return i;
}
}
return history_.size() - 1;
}
// index 0 is newest, index size - 1 is oldest
std::deque<Record> history_;
private:
static constexpr size_t kMaxRecordBufferSize = 64;
bool isInitialized() const {
return !history_.empty();
}
};
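// A drop-in stand-in for std::atomic<T> that forwards every operation to the
// RecordBuffer registered for this object's address in the static bufs map,
// so loads can legally return buffered (stale) values.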
template <typename T>
struct BufferedAtomic {
BufferedAtomic() {
DeterministicSchedule::beforeSharedAccess();
assert(bufs.count(this) == 0);
bufs[this];
DeterministicSchedule::afterSharedAccess();
}
~BufferedAtomic() {
DeterministicSchedule::beforeSharedAccess();
assert(bufs.count(this) == 1);
bufs.erase(this);
DeterministicSchedule::afterSharedAccess();
}
BufferedAtomic(BufferedAtomic<T> const&) = delete;
BufferedAtomic<T>& operator=(BufferedAtomic<T> const&) = delete;
using Modification = std::function<T(const T&)>;
constexpr /* implicit */ BufferedAtomic(T v) noexcept {
DeterministicSchedule::beforeSharedAccess();
assert(bufs.count(this) == 0);
bufs[this];
doStore(v, std::memory_order_relaxed);
DeterministicSchedule::afterSharedAccess();
}
bool is_lock_free() const noexcept {
return false;
}
bool compare_exchange_strong(
T& v0,
T v1,
std::memory_order mo = std::memory_order_seq_cst) noexcept {
return compare_exchange_strong(
v0, v1, mo, folly::detail::default_failure_memory_order(mo));
}
bool compare_exchange_strong(
T& expected,
T desired,
std::memory_order success,
std::memory_order failure) noexcept {
return doCompareExchange(expected, desired, success, failure, false);
}
bool compare_exchange_weak(
T& v0,
T v1,
std::memory_order mo = std::memory_order_seq_cst) noexcept {
return compare_exchange_weak(
v0, v1, mo, ::folly::detail::default_failure_memory_order(mo));
}
bool compare_exchange_weak(
T& expected,
T desired,
std::memory_order success,
std::memory_order failure) noexcept {
return doCompareExchange(expected, desired, success, failure, true);
}
T exchange(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
Modification mod = [&](const T& /*prev*/) { return v; };
return doReadModifyWrite(mod, mo);
}
/* implicit */ operator T() const noexcept {
return doLoad(std::memory_order_seq_cst);
}
T load(std::memory_order mo = std::memory_order_seq_cst) const noexcept {
return doLoad(mo);
}
T operator=(T v) noexcept {
doStore(v, std::memory_order_seq_cst);
return v;
}
void store(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
doStore(v, mo);
}
T operator++() noexcept {
Modification mod = [](const T& prev) { return prev + 1; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) + 1;
}
T operator++(int /* postDummy */) noexcept {
Modification mod = [](const T& prev) { return prev + 1; };
return doReadModifyWrite(mod, std::memory_order_seq_cst);
}
T operator--() noexcept {
Modification mod = [](const T& prev) { return prev - 1; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) - 1;
}
T operator--(int /* postDummy */) noexcept {
Modification mod = [](const T& prev) { return prev - 1; };
return doReadModifyWrite(mod, std::memory_order_seq_cst);
}
T operator+=(T v) noexcept {
Modification mod = [&](const T& prev) { return prev + v; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) + v;
}
T fetch_add(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
Modification mod = [&](const T& prev) { return prev + v; };
return doReadModifyWrite(mod, mo);
}
T operator-=(T v) noexcept {
Modification mod = [&](const T& prev) { return prev - v; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) - v;
}
T fetch_sub(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
Modification mod = [&](const T& prev) { return prev - v; };
return doReadModifyWrite(mod, mo);
}
T operator&=(T v) noexcept {
Modification mod = [&](const T& prev) { return prev & v; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) & v;
}
T fetch_and(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
Modification mod = [&](const T& prev) { return prev & v; };
return doReadModifyWrite(mod, mo);
}
T operator|=(T v) noexcept {
Modification mod = [&](const T& prev) { return prev | v; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) | v;
}
T fetch_or(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
Modification mod = [&](const T& prev) { return prev | v; };
return doReadModifyWrite(mod, mo);
}
T operator^=(T v) noexcept {
Modification mod = [&](const T& prev) { return prev ^ v; };
return doReadModifyWrite(mod, std::memory_order_seq_cst) ^ v;
}
T fetch_xor(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
Modification mod = [&](const T& prev) { return prev ^ v; };
return doReadModifyWrite(mod, mo);
}
private:
T doLoad(std::memory_order mo, bool rmw = false) const {
// Static destructors that outlive DSched instance may load atomics
if (!DeterministicSchedule::isActive()) {
auto prev = prevUnguardedAccess.exchange(std::this_thread::get_id());
assert(prev == std::thread::id() || prev == std::this_thread::get_id());
return getBuf().loadDirect();
}
ThreadInfo& threadInfo = DeterministicSchedule::getCurrentThreadInfo();
T rv = getBuf().load(threadInfo, mo, rmw);
return rv;
}
void doStore(T val, std::memory_order mo, bool rmw = false) {
// Static destructors that outlive DSched instance may store to atomics
if (!DeterministicSchedule::isActive()) {
auto prev = prevUnguardedAccess.exchange(std::this_thread::get_id());
assert(prev == std::thread::id() || prev == std::this_thread::get_id());
getBuf().storeDirect(val);
return;
}
ThreadInfo& threadInfo = DeterministicSchedule::getCurrentThreadInfo();
getBuf().store(threadInfo, val, mo, rmw);
FOLLY_TEST_DSCHED_VLOG(
"\tstore mo: " << folly::detail::memory_order_to_str(mo)
<< " rmw: " << rmw);
}
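// RMW operations pass rmw=true so the load always returns the newest record;
// an atomic read-modify-write must read the last value written.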
T doReadModifyWrite(Modification mod, std::memory_order mo) {
T prev = doLoad(mo, true);
T next = mod(prev);
doStore(next, mo, true);
return prev;
}
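// CAS compares against the newest record (via loadDirect), since a
// successful CAS must act on the latest value; a failed CAS performs a load
// with the failure ordering instead.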
bool doCompareExchange(
T& expected,
T desired,
std::memory_order success,
std::memory_order failure,
bool spuriousFailures) {
T current = getBuf().loadDirect();
if (current == expected) {
if (!spuriousFailures || DeterministicSchedule::getRandNumber(2)) {
Modification mod = [&](const T& /*prev*/) { return desired; };
doReadModifyWrite(mod, success);
return true;
}
}
expected = doLoad(failure, true);
assert(expected == current);
return false;
}
RecordBuffer<T>& getBuf() const {
assert(bufs.count(this) == 1);
return bufs.at(this);
}
static std::unordered_map<const BufferedAtomic<T>*, RecordBuffer<T>> bufs;
mutable std::atomic<std::thread::id> prevUnguardedAccess{};
};
template <typename T>
std::unordered_map<const BufferedAtomic<T>*, RecordBuffer<T>>
BufferedAtomic<T>::bufs =
std::unordered_map<const BufferedAtomic<T>*, RecordBuffer<T>>();
} // namespace test
} // namespace folly
/*
* Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/test/BufferedAtomic.h>
#include <folly/SingletonThreadLocal.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <random>
using namespace folly::test;
using DSched = DeterministicSchedule;
template <typename T>
class RecordBufferTest : public RecordBuffer<T> {
public:
void assertOldestAllowed(
size_t expected,
std::memory_order mo,
const ThreadTimestamps& acqRelOrder) {
size_t oldestAllowed = RecordBuffer<T>::getOldestAllowed(mo, acqRelOrder);
ASSERT_EQ(expected, RecordBuffer<T>::history_[oldestAllowed].val_);
}
};
struct DSchedTimestampTest : public DSchedTimestamp {
explicit DSchedTimestampTest(size_t v) : DSchedTimestamp(v) {}
};
TEST(BufferedAtomic, basic) {
RecordBufferTest<int> buf;
DSchedThreadId tid(0);
ThreadInfo threadInfo(tid);
ASSERT_TRUE(
threadInfo.acqRelOrder_.atLeastAsRecentAs(tid, DSchedTimestampTest(1)));
ASSERT_FALSE(
threadInfo.acqRelOrder_.atLeastAsRecentAs(tid, DSchedTimestampTest(2)));
// value stored is equal to ts at time of store
for (int i = 2; i < 12; i++) {
buf.store(tid, threadInfo, i, std::memory_order_relaxed);
}
ASSERT_TRUE(
threadInfo.acqRelOrder_.atLeastAsRecentAs(tid, DSchedTimestampTest(11)));
ASSERT_FALSE(
threadInfo.acqRelOrder_.atLeastAsRecentAs(tid, DSchedTimestampTest(12)));
ThreadTimestamps tts;
buf.assertOldestAllowed(2, std::memory_order_relaxed, tts);
tts.setIfNotPresent(tid, DSchedTimestampTest(8));
buf.assertOldestAllowed(8, std::memory_order_relaxed, tts);
tts.clear();
tts.setIfNotPresent(tid, DSchedTimestampTest(10));
buf.assertOldestAllowed(10, std::memory_order_relaxed, tts);
tts.clear();
tts.setIfNotPresent(tid, DSchedTimestampTest(115));
buf.assertOldestAllowed(11, std::memory_order_relaxed, tts);
}
TEST(BufferedAtomic, seq_cst) {
RecordBufferTest<int> buf;
DSchedThreadId tid(0);
ThreadInfo threadInfo(tid);
buf.store(tid, threadInfo, 0, std::memory_order_relaxed);
buf.store(tid, threadInfo, 1, std::memory_order_seq_cst);
buf.store(tid, threadInfo, 2, std::memory_order_relaxed);
ThreadTimestamps tts;
buf.assertOldestAllowed(0, std::memory_order_relaxed, tts);
buf.assertOldestAllowed(0, std::memory_order_acquire, tts);
buf.assertOldestAllowed(1, std::memory_order_seq_cst, tts);
}
TEST(BufferedAtomic, transitive_sync) {
RecordBufferTest<int> buf;
DSchedThreadId tid0(0);
DSchedThreadId tid1(1);
DSchedThreadId tid2(2);
ThreadInfo threadInfo0(tid0);
ThreadInfo threadInfo1(tid1);
ThreadInfo threadInfo2(tid2);
buf.store(tid0, threadInfo0, 0, std::memory_order_relaxed);
buf.store(tid0, threadInfo0, 1, std::memory_order_seq_cst);
int val = buf.load(tid1, threadInfo1, std::memory_order_seq_cst);
ASSERT_EQ(1, val);
buf.assertOldestAllowed(
0, std::memory_order_relaxed, threadInfo2.acqRelOrder_);
threadInfo2.acqRelOrder_.sync(threadInfo1.acqRelOrder_);
buf.assertOldestAllowed(
1, std::memory_order_relaxed, threadInfo2.acqRelOrder_);
}
TEST(BufferedAtomic, acq_rel) {
RecordBufferTest<int> buf;
DSchedThreadId tid0(0);
DSchedThreadId tid1(1);
ThreadInfo threadInfo0(tid0);
ThreadInfo threadInfo1(tid1);
buf.store(tid0, threadInfo0, 0, std::memory_order_relaxed);
buf.store(tid0, threadInfo0, 1, std::memory_order_release);
while (buf.load(tid1, threadInfo1, std::memory_order_relaxed) == 0) {
}
ASSERT_TRUE(threadInfo1.acqFenceOrder_.atLeastAsRecentAs(
tid0, DSchedTimestampTest(3)));
ASSERT_FALSE(threadInfo1.acqFenceOrder_.atLeastAsRecentAs(
tid0, DSchedTimestampTest(4)));
ASSERT_FALSE(
threadInfo1.acqRelOrder_.atLeastAsRecentAs(tid0, DSchedTimestampTest(1)));
}
TEST(BufferedAtomic, atomic_buffer_thread_create_join_sync) {
for (int i = 0; i < 32; i++) {
DSched sched(DSched::uniform(i));
DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> x;
x.store(0, std::memory_order_relaxed);
x.store(1, std::memory_order_relaxed);
std::thread thread = DeterministicSchedule::thread([&]() {
ASSERT_EQ(1, x.load(std::memory_order_relaxed));
x.store(2, std::memory_order_relaxed);
});
DeterministicSchedule::join(thread);
thread = DeterministicSchedule::thread([&]() {
ASSERT_EQ(2, x.load(std::memory_order_relaxed));
x.store(3, std::memory_order_relaxed);
});
DeterministicSchedule::join(thread);
ASSERT_EQ(3, x.load(std::memory_order_relaxed));
}
}
TEST(BufferedAtomic, atomic_buffer_fence) {
for (int i = 0; i < 1024; i++) {
FOLLY_TEST_DSCHED_VLOG("seed: " << i);
DSched sched(DSched::uniform(i));
DeterministicMutex mutex;
mutex.lock();
DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> x;
DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> y;
DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> z;
x.store(0, std::memory_order_relaxed);
y.store(0, std::memory_order_relaxed);
z.store(0, std::memory_order_relaxed);
std::thread threadA = DeterministicSchedule::thread([&]() {
x.store(1, std::memory_order_relaxed);
DeterministicSchedule::atomic_thread_fence(std::memory_order_release);
y.store(1, std::memory_order_relaxed);
mutex.lock();
ASSERT_EQ(1, z.load(std::memory_order_relaxed));
mutex.unlock();
});
std::thread threadB = DeterministicSchedule::thread([&]() {
while (y.load(std::memory_order_relaxed) != 1) {
}
DeterministicSchedule::atomic_thread_fence(std::memory_order_acquire);
ASSERT_EQ(1, x.load(std::memory_order_relaxed));
});
DeterministicSchedule::join(threadB);
z.store(1, std::memory_order_relaxed);
mutex.unlock();
DeterministicSchedule::join(threadA);
}
}
TEST(BufferedAtomic, single_thread_unguarded_access) {
DSched* sched = new DSched(DSched::uniform(0));
DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> x(0);
delete sched;
x.store(1);
ASSERT_EQ(1, x.load());
}
TEST(BufferedAtomic, multiple_thread_unguarded_access) {
DSched* sched = new DSched(DSched::uniform(0));
DeterministicAtomicImpl<int, DeterministicSchedule, BufferedAtomic> x(0);
delete sched;
// simulate static construction/destruction or access to shared
// DeterministicAtomic in pthread_setspecific callbacks after
// DeterministicSchedule::beforeThreadAccess() has been run.
ASSERT_EQ(0, x.load());
auto t = std::thread(
[&]() { ASSERT_DEATH(x.store(1), "prev == std::thread::id()"); });
t.join();
}
@@ -32,7 +32,7 @@ namespace test {
FOLLY_TLS sem_t* DeterministicSchedule::tls_sem;
FOLLY_TLS DeterministicSchedule* DeterministicSchedule::tls_sched;
-FOLLY_TLS unsigned DeterministicSchedule::tls_threadId;
+FOLLY_TLS DSchedThreadId DeterministicSchedule::tls_threadId;
thread_local AuxAct DeterministicSchedule::tls_aux_act;
AuxChk DeterministicSchedule::aux_chk;
@@ -44,9 +44,83 @@ static std::unordered_map<
static std::mutex futexLock;
void ThreadTimestamps::sync(const ThreadTimestamps& src) {
if (src.timestamps_.size() > timestamps_.size()) {
timestamps_.resize(src.timestamps_.size());
}
for (size_t i = 0; i < src.timestamps_.size(); i++) {
timestamps_[i].sync(src.timestamps_[i]);
}
}
DSchedTimestamp ThreadTimestamps::advance(DSchedThreadId tid) {
assert(timestamps_.size() > tid.val);
return timestamps_[tid.val].advance();
}
void ThreadTimestamps::setIfNotPresent(DSchedThreadId tid, DSchedTimestamp ts) {
assert(ts.initialized());
if (tid.val >= timestamps_.size()) {
timestamps_.resize(tid.val + 1);
}
if (!timestamps_[tid.val].initialized()) {
timestamps_[tid.val].sync(ts);
}
}
void ThreadTimestamps::clear() {
timestamps_.clear();
}
bool ThreadTimestamps::atLeastAsRecentAs(DSchedThreadId tid, DSchedTimestamp ts)
const {
// It is not meaningful to learn whether any instance is at least
// as recent as timestamp 0.
assert(ts.initialized());
if (tid.val >= timestamps_.size()) {
return false;
}
return timestamps_[tid.val].atLeastAsRecentAs(ts);
}
bool ThreadTimestamps::atLeastAsRecentAsAny(const ThreadTimestamps& src) const {
size_t min = timestamps_.size() < src.timestamps_.size()
? timestamps_.size()
: src.timestamps_.size();
for (size_t i = 0; i < min; i++) {
if (src.timestamps_[i].initialized() &&
timestamps_[i].atLeastAsRecentAs(src.timestamps_[i])) {
return true;
}
}
return false;
}
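// ThreadSyncVar gives acquire/release vector-clock semantics to non-atomic
// synchronization points (semaphores, DeterministicMutex).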
void ThreadSyncVar::acquire() {
ThreadInfo& threadInfo = DeterministicSchedule::getCurrentThreadInfo();
DSchedThreadId tid = DeterministicSchedule::getThreadId();
threadInfo.acqRelOrder_.advance(tid);
threadInfo.acqRelOrder_.sync(order_);
}
void ThreadSyncVar::release() {
ThreadInfo& threadInfo = DeterministicSchedule::getCurrentThreadInfo();
DSchedThreadId tid = DeterministicSchedule::getThreadId();
threadInfo.acqRelOrder_.advance(tid);
order_.sync(threadInfo.acqRelOrder_);
}
void ThreadSyncVar::acq_rel() {
ThreadInfo& threadInfo = DeterministicSchedule::getCurrentThreadInfo();
DSchedThreadId tid = DeterministicSchedule::getThreadId();
threadInfo.acqRelOrder_.advance(tid);
threadInfo.acqRelOrder_.sync(order_);
order_.sync(threadInfo.acqRelOrder_);
}
DeterministicSchedule::DeterministicSchedule(
const std::function<size_t(size_t)>& scheduler)
-: scheduler_(scheduler), nextThreadId_(1), step_(0) {
+: scheduler_(scheduler), nextThreadId_(0), step_(0) {
assert(tls_sem == nullptr);
assert(tls_sched == nullptr);
assert(tls_aux_act == nullptr);
@@ -55,6 +129,8 @@ DeterministicSchedule::DeterministicSchedule(
sem_init(tls_sem, 0, 1);
sems_.push_back(tls_sem);
tls_threadId = nextThreadId_++;
threadInfoMap_.emplace_back(tls_threadId);
tls_sched = this;
}
@@ -162,16 +238,11 @@ int DeterministicSchedule::getcpu(
unsigned* cpu,
unsigned* node,
void* /* unused */) {
-if (!tls_threadId && tls_sched) {
-beforeSharedAccess();
-tls_threadId = tls_sched->nextThreadId_++;
-afterSharedAccess();
-}
if (cpu) {
-*cpu = tls_threadId;
+*cpu = tls_threadId.val;
}
if (node) {
-*node = tls_threadId;
+*node = tls_threadId.val;
}
return 0;
}
@@ -223,13 +294,19 @@ void DeterministicSchedule::afterThreadCreate(sem_t* sem) {
beforeSharedAccess();
if (active_.count(std::this_thread::get_id()) == 1) {
started = true;
tls_threadId = nextThreadId_++;
assert(tls_threadId.val == threadInfoMap_.size());
threadInfoMap_.emplace_back(tls_threadId);
}
afterSharedAccess();
}
atomic_thread_fence(std::memory_order_seq_cst);
}
void DeterministicSchedule::beforeThreadExit() {
assert(tls_sched == this);
atomic_thread_fence(std::memory_order_seq_cst);
beforeSharedAccess();
auto parent = joins_.find(std::this_thread::get_id());
if (parent != joins_.end()) {
@@ -264,6 +341,7 @@ void DeterministicSchedule::join(std::thread& child) {
}
afterSharedAccess();
}
atomic_thread_fence(std::memory_order_seq_cst);
FOLLY_TEST_DSCHED_VLOG("joined " << std::hex << child.get_id());
child.join();
}
@@ -279,8 +357,14 @@ void DeterministicSchedule::callAux(bool success) {
}
}
static std::unordered_map<sem_t*, std::unique_ptr<ThreadSyncVar>> semSyncVar;
void DeterministicSchedule::post(sem_t* sem) {
beforeSharedAccess();
if (semSyncVar.count(sem) == 0) {
semSyncVar[sem] = std::make_unique<ThreadSyncVar>();
}
semSyncVar[sem]->release();
sem_post(sem);
FOLLY_TEST_DSCHED_VLOG("sem_post(" << sem << ")");
afterSharedAccess();
@@ -288,10 +372,20 @@ void DeterministicSchedule::post(sem_t* sem) {
bool DeterministicSchedule::tryWait(sem_t* sem) {
beforeSharedAccess();
if (semSyncVar.count(sem) == 0) {
semSyncVar[sem] = std::make_unique<ThreadSyncVar>();
}
int rv = sem_trywait(sem);
int e = rv == 0 ? 0 : errno;
FOLLY_TEST_DSCHED_VLOG(
"sem_trywait(" << sem << ") = " << rv << " errno=" << e);
if (rv == 0) {
semSyncVar[sem]->acq_rel();
} else {
semSyncVar[sem]->acquire();
}
afterSharedAccess();
if (rv == 0) {
return true;
@@ -307,6 +401,46 @@ void DeterministicSchedule::wait(sem_t* sem) {
}
}
ThreadInfo& DeterministicSchedule::getCurrentThreadInfo() {
auto sched = tls_sched;
assert(sched);
assert(tls_threadId.val < sched->threadInfoMap_.size());
return sched->threadInfoMap_[tls_threadId.val];
}
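// Model a C++ fence by merging vector clocks: an acquire fence adopts the
// order accumulated by earlier relaxed loads (acqFenceOrder_), a release
// fence snapshots the thread's order for later relaxed stores
// (relFenceOrder_), and a seq_cst fence additionally joins the global
// seqCstFenceOrder_ shared by all seq_cst fences.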
void DeterministicSchedule::atomic_thread_fence(std::memory_order mo) {
if (!tls_sched) {
std::atomic_thread_fence(mo);
return;
}
beforeSharedAccess();
ThreadInfo& threadInfo = getCurrentThreadInfo();
switch (mo) {
case std::memory_order_relaxed:
assert(false);
break;
case std::memory_order_consume:
case std::memory_order_acquire:
threadInfo.acqRelOrder_.sync(threadInfo.acqFenceOrder_);
break;
case std::memory_order_release:
threadInfo.relFenceOrder_.sync(threadInfo.acqRelOrder_);
break;
case std::memory_order_acq_rel:
threadInfo.acqRelOrder_.sync(threadInfo.acqFenceOrder_);
threadInfo.relFenceOrder_.sync(threadInfo.acqRelOrder_);
break;
case std::memory_order_seq_cst:
threadInfo.acqRelOrder_.sync(threadInfo.acqFenceOrder_);
threadInfo.acqRelOrder_.sync(tls_sched->seqCstFenceOrder_);
tls_sched->seqCstFenceOrder_ = threadInfo.acqRelOrder_;
threadInfo.relFenceOrder_.sync(threadInfo.acqRelOrder_);
break;
}
FOLLY_TEST_DSCHED_VLOG("fence: " << folly::detail::memory_order_to_str(mo));
afterSharedAccess();
}
detail::FutexResult futexWaitImpl(
const detail::Futex<DeterministicAtomic>* futex,
uint32_t expected,
@@ -326,6 +460,7 @@ detail::FutexResult futexWaitImpl(
"futexWait(" << futex << ", " << std::hex << expected << ", .., "
<< std::hex << waitMask << ") beginning..");
futexLock.lock();
// load_direct avoids deadlock on inner call to beforeSharedAccess
if (futex->load_direct() == expected) {
auto& queue = futexQueues[futex];
queue.emplace_back(waitMask, &awoken);
...
@@ -51,6 +51,77 @@ namespace test {
using AuxAct = std::function<void(bool)>;
using AuxChk = std::function<void(uint64_t)>;
struct DSchedThreadId {
unsigned val;
explicit constexpr DSchedThreadId() : val(0) {}
explicit constexpr DSchedThreadId(unsigned v) : val(v) {}
unsigned operator=(unsigned v) {
return val = v;
}
};
class DSchedTimestamp {
public:
constexpr explicit DSchedTimestamp() : val_(0) {}
DSchedTimestamp advance() {
return DSchedTimestamp(++val_);
}
bool atLeastAsRecentAs(const DSchedTimestamp& other) const {
return val_ >= other.val_;
}
void sync(const DSchedTimestamp& other) {
val_ = std::max(val_, other.val_);
}
bool initialized() const {
return val_ > 0;
}
static constexpr DSchedTimestamp initial() {
return DSchedTimestamp(1);
}
protected:
constexpr explicit DSchedTimestamp(size_t v) : val_(v) {}
private:
size_t val_;
};
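// A vector clock: one DSchedTimestamp slot per DSchedThreadId, grown on
// demand. Two clocks are merged element-wise by sync().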
class ThreadTimestamps {
public:
void sync(const ThreadTimestamps& src);
DSchedTimestamp advance(DSchedThreadId tid);
void setIfNotPresent(DSchedThreadId tid, DSchedTimestamp ts);
void clear();
bool atLeastAsRecentAs(DSchedThreadId tid, DSchedTimestamp ts) const;
bool atLeastAsRecentAsAny(const ThreadTimestamps& src) const;
private:
std::vector<DSchedTimestamp> timestamps_;
};
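// Per-thread clocks. acqRelOrder_ is everything the thread has synchronized
// with; acqFenceOrder_ buffers order observed by relaxed loads until an
// acquire fence adopts it; relFenceOrder_ is the snapshot taken by a release
// fence that later relaxed stores publish.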
struct ThreadInfo {
ThreadInfo() = delete;
explicit ThreadInfo(DSchedThreadId tid) {
acqRelOrder_.setIfNotPresent(tid, DSchedTimestamp::initial());
}
ThreadTimestamps acqRelOrder_;
ThreadTimestamps acqFenceOrder_;
ThreadTimestamps relFenceOrder_;
};
class ThreadSyncVar {
public:
ThreadSyncVar() = default;
void acquire();
void release();
void acq_rel();
private:
ThreadTimestamps order_;
};
/**
* DeterministicSchedule coordinates the inter-thread communication of a
* set of threads under test, so that despite concurrency the execution is
@@ -122,6 +193,7 @@ class DeterministicSchedule : boost::noncopyable {
template <typename Func, typename... Args>
static inline std::thread thread(Func&& func, Args&&... args) {
// TODO: maybe future versions of gcc will allow forwarding to thread
atomic_thread_fence(std::memory_order_seq_cst);
auto sched = tls_sched;
auto sem = sched ? sched->beforeThreadCreate() : nullptr;
auto child = std::thread(
@@ -191,10 +263,23 @@ class DeterministicSchedule : boost::noncopyable {
/** Add sem back into sems_ */
static void reschedule(sem_t* sem);
static bool isActive() {
return tls_sched != nullptr;
}
static DSchedThreadId getThreadId() {
assert(tls_sched != nullptr);
return tls_threadId;
}
static ThreadInfo& getCurrentThreadInfo();
static void atomic_thread_fence(std::memory_order mo);
private:
static FOLLY_TLS sem_t* tls_sem;
static FOLLY_TLS DeterministicSchedule* tls_sched;
-static FOLLY_TLS unsigned tls_threadId;
+static FOLLY_TLS DSchedThreadId tls_threadId;
static thread_local AuxAct tls_aux_act;
static AuxChk aux_chk;
@@ -202,6 +287,10 @@
std::vector<sem_t*> sems_;
std::unordered_set<std::thread::id> active_;
std::unordered_map<std::thread::id, sem_t*> joins_;
std::vector<ThreadInfo> threadInfoMap_;
ThreadTimestamps seqCstFenceOrder_;
unsigned nextThreadId_;
/* step_ keeps count of shared accesses that correspond to user
* synchronization steps (atomic accesses for now).
@@ -496,6 +585,7 @@ void atomic_notify_all(const DeterministicAtomic<Integer>*) {}
struct DeterministicMutex {
std::mutex m;
std::queue<sem_t*> waiters_;
ThreadSyncVar syncVar_;
DeterministicMutex() = default;
~DeterministicMutex() = default;
@@ -514,12 +604,18 @@ struct DeterministicMutex {
// Wait to be scheduled by unlock
DeterministicSchedule::beforeSharedAccess();
}
if (DeterministicSchedule::isActive()) {
syncVar_.acquire();
}
DeterministicSchedule::afterSharedAccess();
}
bool try_lock() {
DeterministicSchedule::beforeSharedAccess();
bool rv = m.try_lock();
if (rv && DeterministicSchedule::isActive()) {
syncVar_.acquire();
}
FOLLY_TEST_DSCHED_VLOG(this << ".try_lock() -> " << rv);
DeterministicSchedule::afterSharedAccess();
return rv;
@@ -527,8 +623,11 @@ struct DeterministicMutex {
void unlock() {
FOLLY_TEST_DSCHED_VLOG(this << ".unlock()");
-m.unlock();
DeterministicSchedule::beforeSharedAccess();
+m.unlock();
if (DeterministicSchedule::isActive()) {
syncVar_.release();
}
if (!waiters_.empty()) {
sem_t* sem = waiters_.front();
DeterministicSchedule::reschedule(sem);
...
@@ -367,6 +367,53 @@ TEST(DeterministicSchedule, global_invariants) {
}
}
struct DSchedTimestampTest : public DSchedTimestamp {
explicit DSchedTimestampTest(size_t v) : DSchedTimestamp(v) {}
};
TEST(DeterministicSchedule, thread_timestamps) {
ThreadTimestamps tss;
DSchedThreadId tid0(0);
DSchedThreadId tid1(1);
ASSERT_FALSE(tss.atLeastAsRecentAs(tid0, DSchedTimestampTest(1)));
tss.setIfNotPresent(tid0, DSchedTimestampTest(1));
ASSERT_TRUE(tss.atLeastAsRecentAs(tid0, DSchedTimestampTest(1)));
ASSERT_FALSE(tss.atLeastAsRecentAs(tid0, DSchedTimestampTest(2)));
ASSERT_FALSE(tss.atLeastAsRecentAs(tid1, DSchedTimestampTest(1)));
tss.setIfNotPresent(tid0, DSchedTimestampTest(2));
ASSERT_FALSE(tss.atLeastAsRecentAs(tid0, DSchedTimestampTest(2)));
auto ts = tss.advance(tid0);
ASSERT_TRUE(ts.atLeastAsRecentAs(DSchedTimestampTest(2)));
ASSERT_FALSE(ts.atLeastAsRecentAs(DSchedTimestampTest(3)));
ASSERT_TRUE(tss.atLeastAsRecentAs(tid0, DSchedTimestampTest(2)));
ASSERT_FALSE(tss.atLeastAsRecentAs(tid1, DSchedTimestampTest(1)));
ThreadTimestamps tss2;
tss2.setIfNotPresent(tid1, DSchedTimestampTest(3));
ASSERT_FALSE(tss2.atLeastAsRecentAs(tid1, DSchedTimestampTest(4)));
ASSERT_TRUE(tss2.atLeastAsRecentAs(tid1, DSchedTimestampTest(3)));
ASSERT_FALSE(tss.atLeastAsRecentAsAny(tss2));
tss.sync(tss2);
ASSERT_TRUE(tss.atLeastAsRecentAs(tid1, DSchedTimestampTest(3)));
ASSERT_FALSE(tss.atLeastAsRecentAs(tid1, DSchedTimestampTest(4)));
ThreadTimestamps tss3;
tss3.setIfNotPresent(tid1, DSchedTimestampTest(4));
ASSERT_TRUE(tss3.atLeastAsRecentAsAny(tss2));
ASSERT_FALSE(tss2.atLeastAsRecentAsAny(tss3));
ThreadTimestamps tss4, tss5;
tss4.setIfNotPresent(DSchedThreadId(10), DSchedTimestampTest(5));
tss5.setIfNotPresent(DSchedThreadId(11), DSchedTimestampTest(5));
ASSERT_FALSE(tss4.atLeastAsRecentAsAny(tss5));
ASSERT_FALSE(tss5.atLeastAsRecentAsAny(tss4));
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
gflags::ParseCommandLineFlags(&argc, &argv, true);
...