Commit 69c7f1b7 authored by David Goldblatt, committed by Facebook Github Bot

Add AtomicReadMostlyMainPtr

Summary:
This is a ReadMostlyMainPtr variant that allows racy accesses. By making the
write-side slower, the read side can avoid any contended shared accesses or
RMWs.

Reviewed By: djwatson

Differential Revision: D13413105

fbshipit-source-id: f03c7ad58be72b63549b145ed6f41c51563831d1
parent cc381bd4
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/AtomicReadMostlyMainPtr.h>
#include <folly/executors/InlineExecutor.h>
#include <glog/logging.h>
namespace folly {
namespace detail {
namespace {
struct FailingExecutor : folly::Executor {
// We shouldn't be invoking any callbacks.
void add(Func func) override {
LOG(DFATAL)
<< "Added an RCU callback to the AtomicReadMostlyMainPtr executor.";
InlineExecutor::instance().add(std::move(func));
}
};
} // namespace
// *All* modifications of *all* AtomicReadMostlyMainPtrs use the same mutex and
// domain. The first of these just shrinks the size of the individual objects a
// little, but the second is necessary for correctness; all rcu_domains need
// their own tag (as an rcu_domain API constraint), but we want to support
// arbitrarily many AtomicReadMostlyMainPtrs.
Indestructible<std::mutex> atomicReadMostlyMu;
Indestructible<folly::rcu_domain<detail::AtomicReadMostlyTag>>
atomicReadMostlyDomain(new FailingExecutor);
} // namespace detail
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <folly/Indestructible.h>
#include <folly/experimental/ReadMostlySharedPtr.h>
#include <folly/synchronization/Rcu.h>
namespace folly {
namespace detail {
struct AtomicReadMostlyTag;
extern Indestructible<std::mutex> atomicReadMostlyMu;
extern Indestructible<rcu_domain<AtomicReadMostlyTag>> atomicReadMostlyDomain;
} // namespace detail
/*
* What atomic_shared_ptr is to shared_ptr, AtomicReadMostlyMainPtr is to
* ReadMostlyMainPtr; it allows racy conflicting accesses to one. This gives
* true shared_ptr-like semantics, including reclamation at the point where the
* last pointer to an object goes away.
*
* It's about the same speed (slightly slower) as ReadMostlyMainPtr. The most
* significant feature they share is avoiding reader-reader contention and
* atomic RMWs in the absence of writes.
*/
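// A minimal usage sketch (illustrative only; `Config` is a hypothetical
// user-defined type, not part of folly):
//
//   folly::AtomicReadMostlyMainPtr<Config> currentConfig{
//       std::make_shared<Config>()};
//
//   // Read side (hot path): no contended shared accesses or RMWs.
//   folly::ReadMostlySharedPtr<Config> snapshot = currentConfig.load();
//
//   // Write side (slow path): takes a global mutex and waits for an RCU
//   // grace period before the previous object can be reclaimed.
//   currentConfig.store(std::make_shared<Config>());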
template <typename T>
class AtomicReadMostlyMainPtr {
public:
AtomicReadMostlyMainPtr() : curMainPtrIndex_(0) {}
explicit AtomicReadMostlyMainPtr(std::shared_ptr<T> ptr)
: curMainPtrIndex_(0) {
mainPtrs_[0] = ReadMostlyMainPtr<T>{std::move(ptr)};
}
void operator=(std::shared_ptr<T> desired) {
store(std::move(desired));
}
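// Writers serialize on a global mutex and block on RCU synchronization, so
// this type is never lock-free.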
bool is_lock_free() const {
return false;
}
ReadMostlySharedPtr<T> load(
std::memory_order order = std::memory_order_seq_cst) const {
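// Enter an RCU read-side critical section; this keeps the slot selected by
// curMainPtrIndex_ from being reset() by a concurrent writer while we copy a
// ReadMostlySharedPtr out of it.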
rcu_token token = detail::atomicReadMostlyDomain->lock_shared();
// Synchronization point with the store in exchangeLocked().
auto index = curMainPtrIndex_.load(order);
auto result = mainPtrs_[index].getShared();
detail::atomicReadMostlyDomain->unlock_shared(std::move(token));
return result;
}
void store(
std::shared_ptr<T> ptr,
std::memory_order order = std::memory_order_seq_cst) {
std::shared_ptr<T> old;
{
std::lock_guard<std::mutex> lg(*detail::atomicReadMostlyMu);
old = exchangeLocked(std::move(ptr), order);
}
// If ~T() runs (triggered by the shared_ptr refcount decrement), it's here,
// after dropping the lock. This avoids a possible (albeit esoteric)
// deadlock if ~T() modifies the AtomicReadMostlyMainPtr that used to point
// to it.
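// For example (hypothetical): if ~T() itself called store() on this same
// object while atomicReadMostlyMu were still held, it would self-deadlock on
// the non-recursive global mutex.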
}
std::shared_ptr<T> exchange(
std::shared_ptr<T> ptr,
std::memory_order order = std::memory_order_seq_cst) {
std::lock_guard<std::mutex> lg(*detail::atomicReadMostlyMu);
return exchangeLocked(std::move(ptr), order);
}
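// Note: unlike std::atomic's weak CAS, this implementation never fails
// spuriously; it simply forwards to compare_exchange_strong.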
bool compare_exchange_weak(
std::shared_ptr<T>& expected,
const std::shared_ptr<T>& desired,
std::memory_order successOrder = std::memory_order_seq_cst,
std::memory_order failureOrder = std::memory_order_seq_cst) {
return compare_exchange_strong(
expected, desired, successOrder, failureOrder);
}
bool compare_exchange_strong(
std::shared_ptr<T>& expected,
const std::shared_ptr<T>& desired,
std::memory_order successOrder = std::memory_order_seq_cst,
std::memory_order failureOrder = std::memory_order_seq_cst) {
// See the note at the end of store; we need to defer any destruction we
// might trigger until after the lock is released.
// This is not actually needed down the success path (the reference passed
// in as expected is another pointer to the same object, so we won't
// decrement the refcount to 0), but "never decrement a refcount while
// holding a lock" is an easier rule to keep in our heads, and costs us
// nothing.
std::shared_ptr<T> prev;
std::shared_ptr<T> expectedDup;
{
std::lock_guard<std::mutex> lg(*detail::atomicReadMostlyMu);
auto index = curMainPtrIndex_.load(failureOrder);
ReadMostlyMainPtr<T>& oldMain = mainPtrs_[index];
if (oldMain.get() != expected.get()) {
expectedDup = std::move(expected);
expected = oldMain.getStdShared();
return false;
}
prev = exchangeLocked(desired, successOrder);
}
return true;
}
private:
// Must hold the global mutex.
std::shared_ptr<T> exchangeLocked(
std::shared_ptr<T> ptr,
std::memory_order order = std::memory_order_seq_cst) {
// This is where the tricky bits happen; all modifications of the mainPtrs_
// and index happen here. We maintain the invariant that, on entry to this
// method, all read-side critical sections in progress are using the version
// indicated by curMainPtrIndex_, and the other version is nulled out.
// (Readers can still hold a ReadMostlySharedPtr to the thing the old
// version used to point to; they just can't access the old version to get
// that handle any more).
auto index = curMainPtrIndex_.load(std::memory_order_relaxed);
ReadMostlyMainPtr<T>& oldMain = mainPtrs_[index];
ReadMostlyMainPtr<T>& newMain = mainPtrs_[1 - index];
// From the entry invariant, there are no readers accessing newMain right now.
newMain.reset(std::move(ptr));
// If order is acq_rel, it should degrade to just release, since this is a
// store rather than an RMW. (Of course, this is such a slow method that we
// don't really care, but precision is its own reward. If TSAN one day
// understands asymmetric barriers, this will also improve its error
// detection here). We get our "acquire-y-ness" from the mutex.
auto realOrder =
(order == std::memory_order_acq_rel ? std::memory_order_release
: order);
// After this, read-side critical sections can access both versions, but
// new ones will use newMain.
// This is also the synchronization point with loads.
curMainPtrIndex_.store(1 - index, realOrder);
// Wait for all read-side critical sections using oldMain to finish.
detail::atomicReadMostlyDomain->synchronize();
// We've reestablished the first half of the invariant (all readers are
// using newMain), now let's establish the other one (that the other pointer
// is null).
auto result = oldMain.getStdShared();
oldMain.reset();
return result;
}
// The right way to think of this implementation is as an
// std::atomic<ReadMostlyMainPtr<T>*>, protected by RCU. There are only two
// tricky parts:
// 1. We give ourselves our own RCU domain, and synchronize on modification,
// so that we don't do any batching of deallocations. This gives
// shared_ptr-like eager reclamation semantics.
// 2. Instead of putting the ReadMostlyMainPtrs on the heap, we keep them as
// part of the same object to improve locality.
// Really, just a 0/1 index. This is also the synchronization point for memory
// orders.
std::atomic<uint8_t> curMainPtrIndex_;
// Both the ReadMostlyMainPtrs themselves and the domain have nontrivial
// indirections even on the read path, and asymmetric barriers on the write
// path. Some of these could be fused as a later optimization, at the cost of
// having to put more tricky threading primitives in this class that are
// currently abstracted out by those.
ReadMostlyMainPtr<T> mainPtrs_[2];
};
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/AtomicReadMostlyMainPtr.h>
#include <folly/Benchmark.h>
#include <folly/portability/GFlags.h>
using folly::AtomicReadMostlyMainPtr;
using folly::ReadMostlySharedPtr;
BENCHMARK(SingleThreadedLoads, n) {
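// Measures the read path: each load() enters an RCU read-side critical
// section and copies out a ReadMostlySharedPtr, with no contended shared
// accesses or RMWs.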
auto sharedInt = std::make_shared<int>(123);
AtomicReadMostlyMainPtr<int> data(sharedInt);
for (unsigned i = 0; i < n; ++i) {
ReadMostlySharedPtr<int> ptr = data.load();
}
}
BENCHMARK(SingleThreadedStores, n) {
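// Measures the write path: each store() takes the global mutex and waits for
// an RCU grace period, which is why it is orders of magnitude slower than a
// load (see the output below).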
auto sharedInt = std::make_shared<int>(123);
AtomicReadMostlyMainPtr<int> data(sharedInt);
for (unsigned i = 0; i < n; ++i) {
data.store(sharedInt);
ReadMostlySharedPtr<int> ptr = data.load();
}
}
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
folly::runBenchmarks();
return 0;
}
/*
* Output:
============================================================================
folly/experimental/test/AtomicReadMostlyMainPtrBenchmark.cpp   relative  time/iter  iters/s
============================================================================
SingleThreadedLoads 14.36ns 69.65M
SingleThreadedStores 5.88us 170.15K
============================================================================
*/
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/AtomicReadMostlyMainPtr.h>
#include <array>
#include <atomic>
#include <functional>
#include <memory>
#include <thread>
#include <vector>
#include <folly/Portability.h>
#include <folly/portability/GTest.h>
using folly::AtomicReadMostlyMainPtr;
using folly::ReadMostlySharedPtr;
class AtomicReadMostlyMainPtrSimpleTest : public testing::Test {
protected:
AtomicReadMostlyMainPtrSimpleTest()
: sharedPtr123(std::make_shared<int>(123)),
sharedPtr123Dup(sharedPtr123),
sharedPtr456(std::make_shared<int>(456)),
sharedPtr456Dup(sharedPtr456) {}
AtomicReadMostlyMainPtr<int> data;
ReadMostlySharedPtr<int> readMostlySharedPtr;
std::shared_ptr<int> nullSharedPtr;
std::shared_ptr<int> sharedPtr123;
std::shared_ptr<int> sharedPtr123Dup;
std::shared_ptr<int> sharedPtr456;
std::shared_ptr<int> sharedPtr456Dup;
enum CasType {
kWeakCas,
kStrongCas,
};
template <typename T0, typename T1, typename T2>
bool cas(T0&& atom, T1&& expected, T2&& desired, CasType casType) {
if (casType == kWeakCas) {
return std::forward<T0>(atom).compare_exchange_weak(
std::forward<T1>(expected), std::forward<T2>(desired));
} else {
return std::forward<T0>(atom).compare_exchange_strong(
std::forward<T1>(expected), std::forward<T2>(desired));
}
}
void casSuccessTest(CasType casType) {
// Nominally, the weak compare exchange allows spurious failures, but we
// peek inside the implementation a little to realize that it doesn't.
// Null -> non-null
EXPECT_TRUE(cas(data, nullSharedPtr, sharedPtr123, casType));
EXPECT_EQ(sharedPtr123.get(), data.load().get());
EXPECT_EQ(nullptr, nullSharedPtr);
EXPECT_EQ(sharedPtr123, sharedPtr123Dup);
// Non-null -> non-null
EXPECT_TRUE(cas(data, sharedPtr123, sharedPtr456, casType));
EXPECT_EQ(sharedPtr456.get(), data.load().get());
EXPECT_EQ(sharedPtr123, sharedPtr123Dup);
EXPECT_EQ(sharedPtr456, sharedPtr456Dup);
// Non-null -> null
EXPECT_TRUE(cas(data, sharedPtr456, nullSharedPtr, casType));
EXPECT_EQ(nullptr, data.load().get());
EXPECT_EQ(sharedPtr123, sharedPtr123Dup);
EXPECT_EQ(nullSharedPtr, nullptr);
}
void casFailureTest(CasType casType) {
std::shared_ptr<int> expected;
// Null -> non-null
expected = std::make_shared<int>(789);
EXPECT_FALSE(cas(data, expected, sharedPtr123, casType));
EXPECT_EQ(nullptr, data.load().get());
EXPECT_EQ(nullptr, expected);
EXPECT_EQ(sharedPtr123, sharedPtr123Dup);
// Non-null -> non-null
expected = std::make_shared<int>(789);
data.store(sharedPtr123);
EXPECT_FALSE(cas(data, expected, sharedPtr456, casType));
EXPECT_EQ(sharedPtr123.get(), data.load().get());
EXPECT_EQ(sharedPtr123, expected);
EXPECT_EQ(sharedPtr456, sharedPtr456Dup);
// Non-null -> null
expected = std::make_shared<int>(789);
data.store(sharedPtr123);
EXPECT_FALSE(cas(data, expected, nullSharedPtr, casType));
EXPECT_EQ(sharedPtr123.get(), data.load().get());
EXPECT_EQ(sharedPtr123, expected);
EXPECT_EQ(nullptr, nullSharedPtr);
}
};
TEST_F(AtomicReadMostlyMainPtrSimpleTest, StartsNull) {
EXPECT_EQ(data.load(), nullptr);
}
TEST_F(AtomicReadMostlyMainPtrSimpleTest, Store) {
data.store(sharedPtr123);
EXPECT_EQ(sharedPtr123.get(), data.load().get());
}
TEST_F(AtomicReadMostlyMainPtrSimpleTest, Exchange) {
data.store(sharedPtr123);
auto prev = data.exchange(sharedPtr456);
EXPECT_EQ(sharedPtr123, prev);
EXPECT_EQ(sharedPtr456.get(), data.load().get());
}
TEST_F(AtomicReadMostlyMainPtrSimpleTest, CompareExchangeWeakSuccess) {
casSuccessTest(kWeakCas);
}
TEST_F(AtomicReadMostlyMainPtrSimpleTest, CompareExchangeStrongSuccess) {
casSuccessTest(kStrongCas);
}
TEST_F(AtomicReadMostlyMainPtrSimpleTest, CompareExchangeWeakFailure) {
casFailureTest(kWeakCas);
}
TEST_F(AtomicReadMostlyMainPtrSimpleTest, CompareExchangeStrongFailure) {
casFailureTest(kStrongCas);
}
class AtomicReadMostlyMainPtrCounterTest : public testing::Test {
protected:
struct InstanceCounter {
explicit InstanceCounter(int* counter_) : counter(counter_) {
++*counter;
}
~InstanceCounter() {
--*counter;
}
int* counter;
};
AtomicReadMostlyMainPtr<InstanceCounter> data;
int counter1 = 0;
int counter2 = 0;
};
TEST_F(AtomicReadMostlyMainPtrCounterTest, DestroysOldValuesSimple) {
data.store(std::make_shared<InstanceCounter>(&counter1));
EXPECT_EQ(1, counter1);
data.store(std::make_shared<InstanceCounter>(&counter2));
EXPECT_EQ(0, counter1);
EXPECT_EQ(1, counter2);
}
TEST_F(AtomicReadMostlyMainPtrCounterTest, DestroysOldValuesWithReuse) {
for (int i = 0; i < 100; ++i) {
data.store(std::make_shared<InstanceCounter>(&counter1));
EXPECT_EQ(1, counter1);
EXPECT_EQ(0, counter2);
data.store(std::make_shared<InstanceCounter>(&counter2));
EXPECT_EQ(0, counter1);
EXPECT_EQ(1, counter2);
}
}
TEST(AtomicReadMostlyMainPtrTest, HandlesDestructionModifications) {
struct RunOnDestruction {
explicit RunOnDestruction(std::function<void()> func) : func_(func) {}
~RunOnDestruction() {
func_();
}
std::function<void()> func_;
};
AtomicReadMostlyMainPtr<RunOnDestruction> data;
// All the ways to trigger the AtomicReadMostlyMainPtr's "destroy the
// pointed-to object" paths.
std::vector<std::function<void()>> outerAccesses = {
[&]() { data.store(nullptr); },
[&]() { data.exchange(nullptr); },
[&]() {
auto old = data.load().getStdShared();
EXPECT_TRUE(data.compare_exchange_strong(old, nullptr));
},
// We don't test CAS failure; unlike other accesses, it doesn't destroy
// the object pointed to by the AtomicReadMostlyMainPtr, it destroys an
// object pointed to by an external shared_ptr. (We'll test that later
// on).
};
// All the ways the destructor might access the AtomicReadMostlyMainPtr.
std::vector<std::function<void()>> innerAccesses = {
[&]() { data.load(); },
[&]() { data.store(std::make_shared<RunOnDestruction>([] {})); },
[&]() { data.exchange(std::make_shared<RunOnDestruction>([] {})); },
[&]() {
auto expected = data.load().getStdShared();
EXPECT_TRUE(data.compare_exchange_strong(
expected, std::make_shared<RunOnDestruction>([] {})));
},
[&]() {
auto notExpected = std::make_shared<RunOnDestruction>([] {});
EXPECT_FALSE(data.compare_exchange_strong(
notExpected, std::make_shared<RunOnDestruction>([] {})));
},
};
for (auto& outerAccess : outerAccesses) {
for (auto& innerAccess : innerAccesses) {
data.store(std::make_shared<RunOnDestruction>(innerAccess));
outerAccess();
}
}
// The case left out above: on CAS failure, the destroyed object is the one
// held by the caller's `expected` pointer, not the one owned by the
// AtomicReadMostlyMainPtr itself.
for (auto& innerAccess : innerAccesses) {
data.store(std::make_shared<RunOnDestruction>([] {}));
auto expected = std::make_shared<RunOnDestruction>(innerAccess);
auto desired = std::make_shared<RunOnDestruction>([] {});
EXPECT_FALSE(data.compare_exchange_strong(expected, desired));
}
}
TEST(AtomicReadMostlyMainPtrTest, TakesMemoryOrders) {
// We don't really try to test the memory-order-y-ness of these; just that the
// code compiles and runs without anything falling over.
AtomicReadMostlyMainPtr<int> data;
std::shared_ptr<int> ptr;
data.load();
data.load(std::memory_order_relaxed);
data.load(std::memory_order_acquire);
data.load(std::memory_order_seq_cst);
data.store(ptr);
data.store(ptr, std::memory_order_relaxed);
data.store(ptr, std::memory_order_release);
data.store(ptr, std::memory_order_seq_cst);
data.exchange(ptr);
data.exchange(ptr, std::memory_order_relaxed);
data.exchange(ptr, std::memory_order_acquire);
data.exchange(ptr, std::memory_order_release);
data.exchange(ptr, std::memory_order_acq_rel);
data.exchange(ptr, std::memory_order_seq_cst);
auto successOrders = {
std::memory_order_relaxed,
std::memory_order_acquire,
std::memory_order_release,
std::memory_order_acq_rel,
std::memory_order_seq_cst,
};
auto failureOrders = {
std::memory_order_relaxed,
std::memory_order_acquire,
std::memory_order_seq_cst,
};
data.compare_exchange_weak(ptr, ptr);
data.compare_exchange_strong(ptr, ptr);
for (auto successOrder : successOrders) {
data.compare_exchange_weak(ptr, ptr, successOrder);
data.compare_exchange_strong(ptr, ptr, successOrder);
for (auto failureOrder : failureOrders) {
data.compare_exchange_weak(ptr, ptr, successOrder, failureOrder);
data.compare_exchange_strong(ptr, ptr, successOrder, failureOrder);
}
}
}
TEST(AtomicReadMostlyMainPtrTest, LongLivedReadsDoNotBlockWrites) {
std::vector<ReadMostlySharedPtr<int>> pointers;
AtomicReadMostlyMainPtr<int> mainPtr(std::make_shared<int>(123));
for (int i = 0; i < 10 * 1000; ++i) {
// Try several ways of triggering refcount modifications.
auto ptr = mainPtr.load();
pointers.push_back(ptr);
pointers.push_back(ptr);
pointers.emplace_back(std::move(ptr));
}
mainPtr.store(std::make_shared<int>(456));
EXPECT_EQ(*mainPtr.load(), 456);
}
TEST(AtomicReadMostlyMainPtrStressTest, ReadOnly) {
const int kReaders = 8;
const int kReads = 1000 * 1000;
const int kValue = 123;
AtomicReadMostlyMainPtr<int> data(std::make_shared<int>(kValue));
std::vector<std::thread> readers(kReaders);
for (auto& thread : readers) {
thread = std::thread([&] {
for (int i = 0; i < kReads; ++i) {
auto ptr = data.load();
ASSERT_EQ(*ptr, kValue);
}
});
}
for (auto& thread : readers) {
thread.join();
}
}
TEST(AtomicReadMostlyMainPtrStressTest, ReadWrite) {
const static int kReaders = 4;
const static int kWriters = 4;
// This gives a test that runs for about 4 seconds on my machine; that's about
// the longest we should inflict on test runners.
const int kNumWrites = (folly::kIsDebug ? 10 * 1000 : 30 * 1000);
struct WriterAndData {
WriterAndData(int writer_, int data_) : writer(writer_), data(data_) {}
int writer;
int data;
};
struct ConsistencyTracker {
ConsistencyTracker() {
for (int& i : maxSeenValue) {
i = 0;
}
}
void update(const WriterAndData& pair) {
EXPECT_LE(maxSeenValue[pair.writer], pair.data);
maxSeenValue[pair.writer] = pair.data;
}
std::array<int, kWriters> maxSeenValue;
};
AtomicReadMostlyMainPtr<WriterAndData> writerAndData(
std::make_shared<WriterAndData>(0, 0));
std::atomic<int> numStoppedWriters(0);
std::vector<std::thread> readers(kReaders);
std::vector<std::thread> writers(kWriters);
for (auto& thread : readers) {
thread = std::thread([&] {
ConsistencyTracker consistencyTracker;
while (numStoppedWriters.load() != kWriters) {
auto ptr = writerAndData.load();
consistencyTracker.update(*ptr);
}
});
}
std::atomic<int> casFailures(0);
for (int threadId = 0; threadId < kWriters; ++threadId) {
writers[threadId] = std::thread([&, threadId] {
ConsistencyTracker consistencyTracker;
for (int j = 0; j < kNumWrites; j++) {
auto newValue = std::make_shared<WriterAndData>(threadId, j);
std::shared_ptr<WriterAndData> oldValue;
switch (j % 3) {
case 0:
writerAndData.store(newValue);
break;
case 1:
oldValue = writerAndData.exchange(newValue);
consistencyTracker.update(*oldValue);
break;
case 2:
oldValue = writerAndData.load().getStdShared();
consistencyTracker.update(*oldValue);
while (!writerAndData.compare_exchange_strong(oldValue, newValue)) {
consistencyTracker.update(*oldValue);
casFailures.fetch_add(1);
}
break;
}
}
numStoppedWriters.fetch_add(1);
});
}
for (auto& thread : readers) {
thread.join();
}
for (auto& thread : writers) {
thread.join();
}
// This is a test of our test setup itself; we want to make sure it's
// sufficiently racy that we actually see concurrent conflicting operations.
// In runs on my machine, we see values in the thousands, so hopefully this
// is highly unflaky.
EXPECT_GE(casFailures.load(), 10);
}