Commit f885995c authored by Dave Watson, committed by Facebook Github Bot

RCU

Summary: This adds an RCU implementation, matching http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0461r1.pdf as closely as practical. This implementation does not require thread registration or quiescence.
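
A minimal usage sketch of the API this adds (names come from the new test file; the atomic pointer slot is illustrative, not part of the diff):

    #include <atomic>
    #include <folly/synchronization/Rcu.h>

    std::atomic<int*> slot{new int(0)};  // illustrative shared state

    // Reader: RAII read-side critical section.
    void reader() {
      folly::rcu_reader guard;
      int* p = slot.load(std::memory_order_acquire);
      // ... use *p; it will not be reclaimed while guard is alive ...
    }

    // Writer: publish a new value, then retire the old one. The default
    // deleter (delete) runs only after all pre-existing readers finish.
    void writer() {
      int* old = slot.exchange(new int(42));
      folly::rcu_retire(old);
      // Or block for a full grace period instead:
      // folly::synchronize_rcu();
    }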

Reviewed By: magedm

Differential Revision: D6330631

fbshipit-source-id: 2c729f3a4c0f151cde5d9a599ecd2a2c20c7da55
parent 6283c759
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/Function.h>
#include <folly/detail/AtFork.h>
#include <folly/detail/TurnSequencer.h>
namespace folly {
template <typename Tag>
bool rcu_domain<Tag>::singleton_{false};

template <typename Tag>
rcu_domain<Tag>::rcu_domain(Executor* executor) noexcept
    : executor_(executor ? executor : &QueuedImmediateExecutor::instance()) {
  // Please use a unique tag for each domain.
  CHECK(!singleton_);
  singleton_ = true;

  // Register fork handlers. Holding read locks across fork is not
  // supported. Using read locks in other atfork handlers is not
  // supported. Other atfork handlers launching new child threads
  // that use read locks *is* supported.
  detail::AtFork::registerHandler(
      this,
      [this]() { syncMutex_.lock(); },
      [this]() { syncMutex_.unlock(); },
      [this]() {
        counters_.resetAfterFork();
        syncMutex_.unlock();
      });
}
template <typename Tag>
rcu_domain<Tag>::~rcu_domain() {
  detail::AtFork::unregisterHandler(this);
}

template <typename Tag>
rcu_token rcu_domain<Tag>::lock_shared() {
  auto idx = version_.load(std::memory_order_acquire);
  idx &= 1;
  counters_.increment(idx);
  return idx;
}

template <typename Tag>
void rcu_domain<Tag>::unlock_shared(rcu_token&& token) {
  DCHECK(0 == token.epoch_ || 1 == token.epoch_);
  counters_.decrement(token.epoch_);
}

template <typename Tag>
template <typename T>
void rcu_domain<Tag>::call(T&& cbin) {
  auto node = new list_node;
  node->cb_ = [node, cb = std::forward<T>(cbin)]() {
    cb();
    delete node;
  };
  retire(node);
}
template <typename Tag>
void rcu_domain<Tag>::retire(list_node* node) noexcept {
  q_.push(node);

  // Note that it's likely we hold a read lock here,
  // so we can only half_sync(false). half_sync(true)
  // or a synchronize() call might block forever.
  uint64_t time = std::chrono::duration_cast<std::chrono::milliseconds>(
                      std::chrono::steady_clock::now().time_since_epoch())
                      .count();
  if (time > syncTime_.load(std::memory_order_relaxed) + syncTimePeriod_) {
    list_head finished;
    {
      std::lock_guard<std::mutex> g(syncMutex_);
      syncTime_.store(time, std::memory_order_relaxed);
      half_sync(false, finished);
    }
    // callbacks are called outside of syncMutex_
    finished.forEach(
        [&](list_node* item) { executor_->add(std::move(item->cb_)); });
  }
}
template <typename Tag>
void rcu_domain<Tag>::synchronize() noexcept {
  auto curr = version_.load(std::memory_order_acquire);
  // Target is two epochs away.
  auto target = curr + 2;
  while (true) {
    // Try to assign ourselves to do the sync work.
    // If someone else is already assigned, we can wait for
    // the work to be finished by waiting on turn_.
    auto work = work_.load(std::memory_order_acquire);
    auto tmp = work;
    if (work < target && work_.compare_exchange_strong(tmp, target)) {
      list_head finished;
      {
        std::lock_guard<std::mutex> g(syncMutex_);
        while (version_.load(std::memory_order_acquire) < target) {
          half_sync(true, finished);
        }
      }
      // callbacks are called outside of syncMutex_
      finished.forEach(
          [&](list_node* node) { executor_->add(std::move(node->cb_)); });
      return;
    } else {
      if (version_.load(std::memory_order_acquire) >= target) {
        return;
      }
      std::atomic<uint32_t> cutoff{100};
      // Wait for someone to finish the work.
      turn_.tryWaitForTurn(work, cutoff, false);
    }
  }
}
/*
* Not multithread safe, but it could be with proper version
* checking and stronger increment of version. See
* https://github.com/pramalhe/ConcurrencyFreaks/blob/master/papers/gracesharingurcu-2016.pdf
*
* This version, however, can go to sleep if there are outstanding
* readers, and does not spin or need rescheduling, unless blocking = false.
*/
template <typename Tag>
void rcu_domain<Tag>::half_sync(bool blocking, list_head& finished) {
  uint64_t curr = version_.load(std::memory_order_acquire);
  auto next = curr + 1;

  // Push all work to a queue for moving through two epochs. One
  // version is not enough because of late readers of the version_
  // counter in lock_shared.
  //
  // Note that for a similar reason we can't swap out the q here,
  // and instead drain it, so concurrent calls to call() are safe,
  // and will wait for the next epoch.
  q_.collect(queues_[0]);

  if (blocking) {
    counters_.waitForZero(next & 1);
  } else {
    if (counters_.readFull(next & 1) != 0) {
      return;
    }
  }

  // Run callbacks that have been through two epochs, and swap queues
  // for those only through a single epoch.
  finished.splice(queues_[1]);
  queues_[1].splice(queues_[0]);

  version_.store(next, std::memory_order_release);
  // Notify synchronous waiters in synchronize().
  turn_.completeTurn(curr);
}
} // namespace folly
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/synchronization/Rcu.h>
namespace folly {
FOLLY_STATIC_CTOR_PRIORITY_MAX rcu_domain<RcuTag> rcu_default_domain_;
} // namespace folly
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Function.h>
#include <folly/ThreadLocal.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
// This is unlike folly::ThreadCachedInt in that the full value is
// never rounded up globally and cached; it only supports readFull().
//
// folly/experimental/TLRefCount is similar, but does not support
// waitForZero, and is not resettable.
//
// Note that the RCU implementation is completely abstracted from the
// counter implementation; an rseq implementation can be dropped in
// if the kernel supports it.
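//
// Illustrative usage (not part of this header; MyTag is a hypothetical
// tag type): each reader bumps the per-thread counter for the current
// epoch on entry and the matching decrement counter on exit, while a
// writer sums all threads with readFull() or blocks in waitForZero():
//
//   folly::detail::ThreadCachedInts<MyTag> counts;
//   counts.increment(0);    // reader enters epoch 0
//   /* ... read-side critical section ... */
//   counts.decrement(0);    // reader leaves epoch 0
//   counts.waitForZero(0);  // writer: wait until epoch 0 readers drain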
namespace folly {
namespace detail {
template <typename Tag>
class ThreadCachedInts {
  // These are only accessed under the ThreadLocal lock.
  int64_t orphan_inc_[2]{0, 0};
  int64_t orphan_dec_[2]{0, 0};
  folly::detail::Futex<> waiting_;

  class Integer {
   public:
    ThreadCachedInts* ints_;
    constexpr Integer(ThreadCachedInts* ints) noexcept
        : ints_(ints), inc_{}, dec_{} {}
    std::atomic<int64_t> inc_[2];
    std::atomic<int64_t> dec_[2];
    ~Integer() noexcept {
      ints_->orphan_inc_[0] += inc_[0].load(std::memory_order_relaxed);
      ints_->orphan_inc_[1] += inc_[1].load(std::memory_order_relaxed);
      ints_->orphan_dec_[0] += dec_[0].load(std::memory_order_relaxed);
      ints_->orphan_dec_[1] += dec_[1].load(std::memory_order_relaxed);
      ints_->waiting_.store(0, std::memory_order_release);
      ints_->waiting_.futexWake();
    }
  };
  folly::ThreadLocalPtr<Integer, Tag> cs_;

  // Cache the int pointer in a threadlocal.
  static thread_local Integer* int_cache_;

  void init() {
    auto ret = new Integer(this);
    cs_.reset(ret);
    int_cache_ = ret;
  }

 public:
  FOLLY_ALWAYS_INLINE void increment(uint8_t epoch) {
    if (!int_cache_) {
      init();
    }

    auto& c = int_cache_->inc_[epoch];
    auto val = c.load(std::memory_order_relaxed);
    c.store(val + 1, std::memory_order_relaxed);

    folly::asymmetricLightBarrier(); // A
  }

  FOLLY_ALWAYS_INLINE void decrement(uint8_t epoch) {
    folly::asymmetricLightBarrier(); // B
    if (!int_cache_) {
      init();
    }

    auto& c = int_cache_->dec_[epoch];
    auto val = c.load(std::memory_order_relaxed);
    c.store(val + 1, std::memory_order_relaxed);

    folly::asymmetricLightBarrier(); // C
    if (waiting_.load(std::memory_order_acquire)) {
      waiting_.store(0, std::memory_order_release);
      waiting_.futexWake();
    }
  }

  int64_t readFull(uint8_t epoch) {
    int64_t full = 0;

    // Matches A - ensure all threads have seen new value of version,
    // *and* that we see current values of counters in readFull()
    //
    // Note that in lock_shared if a reader is currently between the
    // version load and counter increment, they may update the wrong
    // epoch. However, this is ok - they started concurrently *after*
    // any callbacks that will run, and therefore it is safe to run
    // the callbacks.
    folly::asymmetricHeavyBarrier();
    for (auto& i : cs_.accessAllThreads()) {
      full -= i.dec_[epoch].load(std::memory_order_relaxed);
    }

    // Matches B - ensure that all increments are seen if decrements
    // are seen. This is necessary because increment and decrement
    // are allowed to happen on different threads.
    folly::asymmetricHeavyBarrier();

    auto accessor = cs_.accessAllThreads();
    for (auto& i : accessor) {
      full += i.inc_[epoch].load(std::memory_order_relaxed);
    }

    // orphan is read behind accessAllThreads lock
    auto res = full + orphan_inc_[epoch] - orphan_dec_[epoch];
    return res;
  }

  void waitForZero(uint8_t phase) {
    // Try reading before futex sleeping.
    if (readFull(phase) == 0) {
      return;
    }

    while (true) {
      waiting_.store(1, std::memory_order_release);
      // Matches C. Ensure either decrement sees waiting_,
      // or we see their decrement and can safely sleep.
      folly::asymmetricHeavyBarrier();
      if (readFull(phase) == 0) {
        break;
      }
      waiting_.futexWait(1);
    }
    waiting_.store(0, std::memory_order_relaxed);
  }

  // We are guaranteed to be called while StaticMeta lock is still
  // held because of ordering in AtForkList. We can therefore safely
  // touch orphan_ and clear out all counts.
  void resetAfterFork() {
    if (int_cache_) {
      int_cache_->dec_[0].store(0, std::memory_order_relaxed);
      int_cache_->dec_[1].store(0, std::memory_order_relaxed);
      int_cache_->inc_[0].store(0, std::memory_order_relaxed);
      int_cache_->inc_[1].store(0, std::memory_order_relaxed);
    }
    orphan_inc_[0] = 0;
    orphan_inc_[1] = 0;
    orphan_dec_[0] = 0;
    orphan_dec_[1] = 0;
  }
};

template <typename Tag>
thread_local typename detail::ThreadCachedInts<Tag>::Integer*
    detail::ThreadCachedInts<Tag>::int_cache_{nullptr};
} // namespace detail
} // namespace folly
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <folly/Function.h>
#include <folly/ThreadLocal.h>
#include <glog/logging.h>
namespace folly {
namespace detail {
// This is a thread-local cached, multi-producer single-consumer
// queue, similar to a concurrent version of std::list.
//
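// Illustrative usage (not part of this header; TTag is a hypothetical
// tag type): any thread may push() nodes; a single consumer
// periodically collect()s everything into a plain ListHead and walks
// it with forEach():
//
//   folly::detail::ThreadCachedLists<TTag> lists;
//   lists.push(new folly::detail::ThreadCachedListsBase::Node);
//   folly::detail::ThreadCachedLists<TTag>::ListHead head{};
//   lists.collect(head);
//   head.forEach([](auto* node) { delete node; });
//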
class ThreadCachedListsBase {
 public:
  struct Node {
    folly::Function<void()> cb_;
    Node* next_{nullptr};
  };
};

template <typename Tag>
class ThreadCachedLists : public ThreadCachedListsBase {
 public:
  struct AtomicListHead {
    std::atomic<Node*> tail_{nullptr};
    std::atomic<Node*> head_{nullptr};
  };

  // Non-concurrent list, similar to std::list.
  struct ListHead {
    Node* head_{nullptr};
    Node* tail_{nullptr};

    // Run func on each list node.
    template <typename Func>
    void forEach(Func func) {
      auto node = tail_;
      while (node != nullptr) {
        auto next = node->next_;
        func(node);
        node = next;
      }
    }

    // Splice other in to this list.
    // Afterwards, other is a valid empty listhead.
    void splice(ListHead& other);
    void splice(AtomicListHead& other);
  };

  // Push a node on a thread-local list.
  void push(Node* node);

  // Collect all thread local lists to a single local list.
  // This function is threadsafe with concurrent push()es,
  // but only a single thread may call collect() at a time.
  void collect(ListHead& list);

 private:
  // Push list to the global list.
  void pushGlobal(ListHead& list);

  ListHead ghead_;

  struct TLHead : public AtomicListHead {
    ThreadCachedLists* parent_;

   public:
    TLHead(ThreadCachedLists* parent) : parent_(parent) {}

    ~TLHead() {
      parent_->ghead_.splice(*this);
    }
  };

  folly::ThreadLocalPtr<TLHead, Tag> lhead_;
};
// push() and splice() are optimistic w.r.t setting the list head: The
// first pusher cas's the list head, which functions as a lock until
// tail != null. The first pusher then sets tail_ = head_.
//
// splice() does the opposite: steals the tail_ via exchange, then
// unlocks the list again by setting head_ to null.
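//
// Illustrative interleaving on an initially empty AtomicListHead
// (reconstructed from the code below, for exposition only):
//   pusher A:  CAS head_ null -> a    (head_ set, tail_ still null: "locked")
//   pusher B:  sees head_ != null but tail_ == null, so it retries
//   pusher A:  stores tail_ = a       (list unlocked)
//   pusher B:  CAS tail_ a -> b, having set b->next_ = a
//   consumer:  splice() exchanges tail_ then head_ to null, taking a and b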
template <typename Tag>
void ThreadCachedLists<Tag>::push(Node* node) {
  DCHECK(node->next_ == nullptr);
  static thread_local TLHead* cache_{nullptr};

  if (!cache_) {
    auto l = lhead_.get();
    if (!l) {
      lhead_.reset(new TLHead(this));
      l = lhead_.get();
      DCHECK(l);
    }
    cache_ = l;
  }

  while (true) {
    auto head = cache_->head_.load(std::memory_order_relaxed);
    if (!head) {
      node->next_ = nullptr;
      if (cache_->head_.compare_exchange_weak(head, node)) {
        cache_->tail_.store(node);
        break;
      }
    } else {
      auto tail = cache_->tail_.load(std::memory_order_relaxed);
      if (tail) {
        node->next_ = tail;
        if (cache_->tail_.compare_exchange_weak(node->next_, node)) {
          break;
        }
      }
    }
  }
}
template <typename Tag>
void ThreadCachedLists<Tag>::collect(ListHead& list) {
  auto acc = lhead_.accessAllThreads();

  for (auto& thr : acc) {
    list.splice(thr);
  }

  list.splice(ghead_);
}

template <typename Tag>
void ThreadCachedLists<Tag>::ListHead::splice(ListHead& other) {
  if (other.head_ != nullptr) {
    DCHECK(other.tail_ != nullptr);
  } else {
    DCHECK(other.tail_ == nullptr);
    return;
  }

  if (head_) {
    DCHECK(tail_ != nullptr);
    DCHECK(head_->next_ == nullptr);
    head_->next_ = other.tail_;
    head_ = other.head_;
  } else {
    DCHECK(head_ == nullptr);
    head_ = other.head_;
    tail_ = other.tail_;
  }

  other.head_ = nullptr;
  other.tail_ = nullptr;
}

template <typename Tag>
void ThreadCachedLists<Tag>::ListHead::splice(AtomicListHead& list) {
  ListHead local;

  auto tail = list.tail_.load();
  if (tail) {
    local.tail_ = list.tail_.exchange(nullptr);
    local.head_ = list.head_.exchange(nullptr);
    splice(local);
  }
}
} // namespace detail
} // namespace folly
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/synchronization/Rcu.h>
#include <thread>
#include <vector>
#include <glog/logging.h>
#include <folly/Benchmark.h>
#include <folly/Random.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <folly/synchronization/Baton.h>
using namespace folly;
DEFINE_int64(iters, 100000, "Number of iterations");
DEFINE_int64(threads, 32, "Number of threads");
TEST(RcuTest, Basic) {
auto foo = new int(2);
rcu_retire(foo);
}
class des {
bool* d_;
public:
des(bool* d) : d_(d) {}
~des() {
*d_ = true;
}
};
TEST(RcuTest, Guard) {
bool del = false;
auto foo = new des(&del);
{ rcu_reader g; }
rcu_retire(foo);
synchronize_rcu();
EXPECT_TRUE(del);
}
TEST(RcuTest, Perf) {
long i = FLAGS_iters;
auto start = std::chrono::steady_clock::now();
while (i-- > 0) {
rcu_reader g;
}
auto diff = std::chrono::steady_clock::now() - start;
printf(
"Total time %li ns \n",
std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count() /
FLAGS_iters);
}
TEST(RcuTest, ResetPerf) {
long i = FLAGS_iters;
auto start = std::chrono::steady_clock::now();
while (i-- > 0) {
rcu_retire<int>(nullptr, [](int*) {});
}
auto diff = std::chrono::steady_clock::now() - start;
printf(
"Total time %li ns \n",
std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count() /
FLAGS_iters);
}
TEST(RcuTest, SlowReader) {
std::thread t;
{
rcu_reader g;
t = std::thread([&]() { synchronize_rcu(); });
usleep(100); // Wait for synchronize to start
}
t.join();
}
rcu_reader tryretire(des* obj) {
rcu_reader g;
rcu_retire(obj);
return g;
}
TEST(RcuTest, CopyGuard) {
bool del = false;
auto foo = new des(&del);
{
auto res = tryretire(foo);
EXPECT_FALSE(del);
}
rcu_barrier();
EXPECT_TRUE(del);
}
TEST(RcuTest, Stress) {
std::vector<std::thread> threads;
constexpr uint32_t sz = 1000;
std::atomic<int*> ints[sz];
for (uint i = 0; i < sz; i++) {
ints[i].store(new int(0));
}
for (int th = 0; th < FLAGS_threads; th++) {
threads.push_back(std::thread([&]() {
for (int i = 0; i < FLAGS_iters / 100; i++) {
rcu_reader g;
int sum = 0;
int* ptrs[sz];
for (uint j = 0; j < sz; j++) {
ptrs[j] = ints[j].load(std::memory_order_acquire);
}
for (uint j = 0; j < sz; j++) {
sum += *ptrs[j];
}
EXPECT_EQ(sum, 0);
}
}));
}
std::atomic<bool> done{false};
std::thread updater([&]() {
while (!done.load()) {
auto newint = new int(0);
auto oldint = ints[folly::Random::rand32() % sz].exchange(newint);
rcu_retire<int>(oldint, [](int* obj) {
*obj = folly::Random::rand32();
delete obj;
});
}
});
for (auto& t : threads) {
t.join();
}
done = true;
updater.join();
}
TEST(RcuTest, Synchronize) {
std::vector<std::thread> threads;
for (int th = 0; th < FLAGS_threads; th++) {
threads.push_back(std::thread([&]() {
for (int i = 0; i < 10; i++) {
synchronize_rcu();
}
}));
}
for (auto& t : threads) {
t.join();
}
}
TEST(RcuTest, NewDomainTest) {
struct UniqueTag;
rcu_domain<UniqueTag> newdomain(nullptr);
synchronize_rcu();
}
TEST(RcuTest, MovableReader) {
{
rcu_reader g;
rcu_reader f(std::move(g));
}
synchronize_rcu();
{
rcu_reader g(std::defer_lock);
rcu_reader f;
g = std::move(f);
}
synchronize_rcu();
}
TEST(RcuTest, SynchronizeInCall) {
rcu_default_domain()->call([]() { synchronize_rcu(); });
synchronize_rcu();
}
TEST(RcuTest, MoveReaderBetweenThreads) {
rcu_reader g;
std::thread t([f = std::move(g)] {});
t.join();
synchronize_rcu();
}
TEST(RcuTest, ForkTest) {
folly::Baton<> b;
rcu_token epoch;
std::thread t([&]() {
epoch = rcu_default_domain()->lock_shared();
b.post();
});
t.detach();
b.wait();
auto pid = fork();
if (pid) {
// parent
rcu_default_domain()->unlock_shared(std::move(epoch));
synchronize_rcu();
int status;
auto pid2 = wait(&status);
EXPECT_EQ(status, 0);
EXPECT_EQ(pid, pid2);
} else {
// child
synchronize_rcu();
exit(0); // Do not print gtest results
}
}
TEST(RcuTest, CoreLocalList) {
struct TTag;
folly::detail::ThreadCachedLists<TTag> lists;
int numthreads = 32;
std::vector<std::thread> threads;
std::atomic<int> done{0};
for (int tr = 0; tr < numthreads; tr++) {
threads.push_back(std::thread([&]() {
for (int i = 0; i < FLAGS_iters; i++) {
auto node = new folly::detail::ThreadCachedListsBase::Node;
lists.push(node);
}
done++;
}));
}
while (done.load() != numthreads) {
folly::detail::ThreadCachedLists<TTag>::ListHead list{};
lists.collect(list);
list.forEach([](folly::detail::ThreadCachedLists<TTag>::Node* node) {
delete node;
});
}
for (auto& thread : threads) {
thread.join();
}
}
TEST(RcuTest, ThreadDeath) {
bool del = false;
std::thread t([&] {
auto foo = new des(&del);
rcu_retire(foo);
});
t.join();
synchronize_rcu();
EXPECT_TRUE(del);
}
TEST(RcuTest, RcuObjBase) {
bool retired = false;
struct base_test : rcu_obj_base<base_test> {
bool* ret_;
base_test(bool* ret) : ret_(ret) {}
~base_test() {
(*ret_) = true;
}
};
auto foo = new base_test(&retired);
foo->retire();
synchronize_rcu();
EXPECT_TRUE(retired);
}