Commit 8757861f authored by Andrii Grynenko, committed by Facebook Github Bot

Fix a race in TLRefCount

Summary:
After count_.store() was done, it was possible for the thread performing collect() to observe the new value, successfully decrement the counter to 0, and destroy the TLRefCount. The rest of the code in the update() method then couldn't safely assume that the TLRefCount object was still alive.
This fixes collect() to actually wait for update() to complete (if we detect that we captured the new value written by such an update).
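For illustration only, here is a minimal, self-contained sketch of the synchronization pattern the fix relies on: the updater raises an "in update" flag before publishing the new count and clears it with release semantics only once it is completely done with the object, while the collector, after reading the count, spins with acquire loads until the flag clears. This is an assumption-laden sketch, not the folly code: the writer/collector functions and the global count/inUpdate atomics are made-up names, and std::this_thread::yield() stands in for folly::detail::Sleeper.

#include <atomic>
#include <cstdio>
#include <thread>

std::atomic<int> count{0};
std::atomic<bool> inUpdate{false};

void writer() {
  // Raise the flag before publishing the new count; clear it only once the
  // whole update is finished (mirrors inUpdate_ + SCOPE_EXIT in the fix).
  inUpdate.store(true, std::memory_order_relaxed);
  count.store(count.load(std::memory_order_relaxed) + 1,
              std::memory_order_release);
  // ... rest of the update; the shared object must stay alive until here ...
  inUpdate.store(false, std::memory_order_release);
}

void collector() {
  // Mirrors the spin-wait added to collect(): after reading the (possibly
  // new) count, wait for any in-flight update to finish before tearing down.
  int observed = count.load(std::memory_order_acquire);
  if (inUpdate.load(std::memory_order_relaxed)) {
    while (inUpdate.load(std::memory_order_acquire)) {
      std::this_thread::yield(); // stand-in for folly::detail::Sleeper
    }
  }
  std::printf("observed count %d with no update in flight\n", observed);
}

int main() {
  std::thread w(writer);
  std::thread c(collector);
  w.join();
  c.join();
  return 0;
}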

Reviewed By: davidtgoldblatt

Differential Revision: D13696608

fbshipit-source-id: bd1a69ea3cc005b90ff481705fdffb83d8a9077a
parent f7548ce4
@@ -17,6 +17,7 @@
 #include <folly/ThreadLocal.h>
 #include <folly/synchronization/AsymmetricMemoryBarrier.h>
+#include <folly/synchronization/detail/Sleeper.h>
 
 namespace folly {
@@ -146,6 +147,7 @@ class TLRefCount {
     }
 
     void collect() {
+      {
       std::lock_guard<std::mutex> lg(collectMutex_);
 
       if (!collectGuard_) {
@@ -156,6 +158,15 @@ class TLRefCount {
       refCount_.globalCount_.fetch_add(collectCount_);
       collectGuard_.reset();
     }
+      // We only care about seeing inUpdate if we've observed the new count_
+      // value set by the update() call, so memory_order_relaxed is enough.
+      if (inUpdate_.load(std::memory_order_relaxed)) {
+        folly::detail::Sleeper sleeper;
+        while (inUpdate_.load(std::memory_order_acquire)) {
+          sleeper.wait();
+        }
+      }
+    }
 
     bool operator++() {
       return update(1);
@@ -176,7 +187,11 @@ class TLRefCount {
       // makes things faster than atomic fetch_add on platforms with native
       // support.
       auto count = count_.load(std::memory_order_relaxed) + delta;
-      count_.store(count, std::memory_order_relaxed);
+      inUpdate_.store(true, std::memory_order_relaxed);
+      SCOPE_EXIT {
+        inUpdate_.store(false, std::memory_order_release);
+      };
+      count_.store(count, std::memory_order_release);
 
       asymmetricLightBarrier();
@@ -195,6 +210,7 @@ class TLRefCount {
     }
 
     AtomicInt count_{0};
+    std::atomic<bool> inUpdate_{false};
     TLRefCount& refCount_;
     std::mutex collectMutex_;