Commit 91a2c0c0 authored by Maged Michael; committed by Facebook GitHub Bot

RequestContext: Remove read-lock-based implementation

Summary: Remove implementation based on read locking (replaced by implementation based on hazard pointers).

Reviewed By: yfeldblum

Differential Revision: D20881677

fbshipit-source-id: da89eee6a92fa34fa1c111a421bf5368873ccd57
parent 1be2277a
...@@ -24,11 +24,6 @@ ...@@ -24,11 +24,6 @@
#include <folly/MapUtil.h> #include <folly/MapUtil.h>
#include <folly/SingletonThreadLocal.h> #include <folly/SingletonThreadLocal.h>
DEFINE_bool(
folly_reqctx_use_hazptr,
true,
"RequestContext implementation using hazard pointers");
namespace folly { namespace folly {
namespace { namespace {
...@@ -118,33 +113,10 @@ void RequestData::releaseRefClearDeleteSlow() { ...@@ -118,33 +113,10 @@ void RequestData::releaseRefClearDeleteSlow() {
releaseRefDeleteOnly(); releaseRefDeleteOnly();
} }
// Custom deleter used by RequestData::SharedPtr. Decrements the
// keep-alive reference count and, only when this was the last
// reference, runs onClear() and destroys the object.
void RequestData::DestructPtr::operator()(RequestData* ptr) {
  if (ptr) {
    auto keepAliveCounter =
        ptr->keepAliveCounter_.fetch_sub(1, std::memory_order_acq_rel);
    // Note: this is the value before decrement, hence == 1 check
    DCHECK(keepAliveCounter > 0);
    if (keepAliveCounter == 1) {
      ptr->onClear();
      delete ptr;
    }
  }
}
// Wrap a raw RequestData pointer in a SharedPtr, incrementing the
// keep-alive count to account for the new reference. A null ptr yields
// an empty SharedPtr without touching any counter.
/* static */ RequestData::SharedPtr RequestData::constructPtr(
    RequestData* ptr) {
  if (ptr) {
    auto keepAliveCounter =
        ptr->keepAliveCounter_.fetch_add(1, std::memory_order_relaxed);
    DCHECK(keepAliveCounter >= 0);
  }
  return SharedPtr(ptr);
}
// The Combined struct keeps the two structures for context data // The Combined struct keeps the two structures for context data
// and callbacks together, so that readers can protect consistent // and callbacks together, so that readers can protect consistent
// versions of the two structures together using hazard pointers. // versions of the two structures together using hazard pointers.
struct RequestContext::StateHazptr::Combined : hazptr_obj_base<Combined> { struct RequestContext::State::Combined : hazptr_obj_base<Combined> {
static constexpr size_t kInitialCapacity = 4; static constexpr size_t kInitialCapacity = 4;
static constexpr size_t kSlackReciprocal = 4; // unused >= 1/4 capacity static constexpr size_t kSlackReciprocal = 4; // unused >= 1/4 capacity
...@@ -218,10 +190,10 @@ struct RequestContext::StateHazptr::Combined : hazptr_obj_base<Combined> { ...@@ -218,10 +190,10 @@ struct RequestContext::StateHazptr::Combined : hazptr_obj_base<Combined> {
} }
}; // Combined }; // Combined
RequestContext::StateHazptr::StateHazptr() = default; RequestContext::State::State() = default;
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestContext::StateHazptr::StateHazptr(const StateHazptr& o) { RequestContext::State::State(const State& o) {
Combined* oc = o.combined(); Combined* oc = o.combined();
if (oc) { if (oc) {
auto p = new Combined(*oc); auto p = new Combined(*oc);
...@@ -230,7 +202,7 @@ RequestContext::StateHazptr::StateHazptr(const StateHazptr& o) { ...@@ -230,7 +202,7 @@ RequestContext::StateHazptr::StateHazptr(const StateHazptr& o) {
} }
} }
RequestContext::StateHazptr::~StateHazptr() { RequestContext::State::~State() {
cohort_.shutdown_and_reclaim(); cohort_.shutdown_and_reclaim();
auto p = combined(); auto p = combined();
if (p) { if (p) {
...@@ -239,14 +211,12 @@ RequestContext::StateHazptr::~StateHazptr() { ...@@ -239,14 +211,12 @@ RequestContext::StateHazptr::~StateHazptr() {
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestContext::StateHazptr::Combined* RequestContext::StateHazptr::combined() RequestContext::State::Combined* RequestContext::State::combined() const {
const {
return combined_.load(std::memory_order_acquire); return combined_.load(std::memory_order_acquire);
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestContext::StateHazptr::Combined* RequestContext::State::Combined* RequestContext::State::ensureCombined() {
RequestContext::StateHazptr::ensureCombined() {
auto c = combined(); auto c = combined();
if (!c) { if (!c) {
c = new Combined; c = new Combined;
...@@ -256,13 +226,13 @@ RequestContext::StateHazptr::ensureCombined() { ...@@ -256,13 +226,13 @@ RequestContext::StateHazptr::ensureCombined() {
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
void RequestContext::StateHazptr::setCombined(Combined* p) { void RequestContext::State::setCombined(Combined* p) {
p->set_cohort_tag(&cohort_); p->set_cohort_tag(&cohort_);
combined_.store(p, std::memory_order_release); combined_.store(p, std::memory_order_release);
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
bool RequestContext::StateHazptr::doSetContextData( bool RequestContext::State::doSetContextData(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData>& data, std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour, DoSetBehaviour behaviour,
...@@ -286,8 +256,8 @@ bool RequestContext::StateHazptr::doSetContextData( ...@@ -286,8 +256,8 @@ bool RequestContext::StateHazptr::doSetContextData(
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestContext::StateHazptr::SetContextDataResult RequestContext::State::SetContextDataResult
RequestContext::StateHazptr::doSetContextDataHelper( RequestContext::State::doSetContextDataHelper(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData>& data, std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour, DoSetBehaviour behaviour,
...@@ -341,9 +311,9 @@ RequestContext::StateHazptr::doSetContextDataHelper( ...@@ -341,9 +311,9 @@ RequestContext::StateHazptr::doSetContextDataHelper(
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestContext::StateHazptr::Combined* FOLLY_NULLABLE RequestContext::State::Combined* FOLLY_NULLABLE
RequestContext::StateHazptr::eraseOldData( RequestContext::State::eraseOldData(
RequestContext::StateHazptr::Combined* cur, RequestContext::State::Combined* cur,
const RequestToken& token, const RequestToken& token,
RequestData* olddata, RequestData* olddata,
bool safe) { bool safe) {
...@@ -375,9 +345,9 @@ RequestContext::StateHazptr::eraseOldData( ...@@ -375,9 +345,9 @@ RequestContext::StateHazptr::eraseOldData(
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestContext::StateHazptr::Combined* FOLLY_NULLABLE RequestContext::State::Combined* FOLLY_NULLABLE
RequestContext::StateHazptr::insertNewData( RequestContext::State::insertNewData(
RequestContext::StateHazptr::Combined* cur, RequestContext::State::Combined* cur,
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData>& data, std::unique_ptr<RequestData>& data,
bool found) { bool found) {
...@@ -404,8 +374,7 @@ RequestContext::StateHazptr::insertNewData( ...@@ -404,8 +374,7 @@ RequestContext::StateHazptr::insertNewData(
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
bool RequestContext::StateHazptr::hasContextData( bool RequestContext::State::hasContextData(const RequestToken& token) const {
const RequestToken& token) const {
hazptr_local<1> h; hazptr_local<1> h;
Combined* combined = h[0].get_protected(combined_); Combined* combined = h[0].get_protected(combined_);
return combined ? combined->requestData_.contains(token) : false; return combined ? combined->requestData_.contains(token) : false;
...@@ -413,7 +382,7 @@ bool RequestContext::StateHazptr::hasContextData( ...@@ -413,7 +382,7 @@ bool RequestContext::StateHazptr::hasContextData(
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
RequestData* FOLLY_NULLABLE RequestData* FOLLY_NULLABLE
RequestContext::StateHazptr::getContextData(const RequestToken& token) { RequestContext::State::getContextData(const RequestToken& token) {
hazptr_local<1> h; hazptr_local<1> h;
Combined* combined = h[0].get_protected(combined_); Combined* combined = h[0].get_protected(combined_);
if (!combined) { if (!combined) {
...@@ -426,7 +395,7 @@ RequestContext::StateHazptr::getContextData(const RequestToken& token) { ...@@ -426,7 +395,7 @@ RequestContext::StateHazptr::getContextData(const RequestToken& token) {
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
const RequestData* FOLLY_NULLABLE const RequestData* FOLLY_NULLABLE
RequestContext::StateHazptr::getContextData(const RequestToken& token) const { RequestContext::State::getContextData(const RequestToken& token) const {
hazptr_local<1> h; hazptr_local<1> h;
Combined* combined = h[0].get_protected(combined_); Combined* combined = h[0].get_protected(combined_);
if (!combined) { if (!combined) {
...@@ -438,7 +407,7 @@ RequestContext::StateHazptr::getContextData(const RequestToken& token) const { ...@@ -438,7 +407,7 @@ RequestContext::StateHazptr::getContextData(const RequestToken& token) const {
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
void RequestContext::StateHazptr::onSet() { void RequestContext::State::onSet() {
// Don't use hazptr_local because callback may use hazptr // Don't use hazptr_local because callback may use hazptr
hazptr_holder<> h; hazptr_holder<> h;
Combined* combined = h.get_protected(combined_); Combined* combined = h.get_protected(combined_);
...@@ -452,7 +421,7 @@ void RequestContext::StateHazptr::onSet() { ...@@ -452,7 +421,7 @@ void RequestContext::StateHazptr::onSet() {
} }
FOLLY_ALWAYS_INLINE FOLLY_ALWAYS_INLINE
void RequestContext::StateHazptr::onUnset() { void RequestContext::State::onUnset() {
// Don't use hazptr_local because callback may use hazptr // Don't use hazptr_local because callback may use hazptr
hazptr_holder<> h; hazptr_holder<> h;
Combined* combined = h.get_protected(combined_); Combined* combined = h.get_protected(combined_);
...@@ -465,7 +434,7 @@ void RequestContext::StateHazptr::onUnset() { ...@@ -465,7 +434,7 @@ void RequestContext::StateHazptr::onUnset() {
} }
} }
void RequestContext::StateHazptr::clearContextData(const RequestToken& token) { void RequestContext::State::clearContextData(const RequestToken& token) {
RequestData* data; RequestData* data;
Combined* replaced = nullptr; Combined* replaced = nullptr;
{ // Lock mutex_ { // Lock mutex_
...@@ -503,8 +472,8 @@ void RequestContext::StateHazptr::clearContextData(const RequestToken& token) { ...@@ -503,8 +472,8 @@ void RequestContext::StateHazptr::clearContextData(const RequestToken& token) {
replaced->retire(); replaced->retire();
} }
RequestContext::StateHazptr::Combined* RequestContext::StateHazptr::expand( RequestContext::State::Combined* RequestContext::State::expand(
RequestContext::StateHazptr::Combined* c) { RequestContext::State::Combined* c) {
size_t dataCapacity = c->requestData_.capacity(); size_t dataCapacity = c->requestData_.capacity();
if (c->needExpandRequestData()) { if (c->needExpandRequestData()) {
dataCapacity *= 2; dataCapacity *= 2;
...@@ -516,12 +485,9 @@ RequestContext::StateHazptr::Combined* RequestContext::StateHazptr::expand( ...@@ -516,12 +485,9 @@ RequestContext::StateHazptr::Combined* RequestContext::StateHazptr::expand(
return new Combined(dataCapacity, callbackCapacity, *c); return new Combined(dataCapacity, callbackCapacity, *c);
} }
RequestContext::RequestContext() RequestContext::RequestContext() : rootId_(reinterpret_cast<intptr_t>(this)) {}
: useHazptr_(FLAGS_folly_reqctx_use_hazptr),
rootId_(reinterpret_cast<intptr_t>(this)) {}
RequestContext::RequestContext(intptr_t rootid) RequestContext::RequestContext(intptr_t rootid) : rootId_(rootid) {}
: useHazptr_(FLAGS_folly_reqctx_use_hazptr), rootId_(rootid) {}
RequestContext::RequestContext(const RequestContext& ctx, intptr_t rootid, Tag) RequestContext::RequestContext(const RequestContext& ctx, intptr_t rootid, Tag)
: RequestContext(ctx) { : RequestContext(ctx) {
...@@ -542,184 +508,52 @@ RequestContext::RequestContext(const RequestContext& ctx, Tag) ...@@ -542,184 +508,52 @@ RequestContext::RequestContext(const RequestContext& ctx, Tag)
return std::make_shared<RequestContext>(ctx, Tag{}); return std::make_shared<RequestContext>(ctx, Tag{});
} }
// Lock-based worker behind setContextData / setContextDataIfAbsent /
// overwriteContextDataLock. Returns false only when behaviour is
// SET_IF_ABSENT and an entry for `token` already exists. On plain SET
// over an existing key the old value is cleared, a warning is logged
// once, and the new data is NOT installed (caller keeps ownership of
// `data`); on OVERWRITE (or a fresh key) the new data is installed.
bool RequestContext::doSetContextDataLock(
    const RequestToken& token,
    std::unique_ptr<RequestData>& data,
    DoSetBehaviour behaviour) {
  auto wlock = state_.wlock();
  auto& state = *wlock;
  auto it = state.requestData_.find(token);
  if (it != state.requestData_.end()) {
    // An entry already exists for this token.
    if (behaviour == DoSetBehaviour::SET_IF_ABSENT) {
      return false;
    }
    if (it->second) {
      // Unregister the old data's callback before destroying it.
      if (it->second->hasCallback()) {
        it->second->onUnset();
        state.callbackData_.erase(it->second.get());
      }
      it->second.reset(nullptr);
    }
    if (behaviour == DoSetBehaviour::SET) {
      // Plain SET on an already-set key is suspicious; warn once and
      // bail out without installing the new data.
      LOG_FIRST_N(WARNING, 1)
          << "Calling RequestContext::setContextData for "
          << token.getDebugString() << " but it is already set";
      return true;
    }
    DCHECK(behaviour == DoSetBehaviour::OVERWRITE);
  }
  if (data && data->hasCallback()) {
    // Register the new data's callback and notify it of being set.
    state.callbackData_.insert(data.get());
    data->onSet();
  }
  auto ptr = RequestData::constructPtr(data.release());
  if (it != state.requestData_.end()) {
    it->second = std::move(ptr);
  } else {
    state.requestData_.insert(std::make_pair(token, std::move(ptr)));
  }
  return true;
}
void RequestContext::setContextData( void RequestContext::setContextData(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData> data) { std::unique_ptr<RequestData> data) {
if (useHazptr()) { state_.doSetContextData(token, data, DoSetBehaviour::SET, false);
stateHazptr_.doSetContextData(token, data, DoSetBehaviour::SET, false);
return;
}
doSetContextDataLock(token, data, DoSetBehaviour::SET);
} }
bool RequestContext::setContextDataIfAbsent( bool RequestContext::setContextDataIfAbsent(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData> data) { std::unique_ptr<RequestData> data) {
if (useHazptr()) { return state_.doSetContextData(
return stateHazptr_.doSetContextData(
token, data, DoSetBehaviour::SET_IF_ABSENT, false); token, data, DoSetBehaviour::SET_IF_ABSENT, false);
}
return doSetContextDataLock(token, data, DoSetBehaviour::SET_IF_ABSENT);
} }
void RequestContext::overwriteContextDataLock( void RequestContext::overwriteContextData(
const RequestToken& token,
std::unique_ptr<RequestData> data) {
doSetContextDataLock(token, data, DoSetBehaviour::OVERWRITE);
}
void RequestContext::overwriteContextDataHazptr(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData> data, std::unique_ptr<RequestData> data,
bool safe) { bool safe) {
stateHazptr_.doSetContextData(token, data, DoSetBehaviour::OVERWRITE, safe); state_.doSetContextData(token, data, DoSetBehaviour::OVERWRITE, safe);
} }
bool RequestContext::hasContextData(const RequestToken& val) const { bool RequestContext::hasContextData(const RequestToken& val) const {
if (useHazptr()) { return state_.hasContextData(val);
return stateHazptr_.hasContextData(val);
}
return state_.rlock()->requestData_.count(val);
} }
RequestData* FOLLY_NULLABLE RequestData* FOLLY_NULLABLE
RequestContext::getContextData(const RequestToken& val) { RequestContext::getContextData(const RequestToken& val) {
if (useHazptr()) { return state_.getContextData(val);
return stateHazptr_.getContextData(val);
}
const RequestData::SharedPtr dflt{nullptr};
return get_ref_default(state_.rlock()->requestData_, val, dflt).get();
} }
const RequestData* FOLLY_NULLABLE const RequestData* FOLLY_NULLABLE
RequestContext::getContextData(const RequestToken& val) const { RequestContext::getContextData(const RequestToken& val) const {
if (useHazptr()) { return state_.getContextData(val);
return stateHazptr_.getContextData(val);
}
const RequestData::SharedPtr dflt{nullptr};
return get_ref_default(state_.rlock()->requestData_, val, dflt).get();
} }
void RequestContext::onSet() { void RequestContext::onSet() {
if (useHazptr()) { state_.onSet();
stateHazptr_.onSet();
return;
}
auto rlock = state_.rlock();
for (const auto& data : rlock->callbackData_) {
data->onSet();
}
} }
void RequestContext::onUnset() { void RequestContext::onUnset() {
if (useHazptr()) { state_.onUnset();
stateHazptr_.onUnset();
return;
}
auto rlock = state_.rlock();
for (const auto& data : rlock->callbackData_) {
data->onUnset();
}
} }
void RequestContext::clearContextData(const RequestToken& val) { void RequestContext::clearContextData(const RequestToken& val) {
if (useHazptr()) { state_.clearContextData(val);
stateHazptr_.clearContextData(val);
return;
}
RequestData::SharedPtr requestData;
// Delete the RequestData after giving up the wlock just in case one of the
// RequestData destructors will try to grab the lock again.
{
auto ulock = state_.ulock();
// Need non-const iterators to use under write lock.
auto& state = ulock.asNonConstUnsafe();
auto it = state.requestData_.find(val);
if (it == state.requestData_.end()) {
return;
}
auto wlock = ulock.moveFromUpgradeToWrite();
if (it->second && it->second->hasCallback()) {
it->second->onUnset();
wlock->callbackData_.erase(it->second.get());
}
requestData = std::move(it->second);
wlock->requestData_.erase(it);
}
} }
namespace {

// Invoke `exec` on every element of `data` that does not appear in
// `other`. Both containers must be sorted. Behaves like
// std::set_difference, but applies a functor directly instead of
// materializing an intermediate result range.
template <typename TData, typename TExec>
void exec_set_difference(const TData& data, const TData& other, TExec&& exec) {
  auto dIt = data.begin();
  const auto dEnd = data.end();
  auto oIt = other.begin();
  const auto oEnd = other.end();
  while (dIt != dEnd) {
    // Branch order optimizes the two common cases:
    // 1) `other` empty (switching to the default context);
    // 2) `other` identical (switching to a context with the same
    //    callbacks).
    if (oIt == oEnd) {
      // `other` exhausted: everything remaining is unique to `data`.
      exec(*dIt);
      ++dIt;
    } else if (*dIt == *oIt) {
      // Present in both sets; skip.
      ++dIt;
      ++oIt;
    } else if (*dIt < *oIt) {
      // Unique to `data`.
      exec(*dIt);
      ++dIt;
    } else {
      // *oIt < *dIt: advance `other` to catch up.
      ++oIt;
    }
  }
}

} // namespace
/* static */ std::shared_ptr<RequestContext> RequestContext::setContext( /* static */ std::shared_ptr<RequestContext> RequestContext::setContext(
std::shared_ptr<RequestContext> const& newCtx) { std::shared_ptr<RequestContext> const& newCtx) {
return setContext(copy(newCtx)); return setContext(copy(newCtx));
...@@ -742,62 +576,14 @@ void exec_set_difference(const TData& data, const TData& other, TExec&& exec) { ...@@ -742,62 +576,14 @@ void exec_set_difference(const TData& data, const TData& other, TExec&& exec) {
staticCtx.first ? staticCtx.first->getRootId() : 0, staticCtx.first ? staticCtx.first->getRootId() : 0,
newCtx ? newCtx->getRootId() : 0); newCtx ? newCtx->getRootId() : 0);
if ((newCtx.get() && newCtx->useHazptr()) ||
(staticCtx.first.get() && staticCtx.first->useHazptr())) {
DCHECK(!newCtx.get() || newCtx->useHazptr());
DCHECK(!staticCtx.first.get() || staticCtx.first->useHazptr());
return RequestContext::setContextHazptr(newCtx, staticCtx);
} else {
return RequestContext::setContextLock(newCtx, staticCtx);
}
}
// Lock-based implementation of setContext. Installs newCtx as the
// current static context and returns the previous one. When both the
// old and new contexts exist, onUnset/onSet callbacks are fired only
// for request data that differs between the two contexts.
FOLLY_ALWAYS_INLINE
/* static */ std::shared_ptr<RequestContext> RequestContext::setContextLock(
    std::shared_ptr<RequestContext>& newCtx,
    StaticContext& staticCtx) {
  auto curCtx = staticCtx.first;
  if (newCtx && curCtx) {
    // Only call set/unset for all request data that differs.
    // acquireLocked takes both read locks in a deadlock-safe order.
    auto ret = folly::acquireLocked(
        as_const(newCtx->state_), as_const(curCtx->state_));
    auto& newLock = std::get<0>(ret);
    auto& curLock = std::get<1>(ret);
    auto& newData = newLock->callbackData_;
    auto& curData = curLock->callbackData_;
    // Unset callbacks present only in the outgoing context...
    exec_set_difference(
        curData, newData, [](RequestData* data) { data->onUnset(); });
    staticCtx.first = newCtx;
    staticCtx.second.store(newCtx->rootId_, std::memory_order_relaxed);
    // ...then set callbacks present only in the incoming context.
    exec_set_difference(
        newData, curData, [](RequestData* data) { data->onSet(); });
  } else {
    // At most one of the contexts exists: notify it wholesale.
    if (curCtx) {
      curCtx->onUnset();
    }
    staticCtx.first = newCtx;
    if (newCtx) {
      staticCtx.second.store(newCtx->rootId_, std::memory_order_relaxed);
      newCtx->onSet();
    } else {
      // No context installed: publish a zero root id.
      staticCtx.second.store(0, std::memory_order_relaxed);
    }
  }
  return curCtx;
}
FOLLY_ALWAYS_INLINE
/* static */ std::shared_ptr<RequestContext> RequestContext::setContextHazptr(
std::shared_ptr<RequestContext>& newCtx,
StaticContext& staticCtx) {
std::shared_ptr<RequestContext> prevCtx; std::shared_ptr<RequestContext> prevCtx;
auto curCtx = staticCtx.first.get(); RequestContext* curCtx = staticCtx.first.get();
bool checkCur = curCtx && curCtx->stateHazptr_.combined(); bool checkCur = curCtx && curCtx->state_.combined();
bool checkNew = newCtx && newCtx->stateHazptr_.combined(); bool checkNew = newCtx && newCtx->state_.combined();
if (checkCur && checkNew) { if (checkCur && checkNew) {
hazptr_array<2> h; hazptr_array<2> h;
auto curc = h[0].get_protected(curCtx->stateHazptr_.combined_); auto curc = h[0].get_protected(curCtx->state_.combined_);
auto newc = h[1].get_protected(newCtx->stateHazptr_.combined_); auto newc = h[1].get_protected(newCtx->state_.combined_);
auto& curcb = curc->callbackData_; auto& curcb = curc->callbackData_;
auto& newcb = newc->callbackData_; auto& newcb = newc->callbackData_;
for (auto it = curcb.begin(); it != curcb.end(); ++it) { for (auto it = curcb.begin(); it != curcb.end(); ++it) {
...@@ -819,14 +605,14 @@ FOLLY_ALWAYS_INLINE ...@@ -819,14 +605,14 @@ FOLLY_ALWAYS_INLINE
} }
} else { } else {
if (curCtx) { if (curCtx) {
curCtx->stateHazptr_.onUnset(); curCtx->state_.onUnset();
} }
prevCtx = std::move(staticCtx.first); prevCtx = std::move(staticCtx.first);
staticCtx.first = std::move(newCtx); staticCtx.first = std::move(newCtx);
if (staticCtx.first) { if (staticCtx.first) {
staticCtx.second.store( staticCtx.second.store(
staticCtx.first->rootId_, std::memory_order_relaxed); staticCtx.first->rootId_, std::memory_order_relaxed);
staticCtx.first->stateHazptr_.onSet(); staticCtx.first->state_.onSet();
} else { } else {
staticCtx.second.store(0, std::memory_order_relaxed); staticCtx.second.store(0, std::memory_order_relaxed);
} }
......
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
#include <folly/Synchronized.h> #include <folly/Synchronized.h>
#include <folly/container/F14Map.h> #include <folly/container/F14Map.h>
#include <folly/portability/GFlags.h>
#include <folly/sorted_vector_types.h> #include <folly/sorted_vector_types.h>
#include <folly/synchronization/Hazptr.h> #include <folly/synchronization/Hazptr.h>
...@@ -27,8 +26,6 @@ ...@@ -27,8 +26,6 @@
#include <mutex> #include <mutex>
#include <string> #include <string>
DECLARE_bool(folly_reqctx_use_hazptr);
namespace folly { namespace folly {
/* /*
...@@ -72,18 +69,6 @@ struct hash<folly::RequestToken> { ...@@ -72,18 +69,6 @@ struct hash<folly::RequestToken> {
namespace folly { namespace folly {
// - A runtime gflag FLAGS_folly_reqctx_use_hazptr determines the
// implementation of RequestContext.
// - The flag false implementation uses sequential data structures
// protected by a read-write lock.
// - The flag true implementation uses single-writer multi-reader
// data structures protected by hazard pointers for readers and a
// lock for writers.
// - Each RequestContext instance contains a bool member useHazptr_
// (readable by a public member function useHazptr()) that indicates
// the implementation of the instance depending on the value of the
// gflag at instance construction time.
// Some request context that follows an async request through a process // Some request context that follows an async request through a process
// Everything in the context must be thread safe // Everything in the context must be thread safe
...@@ -120,7 +105,6 @@ class RequestData { ...@@ -120,7 +105,6 @@ class RequestData {
} }
private: private:
// Start shallow copy implementation details:
// For efficiency, RequestContext provides a raw ptr interface. // For efficiency, RequestContext provides a raw ptr interface.
// To support shallow copy, we need a shared ptr. // To support shallow copy, we need a shared ptr.
// To keep it as safe as possible (even if a raw ptr is passed back), // To keep it as safe as possible (even if a raw ptr is passed back),
...@@ -131,8 +115,8 @@ class RequestData { ...@@ -131,8 +115,8 @@ class RequestData {
static constexpr int kDeleteCount = 0x1; static constexpr int kDeleteCount = 0x1;
static constexpr int kClearCount = 0x1000; static constexpr int kClearCount = 0x1000;
// Reference-counting functions used by the hazptr-based implementation. // Reference-counting functions.
// Increment the reference count // Increment the reference count.
void acquireRef(); void acquireRef();
// Decrement the reference count. Clear only if last. // Decrement the reference count. Clear only if last.
void releaseRefClearOnly(); void releaseRefClearOnly();
...@@ -142,27 +126,7 @@ class RequestData { ...@@ -142,27 +126,7 @@ class RequestData {
void releaseRefClearDelete(); void releaseRefClearDelete();
void releaseRefClearDeleteSlow(); void releaseRefClearDeleteSlow();
// Unique ptr with custom destructor, decrement the counter
// and only free if 0
struct DestructPtr {
  void operator()(RequestData* ptr);
};
// Pseudo-shared pointer built on unique_ptr: copies increment
// keepAliveCounter_ via constructPtr; destruction decrements it via
// DestructPtr and deletes the pointee only when the count hits zero.
struct SharedPtr : public std::unique_ptr<RequestData, DestructPtr> {
  SharedPtr() = default;
  using std::unique_ptr<RequestData, DestructPtr>::unique_ptr;
  // Copying bumps the reference count on the pointee.
  SharedPtr(const SharedPtr& other) : SharedPtr(constructPtr(other.get())) {}
  SharedPtr& operator=(const SharedPtr& other) {
    return operator=(constructPtr(other.get()));
  }
  // Moves transfer ownership without touching the count.
  SharedPtr(SharedPtr&&) = default;
  SharedPtr& operator=(SharedPtr&&) = default;
};

// Initialize the pseudo-shared ptr, increment the counter
static SharedPtr constructPtr(RequestData* ptr);
std::atomic<int> keepAliveCounter_{0}; std::atomic<int> keepAliveCounter_{0};
// End shallow copy
}; };
// If you do not call create() to create a unique request context, // If you do not call create() to create a unique request context,
...@@ -260,11 +224,6 @@ class RequestContext { ...@@ -260,11 +224,6 @@ class RequestContext {
void onSet(); void onSet();
void onUnset(); void onUnset();
// useHazptr
FOLLY_ALWAYS_INLINE bool useHazptr() const {
return useHazptr_;
}
// The following API is used to pass the context through queues / threads. // The following API is used to pass the context through queues / threads.
// saveContext is called to get a shared_ptr to the context, and // saveContext is called to get a shared_ptr to the context, and
// setContext is used to reset it on the other side of the queue. // setContext is used to reset it on the other side of the queue.
...@@ -297,11 +256,7 @@ class RequestContext { ...@@ -297,11 +256,7 @@ class RequestContext {
private: private:
static StaticContext& getStaticContext(); static StaticContext& getStaticContext();
static std::shared_ptr<RequestContext> setContextHelper(
static std::shared_ptr<RequestContext> setContextLock(
std::shared_ptr<RequestContext>& newCtx,
StaticContext& staticCtx);
static std::shared_ptr<RequestContext> setContextHazptr(
std::shared_ptr<RequestContext>& newCtx, std::shared_ptr<RequestContext>& newCtx,
StaticContext& staticCtx); StaticContext& staticCtx);
...@@ -313,30 +268,18 @@ class RequestContext { ...@@ -313,30 +268,18 @@ class RequestContext {
// then return the previous context (so it can be reset later). // then return the previous context (so it can be reset later).
static std::shared_ptr<RequestContext> setShallowCopyContext(); static std::shared_ptr<RequestContext> setShallowCopyContext();
// Similar to setContextData, except it overwrites the data
// if already set (instead of warn + reset ptr).
void overwriteContextDataLock(
    const RequestToken& token,
    std::unique_ptr<RequestData> data);
// Convenience overload: builds a RequestToken from the string key.
void overwriteContextDataLock(
    const std::string& val,
    std::unique_ptr<RequestData> data) {
  overwriteContextDataLock(RequestToken(val), std::move(data));
}
// End shallow copy guard
// For functions with a parameter safe, if safe is true then the // For functions with a parameter safe, if safe is true then the
// caller guarantees that there are no concurrent readers or writers // caller guarantees that there are no concurrent readers or writers
// accessing the structure. // accessing the structure.
void overwriteContextDataHazptr( void overwriteContextData(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData> data, std::unique_ptr<RequestData> data,
bool safe = false); bool safe = false);
void overwriteContextDataHazptr( void overwriteContextData(
const std::string& val, const std::string& val,
std::unique_ptr<RequestData> data, std::unique_ptr<RequestData> data,
bool safe = false) { bool safe = false) {
overwriteContextDataHazptr(RequestToken(val), std::move(data), safe); overwriteContextData(RequestToken(val), std::move(data), safe);
} }
enum class DoSetBehaviour { enum class DoSetBehaviour {
...@@ -345,47 +288,23 @@ class RequestContext { ...@@ -345,47 +288,23 @@ class RequestContext {
OVERWRITE, OVERWRITE,
}; };
bool doSetContextDataLock( bool doSetContextDataHelper(
const RequestToken& token,
std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour);
// Convenience overload: builds a RequestToken from the string key and
// forwards to the RequestToken-based doSetContextDataLock.
bool doSetContextDataLock(
    const std::string& val,
    std::unique_ptr<RequestData>& data,
    DoSetBehaviour behaviour) {
  return doSetContextDataLock(RequestToken(val), data, behaviour);
}
bool doSetContextDataHazptr(
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData>& data, std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour, DoSetBehaviour behaviour,
bool safe = false); bool safe = false);
bool doSetContextDataHazptr( bool doSetContextDataHelper(
const std::string& val, const std::string& val,
std::unique_ptr<RequestData>& data, std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour, DoSetBehaviour behaviour,
bool safe = false) { bool safe = false) {
return doSetContextDataHazptr(RequestToken(val), data, behaviour, safe); return doSetContextDataHelper(RequestToken(val), data, behaviour, safe);
} }
// State implementation with sequential data structures protected by a
// read-write lock.
struct State {
  // This must be optimized for lookup, its hot path is getContextData.
  // Efficiency of copying the container also matters in
  // setShallowCopyContext.
  F14FastMap<RequestToken, RequestData::SharedPtr> requestData_;
  // This must be optimized for iteration, its hot path is setContext.
  // We also use the fact that it's ordered to efficiently compute
  // the difference with the previous context.
  sorted_vector_set<RequestData*> callbackData_;
};
folly::Synchronized<State> state_;
// State implementation with single-writer multi-reader data // State implementation with single-writer multi-reader data
// structures protected by hazard pointers for readers and a lock // structures protected by hazard pointers for readers and a lock
// for writers. // for writers.
struct StateHazptr { struct State {
// Hazard pointer-protected combined structure for request data // Hazard pointer-protected combined structure for request data
// and callbacks. // and callbacks.
struct Combined; struct Combined;
...@@ -393,12 +312,12 @@ class RequestContext { ...@@ -393,12 +312,12 @@ class RequestContext {
std::atomic<Combined*> combined_{nullptr}; std::atomic<Combined*> combined_{nullptr};
std::mutex mutex_; std::mutex mutex_;
StateHazptr(); State();
StateHazptr(const StateHazptr& o); State(const State& o);
StateHazptr(StateHazptr&&) = delete; State(State&&) = delete;
StateHazptr& operator=(const StateHazptr&) = delete; State& operator=(const State&) = delete;
StateHazptr& operator=(StateHazptr&&) = delete; State& operator=(State&&) = delete;
~StateHazptr(); ~State();
private: private:
friend class RequestContext; friend class RequestContext;
...@@ -439,9 +358,8 @@ class RequestContext { ...@@ -439,9 +358,8 @@ class RequestContext {
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData>& data, std::unique_ptr<RequestData>& data,
bool found); bool found);
}; // StateHazptr }; // State
StateHazptr stateHazptr_; State state_;
bool useHazptr_;
// Shallow copies keep a note of the root context // Shallow copies keep a note of the root context
intptr_t rootId_; intptr_t rootId_;
}; };
...@@ -499,23 +417,13 @@ struct ShallowCopyRequestContextScopeGuard { ...@@ -499,23 +417,13 @@ struct ShallowCopyRequestContextScopeGuard {
const RequestToken& token, const RequestToken& token,
std::unique_ptr<RequestData> data) std::unique_ptr<RequestData> data)
: ShallowCopyRequestContextScopeGuard() { : ShallowCopyRequestContextScopeGuard() {
auto ctx = RequestContext::get(); RequestContext::get()->overwriteContextData(token, std::move(data), true);
if (ctx->useHazptr()) {
ctx->overwriteContextDataHazptr(token, std::move(data), true);
} else {
ctx->overwriteContextDataLock(token, std::move(data));
}
} }
ShallowCopyRequestContextScopeGuard( ShallowCopyRequestContextScopeGuard(
const std::string& val, const std::string& val,
std::unique_ptr<RequestData> data) std::unique_ptr<RequestData> data)
: ShallowCopyRequestContextScopeGuard() { : ShallowCopyRequestContextScopeGuard() {
auto ctx = RequestContext::get(); RequestContext::get()->overwriteContextData(val, std::move(data), true);
if (ctx->useHazptr()) {
ctx->overwriteContextDataHazptr(val, std::move(data), true);
} else {
ctx->overwriteContextDataLock(val, std::move(data));
}
} }
~ShallowCopyRequestContextScopeGuard() { ~ShallowCopyRequestContextScopeGuard() {
......
...@@ -300,8 +300,8 @@ onSet 12 ns 12 ns 0 ns 12 ns ...@@ -300,8 +300,8 @@ onSet 12 ns 12 ns 0 ns 12 ns
onUnset 12 ns 12 ns 0 ns 12 ns onUnset 12 ns 12 ns 0 ns 12 ns
setContext 46 ns 44 ns 1 ns 42 ns setContext 46 ns 44 ns 1 ns 42 ns
RequestContextScopeGuard 113 ns 103 ns 3 ns 101 ns RequestContextScopeGuard 113 ns 103 ns 3 ns 101 ns
ShallowCopyRequestC...-replace 230 ns 221 ns 4 ns 217 ns ShallowCopyRequestC...-replace 213 ns 201 ns 5 ns 196 ns
ShallowCopyReq...-keep&replace 904 ns 893 ns 5 ns 886 ns ShallowCopyReq...-keep&replace 883 ns 835 ns 20 ns 814 ns
============================== 10 threads ============================== ============================== 10 threads ==============================
hasContextData 1 ns 1 ns 0 ns 1 ns hasContextData 1 ns 1 ns 0 ns 1 ns
getContextData 2 ns 1 ns 0 ns 1 ns getContextData 2 ns 1 ns 0 ns 1 ns
...@@ -309,7 +309,7 @@ onSet 2 ns 2 ns 0 ns 1 ns ...@@ -309,7 +309,7 @@ onSet 2 ns 2 ns 0 ns 1 ns
onUnset 2 ns 2 ns 0 ns 1 ns onUnset 2 ns 2 ns 0 ns 1 ns
setContext 11 ns 7 ns 2 ns 5 ns setContext 11 ns 7 ns 2 ns 5 ns
RequestContextScopeGuard 22 ns 15 ns 5 ns 11 ns RequestContextScopeGuard 22 ns 15 ns 5 ns 11 ns
ShallowCopyRequestC...-replace 51 ns 32 ns 11 ns 24 ns ShallowCopyRequestC...-replace 48 ns 30 ns 11 ns 21 ns
ShallowCopyReq...-keep&replace 102 ns 98 ns 2 ns 96 ns ShallowCopyReq...-keep&replace 98 ns 93 ns 2 ns 91 ns
======================================================================== ========================================================================
*/ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment