Commit 5d1b9736, authored by Yedidya Feldblum and committed by Facebook GitHub Bot

prefer thread_local over FOLLY_TLS in SharedMutex

Reviewed By: Orvid

Differential Revision: D27559485

fbshipit-source-id: be2db59ef6d9b4cf178216a4b8851e27e7ecef93
parent 9ce8cd46
...@@ -30,7 +30,6 @@ ...@@ -30,7 +30,6 @@
#include <folly/detail/Futex.h> #include <folly/detail/Futex.h>
#include <folly/portability/Asm.h> #include <folly/portability/Asm.h>
#include <folly/portability/SysResource.h> #include <folly/portability/SysResource.h>
#include <folly/synchronization/AtomicRef.h>
#include <folly/synchronization/SanitizeThread.h> #include <folly/synchronization/SanitizeThread.h>
#include <folly/system/ThreadId.h> #include <folly/system/ThreadId.h>
...@@ -239,14 +238,6 @@ ...@@ -239,14 +238,6 @@
// overwhelming majority of SharedMutex instances use write priority, we
// restrict the TSAN annotations to only SharedMutexWritePriority.
#ifndef FOLLY_SHAREDMUTEX_TLS
#if !FOLLY_MOBILE
#define FOLLY_SHAREDMUTEX_TLS FOLLY_TLS
#else
#define FOLLY_SHAREDMUTEX_TLS
#endif
#endif
namespace folly { namespace folly {
struct SharedMutexToken { struct SharedMutexToken {
...@@ -968,10 +959,20 @@ class SharedMutexImpl : std::conditional_t< ...@@ -968,10 +959,20 @@ class SharedMutexImpl : std::conditional_t<
static constexpr uintptr_t kTokenless = 0x1; static constexpr uintptr_t kTokenless = 0x1;
// This is the starting location for Token-less unlock_shared().
// Accessor for the starting slot used by token-less unlock_shared().
// On mobile builds (kIsMobile) a single process-wide atomic is shared by
// all threads; otherwise each thread gets its own thread_local copy.
// Returned by reference so callers can load/store with explicit memory
// orders. NOTE(review): assumes the enclosing class provides FOLLY_EXPORT /
// FOLLY_ALWAYS_INLINE and folly::kIsMobile — confirm against full header.
FOLLY_EXPORT FOLLY_ALWAYS_INLINE static std::atomic<uint32_t>&
tls_lastTokenlessSlot() {
  // Both candidates are function-local statics; only one is ever returned
  // for a given build configuration, since kIsMobile is a constant.
  static std::atomic<uint32_t> processWideSlot{};
  static thread_local std::atomic<uint32_t> perThreadSlot{};
  if (kIsMobile) {
    return processWideSlot;
  }
  return perThreadSlot;
}
// Last deferred reader slot used.
// Accessor for the last deferred-reader slot used. Mirrors
// tls_lastTokenlessSlot(): mobile builds share one process-wide atomic,
// all other builds use per-thread thread_local storage. Returned by
// reference so callers pick their own memory orders on load/store.
// NOTE(review): assumes FOLLY_EXPORT / FOLLY_ALWAYS_INLINE / kIsMobile
// come from the enclosing folly headers — confirm against full header.
FOLLY_EXPORT FOLLY_ALWAYS_INLINE static std::atomic<uint32_t>&
tls_lastDeferredReaderSlot() {
  // kIsMobile is a build-time constant, so exactly one of these statics
  // is ever handed out in a given configuration.
  static std::atomic<uint32_t> processWideSlot{};
  static thread_local std::atomic<uint32_t> perThreadSlot{};
  if (kIsMobile) {
    return processWideSlot;
  }
  return perThreadSlot;
}
// Only indexes divisible by kDeferredSeparationFactor are used. // Only indexes divisible by kDeferredSeparationFactor are used.
// If any of those elements points to a SharedMutexImpl, then it // If any of those elements points to a SharedMutexImpl, then it
...@@ -1612,38 +1613,6 @@ alignas(hardware_destructive_interference_size) typename SharedMutexImpl< ...@@ -1612,38 +1613,6 @@ alignas(hardware_destructive_interference_size) typename SharedMutexImpl<
[shared_mutex_detail::kMaxDeferredReadersAllocated * [shared_mutex_detail::kMaxDeferredReadersAllocated *
kDeferredSeparationFactor] = {}; kDeferredSeparationFactor] = {};
template <
bool ReaderPriority,
typename Tag_,
template <typename>
class Atom,
bool BlockImmediately,
bool AnnotateForThreadSanitizer,
bool TrackThreadId>
FOLLY_SHAREDMUTEX_TLS uint32_t SharedMutexImpl<
ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer,
TrackThreadId>::tls_lastTokenlessSlot = 0;
template <
bool ReaderPriority,
typename Tag_,
template <typename>
class Atom,
bool BlockImmediately,
bool AnnotateForThreadSanitizer,
bool TrackThreadId>
FOLLY_SHAREDMUTEX_TLS uint32_t SharedMutexImpl<
ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer,
TrackThreadId>::tls_lastDeferredReaderSlot = 0;
template < template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_, typename Tag_,
...@@ -1659,8 +1628,7 @@ bool SharedMutexImpl< ...@@ -1659,8 +1628,7 @@ bool SharedMutexImpl<
BlockImmediately, BlockImmediately,
AnnotateForThreadSanitizer, AnnotateForThreadSanitizer,
TrackThreadId>::tryUnlockTokenlessSharedDeferred() { TrackThreadId>::tryUnlockTokenlessSharedDeferred() {
auto bestSlot = auto bestSlot = tls_lastTokenlessSlot().load(std::memory_order_relaxed);
make_atomic_ref(tls_lastTokenlessSlot).load(std::memory_order_relaxed);
// use do ... while to avoid calling // use do ... while to avoid calling
// shared_mutex_detail::getMaxDeferredReaders() unless necessary // shared_mutex_detail::getMaxDeferredReaders() unless necessary
uint32_t i = 0; uint32_t i = 0;
...@@ -1669,8 +1637,7 @@ bool SharedMutexImpl< ...@@ -1669,8 +1637,7 @@ bool SharedMutexImpl<
auto slotValue = slotPtr->load(std::memory_order_relaxed); auto slotValue = slotPtr->load(std::memory_order_relaxed);
if (slotValue == tokenlessSlotValue() && if (slotValue == tokenlessSlotValue() &&
slotPtr->compare_exchange_strong(slotValue, 0)) { slotPtr->compare_exchange_strong(slotValue, 0)) {
make_atomic_ref(tls_lastTokenlessSlot) tls_lastTokenlessSlot().store(bestSlot ^ i, std::memory_order_relaxed);
.store(bestSlot ^ i, std::memory_order_relaxed);
return true; return true;
} }
++i; ++i;
...@@ -1703,8 +1670,8 @@ bool SharedMutexImpl< ...@@ -1703,8 +1670,8 @@ bool SharedMutexImpl<
return false; return false;
} }
uint32_t slot = make_atomic_ref(tls_lastDeferredReaderSlot) uint32_t slot =
.load(std::memory_order_relaxed); tls_lastDeferredReaderSlot().load(std::memory_order_relaxed);
uintptr_t slotValue = 1; // any non-zero value will do uintptr_t slotValue = 1; // any non-zero value will do
bool canAlreadyDefer = (state & kMayDefer) != 0; bool canAlreadyDefer = (state & kMayDefer) != 0;
...@@ -1728,8 +1695,7 @@ bool SharedMutexImpl< ...@@ -1728,8 +1695,7 @@ bool SharedMutexImpl<
slotValue = deferredReader(slot)->load(std::memory_order_relaxed); slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
if (slotValue == 0) { if (slotValue == 0) {
// found empty slot // found empty slot
make_atomic_ref(tls_lastDeferredReaderSlot) tls_lastDeferredReaderSlot().store(slot, std::memory_order_relaxed);
.store(slot, std::memory_order_relaxed);
break; break;
} }
} }
...@@ -1780,8 +1746,7 @@ bool SharedMutexImpl< ...@@ -1780,8 +1746,7 @@ bool SharedMutexImpl<
} }
if (token == nullptr) { if (token == nullptr) {
make_atomic_ref(tls_lastTokenlessSlot) tls_lastTokenlessSlot().store(slot, std::memory_order_relaxed);
.store(slot, std::memory_order_relaxed);
} }
if ((state & kMayDefer) != 0) { if ((state & kMayDefer) != 0) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment