Commit 2924934a authored by Kenny Yu's avatar Kenny Yu Committed by Facebook Github Bot

Annotate folly::SharedMutex with TSAN annotations

Summary:
This diff adds TSAN annotations to `folly::SharedMutex` to allow TSAN to find
lock inversions with this type of mutex. Note that only the WritePriority version
is annotated, and the ReadPriority version is not. See the comments in the source code
for an explanation of this.

Some notes about how the annotation was done:

- We always call the RELEASED annotations at the beginning of unlock(), and the ACQUIRED annotations at the end of lock().
  This prevents a problematic interleaving in which thread 1 has unlocked the mutex but, before its RELEASED annotation
  runs, another thread locks the mutex and reports ACQUIRED first.
- These annotations treat upgrade locks as equivalent to a shared lock. This prevents any false positives, and keeps
  the annotations simple.
- We need to keep the constructor for SharedMutex as `constexpr` to avoid static initialization bugs. As a result, we need
  to lazily annotate creation of the mutex. To do this, this adds an extra bit to the `SharedMutex` state to keep track if
  initialization has been called. In TSAN builds, we have an array of mutexes to enforce that initialization is called
  at most once.
- This diff introduces a new template param AnnotateForThreadSanitizer (it defaults to true only in TSAN builds of the
  write-priority variant, and to false otherwise). This allows users to use a new
  folly::SharedMutexSuppressTSAN version to avoid lock inversion detection if there are noisy lock inversions.
  However, this should be the exception and not the norm. In normal build modes, this is equivalent to a normal SharedMutex.

Reviewed By: nbronson

Differential Revision: D9677058

fbshipit-source-id: b0f5719a75024937fb81672435fb1c9802f255d7
parent e71ead7b
...@@ -20,4 +20,21 @@ namespace folly { ...@@ -20,4 +20,21 @@ namespace folly {
// Explicitly instantiate SharedMutex here: // Explicitly instantiate SharedMutex here:
template class SharedMutexImpl<true>; template class SharedMutexImpl<true>;
template class SharedMutexImpl<false>; template class SharedMutexImpl<false>;
namespace detail {
// Returns a guard serializing TSAN-annotation bookkeeping for the
// SharedMutex instance at `ptr`. Outside TSAN builds no exclusion is
// needed, so an empty (lock-free) guard is returned.
std::unique_lock<std::mutex> sharedMutexAnnotationGuard(void* ptr) {
  if (!folly::kIsSanitizeThread) {
    return std::unique_lock<std::mutex>();
  }
  // TSAN builds: hash the object address into a fixed pool of mutexes.
  // A prime pool size spreads addresses well enough that no more
  // complicated hash function is required.
  static constexpr std::size_t kNumAnnotationMutexes = 251;
  static std::array<std::mutex, kNumAnnotationMutexes> annotationMutexes{};
  auto bucket = reinterpret_cast<uintptr_t>(ptr) % kNumAnnotationMutexes;
  return std::unique_lock<std::mutex>(annotationMutexes[bucket]);
}
} // namespace detail
} // namespace folly } // namespace folly
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <thread> #include <thread>
#include <type_traits> #include <type_traits>
#include <folly/CPortability.h>
#include <folly/Likely.h> #include <folly/Likely.h>
#include <folly/concurrency/CacheLocality.h> #include <folly/concurrency/CacheLocality.h>
#include <folly/detail/Futex.h> #include <folly/detail/Futex.h>
...@@ -215,6 +216,26 @@ ...@@ -215,6 +216,26 @@
// SharedMutex's use of thread local storage is an optimization, so // SharedMutex's use of thread local storage is an optimization, so
// for the case where thread local storage is not supported, define it // for the case where thread local storage is not supported, define it
// away. // away.
// Note about TSAN (ThreadSanitizer): the SharedMutexWritePriority version
// (the default) of this mutex is annotated appropriately so that TSAN can
// perform lock inversion analysis. However, the SharedMutexReadPriority version
// is not annotated. This is because TSAN's lock order heuristic
// assumes that two calls to lock_shared must be ordered, which leads
// to too many false positives for the reader-priority case.
//
// Suppose thread A holds a SharedMutexWritePriority lock in shared mode and an
// independent thread B is waiting for exclusive access. Then a thread C's
// lock_shared can't proceed until A has released the lock. Discounting
// situations that never use exclusive mode (so no lock is necessary at all)
// this means that without higher-level reasoning it is not safe to ignore
// reader <-> reader interactions.
//
// This reasoning does not apply to SharedMutexReadPriority, because there are
// no actions by a thread B that can make C need to wait for A. Since the
// overwhelming majority of SharedMutex instances use write priority, we
// restrict the TSAN annotations to only SharedMutexWritePriority.
#ifndef FOLLY_SHAREDMUTEX_TLS #ifndef FOLLY_SHAREDMUTEX_TLS
#if !FOLLY_MOBILE #if !FOLLY_MOBILE
#define FOLLY_SHAREDMUTEX_TLS FOLLY_TLS #define FOLLY_SHAREDMUTEX_TLS FOLLY_TLS
...@@ -236,14 +257,23 @@ struct SharedMutexToken { ...@@ -236,14 +257,23 @@ struct SharedMutexToken {
uint16_t slot_; uint16_t slot_;
}; };
namespace detail {
// Returns a guard that gives permission for the current thread to
// call FOLLY_ANNOTATE_RWLOCK_CREATE and adjust the annotation bits
// in the SharedMutex at ptr.
std::unique_lock<std::mutex> sharedMutexAnnotationGuard(void* ptr);
} // namespace detail
template < template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_ = void, typename Tag_ = void,
template <typename> class Atom = std::atomic, template <typename> class Atom = std::atomic,
bool BlockImmediately = false> bool BlockImmediately = false,
bool AnnotateForThreadSanitizer = kIsSanitizeThread && !ReaderPriority>
class SharedMutexImpl { class SharedMutexImpl {
public: public:
static constexpr bool kReaderPriority = ReaderPriority; static constexpr bool kReaderPriority = ReaderPriority;
typedef Tag_ Tag; typedef Tag_ Tag;
typedef SharedMutexToken Token; typedef SharedMutexToken Token;
...@@ -281,7 +311,7 @@ class SharedMutexImpl { ...@@ -281,7 +311,7 @@ class SharedMutexImpl {
// if a futexWait fails to go to sleep because the value has been // if a futexWait fails to go to sleep because the value has been
// changed, we don't necessarily clean up the wait bits, so it is // changed, we don't necessarily clean up the wait bits, so it is
// possible they will be set here in a correct system // possible they will be set here in a correct system
assert((state & ~(kWaitingAny | kMayDefer)) == 0); assert((state & ~(kWaitingAny | kMayDefer | kAnnotationCreated)) == 0);
if ((state & kMayDefer) != 0) { if ((state & kMayDefer) != 0) {
for (uint32_t slot = 0; slot < kMaxDeferredReaders; ++slot) { for (uint32_t slot = 0; slot < kMaxDeferredReaders; ++slot) {
auto slotValue = deferredReader(slot)->load(std::memory_order_relaxed); auto slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
...@@ -289,37 +319,46 @@ class SharedMutexImpl { ...@@ -289,37 +319,46 @@ class SharedMutexImpl {
} }
} }
#endif #endif
annotateDestroy();
} }
void lock() { void lock() {
WaitForever ctx; WaitForever ctx;
(void)lockExclusiveImpl(kHasSolo, ctx); (void)lockExclusiveImpl(kHasSolo, ctx);
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_WRLOCK);
} }
bool try_lock() { bool try_lock() {
WaitNever ctx; WaitNever ctx;
return lockExclusiveImpl(kHasSolo, ctx); auto result = lockExclusiveImpl(kHasSolo, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_WRLOCK);
return result;
} }
template <class Rep, class Period> template <class Rep, class Period>
bool try_lock_for(const std::chrono::duration<Rep, Period>& duration) { bool try_lock_for(const std::chrono::duration<Rep, Period>& duration) {
WaitForDuration<Rep, Period> ctx(duration); WaitForDuration<Rep, Period> ctx(duration);
return lockExclusiveImpl(kHasSolo, ctx); auto result = lockExclusiveImpl(kHasSolo, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_WRLOCK);
return result;
} }
template <class Clock, class Duration> template <class Clock, class Duration>
bool try_lock_until( bool try_lock_until(
const std::chrono::time_point<Clock, Duration>& absDeadline) { const std::chrono::time_point<Clock, Duration>& absDeadline) {
WaitUntilDeadline<Clock, Duration> ctx{absDeadline}; WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
return lockExclusiveImpl(kHasSolo, ctx); auto result = lockExclusiveImpl(kHasSolo, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_WRLOCK);
return result;
} }
void unlock() { void unlock() {
annotateReleased(FOLLY_ANNOTATE_RWLOCK_WRLOCK);
// It is possible that we have a left-over kWaitingNotS if the last // It is possible that we have a left-over kWaitingNotS if the last
// unlock_shared() that let our matching lock() complete finished // unlock_shared() that let our matching lock() complete finished
// releasing before lock()'s futexWait went to sleep. Clean it up now // releasing before lock()'s futexWait went to sleep. Clean it up now
auto state = (state_ &= ~(kWaitingNotS | kPrevDefer | kHasE)); auto state = (state_ &= ~(kWaitingNotS | kPrevDefer | kHasE));
assert((state & ~kWaitingAny) == 0); assert((state & ~(kWaitingAny | kAnnotationCreated)) == 0);
wakeRegisteredWaiters(state, kWaitingE | kWaitingU | kWaitingS); wakeRegisteredWaiters(state, kWaitingE | kWaitingU | kWaitingS);
} }
...@@ -328,27 +367,35 @@ class SharedMutexImpl { ...@@ -328,27 +367,35 @@ class SharedMutexImpl {
void lock_shared() { void lock_shared() {
WaitForever ctx; WaitForever ctx;
(void)lockSharedImpl(nullptr, ctx); (void)lockSharedImpl(nullptr, ctx);
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
} }
void lock_shared(Token& token) { void lock_shared(Token& token) {
WaitForever ctx; WaitForever ctx;
(void)lockSharedImpl(&token, ctx); (void)lockSharedImpl(&token, ctx);
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
} }
bool try_lock_shared() { bool try_lock_shared() {
WaitNever ctx; WaitNever ctx;
return lockSharedImpl(nullptr, ctx); auto result = lockSharedImpl(nullptr, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
bool try_lock_shared(Token& token) { bool try_lock_shared(Token& token) {
WaitNever ctx; WaitNever ctx;
return lockSharedImpl(&token, ctx); auto result = lockSharedImpl(&token, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
template <class Rep, class Period> template <class Rep, class Period>
bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration) { bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration) {
WaitForDuration<Rep, Period> ctx(duration); WaitForDuration<Rep, Period> ctx(duration);
return lockSharedImpl(nullptr, ctx); auto result = lockSharedImpl(nullptr, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
template <class Rep, class Period> template <class Rep, class Period>
...@@ -356,14 +403,18 @@ class SharedMutexImpl { ...@@ -356,14 +403,18 @@ class SharedMutexImpl {
const std::chrono::duration<Rep, Period>& duration, const std::chrono::duration<Rep, Period>& duration,
Token& token) { Token& token) {
WaitForDuration<Rep, Period> ctx(duration); WaitForDuration<Rep, Period> ctx(duration);
return lockSharedImpl(&token, ctx); auto result = lockSharedImpl(&token, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
template <class Clock, class Duration> template <class Clock, class Duration>
bool try_lock_shared_until( bool try_lock_shared_until(
const std::chrono::time_point<Clock, Duration>& absDeadline) { const std::chrono::time_point<Clock, Duration>& absDeadline) {
WaitUntilDeadline<Clock, Duration> ctx{absDeadline}; WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
return lockSharedImpl(nullptr, ctx); auto result = lockSharedImpl(nullptr, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
template <class Clock, class Duration> template <class Clock, class Duration>
...@@ -371,10 +422,14 @@ class SharedMutexImpl { ...@@ -371,10 +422,14 @@ class SharedMutexImpl {
const std::chrono::time_point<Clock, Duration>& absDeadline, const std::chrono::time_point<Clock, Duration>& absDeadline,
Token& token) { Token& token) {
WaitUntilDeadline<Clock, Duration> ctx{absDeadline}; WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
return lockSharedImpl(&token, ctx); auto result = lockSharedImpl(&token, ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
void unlock_shared() { void unlock_shared() {
annotateReleased(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
auto state = state_.load(std::memory_order_acquire); auto state = state_.load(std::memory_order_acquire);
// kPrevDefer can only be set if HasE or BegunE is set // kPrevDefer can only be set if HasE or BegunE is set
...@@ -392,6 +447,8 @@ class SharedMutexImpl { ...@@ -392,6 +447,8 @@ class SharedMutexImpl {
} }
void unlock_shared(Token& token) { void unlock_shared(Token& token) {
annotateReleased(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
assert( assert(
token.type_ == Token::Type::INLINE_SHARED || token.type_ == Token::Type::INLINE_SHARED ||
token.type_ == Token::Type::DEFERRED_SHARED); token.type_ == Token::Type::DEFERRED_SHARED);
...@@ -406,6 +463,8 @@ class SharedMutexImpl { ...@@ -406,6 +463,8 @@ class SharedMutexImpl {
} }
void unlock_and_lock_shared() { void unlock_and_lock_shared() {
annotateReleased(FOLLY_ANNOTATE_RWLOCK_WRLOCK);
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
// We can't use state_ -=, because we need to clear 2 bits (1 of which // We can't use state_ -=, because we need to clear 2 bits (1 of which
// has an uncertain initial state) and set 1 other. We might as well // has an uncertain initial state) and set 1 other. We might as well
// clear the relevant wake bits at the same time. Note that since S // clear the relevant wake bits at the same time. Note that since S
...@@ -414,7 +473,8 @@ class SharedMutexImpl { ...@@ -414,7 +473,8 @@ class SharedMutexImpl {
// S) we need to wake E as well. // S) we need to wake E as well.
auto state = state_.load(std::memory_order_acquire); auto state = state_.load(std::memory_order_acquire);
do { do {
assert((state & ~(kWaitingAny | kPrevDefer)) == kHasE); assert(
(state & ~(kWaitingAny | kPrevDefer | kAnnotationCreated)) == kHasE);
} while (!state_.compare_exchange_strong( } while (!state_.compare_exchange_strong(
state, (state & ~(kWaitingAny | kPrevDefer | kHasE)) + kIncrHasS)); state, (state & ~(kWaitingAny | kPrevDefer | kHasE)) + kIncrHasS));
if ((state & (kWaitingE | kWaitingU | kWaitingS)) != 0) { if ((state & (kWaitingE | kWaitingU | kWaitingS)) != 0) {
...@@ -430,28 +490,37 @@ class SharedMutexImpl { ...@@ -430,28 +490,37 @@ class SharedMutexImpl {
void lock_upgrade() { void lock_upgrade() {
WaitForever ctx; WaitForever ctx;
(void)lockUpgradeImpl(ctx); (void)lockUpgradeImpl(ctx);
// For TSAN: treat upgrade locks as equivalent to read locks
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
} }
bool try_lock_upgrade() { bool try_lock_upgrade() {
WaitNever ctx; WaitNever ctx;
return lockUpgradeImpl(ctx); auto result = lockUpgradeImpl(ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
template <class Rep, class Period> template <class Rep, class Period>
bool try_lock_upgrade_for( bool try_lock_upgrade_for(
const std::chrono::duration<Rep, Period>& duration) { const std::chrono::duration<Rep, Period>& duration) {
WaitForDuration<Rep, Period> ctx(duration); WaitForDuration<Rep, Period> ctx(duration);
return lockUpgradeImpl(ctx); auto result = lockUpgradeImpl(ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
template <class Clock, class Duration> template <class Clock, class Duration>
bool try_lock_upgrade_until( bool try_lock_upgrade_until(
const std::chrono::time_point<Clock, Duration>& absDeadline) { const std::chrono::time_point<Clock, Duration>& absDeadline) {
WaitUntilDeadline<Clock, Duration> ctx{absDeadline}; WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
return lockUpgradeImpl(ctx); auto result = lockUpgradeImpl(ctx);
annotateTryAcquired(result, FOLLY_ANNOTATE_RWLOCK_RDLOCK);
return result;
} }
void unlock_upgrade() { void unlock_upgrade() {
annotateReleased(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
auto state = (state_ -= kHasU); auto state = (state_ -= kHasU);
assert((state & (kWaitingNotS | kHasSolo)) == 0); assert((state & (kWaitingNotS | kHasSolo)) == 0);
wakeRegisteredWaiters(state, kWaitingE | kWaitingU); wakeRegisteredWaiters(state, kWaitingE | kWaitingU);
...@@ -461,9 +530,13 @@ class SharedMutexImpl { ...@@ -461,9 +530,13 @@ class SharedMutexImpl {
// no waiting necessary, so waitMask is empty // no waiting necessary, so waitMask is empty
WaitForever ctx; WaitForever ctx;
(void)lockExclusiveImpl(0, ctx); (void)lockExclusiveImpl(0, ctx);
annotateReleased(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_WRLOCK);
} }
void unlock_upgrade_and_lock_shared() { void unlock_upgrade_and_lock_shared() {
// No need to annotate for TSAN here because we model upgrade and shared
// locks as the same.
auto state = (state_ -= kHasU - kIncrHasS); auto state = (state_ -= kHasU - kIncrHasS);
assert((state & (kWaitingNotS | kHasSolo)) == 0); assert((state & (kWaitingNotS | kHasSolo)) == 0);
wakeRegisteredWaiters(state, kWaitingE | kWaitingU); wakeRegisteredWaiters(state, kWaitingE | kWaitingU);
...@@ -475,12 +548,15 @@ class SharedMutexImpl { ...@@ -475,12 +548,15 @@ class SharedMutexImpl {
} }
void unlock_and_lock_upgrade() { void unlock_and_lock_upgrade() {
annotateReleased(FOLLY_ANNOTATE_RWLOCK_WRLOCK);
annotateAcquired(FOLLY_ANNOTATE_RWLOCK_RDLOCK);
// We can't use state_ -=, because we need to clear 2 bits (1 of // We can't use state_ -=, because we need to clear 2 bits (1 of
// which has an uncertain initial state) and set 1 other. We might // which has an uncertain initial state) and set 1 other. We might
// as well clear the relevant wake bits at the same time. // as well clear the relevant wake bits at the same time.
auto state = state_.load(std::memory_order_acquire); auto state = state_.load(std::memory_order_acquire);
while (true) { while (true) {
assert((state & ~(kWaitingAny | kPrevDefer)) == kHasE); assert(
(state & ~(kWaitingAny | kPrevDefer | kAnnotationCreated)) == kHasE);
auto after = auto after =
(state & ~(kWaitingNotS | kWaitingS | kPrevDefer | kHasE)) + kHasU; (state & ~(kWaitingNotS | kWaitingS | kPrevDefer | kHasE)) + kHasU;
if (state_.compare_exchange_strong(state, after)) { if (state_.compare_exchange_strong(state, after)) {
...@@ -593,6 +669,49 @@ class SharedMutexImpl { ...@@ -593,6 +669,49 @@ class SharedMutexImpl {
} }
}; };
// Lazily register this mutex with TSAN via RWLOCK_CREATE. Per the
// commit notes, the constructor must stay constexpr (to avoid static
// initialization bugs), so creation cannot be annotated there; the
// first annotated operation performs it here instead. Double-checked:
// a cheap atomic read first, then a re-check under the per-address
// guard so RWLOCK_CREATE runs at most once per instance.
void annotateLazyCreate() {
if (AnnotateForThreadSanitizer &&
(state_.load() & kAnnotationCreated) == 0) {
auto guard = detail::sharedMutexAnnotationGuard(this);
// Re-check under the guard: another thread may have completed
// creation between our unguarded load and acquiring the guard.
if ((state_.load() & kAnnotationCreated) == 0) {
state_.fetch_or(kAnnotationCreated);
// state_ is also mutated by lock/unlock paths that do not hold this
// guard; tell TSAN that races on the raw word itself are benign.
FOLLY_ANNOTATE_BENIGN_RACE_SIZED(&state_, sizeof(state_), "init TSAN");
FOLLY_ANNOTATE_RWLOCK_CREATE(this);
}
}
}
// Tell TSAN this rwlock is being destroyed. Creation may never have
// happened (e.g. an instance that was never locked), so it is forced
// first: RWLOCK_DESTROY needs a matching RWLOCK_CREATE.
void annotateDestroy() {
if (AnnotateForThreadSanitizer) {
annotateLazyCreate();
FOLLY_ANNOTATE_RWLOCK_DESTROY(this);
}
}
// Report an unconditional acquisition to TSAN. `w` is the lock mode
// (FOLLY_ANNOTATE_RWLOCK_WRLOCK or _RDLOCK). Called at the END of the
// lock operation — see the commit notes — so a concurrent unlocker's
// RELEASED annotation cannot be reordered after our ACQUIRED.
void annotateAcquired(long w) {
if (AnnotateForThreadSanitizer) {
annotateLazyCreate();
FOLLY_ANNOTATE_RWLOCK_ACQUIRED(this, w);
}
}
// Report the outcome of a try_lock-style operation to TSAN: an
// acquisition in mode `w` is recorded only when `result` is true.
// Lazy creation still runs either way so the instance is registered.
void annotateTryAcquired(bool result, long w) {
  if (!AnnotateForThreadSanitizer) {
    return;
  }
  annotateLazyCreate();
  if (result) {
    FOLLY_ANNOTATE_RWLOCK_ACQUIRED(this, w);
  }
}
// Report a release in mode `w` to TSAN. Called at the START of every
// unlock path (per the commit notes) so a contending thread cannot
// observe ACQUIRED before our RELEASED. A release implies a prior
// annotated acquire, hence creation must already have happened.
void annotateReleased(long w) {
if (AnnotateForThreadSanitizer) {
assert((state_.load() & kAnnotationCreated) != 0);
FOLLY_ANNOTATE_RWLOCK_RELEASED(this, w);
}
}
// 32 bits of state // 32 bits of state
Futex state_{}; Futex state_{};
...@@ -604,9 +723,15 @@ class SharedMutexImpl { ...@@ -604,9 +723,15 @@ class SharedMutexImpl {
// the first unlock_shared() is scanning. The former case is cleaned // the first unlock_shared() is scanning. The former case is cleaned
// up before we finish applying the locks. The latter case can persist // up before we finish applying the locks. The latter case can persist
// until destruction, when it is cleaned up. // until destruction, when it is cleaned up.
static constexpr uint32_t kIncrHasS = 1 << 10; static constexpr uint32_t kIncrHasS = 1 << 11;
static constexpr uint32_t kHasS = ~(kIncrHasS - 1); static constexpr uint32_t kHasS = ~(kIncrHasS - 1);
// Set if it a call to FOLLY_ANNOTATE_RWLOCK_CREATE has been
// completed for this instance. That annotation call (and setting
// this bit afterward) must be guarded by one of the mutexes in
// annotationCreationGuards.
static constexpr uint32_t kAnnotationCreated = 1 << 10;
// If false, then there are definitely no deferred read locks for this // If false, then there are definitely no deferred read locks for this
// instance. Cleared after initialization and when exclusively locked. // instance. Cleared after initialization and when exclusively locked.
static constexpr uint32_t kMayDefer = 1 << 9; static constexpr uint32_t kMayDefer = 1 << 9;
...@@ -1386,6 +1511,8 @@ class SharedMutexImpl { ...@@ -1386,6 +1511,8 @@ class SharedMutexImpl {
typedef SharedMutexImpl<true> SharedMutexReadPriority; typedef SharedMutexImpl<true> SharedMutexReadPriority;
typedef SharedMutexImpl<false> SharedMutexWritePriority; typedef SharedMutexImpl<false> SharedMutexWritePriority;
typedef SharedMutexWritePriority SharedMutex; typedef SharedMutexWritePriority SharedMutex;
typedef SharedMutexImpl<false, void, std::atomic, false, false>
SharedMutexSuppressTSAN;
// Prevent the compiler from instantiating these in other translation units. // Prevent the compiler from instantiating these in other translation units.
// They are instantiated once in SharedMutex.cpp // They are instantiated once in SharedMutex.cpp
...@@ -1396,38 +1523,60 @@ template < ...@@ -1396,38 +1523,60 @@ template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_, typename Tag_,
template <typename> class Atom, template <typename> class Atom,
bool BlockImmediately> bool BlockImmediately,
alignas(hardware_destructive_interference_size) bool AnnotateForThreadSanitizer>
typename SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>:: alignas(hardware_destructive_interference_size) typename SharedMutexImpl<
DeferredReaderSlot ReaderPriority,
SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>:: Tag_,
deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor] = {}; Atom,
BlockImmediately,
AnnotateForThreadSanitizer>::DeferredReaderSlot
SharedMutexImpl<
ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer>::deferredReaders
[kMaxDeferredReaders * kDeferredSeparationFactor] = {};
template < template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_, typename Tag_,
template <typename> class Atom, template <typename> class Atom,
bool BlockImmediately> bool BlockImmediately,
FOLLY_SHAREDMUTEX_TLS uint32_t bool AnnotateForThreadSanitizer>
SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>:: FOLLY_SHAREDMUTEX_TLS uint32_t SharedMutexImpl<
tls_lastTokenlessSlot = 0; ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer>::tls_lastTokenlessSlot = 0;
template < template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_, typename Tag_,
template <typename> class Atom, template <typename> class Atom,
bool BlockImmediately> bool BlockImmediately,
FOLLY_SHAREDMUTEX_TLS uint32_t bool AnnotateForThreadSanitizer>
SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>:: FOLLY_SHAREDMUTEX_TLS uint32_t SharedMutexImpl<
tls_lastDeferredReaderSlot = 0; ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer>::tls_lastDeferredReaderSlot = 0;
template < template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_, typename Tag_,
template <typename> class Atom, template <typename> class Atom,
bool BlockImmediately> bool BlockImmediately,
bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>:: bool AnnotateForThreadSanitizer>
tryUnlockTokenlessSharedDeferred() { bool SharedMutexImpl<
ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer>::tryUnlockTokenlessSharedDeferred() {
auto bestSlot = tls_lastTokenlessSlot; auto bestSlot = tls_lastTokenlessSlot;
for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) { for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
auto slotPtr = deferredReader(bestSlot ^ i); auto slotPtr = deferredReader(bestSlot ^ i);
...@@ -1445,9 +1594,15 @@ template < ...@@ -1445,9 +1594,15 @@ template <
bool ReaderPriority, bool ReaderPriority,
typename Tag_, typename Tag_,
template <typename> class Atom, template <typename> class Atom,
bool BlockImmediately> bool BlockImmediately,
bool AnnotateForThreadSanitizer>
template <class WaitContext> template <class WaitContext>
bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>:: bool SharedMutexImpl<
ReaderPriority,
Tag_,
Atom,
BlockImmediately,
AnnotateForThreadSanitizer>::
lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx) { lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx) {
while (true) { while (true) {
if (UNLIKELY((state & kHasE) != 0) && if (UNLIKELY((state & kHasE) != 0) &&
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <folly/portability/GTest.h> #include <folly/portability/GTest.h>
#include <folly/synchronization/RWSpinLock.h> #include <folly/synchronization/RWSpinLock.h>
#include <folly/test/DeterministicSchedule.h> #include <folly/test/DeterministicSchedule.h>
#include <folly/test/TestUtils.h>
using namespace folly; using namespace folly;
using namespace folly::test; using namespace folly::test;
...@@ -78,6 +79,7 @@ void runBasicTest() { ...@@ -78,6 +79,7 @@ void runBasicTest() {
TEST(SharedMutex, basic) { TEST(SharedMutex, basic) {
runBasicTest<SharedMutexReadPriority>(); runBasicTest<SharedMutexReadPriority>();
runBasicTest<SharedMutexWritePriority>(); runBasicTest<SharedMutexWritePriority>();
runBasicTest<SharedMutexSuppressTSAN>();
} }
template <typename Lock> template <typename Lock>
...@@ -145,6 +147,7 @@ void runBasicHoldersTest() { ...@@ -145,6 +147,7 @@ void runBasicHoldersTest() {
TEST(SharedMutex, basic_holders) { TEST(SharedMutex, basic_holders) {
runBasicHoldersTest<SharedMutexReadPriority>(); runBasicHoldersTest<SharedMutexReadPriority>();
runBasicHoldersTest<SharedMutexWritePriority>(); runBasicHoldersTest<SharedMutexWritePriority>();
runBasicHoldersTest<SharedMutexSuppressTSAN>();
} }
template <typename Lock> template <typename Lock>
...@@ -164,8 +167,12 @@ void runManyReadLocksTestWithTokens() { ...@@ -164,8 +167,12 @@ void runManyReadLocksTestWithTokens() {
} }
TEST(SharedMutex, many_read_locks_with_tokens) { TEST(SharedMutex, many_read_locks_with_tokens) {
// This test fails in an assertion in the TSAN library because there are too
// many mutexes
SKIP_IF(folly::kIsSanitizeThread);
runManyReadLocksTestWithTokens<SharedMutexReadPriority>(); runManyReadLocksTestWithTokens<SharedMutexReadPriority>();
runManyReadLocksTestWithTokens<SharedMutexWritePriority>(); runManyReadLocksTestWithTokens<SharedMutexWritePriority>();
runManyReadLocksTestWithTokens<SharedMutexSuppressTSAN>();
} }
template <typename Lock> template <typename Lock>
...@@ -183,8 +190,12 @@ void runManyReadLocksTestWithoutTokens() { ...@@ -183,8 +190,12 @@ void runManyReadLocksTestWithoutTokens() {
} }
TEST(SharedMutex, many_read_locks_without_tokens) { TEST(SharedMutex, many_read_locks_without_tokens) {
// This test fails in an assertion in the TSAN library because there are too
// many mutexes
SKIP_IF(folly::kIsSanitizeThread);
runManyReadLocksTestWithoutTokens<SharedMutexReadPriority>(); runManyReadLocksTestWithoutTokens<SharedMutexReadPriority>();
runManyReadLocksTestWithoutTokens<SharedMutexWritePriority>(); runManyReadLocksTestWithoutTokens<SharedMutexWritePriority>();
runManyReadLocksTestWithoutTokens<SharedMutexSuppressTSAN>();
} }
template <typename Lock> template <typename Lock>
...@@ -214,6 +225,7 @@ void runTimeoutInPastTest() { ...@@ -214,6 +225,7 @@ void runTimeoutInPastTest() {
TEST(SharedMutex, timeout_in_past) { TEST(SharedMutex, timeout_in_past) {
runTimeoutInPastTest<SharedMutexReadPriority>(); runTimeoutInPastTest<SharedMutexReadPriority>();
runTimeoutInPastTest<SharedMutexWritePriority>(); runTimeoutInPastTest<SharedMutexWritePriority>();
runTimeoutInPastTest<SharedMutexSuppressTSAN>();
} }
template <class Func> template <class Func>
...@@ -300,6 +312,7 @@ void runFailingTryTimeoutTest() { ...@@ -300,6 +312,7 @@ void runFailingTryTimeoutTest() {
TEST(SharedMutex, failing_try_timeout) { TEST(SharedMutex, failing_try_timeout) {
runFailingTryTimeoutTest<SharedMutexReadPriority>(); runFailingTryTimeoutTest<SharedMutexReadPriority>();
runFailingTryTimeoutTest<SharedMutexWritePriority>(); runFailingTryTimeoutTest<SharedMutexWritePriority>();
runFailingTryTimeoutTest<SharedMutexSuppressTSAN>();
} }
template <typename Lock> template <typename Lock>
...@@ -336,6 +349,7 @@ void runBasicUpgradeTest() { ...@@ -336,6 +349,7 @@ void runBasicUpgradeTest() {
TEST(SharedMutex, basic_upgrade_tests) { TEST(SharedMutex, basic_upgrade_tests) {
runBasicUpgradeTest<SharedMutexReadPriority>(); runBasicUpgradeTest<SharedMutexReadPriority>();
runBasicUpgradeTest<SharedMutexWritePriority>(); runBasicUpgradeTest<SharedMutexWritePriority>();
runBasicUpgradeTest<SharedMutexSuppressTSAN>();
} }
TEST(SharedMutex, read_has_prio) { TEST(SharedMutex, read_has_prio) {
...@@ -1071,17 +1085,26 @@ TEST(SharedMutex, deterministic_lost_wakeup_write_prio) { ...@@ -1071,17 +1085,26 @@ TEST(SharedMutex, deterministic_lost_wakeup_write_prio) {
} }
} }
// Tests run roughly an order of magnitude slower under TSAN; scale the
// repetition count down by 10x there so tests finish before timing out.
static std::size_t adjustReps(std::size_t reps) {
  return folly::kIsSanitizeThread ? reps / 10 : reps;
}
TEST(SharedMutex, mixed_mostly_write_read_prio) { TEST(SharedMutex, mixed_mostly_write_read_prio) {
for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) { for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) {
runMixed<atomic, SharedMutexReadPriority, TokenLocker>( runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
50000, 300, 0.9, false); adjustReps(50000), adjustReps(300), 0.9, false);
} }
} }
TEST(SharedMutex, mixed_mostly_write_write_prio) { TEST(SharedMutex, mixed_mostly_write_write_prio) {
for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) { for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) {
runMixed<atomic, SharedMutexWritePriority, TokenLocker>( runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
50000, 300, 0.9, false); adjustReps(50000), adjustReps(300), 0.9, false);
} }
} }
...@@ -1093,6 +1116,8 @@ TEST(SharedMutex, deterministic_all_ops_read_prio) { ...@@ -1093,6 +1116,8 @@ TEST(SharedMutex, deterministic_all_ops_read_prio) {
} }
TEST(SharedMutex, deterministic_all_ops_write_prio) { TEST(SharedMutex, deterministic_all_ops_write_prio) {
// This test fails in TSAN because of noisy lock ordering inversions.
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < 5; ++pass) { for (int pass = 0; pass < 5; ++pass) {
DSched sched(DSched::uniform(pass)); DSched sched(DSched::uniform(pass));
runAllAndValidate<DSharedMutexWritePriority, DeterministicAtomic>(1000, 8); runAllAndValidate<DSharedMutexWritePriority, DeterministicAtomic>(1000, 8);
...@@ -1106,6 +1131,8 @@ TEST(SharedMutex, all_ops_read_prio) { ...@@ -1106,6 +1131,8 @@ TEST(SharedMutex, all_ops_read_prio) {
} }
TEST(SharedMutex, all_ops_write_prio) { TEST(SharedMutex, all_ops_write_prio) {
// This test fails in TSAN because of noisy lock ordering inversions.
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < 5; ++pass) { for (int pass = 0; pass < 5; ++pass) {
runAllAndValidate<SharedMutexWritePriority, atomic>(100000, 32); runAllAndValidate<SharedMutexWritePriority, atomic>(100000, 32);
} }
...@@ -1227,6 +1254,9 @@ static void runRemoteUnlock( ...@@ -1227,6 +1254,9 @@ static void runRemoteUnlock(
} }
TEST(SharedMutex, deterministic_remote_write_prio) { TEST(SharedMutex, deterministic_remote_write_prio) {
// This test fails in an assertion in the TSAN library because there are too
// many mutexes
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < 1; ++pass) { for (int pass = 0; pass < 1; ++pass) {
DSched sched(DSched::uniform(pass)); DSched sched(DSched::uniform(pass));
runRemoteUnlock<DSharedMutexWritePriority, DeterministicAtomic>( runRemoteUnlock<DSharedMutexWritePriority, DeterministicAtomic>(
...@@ -1243,12 +1273,18 @@ TEST(SharedMutex, deterministic_remote_read_prio) { ...@@ -1243,12 +1273,18 @@ TEST(SharedMutex, deterministic_remote_read_prio) {
} }
TEST(SharedMutex, remote_write_prio) { TEST(SharedMutex, remote_write_prio) {
// This test fails in an assertion in the TSAN library because there are too
// many mutexes
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < 10; ++pass) { for (int pass = 0; pass < 10; ++pass) {
runRemoteUnlock<SharedMutexWritePriority, atomic>(100000, 0.1, 0.1, 5, 5); runRemoteUnlock<SharedMutexWritePriority, atomic>(100000, 0.1, 0.1, 5, 5);
} }
} }
TEST(SharedMutex, remote_read_prio) { TEST(SharedMutex, remote_read_prio) {
// This test fails in an assertion in the TSAN library because there are too
// many mutexes
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 100); ++pass) { for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 100); ++pass) {
runRemoteUnlock<SharedMutexReadPriority, atomic>(100000, 0.1, 0.1, 5, 5); runRemoteUnlock<SharedMutexReadPriority, atomic>(100000, 0.1, 0.1, 5, 5);
} }
...@@ -1343,6 +1379,9 @@ static void pthrd_rwlock_ping_pong(size_t n, size_t scale, size_t burnCount) { ...@@ -1343,6 +1379,9 @@ static void pthrd_rwlock_ping_pong(size_t n, size_t scale, size_t burnCount) {
} }
TEST(SharedMutex, deterministic_ping_pong_write_prio) { TEST(SharedMutex, deterministic_ping_pong_write_prio) {
// This test fails in TSAN because some mutexes are lock_shared() in one
// thread and unlock_shared() in a different thread.
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < 1; ++pass) { for (int pass = 0; pass < 1; ++pass) {
DSched sched(DSched::uniform(pass)); DSched sched(DSched::uniform(pass));
runPingPong<DSharedMutexWritePriority, DeterministicAtomic>(500, 0); runPingPong<DSharedMutexWritePriority, DeterministicAtomic>(500, 0);
...@@ -1357,6 +1396,9 @@ TEST(SharedMutex, deterministic_ping_pong_read_prio) { ...@@ -1357,6 +1396,9 @@ TEST(SharedMutex, deterministic_ping_pong_read_prio) {
} }
TEST(SharedMutex, ping_pong_write_prio) { TEST(SharedMutex, ping_pong_write_prio) {
// This test fails in TSAN because some mutexes are lock_shared() in one
// thread and unlock_shared() in a different thread.
SKIP_IF(folly::kIsSanitizeThread);
for (int pass = 0; pass < 1; ++pass) { for (int pass = 0; pass < 1; ++pass) {
runPingPong<SharedMutexWritePriority, atomic>(50000, 0); runPingPong<SharedMutexWritePriority, atomic>(50000, 0);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment