Commit 83bddb4d authored by Aaryaman Sagar, committed by Facebook Github Bot

Fix DistributedMutex TSAN failures for rocksdb

Summary:
The TSAN version used by RocksDB was not able to instrument atomic_fetch_set
and atomic_fetch_reset, and as a result the DistributedMutex tests were failing
under TSAN.
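
The fix avoids the x86 inline-assembly fast path under TSAN and falls back to
the fetch_or()/fetch_and() based implementation, which TSAN instruments
correctly. As a rough illustration (the name and signature below are
illustrative, not folly's exact code), a fetch_or-based fallback for
atomic_fetch_set looks roughly like this:
```
// Hedged sketch: a portable atomic_fetch_set built on fetch_or(), which TSAN
// instruments correctly, unlike hand-written x86 inline assembly.
// Name and signature are illustrative, not folly's exact implementation.
#include <atomic>
#include <cassert>
#include <cstddef>

template <typename Integer>
bool atomic_fetch_set_fallback(
    std::atomic<Integer>& atomic,
    std::size_t bit,
    std::memory_order mo) {
  assert(bit < (sizeof(Integer) * 8));
  auto mask = static_cast<Integer>(Integer{1} << bit);
  // fetch_or returns the previous value; the bit was set iff it was already 1
  return (atomic.fetch_or(mask, mo) & mask) != 0;
}
```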

TSAN also seems to have an issue when an exception_ptr is placed into an
aligned_storage_t instance. The program below, which is verifiably correct (I
think), fails with the same TSAN error (P78696506), so disable that test.

This is the code that triggers the same false positive:
```
namespace {
class ExceptionWithConstructionTrack : public std::exception {
 public:
  explicit ExceptionWithConstructionTrack(int id)
      : id_{folly::to<std::string>(id)}, constructionTrack_{id} {}

  const char* what() const noexcept override {
    return id_.c_str();
  }

 private:
  std::string id_;
  TestConstruction constructionTrack_;
};

// Producer side: construct an exception_ptr in-place in the raw aligned
// storage, then signal the consumer with a release store
template <typename Storage, typename Atomic>
void transferCurrentException(Storage& storage, Atomic& produced) {
  assert(std::current_exception());
  new (&storage) std::exception_ptr(std::current_exception());
  produced->store(true, std::memory_order_release);
}

void concurrentExceptionPropagationStress(
    int numThreads,
    std::chrono::milliseconds milliseconds) {
  auto&& stop = std::atomic<bool>{false};
  auto&& exceptions = std::vector<std::aligned_storage<48, 8>::type>{};
  auto&& produced = std::vector<std::unique_ptr<std::atomic<bool>>>{};
  auto&& consumed = std::vector<std::unique_ptr<std::atomic<bool>>>{};
  auto&& consumers = std::vector<std::thread>{};
  for (auto i = 0; i < numThreads; ++i) {
    produced.emplace_back(new std::atomic<bool>{false});
    consumed.emplace_back(new std::atomic<bool>{false});
    exceptions.push_back({});
  }

  auto producer = std::thread{[&]() {
    auto counter = std::vector<int>(numThreads, 0);
    for (auto i = 0; true; i = ((i + 1) % numThreads)) {
      try {
        throw ExceptionWithConstructionTrack{counter.at(i)++};
      } catch (...) {
        transferCurrentException(exceptions.at(i), produced.at(i));
      }

      while (!consumed.at(i)->load(std::memory_order_acquire)) {
        if (stop.load(std::memory_order_acquire)) {
          return;
        }
      }

      consumed.at(i)->store(false, std::memory_order_release);
    }
  }};

  for (auto i = 0; i < numThreads; ++i) {
    consumers.emplace_back([&, i]() {
      auto counter = 0;
      while (true) {
        while (!produced.at(i)->load(std::memory_order_acquire)) {
          if (stop.load(std::memory_order_acquire)) {
            return;
          }
        }
        produced.at(i)->store(false, std::memory_order_release);

        try {
          // Consumer side: launder the raw storage back into an
          // exception_ptr, move it out, destroy the in-place object, and
          // rethrow the moved-out copy
          auto storage = &exceptions.at(i);
          auto exc = folly::launder(
            reinterpret_cast<std::exception_ptr*>(storage));
          auto copy = std::move(*exc);
          exc->std::exception_ptr::~exception_ptr();
          std::rethrow_exception(std::move(copy));
        } catch (std::exception& exc) {
          auto value = std::stoi(exc.what());
          EXPECT_EQ(value, counter++);
        }

        consumed.at(i)->store(true, std::memory_order_release);
      }
    });
  }

  std::this_thread::sleep_for(milliseconds);
  stop.store(true);
  producer.join();
  for (auto& thread : consumers) {
    thread.join();
  }
}
} // namespace
```

Reviewed By: yfeldblum

Differential Revision: D16739396

fbshipit-source-id: 0eba191ffc07d2e64a3ea550c5fe9a7d01ce2f18
parent d5c4abb3
@@ -242,8 +242,9 @@ bool atomic_fetch_set(Atomic& atomic, std::size_t bit, std::memory_order mo) {
   static_assert(!std::is_const<Atomic>{}, "");
   assert(bit < (sizeof(Integer) * 8));
-  if (folly::kIsArchAmd64) {
-    // do the optimized thing on x86 builds
+  // do the optimized thing on x86 builds. Also, some versions of TSAN do not
+  // properly instrument the inline assembly, so avoid it when TSAN is enabled
+  if (folly::kIsArchAmd64 && !folly::kIsSanitizeThread) {
     return detail::atomic_fetch_set_x86(atomic, bit, mo);
   } else {
     // otherwise default to the default implementation using fetch_or()
@@ -258,8 +259,9 @@ bool atomic_fetch_reset(Atomic& atomic, std::size_t bit, std::memory_order mo) {
   static_assert(!std::is_const<Atomic>{}, "");
   assert(bit < (sizeof(Integer) * 8));
-  if (folly::kIsArchAmd64) {
-    // do the optimized thing on x86 builds
+  // do the optimized thing on x86 builds. Also, some versions of TSAN do not
+  // properly instrument the inline assembly, so avoid it when TSAN is enabled
+  if (folly::kIsArchAmd64 && !folly::kIsSanitizeThread) {
     return detail::atomic_fetch_reset_x86(atomic, bit, mo);
   } else {
     // otherwise default to the default implementation using fetch_and()
@@ -1793,6 +1793,10 @@ template <template <typename> class Atom = std::atomic>
 void concurrentExceptionPropagationStress(
     int numThreads,
     std::chrono::milliseconds t) {
+  // this test passes normally and under recent or Clang TSAN, but inexplicably
+  // TSAN-aborts under some older non-Clang TSAN versions
+  SKIP_IF(folly::kIsSanitizeThread && !folly::kIsClang);
   TestConstruction::reset();
   auto&& mutex = detail::distributed_mutex::DistributedMutex<Atom>{};
   auto&& threads = std::vector<std::thread>{};
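
For reference, a hedged sketch of how constexpr flags like
folly::kIsSanitizeThread and folly::kIsClang are commonly derived from
compiler feature macros; this mirrors the usual pattern rather than folly's
exact definitions:
```
// Hedged sketch: typical compile-time detection of TSAN and of Clang.
// Macro/constant names here are illustrative, not folly's exact code.
#if defined(__has_feature)
#define SKETCH_HAS_FEATURE(x) __has_feature(x)
#else
#define SKETCH_HAS_FEATURE(x) 0
#endif

// Clang exposes TSAN via __has_feature(thread_sanitizer); GCC defines
// __SANITIZE_THREAD__ when building with -fsanitize=thread
#if SKETCH_HAS_FEATURE(thread_sanitizer) || defined(__SANITIZE_THREAD__)
constexpr bool kIsSanitizeThread = true;
#else
constexpr bool kIsSanitizeThread = false;
#endif

#if defined(__clang__)
constexpr bool kIsClang = true;
#else
constexpr bool kIsClang = false;
#endif
```
Gating on constexpr bools rather than on the preprocessor keeps both branches
compiling in every configuration, which is the pattern used by the
`if (folly::kIsArchAmd64 && !folly::kIsSanitizeThread)` checks above.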