Commit aeaa6cf9 authored by Marc Celani, committed by Facebook Github Bot

Performance improvements for BufferedStat and DigestBuilder

Summary:
* Relax some atomic loads in BufferedStat
* Cache which CPU-local buffer to use in a thread-local, similar to SharedMutex

Reviewed By: djwatson

Differential Revision: D7986764

fbshipit-source-id: 581266abc60e3f23f24181264da163baee9fce3f
parent 7b7397fe
@@ -36,7 +36,7 @@ BufferedStat<DigestT, ClockT>::BufferedStat(
 template <typename DigestT, typename ClockT>
 void BufferedStat<DigestT, ClockT>::append(double value, TimePoint now) {
-  if (UNLIKELY(now > expiry_.load(std::memory_order_acquire).tp)) {
+  if (UNLIKELY(now > expiry_.load(std::memory_order_relaxed).tp)) {
     std::unique_lock<SharedMutex> g(mutex_, std::try_to_lock_t());
     if (g.owns_lock()) {
       doUpdate(now, g);
@@ -69,10 +69,10 @@ void BufferedStat<DigestT, ClockT>::doUpdate(
     const std::unique_lock<SharedMutex>& g) {
   DCHECK(g.owns_lock());
   // Check that no other thread has performed the slide after the check
-  auto oldExpiry = expiry_.load(std::memory_order_acquire).tp;
+  auto oldExpiry = expiry_.load(std::memory_order_relaxed).tp;
   if (now > oldExpiry) {
     now = roundUp(now);
-    expiry_.store(TimePointHolder(now), std::memory_order_release);
+    expiry_.store(TimePointHolder(now), std::memory_order_relaxed);
     onNewDigest(digestBuilder_.build(), now, oldExpiry, g);
   }
 }
...
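Why the relaxation is safe: expiry_ is only a fast-path hint. A thread that reads a stale value merely takes (or skips) the try-lock path once, and doUpdate re-checks expiry_ after acquiring mutex_, so the mutex already provides the ordering the acquire/release pair used to. A minimal self-contained sketch of this double-checked pattern, with hypothetical names rather than folly's actual types:

```cpp
#include <atomic>
#include <mutex>

// Sketch only: a periodically sliding window guarded by a mutex, where a
// relaxed atomic serves as a cheap "window probably expired" hint.
class SlidingWindow {
 public:
  void append(double value, long now) {
    // Relaxed load: a stale value only costs one wasted (or skipped)
    // try-lock; correctness is re-established under the mutex below.
    if (now > expiry_.load(std::memory_order_relaxed)) {
      std::unique_lock<std::mutex> g(mutex_, std::try_to_lock);
      if (g.owns_lock()) {
        slide(now);
      }
      // If try_lock failed, another thread is sliding; just fall through.
    }
    // ... buffer `value` into the current window (elided) ...
    (void)value;
  }

 private:
  // Called with mutex_ held.
  void slide(long now) {
    // Re-check: another thread may have slid the window between our
    // relaxed read and our acquisition of the mutex.
    long oldExpiry = expiry_.load(std::memory_order_relaxed);
    if (now > oldExpiry) {
      expiry_.store(now + kWindowSize, std::memory_order_relaxed);
      // ... publish the finished window; mutex_ itself orders this
      // against every other slide (elided) ...
    }
  }

  static constexpr long kWindowSize = 1000;
  std::mutex mutex_;
  std::atomic<long> expiry_{0};
};
```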
@@ -25,6 +25,8 @@
 namespace folly {
 namespace detail {
 
+static FOLLY_TLS uint32_t tls_lastCpuBufferSlot = 0;
+
 template <typename DigestT>
 DigestBuilder<DigestT>::DigestBuilder(size_t bufferSize, size_t digestSize)
     : bufferSize_(bufferSize), digestSize_(digestSize) {
@@ -71,17 +73,22 @@ DigestT DigestBuilder<DigestT>::build() {
 template <typename DigestT>
 void DigestBuilder<DigestT>::append(double value) {
-  auto which = AccessSpreader<>::current(cpuLocalBuffers_.size());
-  auto& cpuLocalBuf = cpuLocalBuffers_[which];
-  SpinLockGuard g(cpuLocalBuf.mutex);
-  cpuLocalBuf.buffer.push_back(value);
-  if (cpuLocalBuf.buffer.size() == bufferSize_) {
-    std::sort(cpuLocalBuf.buffer.begin(), cpuLocalBuf.buffer.end());
-    if (!cpuLocalBuf.digest) {
-      cpuLocalBuf.digest = std::make_unique<DigestT>(digestSize_);
+  auto& which = tls_lastCpuBufferSlot;
+  auto cpuLocalBuf = &cpuLocalBuffers_[which];
+  std::unique_lock<SpinLock> g(cpuLocalBuf->mutex, std::try_to_lock_t());
+  if (!g.owns_lock()) {
+    which = AccessSpreader<>::current(cpuLocalBuffers_.size());
+    cpuLocalBuf = &cpuLocalBuffers_[which];
+    g = std::unique_lock<SpinLock>(cpuLocalBuf->mutex);
+  }
+  cpuLocalBuf->buffer.push_back(value);
+  if (cpuLocalBuf->buffer.size() == bufferSize_) {
+    std::sort(cpuLocalBuf->buffer.begin(), cpuLocalBuf->buffer.end());
+    if (!cpuLocalBuf->digest) {
+      cpuLocalBuf->digest = std::make_unique<DigestT>(digestSize_);
     }
-    *cpuLocalBuf.digest = cpuLocalBuf.digest->merge(cpuLocalBuf.buffer);
-    cpuLocalBuf.buffer.clear();
+    *cpuLocalBuf->digest = cpuLocalBuf->digest->merge(cpuLocalBuf->buffer);
+    cpuLocalBuf->buffer.clear();
   }
 }
...
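The DigestBuilder change applies SharedMutex's slot-caching trick to the striped buffers: remember in a thread-local which stripe was locked without contention last time, try-lock it first, and only fall back to AccessSpreader (folly's CPU-locality-aware stripe picker) when that fails. A self-contained sketch of the pattern; the names are hypothetical, and a thread-id hash stands in for AccessSpreader:

```cpp
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// Sketch only: a striped counter using try-lock with a cached stripe.
class StripedCounter {
 public:
  explicit StripedCounter(size_t numStripes) : stripes_(numStripes) {}

  void add(long n) {
    // Shared across instances, like tls_lastCpuBufferSlot; clamp in case
    // an earlier instance had more stripes.
    thread_local size_t cachedSlot = 0;
    if (cachedSlot >= stripes_.size()) {
      cachedSlot = 0;
    }
    // Fast path: the stripe that was uncontended last time.
    std::unique_lock<std::mutex> g(stripes_[cachedSlot].mutex,
                                   std::try_to_lock);
    if (!g.owns_lock()) {
      // Contended: re-pick a stripe (stand-in policy) and lock for real.
      cachedSlot = std::hash<std::thread::id>{}(std::this_thread::get_id()) %
          stripes_.size();
      g = std::unique_lock<std::mutex>(stripes_[cachedSlot].mutex);
    }
    stripes_[cachedSlot].value += n;
  }

 private:
  struct Stripe {
    std::mutex mutex;
    long value = 0;
  };
  std::vector<Stripe> stripes_;
};
```

Under low contention every append then hits the same stripe with a single uncontended try_lock, and the stripe-picking lookup disappears from the hot path entirely.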
@@ -20,6 +20,8 @@
 #include <condition_variable>
 #include <thread>
 
+#include <boost/thread/barrier.hpp>
+
 #include <folly/Benchmark.h>
 #include <folly/Range.h>
 #include <folly/portability/GFlags.h>
@@ -46,42 +48,23 @@ unsigned int append(unsigned int iters, size_t bufSize, size_t nThreads) {
   iters = 1000000;
   auto buffer = std::make_shared<DigestBuilder<FreeDigest>>(bufSize, 100);
 
-  std::atomic<size_t> numDone{0};
-  std::mutex m;
-  size_t numWaiters = 0;
-  std::condition_variable cv;
+  auto barrier = std::make_shared<boost::barrier>(nThreads + 1);
 
   std::vector<std::thread> threads;
   threads.reserve(nThreads);
   BENCHMARK_SUSPEND {
     for (size_t i = 0; i < nThreads; ++i) {
       threads.emplace_back([&]() {
-        {
-          std::unique_lock<std::mutex> g(m);
-          ++numWaiters;
-          cv.wait(g);
-        }
+        barrier->wait();
         for (size_t iter = 0; iter < iters; ++iter) {
           buffer->append(iter);
         }
-        ++numDone;
+        barrier->wait();
       });
     }
-    while (true) {
-      {
-        std::unique_lock<std::mutex> g(m);
-        if (numWaiters < nThreads) {
-          continue;
-        }
-      }
-      cv.notify_all();
-      break;
-    }
-  }
-  while (numDone < nThreads) {
+    barrier->wait();
   }
+  barrier->wait();
 
   BENCHMARK_SUSPEND {
     for (auto& thread : threads) {
@@ -110,19 +93,19 @@ BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(append, 10000x32, 10000, 32)
 * ============================================================================
 * folly/stats/test/DigestBuilderBenchmark.cpp     relative  time/iter  iters/s
 * ============================================================================
- * append(1000x1)                                             39.55ns   25.28M
- * append(1000x2)                                   97.52%    40.56ns   24.66M
- * append(1000x4)                                   95.11%    41.59ns   24.05M
- * append(1000x8)                                   92.80%    42.62ns   23.46M
- * append(1000x16)                                  49.93%    79.21ns   12.62M
- * append(1000x32)                                  35.70%   110.78ns    9.03M
+ * append(1000x1)                                             25.90ns   38.61M
+ * append(1000x2)                                   99.27%    26.09ns   38.33M
+ * append(1000x4)                                   99.82%    25.95ns   38.54M
+ * append(1000x8)                                   98.54%    26.28ns   38.05M
+ * append(1000x16)                                  84.07%    30.81ns   32.46M
+ * append(1000x32)                                  82.58%    31.36ns   31.88M
 * ----------------------------------------------------------------------------
- * append(10000x1)                                            41.01ns   24.38M
- * append(10000x2)                                   98.91%    41.46ns   24.12M
- * append(10000x4)                                   94.80%    43.26ns   23.12M
- * append(10000x8)                                   92.04%    44.56ns   22.44M
- * append(10000x16)                                  49.19%    83.37ns   12.00M
- * append(10000x32)                                  33.38%   122.84ns    8.14M
+ * append(10000x1)                                            25.34ns   39.46M
+ * append(10000x2)                                   99.75%    25.41ns   39.36M
+ * append(10000x4)                                   99.24%    25.54ns   39.16M
+ * append(10000x8)                                  106.97%    23.69ns   42.21M
+ * append(10000x16)                                  87.82%    28.86ns   34.65M
+ * append(10000x32)                                  72.99%    34.72ns   28.80M
 * ============================================================================
 */
...
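The benchmark delta above is the point of the commit: at 16 and 32 threads the per-append cost used to blow up (79-123ns, under 50% relative throughput), while the cached-slot version stays within roughly 73-88% of the single-threaded rate. The harness change is incidental: a boost::barrier with nThreads + 1 participants replaces the hand-rolled condition-variable rendezvous and the numDone spin-wait, keeping thread startup and join outside the timed region. A standalone sketch of that rendezvous pattern, with a hypothetical work() standing in for buffer->append:

```cpp
#include <cstdio>
#include <thread>
#include <vector>

#include <boost/thread/barrier.hpp>

// Hypothetical payload standing in for buffer->append(iter).
static void work(size_t iter) { (void)iter; }

int main() {
  const size_t nThreads = 4;
  const size_t iters = 1000000;

  // nThreads workers + 1 coordinator all meet at each barrier.
  boost::barrier barrier(nThreads + 1);

  std::vector<std::thread> threads;
  threads.reserve(nThreads);
  for (size_t i = 0; i < nThreads; ++i) {
    threads.emplace_back([&] {
      barrier.wait(); // rendezvous #1: wait until everyone is spawned
      for (size_t iter = 0; iter < iters; ++iter) {
        work(iter);
      }
      barrier.wait(); // rendezvous #2: report completion
    });
  }

  barrier.wait(); // start: all workers are ready, begin the timed region
  // ... timed region: workers run their loops ...
  barrier.wait(); // stop: every worker has finished its loop

  for (auto& t : threads) {
    t.join();
  }
  std::puts("done");
}
```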