Commit 64072ab5 authored by Stepan Palamarchuk's avatar Stepan Palamarchuk Committed by Facebook Github Bot

Improve QueueAppender/IOBufQueue performance

Summary:
Currently QueueAppender needs to follow a chain of 4 indirections (QueueAppender->IOBufQueue->IOBuf(head)->IOBuf(tail)->data).
This diff adds a cache of writable tail range in IOBufQueue and allows it to be placed externally.

Before this diff on hot path QueueAppender::write<signed char> was ~167 bytes of code (with majority being actually executed), after this diff it's down to ~30 bytes:
  0x0000000000419d10 <+0>:     mov    (%rdi),%rax
  0x0000000000419d13 <+3>:     cmp    %rax,0x8(%rdi)
  0x0000000000419d17 <+7>:     je     0x419d28 <folly::io::QueueAppender::write<signed char>(signed char)+24>
  0x0000000000419d19 <+9>:     mov    %sil,(%rax)
  0x0000000000419d1c <+12>:    addq   $0x1,(%rdi)
  0x0000000000419d20 <+16>:    retq
  0x0000000000419d21 <+17>:    nopl   0x0(%rax)
  0x0000000000419d28 <+24>:    movsbl %sil,%esi
  0x0000000000419d2c <+28>:    jmpq   0x419ca0 <folly::io::QueueAppender::writeSlow<signed char>(signed char)>

With this diff, Thrift serialization performance is improved up to 3x with production workloads (2x for compact, 3x for binary).

Thrift benchmark output:
Before:
  ============================================================================
  thrift/lib/cpp2/test/ProtocolBench.cpp          relative  time/iter  iters/s
  ============================================================================
  BinaryProtocol_write_Empty                                  58.05ns   17.23M
  BinaryProtocol_write_SmallInt                               75.17ns   13.30M
  BinaryProtocol_write_BigInt                                 74.60ns   13.41M
  BinaryProtocol_write_SmallString                            85.12ns   11.75M
  BinaryProtocol_write_BigString                             802.96ns    1.25M
  BinaryProtocol_write_BigBinary                             174.69ns    5.72M
  BinaryProtocol_write_LargeBinary                           171.81ns    5.82M
  BinaryProtocol_write_Mixed                                 130.97ns    7.64M
  BinaryProtocol_write_SmallListInt                          123.99ns    8.06M
  BinaryProtocol_write_BigListInt                             40.72us   24.56K
  BinaryProtocol_write_BigListMixed                          784.78us    1.27K
  BinaryProtocol_write_LargeListMixed                         98.84ms    10.12
  CompactProtocol_write_Empty                                 64.38ns   15.53M
  CompactProtocol_write_SmallInt                              76.74ns   13.03M
  CompactProtocol_write_BigInt                                83.62ns   11.96M
  CompactProtocol_write_SmallString                           86.05ns   11.62M
  CompactProtocol_write_BigString                            786.18ns    1.27M
  CompactProtocol_write_BigBinary                            184.91ns    5.41M
  CompactProtocol_write_LargeBinary                          182.12ns    5.49M
  CompactProtocol_write_Mixed                                120.89ns    8.27M
  CompactProtocol_write_SmallListInt                         119.74ns    8.35M
  CompactProtocol_write_BigListInt                            43.76us   22.85K
  CompactProtocol_write_BigListMixed                         595.90us    1.68K
  CompactProtocol_write_LargeListMixed                        72.80ms    13.74
  ============================================================================
After:
  ============================================================================
  thrift/lib/cpp2/test/ProtocolBench.cpp          relative  time/iter  iters/s
  ============================================================================
  BinaryProtocol_write_Empty                                  65.97ns   15.16M
  BinaryProtocol_write_SmallInt                               72.31ns   13.83M
  BinaryProtocol_write_BigInt                                 72.67ns   13.76M
  BinaryProtocol_write_SmallString                            77.56ns   12.89M
  BinaryProtocol_write_BigString                             782.07ns    1.28M
  BinaryProtocol_write_BigBinary                             179.69ns    5.57M
  BinaryProtocol_write_LargeBinary                           182.62ns    5.48M
  BinaryProtocol_write_Mixed                                  91.62ns   10.92M
  BinaryProtocol_write_SmallListInt                           96.22ns   10.39M
  BinaryProtocol_write_BigListInt                             19.65us   50.90K
  BinaryProtocol_write_BigListMixed                          245.69us    4.07K
  BinaryProtocol_write_LargeListMixed                         46.56ms    21.48
  CompactProtocol_write_Empty                                 74.44ns   13.43M
  CompactProtocol_write_SmallInt                              80.35ns   12.45M
  CompactProtocol_write_BigInt                                85.30ns   11.72M
  CompactProtocol_write_SmallString                           82.61ns   12.10M
  CompactProtocol_write_BigString                            784.77ns    1.27M
  CompactProtocol_write_BigBinary                            193.20ns    5.18M
  CompactProtocol_write_LargeBinary                          192.53ns    5.19M
  CompactProtocol_write_Mixed                                 99.78ns   10.02M
  CompactProtocol_write_SmallListInt                         104.77ns    9.54M
  CompactProtocol_write_BigListInt                            25.62us   39.03K
  CompactProtocol_write_BigListMixed                         272.42us    3.67K
  CompactProtocol_write_LargeListMixed                        38.32ms    26.09
  ============================================================================

QueueAppender Benchmark output (although not very representative due to a tight loop):
Before:
  ============================================================================
  folly/io/test/QueueAppenderBenchmark.cpp        relative  time/iter  iters/s
  ============================================================================
  write_uint8                                                 10.50us   95.20K
  write_uint16                                                 5.48us  182.49K
  write_uint32                                                 2.73us  366.22K
  push_64b                                                     9.77us  102.36K
  push_1024b                                                 112.87us    8.86K
  append                                                      64.21us   15.57K
  preallocate_postallocate_1b                                 16.34us   61.19K
  preallocate_postallocate_4b                                 15.56us   64.26K
  preallocate_postallocate_32b                                22.17us   45.11K
  preallocate_postallocate_256b                              149.55us    6.69K
  ============================================================================

After:
  ============================================================================
  folly/io/test/QueueAppenderBenchmark.cpp        relative  time/iter  iters/s
  ============================================================================
  write_uint8                                                  8.86us  112.81K
  write_uint16                                                 3.91us  255.68K
  write_uint32                                                 2.08us  481.78K
  push_64b                                                     8.24us  121.39K
  push_1024b                                                 115.50us    8.66K
  append                                                      67.52us   14.81K
  preallocate_postallocate_1b                                 13.86us   72.17K
  preallocate_postallocate_4b                                 11.67us   85.71K
  preallocate_postallocate_32b                                20.35us   49.14K
  preallocate_postallocate_256b                              148.57us    6.73K
  ============================================================================

Reviewed By: yfeldblum

Differential Revision: D6427749

fbshipit-source-id: 8495cc74b6106b15d201e37533ae4c0a1abc9d74
parent 7acf192d
......@@ -982,34 +982,43 @@ class QueueAppender : public detail::Writable<QueueAppender> {
* space in the queue, we grow no more than growth bytes at once
* (unless you call ensure() with a bigger value yourself).
*/
QueueAppender(IOBufQueue* queue, uint64_t growth) {
reset(queue, growth);
}
QueueAppender(IOBufQueue* queue, uint64_t growth)
: queueCache_(queue), growth_(growth) {}
void reset(IOBufQueue* queue, uint64_t growth) {
queue_ = queue;
queueCache_.reset(queue);
growth_ = growth;
}
uint8_t* writableData() {
return static_cast<uint8_t*>(queue_->writableTail());
return queueCache_.writableData();
}
size_t length() const { return queue_->tailroom(); }
size_t length() {
return queueCache_.length();
}
void append(size_t n) { queue_->postallocate(n); }
void append(size_t n) {
queueCache_.append(n);
}
// Ensure at least n contiguous; can go above growth_, throws if
// not enough room.
void ensure(uint64_t n) { queue_->preallocate(n, growth_); }
void ensure(size_t n) {
if (length() < n) {
ensureSlow(n);
}
}
template <class T>
typename std::enable_if<std::is_arithmetic<T>::value>::type
write(T value) {
typename std::enable_if<std::is_arithmetic<T>::value>::type write(T value) {
// We can't fail.
auto p = queue_->preallocate(sizeof(T), growth_);
storeUnaligned(p.first, value);
queue_->postallocate(sizeof(T));
if (length() >= sizeof(T)) {
storeUnaligned(queueCache_.writableData(), value);
queueCache_.appendUnsafe(sizeof(T));
} else {
writeSlow<T>(value);
}
}
using detail::Writable<QueueAppender>::pushAtMost;
......@@ -1018,27 +1027,25 @@ class QueueAppender : public detail::Writable<QueueAppender> {
const size_t copyLength = std::min(len, length());
if (copyLength != 0) {
memcpy(writableData(), buf, copyLength);
append(copyLength);
queueCache_.appendUnsafe(copyLength);
buf += copyLength;
}
// Allocate more buffers as necessary
size_t remaining = len - copyLength;
// Allocate more buffers as necessary
while (remaining != 0) {
auto p = queue_->preallocate(std::min(remaining, growth_),
growth_,
remaining);
auto p = queueCache_.queue()->preallocate(
std::min(remaining, growth_), growth_, remaining);
memcpy(p.first, buf, p.second);
queue_->postallocate(p.second);
queueCache_.queue()->postallocate(p.second);
buf += p.second;
remaining -= p.second;
}
return len;
}
void insert(std::unique_ptr<folly::IOBuf> buf) {
if (buf) {
queue_->append(std::move(buf), true);
queueCache_.queue()->append(std::move(buf), true);
}
}
......@@ -1047,9 +1054,25 @@ class QueueAppender : public detail::Writable<QueueAppender> {
}
private:
folly::IOBufQueue* queue_;
size_t growth_;
folly::IOBufQueue::WritableRangeCache queueCache_{nullptr};
size_t growth_{0};
FOLLY_NOINLINE void ensureSlow(size_t n) {
queueCache_.queue()->preallocate(n, growth_);
queueCache_.fillCache();
}
template <class T>
typename std::enable_if<std::is_arithmetic<T>::value>::type FOLLY_NOINLINE
writeSlow(T value) {
queueCache_.queue()->preallocate(sizeof(T), growth_);
queueCache_.fillCache();
storeUnaligned(queueCache_.writableData(), value);
queueCache_.appendUnsafe(sizeof(T));
}
};
} // namespace io
} // namespace folly
......
......@@ -67,29 +67,53 @@ appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
namespace folly {
IOBufQueue::IOBufQueue(const Options& options)
: options_(options),
chainLength_(0) {
: options_(options), cachePtr_(&localCache_) {
localCache_.attached = true;
}
IOBufQueue::~IOBufQueue() {
clearWritableRangeCache();
}
IOBufQueue::IOBufQueue(IOBufQueue&& other) noexcept
: options_(other.options_),
chainLength_(other.chainLength_),
head_(std::move(other.head_)) {
: options_(other.options_), cachePtr_(&localCache_) {
other.clearWritableRangeCache();
head_ = std::move(other.head_);
chainLength_ = other.chainLength_;
tailStart_ = other.tailStart_;
localCache_.cachedRange = other.localCache_.cachedRange;
localCache_.attached = true;
other.chainLength_ = 0;
other.tailStart_ = nullptr;
other.localCache_.cachedRange = {nullptr, nullptr};
}
IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
if (&other != this) {
other.clearWritableRangeCache();
clearWritableRangeCache();
options_ = other.options_;
chainLength_ = other.chainLength_;
head_ = std::move(other.head_);
chainLength_ = other.chainLength_;
tailStart_ = other.tailStart_;
localCache_.cachedRange = other.localCache_.cachedRange;
localCache_.attached = true;
other.chainLength_ = 0;
other.tailStart_ = nullptr;
other.localCache_.cachedRange = {nullptr, nullptr};
}
return *this;
}
std::pair<void*, uint64_t>
IOBufQueue::headroom() {
// Note, headroom is independent from the tail, so we don't need to flush the
// cache.
if (head_) {
return std::make_pair(head_->writableBuffer(), head_->headroom());
} else {
......@@ -102,6 +126,8 @@ IOBufQueue::markPrepended(uint64_t n) {
if (n == 0) {
return;
}
// Note, headroom is independent from the tail, so we don't need to flush the
// cache.
assert(head_);
head_->prepend(n);
chainLength_ += n;
......@@ -109,12 +135,14 @@ IOBufQueue::markPrepended(uint64_t n) {
void
IOBufQueue::prepend(const void* buf, uint64_t n) {
auto p = headroom();
if (n > p.second) {
// We're not touching the tail, so we don't need to flush the cache.
auto hroom = head_->headroom();
if (!head_ || hroom < n) {
throw std::overflow_error("Not enough room to prepend");
}
memcpy(static_cast<char*>(p.first) + p.second - n, buf, n);
markPrepended(n);
memcpy(head_->writableBuffer() + hroom - n, buf, n);
head_->prepend(n);
chainLength_ += n;
}
void
......@@ -122,6 +150,7 @@ IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
if (!buf) {
return;
}
auto guard = updateGuard();
if (options_.cacheChainLength) {
chainLength_ += buf->computeChainDataLength();
}
......@@ -133,6 +162,9 @@ IOBufQueue::append(IOBufQueue& other, bool pack) {
if (!other.head_) {
return;
}
// We're going to chain other, thus we need to grab both guards.
auto otherGuard = other.updateGuard();
auto guard = updateGuard();
if (options_.cacheChainLength) {
if (other.options_.cacheChainLength) {
chainLength_ += other.chainLength_;
......@@ -146,6 +178,7 @@ IOBufQueue::append(IOBufQueue& other, bool pack) {
void
IOBufQueue::append(const void* buf, size_t len) {
auto guard = updateGuard();
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
if ((head_ == nullptr) || head_->prev()->isSharedOne() ||
......@@ -179,15 +212,20 @@ IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
pair<void*,uint64_t>
IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize,
uint64_t max) {
// Avoid grabbing update guard, since we're manually setting the cache ptrs.
flushCache();
// Allocate a new buffer of the requested max size.
unique_ptr<IOBuf> newBuf(IOBuf::create(std::max(min, newAllocationSize)));
tailStart_ = newBuf->writableTail();
cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>(
tailStart_, tailStart_ + newBuf->tailroom());
appendToChain(head_, std::move(newBuf), false);
IOBuf* last = head_->prev();
return make_pair(last->writableTail(),
std::min(max, last->tailroom()));
return make_pair(writableTail(), std::min<uint64_t>(max, tailroom()));
}
unique_ptr<IOBuf> IOBufQueue::split(size_t n, bool throwOnUnderflow) {
auto guard = updateGuard();
unique_ptr<IOBuf> result;
while (n != 0) {
if (head_ == nullptr) {
......@@ -227,6 +265,7 @@ void IOBufQueue::trimStart(size_t amount) {
}
size_t IOBufQueue::trimStartAtMost(size_t amount) {
auto guard = updateGuard();
auto original = amount;
while (amount > 0) {
if (!head_) {
......@@ -254,6 +293,7 @@ void IOBufQueue::trimEnd(size_t amount) {
}
size_t IOBufQueue::trimEndAtMost(size_t amount) {
auto guard = updateGuard();
auto original = amount;
while (amount > 0) {
if (!head_) {
......@@ -278,6 +318,7 @@ size_t IOBufQueue::trimEndAtMost(size_t amount) {
}
std::unique_ptr<folly::IOBuf> IOBufQueue::pop_front() {
auto guard = updateGuard();
if (!head_) {
return nullptr;
}
......@@ -291,6 +332,7 @@ void IOBufQueue::clear() {
if (!head_) {
return;
}
auto guard = updateGuard();
IOBuf* buf = head_.get();
do {
buf->clear();
......@@ -303,16 +345,25 @@ void IOBufQueue::appendToString(std::string& out) const {
if (!head_) {
return;
}
auto len =
options_.cacheChainLength ? chainLength_ : head_->computeChainDataLength();
auto len = options_.cacheChainLength
? chainLength_ + (cachePtr_->cachedRange.first - tailStart_)
: head_->computeChainDataLength() +
(cachePtr_->cachedRange.first - tailStart_);
out.reserve(out.size() + len);
for (auto range : *head_) {
out.append(reinterpret_cast<const char*>(range.data()), range.size());
}
if (tailStart_ != cachePtr_->cachedRange.first) {
out.append(
reinterpret_cast<const char*>(tailStart_),
cachePtr_->cachedRange.first - tailStart_);
}
}
void IOBufQueue::gather(uint64_t maxLength) {
auto guard = updateGuard();
if (head_ != nullptr) {
head_->gather(maxLength);
}
......
This diff is collapsed.
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/Benchmark.h>
#include <folly/Format.h>
#include <folly/Range.h>
#include <folly/io/Cursor.h>
#include <folly/io/IOBufQueue.h>
DECLARE_bool(benchmark);
using namespace folly::io;
constexpr size_t kBenchmarkSize = 4096;
// Benchmarks QueueAppender::write<T>() by filling kBenchmarkSize bytes
// with fixed-size arithmetic values, one write per element.
// A fresh IOBufQueue/QueueAppender pair is built every iteration so the
// measurement includes the (cheap) setup cost, matching the original intent.
template <class T>
void runArithmeticBench(int64_t iters) {
  while (iters--) {
    folly::IOBufQueue queue;
    QueueAppender appender(&queue, kBenchmarkSize);
    for (size_t i = 0; i < kBenchmarkSize / sizeof(T); ++i) {
      // static_cast instead of a C-style cast: same truncating conversion,
      // but greppable and intent-revealing.
      appender.write(static_cast<T>(0xFB));
    }
    // Keep the optimizer from discarding the serialized chain.
    folly::doNotOptimizeAway(queue.move());
  }
}
// Measure QueueAppender::write() for each fixed-width unsigned integer
// size; smaller element types mean more write() calls per iteration
// (kBenchmarkSize / sizeof(T) calls each).
BENCHMARK(write_uint8, iters) {
  runArithmeticBench<uint8_t>(iters);
}
BENCHMARK(write_uint16, iters) {
  runArithmeticBench<uint16_t>(iters);
}
BENCHMARK(write_uint32, iters) {
  runArithmeticBench<uint32_t>(iters);
}
void runPushBenchmark(int64_t iters, const std::string& str) {
constexpr size_t kNumPushPerIter = 1024;
while (iters--) {
folly::IOBufQueue queue;
QueueAppender appender(&queue, kBenchmarkSize);
for (size_t i = 0; i < kNumPushPerIter; ++i) {
appender.push(reinterpret_cast<const uint8_t*>(str.data()), str.size());
}
folly::doNotOptimizeAway(queue.move());
}
}
// push() benchmark with a 64-byte payload; fixture setup is excluded
// from the timing via BENCHMARK_SUSPEND.
BENCHMARK(push_64b, iters) {
  std::string payload;
  BENCHMARK_SUSPEND {
    payload.assign(64, 'f');
  }
  runPushBenchmark(iters, payload);
}
// push() benchmark with a 1024-byte payload; fixture setup is excluded
// from the timing via BENCHMARK_SUSPEND.
BENCHMARK(push_1024b, iters) {
  std::string payload;
  BENCHMARK_SUSPEND {
    payload.assign(1024, 'b');
  }
  runPushBenchmark(iters, payload);
}
// Benchmarks QueueAppender::insert() by appending 1024 clones of a
// pre-filled 1KiB IOBuf per iteration. The shared source buffer is built
// once, outside the timed region.
BENCHMARK(append, iters) {
  constexpr size_t kInsertsPerIteration = 1024;
  std::unique_ptr<folly::IOBuf> sourceBuf;
  BENCHMARK_SUSPEND {
    sourceBuf = folly::IOBuf::create(1024);
    sourceBuf->append(1024);
  }
  for (; iters > 0; --iters) {
    folly::IOBufQueue bufQueue;
    QueueAppender writer(&bufQueue, kBenchmarkSize);
    for (size_t insertion = 0; insertion < kInsertsPerIteration; ++insertion) {
      writer.insert(sourceBuf->clone());
    }
    folly::doNotOptimizeAway(bufQueue.move());
  }
}
void preallocate_postallocate_bench(int64_t iters, size_t size) {
std::string data;
BENCHMARK_SUSPEND {
data = std::string(size, 'f');
}
while (iters--) {
folly::IOBufQueue queue;
for (size_t i = 0; i < kBenchmarkSize; ++i) {
auto range = queue.preallocate(size, kBenchmarkSize);
memcpy(range.first, data.data(), size);
queue.postallocate(size);
}
folly::doNotOptimizeAway(queue.move());
}
}
// Sweep the preallocate/postallocate benchmark across payload sizes to
// show how per-byte overhead amortizes as the chunk size grows.
BENCHMARK(preallocate_postallocate_1b, iters) {
  preallocate_postallocate_bench(iters, 1);
}
BENCHMARK(preallocate_postallocate_4b, iters) {
  preallocate_postallocate_bench(iters, 4);
}
BENCHMARK(preallocate_postallocate_32b, iters) {
  preallocate_postallocate_bench(iters, 32);
}
BENCHMARK(preallocate_postallocate_256b, iters) {
  preallocate_postallocate_bench(iters, 256);
}
// Entry point: parse gflags (e.g. --bm_* benchmark flags), then run every
// BENCHMARK registered above and print the results table.
int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  folly::runBenchmarks();
  return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment