Commit 7897d0e4 authored by Felix Handte, committed by Facebook GitHub Bot

Move the CompressionCoreLocalContextPool to Folly

Summary:
This reduces contention on the synchronized stack in the
`CompressionContextPool`, and potentially improves cache locality under
heavy compression workloads.

I have plans to make a singleton of this and to use it in `folly::io::Codec`
and some other places, so they all use the same contexts (a rough sketch of
that wiring follows the commit metadata below).

Reviewed By: bimbashrestha

Differential Revision: D18863824

fbshipit-source-id: 63c2f3dd0abe0aa4c88350767e9e77acb763c9a5
parent 594f4d72
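
For concreteness, here is a rough sketch of what that planned singleton wiring might look like. All names below (Ctx, CtxCreator, CtxDeleter, ctxPoolSingleton) are hypothetical stand-ins; nothing in this sketch is part of the commit itself:

#include <folly/Singleton.h>
#include <folly/compression/CompressionCoreLocalContextPool.h>

// Hypothetical context type and functors, standing in for the real
// compression-context management in folly/compression.
struct Ctx {};
struct CtxCreator {
  Ctx* operator()() const { return new Ctx(); }
};
struct CtxDeleter {
  void operator()(Ctx* ctx) const { delete ctx; }
};

using CtxPool = folly::compression::
    CompressionCoreLocalContextPool<Ctx, CtxCreator, CtxDeleter>;

// One process-wide pool that folly::io::Codec and other call sites could
// share, so they all draw from the same set of contexts.
folly::Singleton<CtxPool> ctxPoolSingleton;

void compressSomething() {
  if (auto pool = folly::Singleton<CtxPool>::try_get()) {
    auto ctx = pool->get();
    // ... use *ctx ...
  }
}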
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <array>
#include <atomic>
#include <memory>

#include <folly/compression/CompressionContextPool.h>
#include <folly/concurrency/CacheLocality.h>

#include <glog/logging.h>

namespace folly {
namespace compression {
/**
* This class is intended to reduce contention on reserving a compression
* context and improve cache locality (but maybe not hotness) of the contexts
* it manages.
*
 * This class uses folly::AccessSpreader to spread the managed objects across
 * NumStripes stripes, each of which should correspond to a topologically
 * close set of hardware threads. This cache is still backed by the basic
 * locked stack in folly::compression::CompressionContextPool.
*
* Note that there is a tradeoff in choosing the number of stripes. More stripes
* make for less contention, but mean that a context is less likely to be hot
* in cache.
*/
template <typename T, typename Creator, typename Deleter, size_t NumStripes = 8>
class CompressionCoreLocalContextPool {
private:
/**
* Force each pointer to be on a different cache line.
*/
class alignas(folly::hardware_destructive_interference_size) Storage {
public:
Storage() : ptr(nullptr) {}
std::atomic<T*> ptr;
};
class ReturnToPoolDeleter {
public:
using Pool =
CompressionCoreLocalContextPool<T, Creator, Deleter, NumStripes>;
explicit ReturnToPoolDeleter(Pool* pool) : pool_(pool) {
DCHECK(pool_);
}
void operator()(T* ptr) {
pool_->store(ptr);
}
private:
Pool* pool_;
};
using BackingPool = CompressionContextPool<T, Creator, Deleter>;
using BackingPoolRef = typename BackingPool::Ref;
public:
using Object = T;
using Ref = std::unique_ptr<T, ReturnToPoolDeleter>;
explicit CompressionCoreLocalContextPool(
Creator creator = Creator(),
Deleter deleter = Deleter())
: pool_(std::move(creator), std::move(deleter)), caches_() {}
~CompressionCoreLocalContextPool() {
// Return all cached contexts back to the backing pool.
for (auto& cache : caches_) {
auto ptr = cache.ptr.exchange(nullptr);
if (ptr != nullptr) {
return_to_backing_pool(ptr);
}
}
}
Ref get() {
// Claim this core's cached context; exchange leaves nullptr behind so
// concurrent callers fall through to the backing pool.
auto ptr = local().ptr.exchange(nullptr);
if (ptr == nullptr) {
// No local context available; fall back to the backing pool.
ptr = pool_.get().release();
DCHECK(ptr);
}
return Ref(ptr, get_deleter());
}
private:
ReturnToPoolDeleter get_deleter() {
return ReturnToPoolDeleter(this);
}
void store(T* ptr) {
DCHECK(ptr);
T* expected = nullptr;
// A single CAS attempt, so use the strong variant: a spurious failure
// would needlessly evict the context to the backing pool.
const bool stored = local().ptr.compare_exchange_strong(expected, ptr);
if (!stored) {
// This core already has a cached context; return this one instead.
return_to_backing_pool(ptr);
}
}
void return_to_backing_pool(T* ptr) {
// Wrap the raw pointer in a temporary Ref; its destruction at the end of
// this statement hands the context back to the backing pool.
BackingPoolRef(ptr, pool_.get_deleter());
}
Storage& local() {
// Map the calling thread's current CPU to one of NumStripes stripes.
const auto idx = folly::AccessSpreader<>::cachedCurrent(NumStripes);
return caches_[idx];
}
BackingPool pool_;
std::array<Storage, NumStripes> caches_{};
};
} // namespace compression
} // namespace folly
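
For illustration, here is a minimal usage sketch of the pool with a toy context type. The Ctx type and its creator/deleter functors are hypothetical stand-ins for the real compression-context functors that live elsewhere in folly/compression:

#include <folly/compression/CompressionCoreLocalContextPool.h>

namespace {
// Hypothetical context type and functors, for illustration only.
struct Ctx {
  int payload = 0;
};
struct CtxCreator {
  Ctx* operator()() const { return new Ctx(); }
};
struct CtxDeleter {
  void operator()(Ctx* ctx) const { delete ctx; }
};
} // namespace

int main() {
  // NumStripes defaults to 8: more stripes mean less contention, but a
  // context is less likely to be hot in cache.
  folly::compression::
      CompressionCoreLocalContextPool<Ctx, CtxCreator, CtxDeleter>
          pool;
  {
    auto ref = pool.get(); // core-local fast path, backing pool fallback
    ref->payload = 42;
  } // ReturnToPoolDeleter caches the context in this core's stripe
  auto ref = pool.get(); // on the same core, likely the same context back
  return 0;
}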
@@ -19,6 +19,7 @@
#include <folly/portability/GTest.h>
#include <folly/compression/CompressionContextPool.h>
#include <folly/compression/CompressionCoreLocalContextPool.h>
using namespace testing;
@@ -178,5 +179,84 @@ TEST_F(CompressionContextPoolTest, testBadCreate) {
BadPool pool;
EXPECT_THROW(pool.get(), std::bad_alloc);
}
class CompressionCoreLocalContextPoolTest : public testing::Test {
protected:
using Pool = CompressionCoreLocalContextPool<Foo, FooCreator, FooDeleter, 8>;
void SetUp() override {
pool_ = std::make_unique<Pool>();
}
void TearDown() override {
pool_.reset();
}
std::unique_ptr<Pool> pool_;
};
TEST_F(CompressionCoreLocalContextPoolTest, testGet) {
auto ptr = pool_->get();
EXPECT_TRUE(ptr);
}
TEST_F(CompressionCoreLocalContextPoolTest, testSame) {
Pool::Object* tmp;
{
auto ptr = pool_->get();
tmp = ptr.get();
}
{
auto ptr = pool_->get();
EXPECT_EQ(tmp, ptr.get());
}
}
TEST_F(CompressionCoreLocalContextPoolTest, testDifferent) {
auto ptr1 = pool_->get();
auto ptr2 = pool_->get();
EXPECT_NE(ptr1.get(), ptr2.get());
}
TEST_F(CompressionCoreLocalContextPoolTest, testSwap) {
auto ptr1 = pool_->get();
auto ptr2 = pool_->get();
EXPECT_NE(ptr1.get(), ptr2.get());
auto tmp1 = ptr1.get();
auto tmp2 = ptr2.get();
ptr2.reset();
ptr1.reset();
ptr2 = pool_->get();
EXPECT_EQ(ptr2.get(), tmp2);
ptr1 = pool_->get();
EXPECT_EQ(ptr1.get(), tmp1);
ptr1.reset();
ptr2.reset();
ptr1 = pool_->get();
EXPECT_EQ(ptr1.get(), tmp1);
ptr2 = pool_->get();
EXPECT_EQ(ptr2.get(), tmp2);
}
TEST_F(CompressionCoreLocalContextPoolTest, testMultithread) {
constexpr size_t numThreads = 64;
constexpr size_t numIters = 1 << 14;
std::vector<std::thread> ts;
for (size_t i = 0; i < numThreads; i++) {
ts.emplace_back([&pool = *pool_]() {
for (size_t n = 0; n < numIters; n++) {
auto ref = pool.get();
CHECK(ref);
ref.reset();
}
});
}
for (auto& t : ts) {
t.join();
}
// Each thread holds at most one context at a time, so the pool should
// never have needed to create more than numThreads Foos.
EXPECT_LE(numFoos.load(), numThreads);
}
} // namespace compression
} // namespace folly