Commit 0085c239 authored by Yedidya Feldblum's avatar Yedidya Feldblum Committed by Facebook Github Bot

Rewrite allocators for the era of std::allocator_traits

Summary:
[Folly] Rewrite allocators for the era of `std::allocator_traits`.

Provide minimal interfaces which comply with all required elements of C++ concept `Allocator`. Change all (*) uses to use `std::allocator_traits` consistently, as is required of all allocator-aware library types.

* Rename `SysAlloc`.
* Replace `StlAllocator` with `CxxAllocatorAdaptor` with stricter semantics.
* `Arena` is no longer a C++ Allocator because it is not freely copyable. Change code which used it as a C++ Allocator to use `CxxAllocatorAdaptor` instead.
* `ThreadCachedArena` likewise.

(*) Hopefully.

Reviewed By: nbronson

Differential Revision: D7208794

fbshipit-source-id: 270588c9c3d817f4abd9fb49eed5eb9f03f96da2
parent 99225754
...@@ -62,22 +62,24 @@ class SkipListNode : private boost::noncopyable { ...@@ -62,22 +62,24 @@ class SkipListNode : private boost::noncopyable {
size_t size = sizeof(SkipListNode) + size_t size = sizeof(SkipListNode) +
height * sizeof(std::atomic<SkipListNode*>); height * sizeof(std::atomic<SkipListNode*>);
auto* node = static_cast<SkipListNode*>(alloc.allocate(size)); auto storage = std::allocator_traits<NodeAlloc>::allocate(alloc, size);
// do placement new // do placement new
new (node) SkipListNode(uint8_t(height), std::forward<U>(data), isHead); return new (storage)
return node; SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
} }
template <typename NodeAlloc> template <typename NodeAlloc>
static void destroy(NodeAlloc& alloc, SkipListNode* node) { static void destroy(NodeAlloc& alloc, SkipListNode* node) {
size_t size = sizeof(SkipListNode) +
node->height_ * sizeof(std::atomic<SkipListNode*>);
node->~SkipListNode(); node->~SkipListNode();
alloc.deallocate(node); std::allocator_traits<NodeAlloc>::deallocate(alloc, node, size);
} }
template <typename NodeAlloc> template <typename NodeAlloc>
struct DestroyIsNoOp : std::integral_constant<bool, struct DestroyIsNoOp : StrictConjunction<
IsArenaAllocator<NodeAlloc>::value && AllocatorHasTrivialDeallocate<NodeAlloc>,
boost::has_trivial_destructor<SkipListNode>::value> { }; boost::has_trivial_destructor<SkipListNode>> {};
// copy the head node to a new head node assuming lock acquired // copy the head node to a new head node assuming lock acquired
SkipListNode* copyHead(SkipListNode* node) { SkipListNode* copyHead(SkipListNode* node) {
......
...@@ -138,9 +138,9 @@ namespace folly { ...@@ -138,9 +138,9 @@ namespace folly {
template < template <
typename T, typename T,
typename Comp = std::less<T>, typename Comp = std::less<T>,
// All nodes are allocated using provided SimpleAllocator, // All nodes are allocated using provided SysAllocator,
// it should be thread-safe. // it should be thread-safe.
typename NodeAlloc = SysAlloc, typename NodeAlloc = SysAllocator<void>,
int MAX_HEIGHT = 24> int MAX_HEIGHT = 24>
class ConcurrentSkipList { class ConcurrentSkipList {
// MAX_HEIGHT needs to be at least 2 to suppress compiler // MAX_HEIGHT needs to be at least 2 to suppress compiler
......
This diff is collapsed.
...@@ -405,7 +405,7 @@ class SimpleAllocator { ...@@ -405,7 +405,7 @@ class SimpleAllocator {
* Note that allocation and deallocation takes a per-sizeclass lock. * Note that allocation and deallocation takes a per-sizeclass lock.
*/ */
template <size_t Stripes> template <size_t Stripes>
class CoreAllocator { class CoreRawAllocator {
public: public:
class Allocator { class Allocator {
static constexpr size_t AllocSize{4096}; static constexpr size_t AllocSize{4096};
...@@ -443,7 +443,7 @@ class CoreAllocator { ...@@ -443,7 +443,7 @@ class CoreAllocator {
} }
return allocators_[cl].allocate(); return allocators_[cl].allocate();
} }
void deallocate(void* mem) { void deallocate(void* mem, size_t = 0) {
if (!mem) { if (!mem) {
return; return;
} }
...@@ -469,19 +469,14 @@ class CoreAllocator { ...@@ -469,19 +469,14 @@ class CoreAllocator {
Allocator allocators_[Stripes]; Allocator allocators_[Stripes];
}; };
template <size_t Stripes> template <typename T, size_t Stripes>
typename CoreAllocator<Stripes>::Allocator* getCoreAllocator(size_t stripe) { CxxAllocatorAdaptor<T, typename CoreRawAllocator<Stripes>::Allocator>
getCoreAllocator(size_t stripe) {
// We cannot make sure that the allocator will be destroyed after // We cannot make sure that the allocator will be destroyed after
// all the objects allocated with it, so we leak it. // all the objects allocated with it, so we leak it.
static Indestructible<CoreAllocator<Stripes>> allocator; static Indestructible<CoreRawAllocator<Stripes>> allocator;
return allocator->get(stripe); return CxxAllocatorAdaptor<T, typename CoreRawAllocator<Stripes>::Allocator>(
} *allocator->get(stripe));
template <typename T, size_t Stripes>
StlAllocator<typename CoreAllocator<Stripes>::Allocator, T> getCoreAllocatorStl(
size_t stripe) {
auto alloc = getCoreAllocator<Stripes>(stripe);
return StlAllocator<typename CoreAllocator<Stripes>::Allocator, T>(alloc);
} }
} // namespace folly } // namespace folly
...@@ -44,11 +44,11 @@ class CoreCachedSharedPtr { ...@@ -44,11 +44,11 @@ class CoreCachedSharedPtr {
} }
void reset(const std::shared_ptr<T>& p = nullptr) { void reset(const std::shared_ptr<T>& p = nullptr) {
// Allocate each Holder in a different CoreAllocator stripe to // Allocate each Holder in a different CoreRawAllocator stripe to
// prevent false sharing. Their control blocks will be adjacent // prevent false sharing. Their control blocks will be adjacent
// thanks to allocate_shared(). // thanks to allocate_shared().
for (auto slot : folly::enumerate(slots_)) { for (auto slot : folly::enumerate(slots_)) {
auto alloc = getCoreAllocatorStl<Holder, kNumSlots>(slot.index); auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
auto holder = std::allocate_shared<Holder>(alloc, p); auto holder = std::allocate_shared<Holder>(alloc, p);
*slot = std::shared_ptr<T>(holder, p.get()); *slot = std::shared_ptr<T>(holder, p.get());
} }
...@@ -114,11 +114,11 @@ class AtomicCoreCachedSharedPtr { ...@@ -114,11 +114,11 @@ class AtomicCoreCachedSharedPtr {
void reset(const std::shared_ptr<T>& p = nullptr) { void reset(const std::shared_ptr<T>& p = nullptr) {
auto newslots = folly::make_unique<Slots>(); auto newslots = folly::make_unique<Slots>();
// Allocate each Holder in a different CoreAllocator stripe to // Allocate each Holder in a different CoreRawAllocator stripe to
// prevent false sharing. Their control blocks will be adjacent // prevent false sharing. Their control blocks will be adjacent
// thanks to allocate_shared(). // thanks to allocate_shared().
for (auto slot : folly::enumerate(newslots->slots_)) { for (auto slot : folly::enumerate(newslots->slots_)) {
auto alloc = getCoreAllocatorStl<Holder, kNumSlots>(slot.index); auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
auto holder = std::allocate_shared<Holder>(alloc, p); auto holder = std::allocate_shared<Holder>(alloc, p);
*slot = std::shared_ptr<T>(holder, p.get()); *slot = std::shared_ptr<T>(holder, p.get());
} }
......
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#include <glog/logging.h> #include <glog/logging.h>
#include <memory> #include <memory>
#include <thread> #include <thread>
#include <type_traits>
#include <unordered_map> #include <unordered_map>
using namespace folly; using namespace folly;
...@@ -412,8 +411,8 @@ TEST(AccessSpreader, Wrapping) { ...@@ -412,8 +411,8 @@ TEST(AccessSpreader, Wrapping) {
} }
} }
TEST(CoreAllocator, Basic) { TEST(CoreRawAllocator, Basic) {
CoreAllocator<32> alloc; CoreRawAllocator<32> alloc;
auto a = alloc.get(0); auto a = alloc.get(0);
auto res = a->allocate(8); auto res = a->allocate(8);
memset(res, 0, 8); memset(res, 0, 8);
......
...@@ -151,7 +151,7 @@ void* JemallocNodumpAllocator::alloc( ...@@ -151,7 +151,7 @@ void* JemallocNodumpAllocator::alloc(
#endif // FOLLY_JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED #endif // FOLLY_JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
void JemallocNodumpAllocator::deallocate(void* p) { void JemallocNodumpAllocator::deallocate(void* p, size_t) {
dallocx != nullptr ? dallocx(p, flags_) : free(p); dallocx != nullptr ? dallocx(p, flags_) : free(p);
} }
......
...@@ -80,7 +80,7 @@ class JemallocNodumpAllocator { ...@@ -80,7 +80,7 @@ class JemallocNodumpAllocator {
void* allocate(size_t size); void* allocate(size_t size);
void* reallocate(void* p, size_t size); void* reallocate(void* p, size_t size);
void deallocate(void* p); void deallocate(void* p, size_t = 0);
unsigned getArenaIndex() const { return arena_index_; } unsigned getArenaIndex() const { return arena_index_; }
int getFlags() const { return flags_; } int getFlags() const { return flags_; }
......
...@@ -30,14 +30,14 @@ Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) { ...@@ -30,14 +30,14 @@ Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) {
allocSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, allocSize); allocSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, allocSize);
} }
void* mem = alloc.allocate(allocSize); void* mem = std::allocator_traits<Alloc>::allocate(alloc, allocSize);
return std::make_pair(new (mem) Block(), allocSize - sizeof(Block)); return std::make_pair(new (mem) Block(), allocSize - sizeof(Block));
} }
template <class Alloc> template <class Alloc>
void Arena<Alloc>::Block::deallocate(Alloc& alloc) { void Arena<Alloc>::Block::deallocate(Alloc& alloc) {
this->~Block(); this->~Block();
alloc.deallocate(this); std::allocator_traits<Alloc>::deallocate(alloc, this, 1);
} }
template <class Alloc> template <class Alloc>
......
...@@ -36,16 +36,10 @@ namespace folly { ...@@ -36,16 +36,10 @@ namespace folly {
* Simple arena: allocate memory which gets freed when the arena gets * Simple arena: allocate memory which gets freed when the arena gets
* destroyed. * destroyed.
* *
* The arena itself allocates memory using a custom allocator which provides * The arena itself allocates memory using a custom allocator which conforms
* the following interface (same as required by StlAllocator in StlAllocator.h) * to the C++ concept Allocator.
* *
* void* allocate(size_t size); * http://en.cppreference.com/w/cpp/concept/Allocator
* Allocate a block of size bytes, properly aligned to the maximum
* alignment required on your system; throw std::bad_alloc if the
* allocation can't be satisfied.
*
* void deallocate(void* ptr);
* Deallocate a previously allocated block.
* *
* You may also specialize ArenaAllocatorTraits for your allocator type to * You may also specialize ArenaAllocatorTraits for your allocator type to
* provide: * provide:
...@@ -101,7 +95,7 @@ class Arena { ...@@ -101,7 +95,7 @@ class Arena {
return r; return r;
} }
void deallocate(void* /* p */) { void deallocate(void* /* p */, size_t = 0) {
// Deallocate? Never! // Deallocate? Never!
} }
...@@ -121,13 +115,11 @@ class Arena { ...@@ -121,13 +115,11 @@ class Arena {
return bytesUsed_; return bytesUsed_;
} }
// not copyable // not copyable or movable
Arena(const Arena&) = delete; Arena(const Arena&) = delete;
Arena& operator=(const Arena&) = delete; Arena& operator=(const Arena&) = delete;
Arena(Arena&&) = delete;
// movable Arena& operator=(Arena&&) = delete;
Arena(Arena&&) = default;
Arena& operator=(Arena&&) = default;
private: private:
struct Block; struct Block;
...@@ -184,7 +176,7 @@ class Arena { ...@@ -184,7 +176,7 @@ class Arena {
void* allocateSlow(size_t size); void* allocateSlow(size_t size);
// Empty member optimization: package Alloc with a non-empty member // Empty member optimization: package Alloc with a non-empty member
// in case Alloc is empty (as it is in the case of SysAlloc). // in case Alloc is empty (as it is in the case of SysAllocator).
struct AllocAndSize : public Alloc { struct AllocAndSize : public Alloc {
explicit AllocAndSize(const Alloc& a, size_t s) explicit AllocAndSize(const Alloc& a, size_t s)
: Alloc(a), minBlockSize(s) { : Alloc(a), minBlockSize(s) {
...@@ -210,7 +202,7 @@ class Arena { ...@@ -210,7 +202,7 @@ class Arena {
}; };
template <class Alloc> template <class Alloc>
struct IsArenaAllocator<Arena<Alloc>> : std::true_type { }; struct AllocatorHasTrivialDeallocate<Arena<Alloc>> : std::true_type {};
/** /**
* By default, don't pad the given size. * By default, don't pad the given size.
...@@ -221,8 +213,8 @@ struct ArenaAllocatorTraits { ...@@ -221,8 +213,8 @@ struct ArenaAllocatorTraits {
}; };
template <> template <>
struct ArenaAllocatorTraits<SysAlloc> { struct ArenaAllocatorTraits<SysAllocator<void>> {
static size_t goodSize(const SysAlloc& /* alloc */, size_t size) { static size_t goodSize(const SysAllocator<void>& /* alloc */, size_t size) {
return goodMallocSize(size); return goodMallocSize(size);
} }
}; };
...@@ -230,17 +222,23 @@ struct ArenaAllocatorTraits<SysAlloc> { ...@@ -230,17 +222,23 @@ struct ArenaAllocatorTraits<SysAlloc> {
/** /**
* Arena that uses the system allocator (malloc / free) * Arena that uses the system allocator (malloc / free)
*/ */
class SysArena : public Arena<SysAlloc> { class SysArena : public Arena<SysAllocator<void>> {
public: public:
explicit SysArena(size_t minBlockSize = kDefaultMinBlockSize, explicit SysArena(
size_t sizeLimit = kNoSizeLimit, size_t minBlockSize = kDefaultMinBlockSize,
size_t maxAlign = kDefaultMaxAlign) size_t sizeLimit = kNoSizeLimit,
: Arena<SysAlloc>(SysAlloc(), minBlockSize, sizeLimit, maxAlign) { size_t maxAlign = kDefaultMaxAlign)
} : Arena<SysAllocator<void>>({}, minBlockSize, sizeLimit, maxAlign) {}
}; };
template <> template <>
struct IsArenaAllocator<SysArena> : std::true_type { }; struct AllocatorHasTrivialDeallocate<SysArena> : std::true_type {};
template <typename T, typename Alloc>
using ArenaAllocator = CxxAllocatorAdaptor<T, Arena<Alloc>>;
template <typename T>
using SysArenaAllocator = ArenaAllocator<T, SysAllocator<void>>;
} // namespace folly } // namespace folly
......
...@@ -51,7 +51,7 @@ class ThreadCachedArena { ...@@ -51,7 +51,7 @@ class ThreadCachedArena {
return arena->allocate(size); return arena->allocate(size);
} }
void deallocate(void* /* p */) { void deallocate(void* /* p */, size_t = 0) {
// Deallocate? Never! // Deallocate? Never!
} }
...@@ -82,6 +82,9 @@ class ThreadCachedArena { ...@@ -82,6 +82,9 @@ class ThreadCachedArena {
}; };
template <> template <>
struct IsArenaAllocator<ThreadCachedArena> : std::true_type { }; struct AllocatorHasTrivialDeallocate<ThreadCachedArena> : std::true_type {};
template <typename T>
using ThreadCachedArenaAllocator = CxxAllocatorAdaptor<T, ThreadCachedArena>;
} // namespace folly } // namespace folly
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
using namespace folly; using namespace folly;
static_assert(IsArenaAllocator<SysArena>::value, ""); static_assert(AllocatorHasTrivialDeallocate<SysArena>::value, "");
TEST(Arena, SizeSanity) { TEST(Arena, SizeSanity) {
std::set<size_t*> allocatedItems; std::set<size_t*> allocatedItems;
...@@ -79,7 +79,7 @@ TEST(Arena, SizeSanity) { ...@@ -79,7 +79,7 @@ TEST(Arena, SizeSanity) {
// Nuke 'em all // Nuke 'em all
for (const auto& item : allocatedItems) { for (const auto& item : allocatedItems) {
arena.deallocate(item); arena.deallocate(item, 0 /* unused */);
} }
//The total size should be the same //The total size should be the same
EXPECT_TRUE(arena.totalSize() >= minimum_size); EXPECT_TRUE(arena.totalSize() >= minimum_size);
...@@ -134,8 +134,8 @@ TEST(Arena, Vector) { ...@@ -134,8 +134,8 @@ TEST(Arena, Vector) {
EXPECT_EQ(arena.totalSize(), sizeof(SysArena)); EXPECT_EQ(arena.totalSize(), sizeof(SysArena));
std::vector<size_t, StlAllocator<SysArena, size_t>> std::vector<size_t, SysArenaAllocator<size_t>> vec{
vec { {}, StlAllocator<SysArena, size_t>(&arena) }; {}, SysArenaAllocator<size_t>(arena)};
for (size_t i = 0; i < 1000; i++) { for (size_t i = 0; i < 1000; i++) {
vec.push_back(i); vec.push_back(i);
...@@ -157,17 +157,6 @@ TEST(Arena, SizeLimit) { ...@@ -157,17 +157,6 @@ TEST(Arena, SizeLimit) {
EXPECT_THROW(arena.allocate(maxSize + 1), std::bad_alloc); EXPECT_THROW(arena.allocate(maxSize + 1), std::bad_alloc);
} }
TEST(Arena, MoveArena) {
SysArena arena(sizeof(size_t) * 2);
arena.allocate(sizeof(size_t));
auto totalSize = arena.totalSize();
auto bytesUsed = arena.bytesUsed();
SysArena moved(std::move(arena));
EXPECT_EQ(totalSize, moved.totalSize());
EXPECT_EQ(bytesUsed, moved.bytesUsed());
}
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
testing::InitGoogleTest(&argc, argv); testing::InitGoogleTest(&argc, argv);
gflags::ParseCommandLineFlags(&argc, &argv, true); gflags::ParseCommandLineFlags(&argc, &argv, true);
......
...@@ -154,16 +154,21 @@ TEST(ThreadCachedArena, MultiThreaded) { ...@@ -154,16 +154,21 @@ TEST(ThreadCachedArena, MultiThreaded) {
mainTester.verify(); mainTester.verify();
} }
TEST(ThreadCachedArena, StlAllocator) { TEST(ThreadCachedArena, ThreadCachedArenaAllocator) {
typedef std::unordered_map< using Map = std::unordered_map<
int, int, std::hash<int>, std::equal_to<int>, int,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map; int,
std::hash<int>,
std::equal_to<int>,
ThreadCachedArenaAllocator<std::pair<const int, int>>>;
static const size_t requestedBlockSize = 64; static const size_t requestedBlockSize = 64;
ThreadCachedArena arena(requestedBlockSize); ThreadCachedArena arena(requestedBlockSize);
Map map {0, std::hash<int>(), std::equal_to<int>(), Map map{0,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>(&arena)}; std::hash<int>(),
std::equal_to<int>(),
ThreadCachedArenaAllocator<std::pair<const int, int>>(arena)};
for (int i = 0; i < 1000; i++) { for (int i = 0; i < 1000; i++) {
map[i] = i; map[i] = i;
...@@ -179,7 +184,7 @@ namespace { ...@@ -179,7 +184,7 @@ namespace {
static const int kNumValues = 10000; static const int kNumValues = 10000;
BENCHMARK(bmUMStandard, iters) { BENCHMARK(bmUMStandard, iters) {
typedef std::unordered_map<int, int> Map; using Map = std::unordered_map<int, int>;
while (iters--) { while (iters--) {
Map map {0}; Map map {0};
...@@ -190,16 +195,20 @@ BENCHMARK(bmUMStandard, iters) { ...@@ -190,16 +195,20 @@ BENCHMARK(bmUMStandard, iters) {
} }
BENCHMARK(bmUMArena, iters) { BENCHMARK(bmUMArena, iters) {
typedef std::unordered_map< using Map = std::unordered_map<
int, int, std::hash<int>, std::equal_to<int>, int,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map; int,
std::hash<int>,
std::equal_to<int>,
ThreadCachedArenaAllocator<std::pair<const int, int>>>;
while (iters--) { while (iters--) {
ThreadCachedArena arena; ThreadCachedArena arena;
Map map {0, std::hash<int>(), std::equal_to<int>(), Map map{0,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>( std::hash<int>(),
&arena)}; std::equal_to<int>(),
ThreadCachedArenaAllocator<std::pair<const int, int>>(arena)};
for (int i = 0; i < kNumValues; i++) { for (int i = 0; i < kNumValues; i++) {
map[i] = i; map[i] = i;
...@@ -210,7 +219,7 @@ BENCHMARK(bmUMArena, iters) { ...@@ -210,7 +219,7 @@ BENCHMARK(bmUMArena, iters) {
BENCHMARK_DRAW_LINE() BENCHMARK_DRAW_LINE()
BENCHMARK(bmMStandard, iters) { BENCHMARK(bmMStandard, iters) {
typedef std::map<int, int> Map; using Map = std::map<int, int>;
while (iters--) { while (iters--) {
Map map; Map map;
...@@ -223,16 +232,17 @@ BENCHMARK(bmMStandard, iters) { ...@@ -223,16 +232,17 @@ BENCHMARK(bmMStandard, iters) {
BENCHMARK_DRAW_LINE() BENCHMARK_DRAW_LINE()
BENCHMARK(bmMArena, iters) { BENCHMARK(bmMArena, iters) {
typedef std::map< using Map = std::map<
int, int, std::less<int>, int,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map; int,
std::less<int>,
ThreadCachedArenaAllocator<std::pair<const int, int>>>;
while (iters--) { while (iters--) {
ThreadCachedArena arena; ThreadCachedArena arena;
Map map {std::less<int>(), Map map{std::less<int>(),
StlAllocator<ThreadCachedArena, std::pair<const int, int>>( ThreadCachedArenaAllocator<std::pair<const int, int>>(arena)};
&arena)};
for (int i = 0; i < kNumValues; i++) { for (int i = 0; i < kNumValues; i++) {
map[i] = i; map[i] = i;
......
...@@ -24,11 +24,6 @@ ...@@ -24,11 +24,6 @@
using namespace folly; using namespace folly;
static_assert(
is_simple_allocator<SysArena, int>::value,
"SysArena should be a simple allocator"
);
struct global_counter { struct global_counter {
global_counter(): count_(0) {} global_counter(): count_(0) {}
...@@ -61,7 +56,7 @@ struct Foo { ...@@ -61,7 +56,7 @@ struct Foo {
template <typename Allocator> template <typename Allocator>
void unique_ptr_test(Allocator& allocator) { void unique_ptr_test(Allocator& allocator) {
typedef typename AllocatorUniquePtr<Foo, Allocator>::type ptr_type; using ptr_type = std::unique_ptr<Foo, allocator_delete<Allocator>>;
global_counter counter; global_counter counter;
EXPECT_EQ(counter.count(), 0); EXPECT_EQ(counter.count(), 0);
...@@ -95,18 +90,13 @@ void unique_ptr_test(Allocator& allocator) { ...@@ -95,18 +90,13 @@ void unique_ptr_test(Allocator& allocator) {
} }
EXPECT_EQ(counter.count(), 1); EXPECT_EQ(counter.count(), 1);
StlAllocator<Allocator, Foo>().destroy(foo); std::allocator_traits<Allocator>::destroy(allocator, foo);
EXPECT_EQ(counter.count(), 0); EXPECT_EQ(counter.count(), 0);
} }
TEST(ArenaSmartPtr, unique_ptr_SysArena) { TEST(ArenaSmartPtr, unique_ptr_SysArena) {
SysArena arena; SysArena arena;
unique_ptr_test(arena); SysArenaAllocator<Foo> alloc(arena);
}
TEST(ArenaSmartPtr, unique_ptr_StlAlloc_SysArena) {
SysArena arena;
StlAllocator<SysArena, Foo> alloc(&arena);
unique_ptr_test(alloc); unique_ptr_test(alloc);
} }
...@@ -122,7 +112,7 @@ void shared_ptr_test(Allocator& allocator) { ...@@ -122,7 +112,7 @@ void shared_ptr_test(Allocator& allocator) {
EXPECT_EQ(foo.use_count(), 0); EXPECT_EQ(foo.use_count(), 0);
{ {
auto p = folly::allocate_shared<Foo>(allocator, counter); auto p = std::allocate_shared<Foo>(allocator, counter);
EXPECT_EQ(counter.count(), 1); EXPECT_EQ(counter.count(), 1);
EXPECT_EQ(p.use_count(), 1); EXPECT_EQ(p.use_count(), 1);
...@@ -130,7 +120,7 @@ void shared_ptr_test(Allocator& allocator) { ...@@ -130,7 +120,7 @@ void shared_ptr_test(Allocator& allocator) {
EXPECT_EQ(counter.count(), 0); EXPECT_EQ(counter.count(), 0);
EXPECT_EQ(p.use_count(), 0); EXPECT_EQ(p.use_count(), 0);
p = folly::allocate_shared<Foo>(allocator, counter); p = std::allocate_shared<Foo>(allocator, counter);
EXPECT_EQ(counter.count(), 1); EXPECT_EQ(counter.count(), 1);
EXPECT_EQ(p.use_count(), 1); EXPECT_EQ(p.use_count(), 1);
...@@ -167,12 +157,7 @@ void shared_ptr_test(Allocator& allocator) { ...@@ -167,12 +157,7 @@ void shared_ptr_test(Allocator& allocator) {
TEST(ArenaSmartPtr, shared_ptr_SysArena) { TEST(ArenaSmartPtr, shared_ptr_SysArena) {
SysArena arena; SysArena arena;
shared_ptr_test(arena); SysArenaAllocator<Foo> alloc(arena);
}
TEST(ArenaSmartPtr, shared_ptr_StlAlloc_SysArena) {
SysArena arena;
StlAllocator<SysArena, Foo> alloc(&arena);
shared_ptr_test(alloc); shared_ptr_test(alloc);
} }
......
...@@ -40,29 +40,34 @@ namespace { ...@@ -40,29 +40,34 @@ namespace {
template <typename ParentAlloc> template <typename ParentAlloc>
struct ParanoidArenaAlloc { struct ParanoidArenaAlloc {
explicit ParanoidArenaAlloc(ParentAlloc* arena) : arena_(arena) {} explicit ParanoidArenaAlloc(ParentAlloc& arena) : arena_(arena) {}
ParanoidArenaAlloc(ParanoidArenaAlloc const&) = delete;
ParanoidArenaAlloc(ParanoidArenaAlloc&&) = delete;
ParanoidArenaAlloc& operator=(ParanoidArenaAlloc const&) = delete;
ParanoidArenaAlloc& operator=(ParanoidArenaAlloc&&) = delete;
void* allocate(size_t size) { void* allocate(size_t size) {
void* result = arena_->allocate(size); void* result = arena_.get().allocate(size);
allocated_.insert(result); allocated_.insert(result);
return result; return result;
} }
void deallocate(void* ptr) { void deallocate(void* ptr, size_t n) {
EXPECT_EQ(1, allocated_.erase(ptr)); EXPECT_EQ(1, allocated_.erase(ptr));
arena_->deallocate(ptr); arena_.get().deallocate(ptr, n);
} }
bool isEmpty() const { return allocated_.empty(); } bool isEmpty() const { return allocated_.empty(); }
ParentAlloc* arena_; std::reference_wrapper<ParentAlloc> arena_;
std::set<void*> allocated_; std::set<void*> allocated_;
}; };
} // namespace } // namespace
namespace folly { namespace folly {
template <> template <typename ParentAlloc>
struct IsArenaAllocator<ParanoidArenaAlloc<SysArena>> : std::true_type {}; struct AllocatorHasTrivialDeallocate<ParanoidArenaAlloc<ParentAlloc>>
: AllocatorHasTrivialDeallocate<ParentAlloc> {};
} // namespace folly } // namespace folly
namespace { namespace {
...@@ -472,29 +477,37 @@ void TestNonTrivialDeallocation(SkipListPtrType& list) { ...@@ -472,29 +477,37 @@ void TestNonTrivialDeallocation(SkipListPtrType& list) {
} }
template <typename ParentAlloc> template <typename ParentAlloc>
void NonTrivialDeallocationWithParanoid() { void NonTrivialDeallocationWithParanoid(ParentAlloc& parentAlloc) {
using Alloc = ParanoidArenaAlloc<ParentAlloc>; using ParanoidAlloc = ParanoidArenaAlloc<ParentAlloc>;
using Alloc = CxxAllocatorAdaptor<void, ParanoidAlloc>;
using ParanoidSkipListType = using ParanoidSkipListType =
ConcurrentSkipList<NonTrivialValue, std::less<NonTrivialValue>, Alloc>; ConcurrentSkipList<NonTrivialValue, std::less<NonTrivialValue>, Alloc>;
ParentAlloc parentAlloc; ParanoidAlloc paranoidAlloc(parentAlloc);
Alloc paranoidAlloc(&parentAlloc); Alloc alloc(paranoidAlloc);
auto list = ParanoidSkipListType::createInstance(10, paranoidAlloc); auto list = ParanoidSkipListType::createInstance(10, alloc);
TestNonTrivialDeallocation(list); TestNonTrivialDeallocation(list);
EXPECT_TRUE(paranoidAlloc.isEmpty()); EXPECT_TRUE(paranoidAlloc.isEmpty());
} }
TEST(ConcurrentSkipList, NonTrivialDeallocationWithParanoidSysAlloc) { TEST(ConcurrentSkipList, NonTrivialDeallocationWithParanoidSysAlloc) {
NonTrivialDeallocationWithParanoid<SysAlloc>(); SysAllocator<void> alloc;
NonTrivialDeallocationWithParanoid(alloc);
} }
TEST(ConcurrentSkipList, NonTrivialDeallocationWithParanoidSysArena) { TEST(ConcurrentSkipList, NonTrivialDeallocationWithParanoidSysArena) {
NonTrivialDeallocationWithParanoid<SysArena>(); SysArena arena;
SysArenaAllocator<void> alloc(arena);
NonTrivialDeallocationWithParanoid(alloc);
} }
TEST(ConcurrentSkipList, NonTrivialDeallocationWithSysArena) { TEST(ConcurrentSkipList, NonTrivialDeallocationWithSysArena) {
using SysArenaSkipListType = using SysArenaSkipListType = ConcurrentSkipList<
ConcurrentSkipList<NonTrivialValue, std::less<NonTrivialValue>, SysArena>; NonTrivialValue,
auto list = SysArenaSkipListType::createInstance(10); std::less<NonTrivialValue>,
SysArenaAllocator<void>>;
SysArena arena;
SysArenaAllocator<void> alloc(arena);
auto list = SysArenaSkipListType::createInstance(10, alloc);
TestNonTrivialDeallocation(list); TestNonTrivialDeallocation(list);
} }
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <folly/String.h> #include <folly/String.h>
#include <folly/memory/Arena.h> #include <folly/memory/Arena.h>
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h> #include <folly/portability/GTest.h>
using namespace folly; using namespace folly;
...@@ -57,70 +58,39 @@ TEST(to_weak_ptr, example) { ...@@ -57,70 +58,39 @@ TEST(to_weak_ptr, example) {
EXPECT_EQ(3, (to_weak_ptr(decltype(s)(s)).lock(), s.use_count())) << "rvalue"; EXPECT_EQ(3, (to_weak_ptr(decltype(s)(s)).lock(), s.use_count())) << "rvalue";
} }
TEST(allocate_sys_buffer, compiles) { TEST(SysAllocator, allocate_unique) {
auto buf = allocate_sys_buffer(256); SysAllocator<float> alloc;
// Freed at the end of the scope. auto ptr = allocate_unique<float>(alloc, 3.);
EXPECT_EQ(3., *ptr);
} }
template <std::size_t> struct T {}; TEST(SysAllocator, example_vector) {
template <std::size_t> struct S {}; SysAllocator<float> alloc;
template <std::size_t> struct P {}; std::vector<float, SysAllocator<float>> nums(alloc);
nums.push_back(3.);
TEST(as_stl_allocator, sanity_check) { nums.push_back(5.);
typedef StlAllocator<SysArena, int> stl_arena_alloc; EXPECT_THAT(nums, testing::ElementsAreArray({3., 5.}));
EXPECT_TRUE((std::is_same<
as_stl_allocator<int, SysArena>::type,
stl_arena_alloc
>::value));
EXPECT_TRUE((std::is_same<
as_stl_allocator<int, stl_arena_alloc>::type,
stl_arena_alloc
>::value));
} }
TEST(StlAllocator, void_allocator) { TEST(AlignedSysAllocator, allocate_unique) {
typedef StlAllocator<SysArena, void> void_allocator; AlignedSysAllocator<float> alloc(1024);
SysArena arena; auto ptr = allocate_unique<float>(alloc, 3.);
void_allocator valloc(&arena); EXPECT_EQ(3., *ptr);
EXPECT_EQ(0, std::uintptr_t(ptr.get()) % 1024);
typedef void_allocator::rebind<int>::other int_allocator; }
int_allocator ialloc(valloc);
auto i = std::allocate_shared<int>(ialloc, 10); TEST(AlignedSysAllocator, example_vector) {
ASSERT_NE(nullptr, i.get()); AlignedSysAllocator<float> alloc(1024);
EXPECT_EQ(10, *i); std::vector<float, AlignedSysAllocator<float>> nums(alloc);
i.reset(); nums.push_back(3.);
ASSERT_EQ(nullptr, i.get()); nums.push_back(5.);
EXPECT_THAT(nums, testing::ElementsAreArray({3., 5.}));
EXPECT_EQ(0, std::uintptr_t(nums.data()) % 1024);
} }
TEST(rebind_allocator, sanity_check) { TEST(allocate_sys_buffer, compiles) {
std::allocator<long> alloc; auto buf = allocate_sys_buffer(256);
// Freed at the end of the scope.
auto i = std::allocate_shared<int>(
rebind_allocator<int, decltype(alloc)>(alloc), 10
);
ASSERT_NE(nullptr, i.get());
EXPECT_EQ(10, *i);
i.reset();
ASSERT_EQ(nullptr, i.get());
auto d = std::allocate_shared<double>(
rebind_allocator<double>(alloc), 5.6
);
ASSERT_NE(nullptr, d.get());
EXPECT_EQ(5.6, *d);
d.reset();
ASSERT_EQ(nullptr, d.get());
auto s = std::allocate_shared<std::string>(
rebind_allocator<std::string>(alloc), "HELLO, WORLD"
);
ASSERT_NE(nullptr, s.get());
EXPECT_EQ("HELLO, WORLD", *s);
s.reset();
ASSERT_EQ(nullptr, s.get());
} }
template <typename C> template <typename C>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment