Commit 0085c239 authored by Yedidya Feldblum, committed by Facebook Github Bot

Rewrite allocators for the era of std::allocator_traits

Summary:
[Folly] Rewrite allocators for the era of `std::allocator_traits`.

Provide minimal interfaces which comply with all required elements of the C++ `Allocator` concept. Change all (*) uses to go through `std::allocator_traits` consistently, as is required of all allocator-aware library types.
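As a minimal sketch (not part of this diff; the helper names are illustrative only), the traits-based calling convention that allocator-aware code is expected to follow looks like this, and it mirrors the `SkipListNode::create`/`destroy` edits further down. It assumes the allocator's `pointer` type is a plain `T*`, as it is for the allocators touched here:

```cpp
#include <memory>
#include <utility>

// Allocator-aware code calls through std::allocator_traits so that optional
// allocator members (construct, destroy, rebind, size_type, ...) are filled
// in with defaults when a minimal allocator does not provide them.
template <typename Alloc, typename... Args>
typename std::allocator_traits<Alloc>::pointer constructOne(
    Alloc& alloc, Args&&... args) {
  using Traits = std::allocator_traits<Alloc>;
  auto p = Traits::allocate(alloc, 1);                   // may call Alloc::allocate
  Traits::construct(alloc, p, std::forward<Args>(args)...); // defaults to placement-new
  return p;
}

template <typename Alloc>
void destroyOne(Alloc& alloc, typename std::allocator_traits<Alloc>::pointer p) {
  using Traits = std::allocator_traits<Alloc>;
  Traits::destroy(alloc, p);           // defaults to calling the destructor
  Traits::deallocate(alloc, p, 1);     // always forwards to Alloc::deallocate
}
```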

* Rename `SysAlloc` to `SysAllocator` (now a template over the allocated type).
* Replace `StlAllocator` with `CxxAllocatorAdaptor`, which has stricter semantics.
* `Arena` is no longer a C++ Allocator because it is not freely copyable. Change code which used it as a C++ Allocator to wrap it in `CxxAllocatorAdaptor` instead (a usage sketch follows below).
* `ThreadCachedArena` likewise.

(*) Hopefully.
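A minimal usage sketch, modeled on the updated `ArenaTest` further down in this diff (the include paths are assumptions): containers now take an adaptor alias that refers to the arena, rather than the arena itself.

```cpp
#include <vector>

#include <folly/Memory.h>        // CxxAllocatorAdaptor (assumed location)
#include <folly/memory/Arena.h>  // SysArena, SysArenaAllocator

void arenaBackedVector() {
  folly::SysArena arena;
  // The adaptor holds a reference to the arena; the arena itself stays
  // non-copyable and non-movable.
  folly::SysArenaAllocator<size_t> alloc(arena);
  std::vector<size_t, folly::SysArenaAllocator<size_t>> vec(alloc);
  for (size_t i = 0; i < 1000; ++i) {
    vec.push_back(i);
  }
  // Deallocation through the arena is a no-op; all storage is released
  // when `arena` is destroyed.
}
```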

Reviewed By: nbronson

Differential Revision: D7208794

fbshipit-source-id: 270588c9c3d817f4abd9fb49eed5eb9f03f96da2
parent 99225754
......@@ -62,22 +62,24 @@ class SkipListNode : private boost::noncopyable {
size_t size = sizeof(SkipListNode) +
height * sizeof(std::atomic<SkipListNode*>);
auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
auto storage = std::allocator_traits<NodeAlloc>::allocate(alloc, size);
// do placement new
new (node) SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
return node;
return new (storage)
SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
}
template <typename NodeAlloc>
static void destroy(NodeAlloc& alloc, SkipListNode* node) {
size_t size = sizeof(SkipListNode) +
node->height_ * sizeof(std::atomic<SkipListNode*>);
node->~SkipListNode();
alloc.deallocate(node);
std::allocator_traits<NodeAlloc>::deallocate(alloc, node, size);
}
template <typename NodeAlloc>
struct DestroyIsNoOp : std::integral_constant<bool,
IsArenaAllocator<NodeAlloc>::value &&
boost::has_trivial_destructor<SkipListNode>::value> { };
struct DestroyIsNoOp : StrictConjunction<
AllocatorHasTrivialDeallocate<NodeAlloc>,
boost::has_trivial_destructor<SkipListNode>> {};
// copy the head node to a new head node assuming lock acquired
SkipListNode* copyHead(SkipListNode* node) {
......
......@@ -138,9 +138,9 @@ namespace folly {
template <
typename T,
typename Comp = std::less<T>,
// All nodes are allocated using provided SimpleAllocator,
// All nodes are allocated using provided SysAllocator,
// it should be thread-safe.
typename NodeAlloc = SysAlloc,
typename NodeAlloc = SysAllocator<void>,
int MAX_HEIGHT = 24>
class ConcurrentSkipList {
// MAX_HEIGHT needs to be at least 2 to suppress compiler
......
......@@ -405,7 +405,7 @@ class SimpleAllocator {
* Note that allocation and deallocation takes a per-sizeclass lock.
*/
template <size_t Stripes>
class CoreAllocator {
class CoreRawAllocator {
public:
class Allocator {
static constexpr size_t AllocSize{4096};
......@@ -443,7 +443,7 @@ class CoreAllocator {
}
return allocators_[cl].allocate();
}
void deallocate(void* mem) {
void deallocate(void* mem, size_t = 0) {
if (!mem) {
return;
}
......@@ -469,19 +469,14 @@ class CoreAllocator {
Allocator allocators_[Stripes];
};
template <size_t Stripes>
typename CoreAllocator<Stripes>::Allocator* getCoreAllocator(size_t stripe) {
template <typename T, size_t Stripes>
CxxAllocatorAdaptor<T, typename CoreRawAllocator<Stripes>::Allocator>
getCoreAllocator(size_t stripe) {
// We cannot make sure that the allocator will be destroyed after
// all the objects allocated with it, so we leak it.
static Indestructible<CoreAllocator<Stripes>> allocator;
return allocator->get(stripe);
}
template <typename T, size_t Stripes>
StlAllocator<typename CoreAllocator<Stripes>::Allocator, T> getCoreAllocatorStl(
size_t stripe) {
auto alloc = getCoreAllocator<Stripes>(stripe);
return StlAllocator<typename CoreAllocator<Stripes>::Allocator, T>(alloc);
static Indestructible<CoreRawAllocator<Stripes>> allocator;
return CxxAllocatorAdaptor<T, typename CoreRawAllocator<Stripes>::Allocator>(
*allocator->get(stripe));
}
} // namespace folly
......@@ -44,11 +44,11 @@ class CoreCachedSharedPtr {
}
void reset(const std::shared_ptr<T>& p = nullptr) {
// Allocate each Holder in a different CoreAllocator stripe to
// Allocate each Holder in a different CoreRawAllocator stripe to
// prevent false sharing. Their control blocks will be adjacent
// thanks to allocate_shared().
for (auto slot : folly::enumerate(slots_)) {
auto alloc = getCoreAllocatorStl<Holder, kNumSlots>(slot.index);
auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
auto holder = std::allocate_shared<Holder>(alloc, p);
*slot = std::shared_ptr<T>(holder, p.get());
}
......@@ -114,11 +114,11 @@ class AtomicCoreCachedSharedPtr {
void reset(const std::shared_ptr<T>& p = nullptr) {
auto newslots = folly::make_unique<Slots>();
// Allocate each Holder in a different CoreAllocator stripe to
// Allocate each Holder in a different CoreRawAllocator stripe to
// prevent false sharing. Their control blocks will be adjacent
// thanks to allocate_shared().
for (auto slot : folly::enumerate(newslots->slots_)) {
auto alloc = getCoreAllocatorStl<Holder, kNumSlots>(slot.index);
auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
auto holder = std::allocate_shared<Holder>(alloc, p);
*slot = std::shared_ptr<T>(holder, p.get());
}
......
......@@ -21,7 +21,6 @@
#include <glog/logging.h>
#include <memory>
#include <thread>
#include <type_traits>
#include <unordered_map>
using namespace folly;
......@@ -412,8 +411,8 @@ TEST(AccessSpreader, Wrapping) {
}
}
TEST(CoreAllocator, Basic) {
CoreAllocator<32> alloc;
TEST(CoreRawAllocator, Basic) {
CoreRawAllocator<32> alloc;
auto a = alloc.get(0);
auto res = a->allocate(8);
memset(res, 0, 8);
......
......@@ -151,7 +151,7 @@ void* JemallocNodumpAllocator::alloc(
#endif // FOLLY_JEMALLOC_NODUMP_ALLOCATOR_SUPPORTED
void JemallocNodumpAllocator::deallocate(void* p) {
void JemallocNodumpAllocator::deallocate(void* p, size_t) {
dallocx != nullptr ? dallocx(p, flags_) : free(p);
}
......
......@@ -80,7 +80,7 @@ class JemallocNodumpAllocator {
void* allocate(size_t size);
void* reallocate(void* p, size_t size);
void deallocate(void* p);
void deallocate(void* p, size_t = 0);
unsigned getArenaIndex() const { return arena_index_; }
int getFlags() const { return flags_; }
......
......@@ -30,14 +30,14 @@ Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) {
allocSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, allocSize);
}
void* mem = alloc.allocate(allocSize);
void* mem = std::allocator_traits<Alloc>::allocate(alloc, allocSize);
return std::make_pair(new (mem) Block(), allocSize - sizeof(Block));
}
template <class Alloc>
void Arena<Alloc>::Block::deallocate(Alloc& alloc) {
this->~Block();
alloc.deallocate(this);
std::allocator_traits<Alloc>::deallocate(alloc, this, 1);
}
template <class Alloc>
......
......@@ -36,16 +36,10 @@ namespace folly {
* Simple arena: allocate memory which gets freed when the arena gets
* destroyed.
*
* The arena itself allocates memory using a custom allocator which provides
* the following interface (same as required by StlAllocator in StlAllocator.h)
* The arena itself allocates memory using a custom allocator which conforms
* to the C++ concept Allocator.
*
* void* allocate(size_t size);
* Allocate a block of size bytes, properly aligned to the maximum
* alignment required on your system; throw std::bad_alloc if the
* allocation can't be satisfied.
*
* void deallocate(void* ptr);
* Deallocate a previously allocated block.
* http://en.cppreference.com/w/cpp/concept/Allocator
*
* You may also specialize ArenaAllocatorTraits for your allocator type to
* provide:
......@@ -101,7 +95,7 @@ class Arena {
return r;
}
void deallocate(void* /* p */) {
void deallocate(void* /* p */, size_t = 0) {
// Deallocate? Never!
}
......@@ -121,13 +115,11 @@ class Arena {
return bytesUsed_;
}
// not copyable
// not copyable or movable
Arena(const Arena&) = delete;
Arena& operator=(const Arena&) = delete;
// movable
Arena(Arena&&) = default;
Arena& operator=(Arena&&) = default;
Arena(Arena&&) = delete;
Arena& operator=(Arena&&) = delete;
private:
struct Block;
......@@ -184,7 +176,7 @@ class Arena {
void* allocateSlow(size_t size);
// Empty member optimization: package Alloc with a non-empty member
// in case Alloc is empty (as it is in the case of SysAlloc).
// in case Alloc is empty (as it is in the case of SysAllocator).
struct AllocAndSize : public Alloc {
explicit AllocAndSize(const Alloc& a, size_t s)
: Alloc(a), minBlockSize(s) {
......@@ -210,7 +202,7 @@ class Arena {
};
template <class Alloc>
struct IsArenaAllocator<Arena<Alloc>> : std::true_type { };
struct AllocatorHasTrivialDeallocate<Arena<Alloc>> : std::true_type {};
/**
* By default, don't pad the given size.
......@@ -221,8 +213,8 @@ struct ArenaAllocatorTraits {
};
template <>
struct ArenaAllocatorTraits<SysAlloc> {
static size_t goodSize(const SysAlloc& /* alloc */, size_t size) {
struct ArenaAllocatorTraits<SysAllocator<void>> {
static size_t goodSize(const SysAllocator<void>& /* alloc */, size_t size) {
return goodMallocSize(size);
}
};
......@@ -230,17 +222,23 @@ struct ArenaAllocatorTraits<SysAlloc> {
/**
* Arena that uses the system allocator (malloc / free)
*/
class SysArena : public Arena<SysAlloc> {
class SysArena : public Arena<SysAllocator<void>> {
public:
explicit SysArena(size_t minBlockSize = kDefaultMinBlockSize,
size_t sizeLimit = kNoSizeLimit,
size_t maxAlign = kDefaultMaxAlign)
: Arena<SysAlloc>(SysAlloc(), minBlockSize, sizeLimit, maxAlign) {
}
explicit SysArena(
size_t minBlockSize = kDefaultMinBlockSize,
size_t sizeLimit = kNoSizeLimit,
size_t maxAlign = kDefaultMaxAlign)
: Arena<SysAllocator<void>>({}, minBlockSize, sizeLimit, maxAlign) {}
};
template <>
struct IsArenaAllocator<SysArena> : std::true_type { };
struct AllocatorHasTrivialDeallocate<SysArena> : std::true_type {};
template <typename T, typename Alloc>
using ArenaAllocator = CxxAllocatorAdaptor<T, Arena<Alloc>>;
template <typename T>
using SysArenaAllocator = ArenaAllocator<T, SysAllocator<void>>;
} // namespace folly
......
......@@ -51,7 +51,7 @@ class ThreadCachedArena {
return arena->allocate(size);
}
void deallocate(void* /* p */) {
void deallocate(void* /* p */, size_t = 0) {
// Deallocate? Never!
}
......@@ -82,6 +82,9 @@ class ThreadCachedArena {
};
template <>
struct IsArenaAllocator<ThreadCachedArena> : std::true_type { };
struct AllocatorHasTrivialDeallocate<ThreadCachedArena> : std::true_type {};
template <typename T>
using ThreadCachedArenaAllocator = CxxAllocatorAdaptor<T, ThreadCachedArena>;
} // namespace folly
......@@ -25,7 +25,7 @@
using namespace folly;
static_assert(IsArenaAllocator<SysArena>::value, "");
static_assert(AllocatorHasTrivialDeallocate<SysArena>::value, "");
TEST(Arena, SizeSanity) {
std::set<size_t*> allocatedItems;
......@@ -79,7 +79,7 @@ TEST(Arena, SizeSanity) {
// Nuke 'em all
for (const auto& item : allocatedItems) {
arena.deallocate(item);
arena.deallocate(item, 0 /* unused */);
}
//The total size should be the same
EXPECT_TRUE(arena.totalSize() >= minimum_size);
......@@ -134,8 +134,8 @@ TEST(Arena, Vector) {
EXPECT_EQ(arena.totalSize(), sizeof(SysArena));
std::vector<size_t, StlAllocator<SysArena, size_t>>
vec { {}, StlAllocator<SysArena, size_t>(&arena) };
std::vector<size_t, SysArenaAllocator<size_t>> vec{
{}, SysArenaAllocator<size_t>(arena)};
for (size_t i = 0; i < 1000; i++) {
vec.push_back(i);
......@@ -157,17 +157,6 @@ TEST(Arena, SizeLimit) {
EXPECT_THROW(arena.allocate(maxSize + 1), std::bad_alloc);
}
TEST(Arena, MoveArena) {
SysArena arena(sizeof(size_t) * 2);
arena.allocate(sizeof(size_t));
auto totalSize = arena.totalSize();
auto bytesUsed = arena.bytesUsed();
SysArena moved(std::move(arena));
EXPECT_EQ(totalSize, moved.totalSize());
EXPECT_EQ(bytesUsed, moved.bytesUsed());
}
int main(int argc, char *argv[]) {
testing::InitGoogleTest(&argc, argv);
gflags::ParseCommandLineFlags(&argc, &argv, true);
......
......@@ -154,16 +154,21 @@ TEST(ThreadCachedArena, MultiThreaded) {
mainTester.verify();
}
TEST(ThreadCachedArena, StlAllocator) {
typedef std::unordered_map<
int, int, std::hash<int>, std::equal_to<int>,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
TEST(ThreadCachedArena, ThreadCachedArenaAllocator) {
using Map = std::unordered_map<
int,
int,
std::hash<int>,
std::equal_to<int>,
ThreadCachedArenaAllocator<std::pair<const int, int>>>;
static const size_t requestedBlockSize = 64;
ThreadCachedArena arena(requestedBlockSize);
Map map {0, std::hash<int>(), std::equal_to<int>(),
StlAllocator<ThreadCachedArena, std::pair<const int, int>>(&arena)};
Map map{0,
std::hash<int>(),
std::equal_to<int>(),
ThreadCachedArenaAllocator<std::pair<const int, int>>(arena)};
for (int i = 0; i < 1000; i++) {
map[i] = i;
......@@ -179,7 +184,7 @@ namespace {
static const int kNumValues = 10000;
BENCHMARK(bmUMStandard, iters) {
typedef std::unordered_map<int, int> Map;
using Map = std::unordered_map<int, int>;
while (iters--) {
Map map {0};
......@@ -190,16 +195,20 @@ BENCHMARK(bmUMStandard, iters) {
}
BENCHMARK(bmUMArena, iters) {
typedef std::unordered_map<
int, int, std::hash<int>, std::equal_to<int>,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
using Map = std::unordered_map<
int,
int,
std::hash<int>,
std::equal_to<int>,
ThreadCachedArenaAllocator<std::pair<const int, int>>>;
while (iters--) {
ThreadCachedArena arena;
Map map {0, std::hash<int>(), std::equal_to<int>(),
StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
&arena)};
Map map{0,
std::hash<int>(),
std::equal_to<int>(),
ThreadCachedArenaAllocator<std::pair<const int, int>>(arena)};
for (int i = 0; i < kNumValues; i++) {
map[i] = i;
......@@ -210,7 +219,7 @@ BENCHMARK(bmUMArena, iters) {
BENCHMARK_DRAW_LINE()
BENCHMARK(bmMStandard, iters) {
typedef std::map<int, int> Map;
using Map = std::map<int, int>;
while (iters--) {
Map map;
......@@ -223,16 +232,17 @@ BENCHMARK(bmMStandard, iters) {
BENCHMARK_DRAW_LINE()
BENCHMARK(bmMArena, iters) {
typedef std::map<
int, int, std::less<int>,
StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
using Map = std::map<
int,
int,
std::less<int>,
ThreadCachedArenaAllocator<std::pair<const int, int>>>;
while (iters--) {
ThreadCachedArena arena;
Map map {std::less<int>(),
StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
&arena)};
Map map{std::less<int>(),
ThreadCachedArenaAllocator<std::pair<const int, int>>(arena)};
for (int i = 0; i < kNumValues; i++) {
map[i] = i;
......
......@@ -24,11 +24,6 @@
using namespace folly;
static_assert(
is_simple_allocator<SysArena, int>::value,
"SysArena should be a simple allocator"
);
struct global_counter {
global_counter(): count_(0) {}
......@@ -61,7 +56,7 @@ struct Foo {
template <typename Allocator>
void unique_ptr_test(Allocator& allocator) {
typedef typename AllocatorUniquePtr<Foo, Allocator>::type ptr_type;
using ptr_type = std::unique_ptr<Foo, allocator_delete<Allocator>>;
global_counter counter;
EXPECT_EQ(counter.count(), 0);
......@@ -95,18 +90,13 @@ void unique_ptr_test(Allocator& allocator) {
}
EXPECT_EQ(counter.count(), 1);
StlAllocator<Allocator, Foo>().destroy(foo);
std::allocator_traits<Allocator>::destroy(allocator, foo);
EXPECT_EQ(counter.count(), 0);
}
TEST(ArenaSmartPtr, unique_ptr_SysArena) {
SysArena arena;
unique_ptr_test(arena);
}
TEST(ArenaSmartPtr, unique_ptr_StlAlloc_SysArena) {
SysArena arena;
StlAllocator<SysArena, Foo> alloc(&arena);
SysArenaAllocator<Foo> alloc(arena);
unique_ptr_test(alloc);
}
......@@ -122,7 +112,7 @@ void shared_ptr_test(Allocator& allocator) {
EXPECT_EQ(foo.use_count(), 0);
{
auto p = folly::allocate_shared<Foo>(allocator, counter);
auto p = std::allocate_shared<Foo>(allocator, counter);
EXPECT_EQ(counter.count(), 1);
EXPECT_EQ(p.use_count(), 1);
......@@ -130,7 +120,7 @@ void shared_ptr_test(Allocator& allocator) {
EXPECT_EQ(counter.count(), 0);
EXPECT_EQ(p.use_count(), 0);
p = folly::allocate_shared<Foo>(allocator, counter);
p = std::allocate_shared<Foo>(allocator, counter);
EXPECT_EQ(counter.count(), 1);
EXPECT_EQ(p.use_count(), 1);
......@@ -167,12 +157,7 @@ void shared_ptr_test(Allocator& allocator) {
TEST(ArenaSmartPtr, shared_ptr_SysArena) {
SysArena arena;
shared_ptr_test(arena);
}
TEST(ArenaSmartPtr, shared_ptr_StlAlloc_SysArena) {
SysArena arena;
StlAllocator<SysArena, Foo> alloc(&arena);
SysArenaAllocator<Foo> alloc(arena);
shared_ptr_test(alloc);
}
......
......@@ -40,29 +40,34 @@ namespace {
template <typename ParentAlloc>
struct ParanoidArenaAlloc {
explicit ParanoidArenaAlloc(ParentAlloc* arena) : arena_(arena) {}
explicit ParanoidArenaAlloc(ParentAlloc& arena) : arena_(arena) {}
ParanoidArenaAlloc(ParanoidArenaAlloc const&) = delete;
ParanoidArenaAlloc(ParanoidArenaAlloc&&) = delete;
ParanoidArenaAlloc& operator=(ParanoidArenaAlloc const&) = delete;
ParanoidArenaAlloc& operator=(ParanoidArenaAlloc&&) = delete;
void* allocate(size_t size) {
void* result = arena_->allocate(size);
void* result = arena_.get().allocate(size);
allocated_.insert(result);
return result;
}
void deallocate(void* ptr) {
void deallocate(void* ptr, size_t n) {
EXPECT_EQ(1, allocated_.erase(ptr));
arena_->deallocate(ptr);
arena_.get().deallocate(ptr, n);
}
bool isEmpty() const { return allocated_.empty(); }
ParentAlloc* arena_;
std::reference_wrapper<ParentAlloc> arena_;
std::set<void*> allocated_;
};
} // namespace
namespace folly {
template <>
struct IsArenaAllocator<ParanoidArenaAlloc<SysArena>> : std::true_type {};
template <typename ParentAlloc>
struct AllocatorHasTrivialDeallocate<ParanoidArenaAlloc<ParentAlloc>>
: AllocatorHasTrivialDeallocate<ParentAlloc> {};
} // namespace folly
namespace {
......@@ -472,29 +477,37 @@ void TestNonTrivialDeallocation(SkipListPtrType& list) {
}
template <typename ParentAlloc>
void NonTrivialDeallocationWithParanoid() {
using Alloc = ParanoidArenaAlloc<ParentAlloc>;
void NonTrivialDeallocationWithParanoid(ParentAlloc& parentAlloc) {
using ParanoidAlloc = ParanoidArenaAlloc<ParentAlloc>;
using Alloc = CxxAllocatorAdaptor<void, ParanoidAlloc>;
using ParanoidSkipListType =
ConcurrentSkipList<NonTrivialValue, std::less<NonTrivialValue>, Alloc>;
ParentAlloc parentAlloc;
Alloc paranoidAlloc(&parentAlloc);
auto list = ParanoidSkipListType::createInstance(10, paranoidAlloc);
ParanoidAlloc paranoidAlloc(parentAlloc);
Alloc alloc(paranoidAlloc);
auto list = ParanoidSkipListType::createInstance(10, alloc);
TestNonTrivialDeallocation(list);
EXPECT_TRUE(paranoidAlloc.isEmpty());
}
TEST(ConcurrentSkipList, NonTrivialDeallocationWithParanoidSysAlloc) {
NonTrivialDeallocationWithParanoid<SysAlloc>();
SysAllocator<void> alloc;
NonTrivialDeallocationWithParanoid(alloc);
}
TEST(ConcurrentSkipList, NonTrivialDeallocationWithParanoidSysArena) {
NonTrivialDeallocationWithParanoid<SysArena>();
SysArena arena;
SysArenaAllocator<void> alloc(arena);
NonTrivialDeallocationWithParanoid(alloc);
}
TEST(ConcurrentSkipList, NonTrivialDeallocationWithSysArena) {
using SysArenaSkipListType =
ConcurrentSkipList<NonTrivialValue, std::less<NonTrivialValue>, SysArena>;
auto list = SysArenaSkipListType::createInstance(10);
using SysArenaSkipListType = ConcurrentSkipList<
NonTrivialValue,
std::less<NonTrivialValue>,
SysArenaAllocator<void>>;
SysArena arena;
SysArenaAllocator<void> alloc(arena);
auto list = SysArenaSkipListType::createInstance(10, alloc);
TestNonTrivialDeallocation(list);
}
......
......@@ -23,6 +23,7 @@
#include <folly/String.h>
#include <folly/memory/Arena.h>
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
using namespace folly;
......@@ -57,70 +58,39 @@ TEST(to_weak_ptr, example) {
EXPECT_EQ(3, (to_weak_ptr(decltype(s)(s)).lock(), s.use_count())) << "rvalue";
}
TEST(allocate_sys_buffer, compiles) {
auto buf = allocate_sys_buffer(256);
// Freed at the end of the scope.
TEST(SysAllocator, allocate_unique) {
SysAllocator<float> alloc;
auto ptr = allocate_unique<float>(alloc, 3.);
EXPECT_EQ(3., *ptr);
}
template <std::size_t> struct T {};
template <std::size_t> struct S {};
template <std::size_t> struct P {};
TEST(as_stl_allocator, sanity_check) {
typedef StlAllocator<SysArena, int> stl_arena_alloc;
EXPECT_TRUE((std::is_same<
as_stl_allocator<int, SysArena>::type,
stl_arena_alloc
>::value));
EXPECT_TRUE((std::is_same<
as_stl_allocator<int, stl_arena_alloc>::type,
stl_arena_alloc
>::value));
TEST(SysAllocator, example_vector) {
SysAllocator<float> alloc;
std::vector<float, SysAllocator<float>> nums(alloc);
nums.push_back(3.);
nums.push_back(5.);
EXPECT_THAT(nums, testing::ElementsAreArray({3., 5.}));
}
TEST(StlAllocator, void_allocator) {
typedef StlAllocator<SysArena, void> void_allocator;
SysArena arena;
void_allocator valloc(&arena);
typedef void_allocator::rebind<int>::other int_allocator;
int_allocator ialloc(valloc);
TEST(AlignedSysAllocator, allocate_unique) {
AlignedSysAllocator<float> alloc(1024);
auto ptr = allocate_unique<float>(alloc, 3.);
EXPECT_EQ(3., *ptr);
EXPECT_EQ(0, std::uintptr_t(ptr.get()) % 1024);
}
auto i = std::allocate_shared<int>(ialloc, 10);
ASSERT_NE(nullptr, i.get());
EXPECT_EQ(10, *i);
i.reset();
ASSERT_EQ(nullptr, i.get());
TEST(AlignedSysAllocator, example_vector) {
AlignedSysAllocator<float> alloc(1024);
std::vector<float, AlignedSysAllocator<float>> nums(alloc);
nums.push_back(3.);
nums.push_back(5.);
EXPECT_THAT(nums, testing::ElementsAreArray({3., 5.}));
EXPECT_EQ(0, std::uintptr_t(nums.data()) % 1024);
}
TEST(rebind_allocator, sanity_check) {
std::allocator<long> alloc;
auto i = std::allocate_shared<int>(
rebind_allocator<int, decltype(alloc)>(alloc), 10
);
ASSERT_NE(nullptr, i.get());
EXPECT_EQ(10, *i);
i.reset();
ASSERT_EQ(nullptr, i.get());
auto d = std::allocate_shared<double>(
rebind_allocator<double>(alloc), 5.6
);
ASSERT_NE(nullptr, d.get());
EXPECT_EQ(5.6, *d);
d.reset();
ASSERT_EQ(nullptr, d.get());
auto s = std::allocate_shared<std::string>(
rebind_allocator<std::string>(alloc), "HELLO, WORLD"
);
ASSERT_NE(nullptr, s.get());
EXPECT_EQ("HELLO, WORLD", *s);
s.reset();
ASSERT_EQ(nullptr, s.get());
TEST(allocate_sys_buffer, compiles) {
auto buf = allocate_sys_buffer(256);
// Freed at the end of the scope.
}
template <typename C>
......