Commit 2129f9b5 authored by Max Wang's avatar Max Wang Committed by Jordan DeLong

Parametrize allocator in AtomicHash{Array,Map}

Summary: Maybe at some point somebody won't want malloc, e.g. me.

Test Plan: Ran AtomicHashArrayTest using an mmap allocator.

Reviewed By: delong.j@fb.com

FB internal diff: D960192
parent 4c787a9d
......@@ -24,8 +24,9 @@
namespace folly {
// AtomicHashArray private constructor --
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
KeyT erasedKey, double maxLoadFactor, size_t cacheSize)
: capacity_(capacity), maxEntries_(size_t(maxLoadFactor * capacity_ + 0.5)),
......@@ -41,9 +42,11 @@ AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
* of key and returns true, or if key does not exist returns false and
* ret.index is set to capacity_.
*/
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findInternal(const KeyT key_in) {
DCHECK_NE(key_in, kEmptyKey_);
DCHECK_NE(key_in, kLockedKey_);
......@@ -77,10 +80,12 @@ findInternal(const KeyT key_in) {
* this will be the previously inserted value, and if the map is full it is
* default.
*/
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
template <class T>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
typename AtomicHashArray<KeyT, ValueT,
HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insertInternal(KeyT key_in, T&& value) {
const short NO_NEW_INSERTS = 1;
const short NO_PENDING_INSERTS = 2;
......@@ -193,8 +198,9 @@ insertInternal(KeyT key_in, T&& value) {
* erased key will never be reused. If there's an associated value, we won't
* touch it either.
*/
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
erase(KeyT key_in) {
CHECK_NE(key_in, kEmptyKey_);
CHECK_NE(key_in, kLockedKey_);
......@@ -236,13 +242,17 @@ erase(KeyT key_in) {
}
}
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
const typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::Config
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::defaultConfig;
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
const typename AtomicHashArray<KeyT, ValueT,
HashFcn, EqualFcn, Allocator>::Config
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
HashFcn, EqualFcn, Allocator>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
create(size_t maxSize, const Config& c) {
CHECK_LE(c.maxLoadFactor, 1.0);
CHECK_GT(c.maxLoadFactor, 0.0);
......@@ -250,10 +260,16 @@ create(size_t maxSize, const Config& c) {
size_t capacity = size_t(maxSize / c.maxLoadFactor);
size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;
std::unique_ptr<void, void(*)(void*)> mem(malloc(sz), free);
new(mem.get()) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
c.maxLoadFactor, c.entryCountThreadCacheSize);
SmartPtr map(static_cast<AtomicHashArray*>(mem.release()));
auto const mem = Allocator().allocate(sz);
try {
new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
c.maxLoadFactor, c.entryCountThreadCacheSize);
} catch (...) {
Allocator().deallocate(mem, sz);
throw;
}
SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));
/*
* Mark all cells as empty.
......@@ -274,22 +290,28 @@ create(size_t maxSize, const Config& c) {
return map;
}
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
destroy(AtomicHashArray* p) {
assert(p);
size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;
FOR_EACH_RANGE(i, 0, p->capacity_) {
if (p->cells_[i].first != p->kEmptyKey_) {
p->cells_[i].~value_type();
}
}
p->~AtomicHashArray();
free(p);
Allocator().deallocate((char *)p, sz);
}
// clear -- clears all keys and values in the map and resets all counters
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
clear() {
FOR_EACH_RANGE(i, 0, capacity_) {
if (cells_[i].first != kEmptyKey_) {
......@@ -307,9 +329,10 @@ clear() {
// Iterator implementation
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
template <class ContT, class IterVal>
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::aha_iterator
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
: boost::iterator_facade<aha_iterator<ContT,IterVal>,
IterVal,
boost::forward_traversal_tag>
......
......@@ -43,11 +43,15 @@
namespace folly {
template <class KeyT, class ValueT,
class HashFcn = std::hash<KeyT>, class EqualFcn = std::equal_to<KeyT>>
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>>
class AtomicHashMap;
template <class KeyT, class ValueT,
class HashFcn = std::hash<KeyT>, class EqualFcn = std::equal_to<KeyT>>
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>>
class AtomicHashArray : boost::noncopyable {
static_assert((std::is_convertible<KeyT,int32_t>::value ||
std::is_convertible<KeyT,int64_t>::value ||
......@@ -215,7 +219,7 @@ class AtomicHashArray : boost::noncopyable {
/* Private data and helper functions... */
private:
friend class AtomicHashMap<KeyT,ValueT,HashFcn,EqualFcn>;
friend class AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>;
struct SimpleRetT { size_t idx; bool success;
SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
......
......@@ -22,14 +22,16 @@
namespace folly {
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::Config
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::defaultConfig;
template <class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::Config
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
AtomicHashMap(size_t size, const Config& config)
: kGrowthFrac_(config.growthFactor < 0 ?
1.0 - config.maxLoadFactor : config.growthFactor) {
......@@ -44,9 +46,11 @@ AtomicHashMap(size_t size, const Config& config)
}
// insert --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
std::pair<typename AtomicHashMap<KeyT,ValueT,HashFcn,EqualFcn>::iterator,bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
EqualFcn, Allocator>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insert(key_type k, const mapped_type& v) {
SimpleRetT ret = insertInternal(k,v);
SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
......@@ -54,9 +58,11 @@ insert(key_type k, const mapped_type& v) {
ret.success);
}
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
std::pair<typename AtomicHashMap<KeyT,ValueT,HashFcn,EqualFcn>::iterator,bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
EqualFcn, Allocator>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insert(key_type k, mapped_type&& v) {
SimpleRetT ret = insertInternal(k, std::move(v));
SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
......@@ -65,10 +71,11 @@ insert(key_type k, mapped_type&& v) {
}
// insertInternal -- Allocates new sub maps as existing ones fill up.
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
template <class T>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insertInternal(key_type key, T&& value) {
beginInsertInternal:
int nextMapIdx = // this maintains our state
......@@ -142,9 +149,10 @@ insertInternal(key_type key, T&& value) {
}
// find --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
find(KeyT k) {
SimpleRetT ret = findInternal(k);
if (ret.i >= numMapsAllocated_.load(std::memory_order_acquire)) {
......@@ -154,17 +162,20 @@ find(KeyT k) {
return iterator(this, ret.i, subMap->makeIter(ret.j));
}
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT,
HashFcn, EqualFcn, Allocator>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
find(KeyT k) const {
return const_cast<AtomicHashMap*>(this)->find(k);
}
// findInternal --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findInternal(const KeyT k) const {
SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
......@@ -185,9 +196,10 @@ findInternal(const KeyT k) const {
}
// findAtInternal -- see encodeIndex() for details.
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findAtInternal(uint32_t idx) const {
uint32_t subMapIdx, subMapOffset;
if (idx & kSecondaryMapBit_) {
......@@ -205,9 +217,10 @@ findAtInternal(uint32_t idx) const {
}
// erase --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::size_type
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
erase(const KeyT k) {
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
......@@ -221,8 +234,9 @@ erase(const KeyT k) {
}
// capacity -- summation of capacities of all submaps
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
capacity() const {
size_t totalCap(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
......@@ -234,8 +248,9 @@ capacity() const {
// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
spaceRemaining() const {
size_t spaceRem(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
......@@ -251,8 +266,9 @@ spaceRemaining() const {
// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
clear() {
subMaps_[0].load(std::memory_order_relaxed)->clear();
int const numMaps = numMapsAllocated_
......@@ -267,8 +283,9 @@ clear() {
}
// size --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
size() const {
size_t totalSize(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
......@@ -296,8 +313,9 @@ size() const {
// 31 1
// 27-30 which subMap
// 0-26 subMap offset (index_ret input)
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
encodeIndex(uint32_t subMap, uint32_t offset) {
DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
if (subMap == 0) return offset;
......@@ -313,9 +331,10 @@ encodeIndex(uint32_t subMap, uint32_t offset) {
// Iterator implementation
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
template <typename KeyT, typename ValueT,
typename HashFcn, typename EqualFcn, typename Allocator>
template<class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::ahm_iterator
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
: boost::iterator_facade<ahm_iterator<ContT,IterVal,SubIt>,
IterVal,
boost::forward_traversal_tag>
......
......@@ -135,7 +135,9 @@ namespace folly {
* make_pair everywhere), and providing both can lead to some gross
* template error messages.
*
* - Not Allocator-aware.
* - The Allocator must not be stateful (a new instance will be spun up for
* each allocation), and its allocate() method must take a raw number of
* bytes.
*
* - KeyT must be a 32 bit or 64 bit atomic integer type, and you must
* define special 'locked' and 'empty' key values in the ctor
......@@ -153,9 +155,10 @@ struct AtomicHashMapFullError : std::runtime_error {
{}
};
template<class KeyT, class ValueT, class HashFcn, class EqualFcn>
template<class KeyT, class ValueT,
class HashFcn, class EqualFcn, class Allocator>
class AtomicHashMap : boost::noncopyable {
typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn> SubMap;
typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator> SubMap;
public:
typedef KeyT key_type;
......
......@@ -14,23 +14,86 @@
* limitations under the License.
*/
#include <sys/mman.h>
#include <cstddef>
#include <stdexcept>
#include "folly/AtomicHashArray.h"
#include "folly/Hash.h"
#include "folly/Conv.h"
#include "folly/Memory.h"
#include <gtest/gtest.h>
using namespace std;
using namespace folly;
// Minimal STL-style allocator backed by anonymous mmap regions instead of
// malloc.  Stateless: every instance compares equal, so callers may spin up
// a fresh instance per allocation (as AtomicHashArray::create/destroy do).
// Instantiated with T = char in the tests below, so allocate(n) maps a raw
// byte count, matching AtomicHashArray's usage.
template <class T>
class MmapAllocator {
 public:
  typedef T value_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef T& reference;
  typedef const T& const_reference;

  typedef ptrdiff_t difference_type;
  typedef size_t size_type;

  T* address(T& x) const {
    return std::addressof(x);
  }

  const T* address(const T& x) const {
    return std::addressof(x);
  }

  // Largest element count a single allocate() call may request
  // (in elements, per the Allocator requirements — not bytes).
  size_t max_size() const {
    return std::numeric_limits<size_t>::max() / sizeof(T);
  }

  template <class U> struct rebind {
    typedef MmapAllocator<U> other;
  };

  bool operator!=(const MmapAllocator<T>& other) const {
    return !(*this == other);
  }

  // Stateless allocator: all instances are interchangeable.
  bool operator==(const MmapAllocator<T>& other) const {
    return true;
  }

  template <class... Args>
  void construct(T* p, Args&&... args) {
    new (p) T(std::forward<Args>(args)...);
  }

  void destroy(T* p) {
    p->~T();
  }

  T *allocate(size_t n) {
    // Guard the n * sizeof(T) multiplication against overflow.
    if (n > max_size()) throw std::bad_alloc();
    void *p = mmap(nullptr, n * sizeof(T), PROT_READ | PROT_WRITE,
        MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    // mmap reports failure with MAP_FAILED ((void*)-1), never nullptr,
    // so the check must compare against MAP_FAILED explicitly.
    if (p == MAP_FAILED) throw std::bad_alloc();
    return static_cast<T*>(p);
  }

  void deallocate(T *p, size_t n) {
    munmap(p, n * sizeof(T));
  }
};
// Builds a deterministic (key, value) test entry from the integer seed i.
// The key is a mixed hash of i reduced mod 1000 (small range, so repeated
// keys/collisions are exercised); the value is i + 3 so it differs from i.
template<class KeyT, class ValueT>
pair<KeyT,ValueT> createEntry(int i) {
  const auto mixedKey = folly::hash::jenkins_rev_mix32(i) % 1000;
  KeyT key = to<KeyT>(mixedKey);
  ValueT val = to<ValueT>(i + 3);
  return make_pair(key, val);
}
template<class KeyT, class ValueT>
template<class KeyT, class ValueT, class Allocator = std::allocator<char>>
void testMap() {
typedef AtomicHashArray<KeyT, ValueT> MyArr;
typedef AtomicHashArray<KeyT, ValueT, std::hash<KeyT>,
std::equal_to<KeyT>, Allocator> MyArr;
auto arr = MyArr::create(150);
map<KeyT, ValueT> ref;
for (int i = 0; i < 100; ++i) {
......@@ -75,9 +138,10 @@ void testMap() {
}
}
template<class KeyT, class ValueT>
template<class KeyT, class ValueT, class Allocator = std::allocator<char>>
void testNoncopyableMap() {
typedef AtomicHashArray<KeyT, std::unique_ptr<ValueT>> MyArr;
typedef AtomicHashArray<KeyT, std::unique_ptr<ValueT>, std::hash<KeyT>,
std::equal_to<KeyT>, Allocator> MyArr;
auto arr = MyArr::create(150);
for (int i = 0; i < 100; i++) {
arr->insert(make_pair(i,std::unique_ptr<ValueT>(new ValueT(i))));
......@@ -90,24 +154,34 @@ void testNoncopyableMap() {
// Exercise insert/erase for 32-bit keys and values with both the default
// (malloc-backed) allocator and the mmap-backed allocator.
// (The scrape had retained the pre-diff calls alongside the post-diff ones,
// duplicating every test body; the stale duplicates are removed here.)
TEST(Aha, InsertErase_i32_i32) {
  testMap<int32_t, int32_t>();
  testMap<int32_t, int32_t, MmapAllocator<char>>();
  testNoncopyableMap<int32_t, int32_t>();
  testNoncopyableMap<int32_t, int32_t, MmapAllocator<char>>();
}
// Exercise insert/erase for 64-bit keys / 32-bit values with both the
// default allocator and the mmap-backed allocator.
// (Stale pre-diff duplicate calls left by the scrape are removed.)
TEST(Aha, InsertErase_i64_i32) {
  testMap<int64_t, int32_t>();
  testMap<int64_t, int32_t, MmapAllocator<char>>();
  testNoncopyableMap<int64_t, int32_t>();
  testNoncopyableMap<int64_t, int32_t, MmapAllocator<char>>();
}
// Exercise insert/erase for 64-bit keys and values with both the default
// allocator and the mmap-backed allocator.
// (Stale pre-diff duplicate calls left by the scrape are removed.)
TEST(Aha, InsertErase_i64_i64) {
  testMap<int64_t, int64_t>();
  testMap<int64_t, int64_t, MmapAllocator<char>>();
  testNoncopyableMap<int64_t, int64_t>();
  testNoncopyableMap<int64_t, int64_t, MmapAllocator<char>>();
}
// Exercise insert/erase for 32-bit keys / 64-bit values with both the
// default allocator and the mmap-backed allocator.
// (Stale pre-diff duplicate calls left by the scrape are removed.)
TEST(Aha, InsertErase_i32_i64) {
  testMap<int32_t, int64_t>();
  testMap<int32_t, int64_t, MmapAllocator<char>>();
  testNoncopyableMap<int32_t, int64_t>();
  testNoncopyableMap<int32_t, int64_t, MmapAllocator<char>>();
}
// Exercise insert/erase for 32-bit keys with string values, with both the
// default allocator and the mmap-backed allocator.
// (Stale pre-diff duplicate call left by the scrape is removed.)
TEST(Aha, InsertErase_i32_str) {
  testMap<int32_t, string>();
  testMap<int32_t, string, MmapAllocator<char>>();
}
// Exercise insert/erase for 64-bit keys with string values, with both the
// default allocator and the mmap-backed allocator.
// (Stale pre-diff duplicate call left by the scrape is removed.)
TEST(Aha, InsertErase_i64_str) {
  testMap<int64_t, string>();
  testMap<int64_t, string, MmapAllocator<char>>();
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment