Commit 8ad6b845 authored by Doron Roberts-Kedes, committed by Facebook Github Bot

Introduce SIMDTable for ConcurrentHashMapSegment

Summary:
As an alternative backend to BucketTable, introduce SIMDTable, which mimics the use of SSE intrinsics to filter tags, as found in the F14 code.

On synthetic benchmarks, SIMDTable outperforms BucketTable by 1.1x to 2.6x in operations per second when the map does not fit in cache. However, when the map fits in cache, SIMDTable executes around 10% fewer operations per second than BucketTable.

BucketTable, the existing backend, remains the default.
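For illustration, a minimal sketch (not part of this diff) of how a caller could opt into the new backend through the trailing Impl template-template parameter added below. The alias name SIMDMap and the spelled-out defaults are assumptions; the SIMD backend is only compiled when FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE holds, and omitting Impl keeps BucketTable.

#include <folly/concurrency/ConcurrentHashMap.h>

// Hypothetical convenience alias; every parameter other than Impl simply
// repeats the defaults shown in the diff below.
template <typename K, typename V>
using SIMDMap = folly::ConcurrentHashMap<
    K,
    V,
    std::hash<K>,
    std::equal_to<K>,
    std::allocator<uint8_t>,
    8, // ShardBits
    std::atomic,
    std::mutex,
    folly::detail::concurrenthashmap::simd::SIMDTable>;

// Usage is unchanged from the default backend:
//   SIMDMap<uint64_t, uint64_t> m;
//   m.insert(1, 2);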

Reviewed By: djwatson

Differential Revision: D14458269

fbshipit-source-id: 5b6b01db5eb2430bdfc6f3500458f25971a6ad3d
parent 1060fb28
......@@ -79,7 +79,16 @@ template <
typename Allocator = std::allocator<uint8_t>,
uint8_t ShardBits = 8,
template <typename> class Atom = std::atomic,
class Mutex = std::mutex>
class Mutex = std::mutex,
template <
typename,
typename,
uint8_t,
typename,
typename,
typename,
template <typename> class,
class> class Impl = detail::concurrenthashmap::bucket::BucketTable>
class ConcurrentHashMap {
using SegmentT = detail::ConcurrentHashMapSegment<
KeyType,
......@@ -89,12 +98,12 @@ class ConcurrentHashMap {
KeyEqual,
Allocator,
Atom,
Mutex>;
Mutex,
Impl>;
float load_factor_ = SegmentT::kDefaultLoadFactor;
static constexpr uint64_t NumShards = (1 << ShardBits);
// Slightly higher than 1.0, in case hashing to shards isn't
// perfectly balanced, reserve(size) will still work without
// rehashing.
float load_factor_ = 1.05;
public:
class ConstIterator;
......
......@@ -15,10 +15,17 @@
*/
#pragma once
#include <folly/container/detail/F14Mask.h>
#include <folly/lang/Launder.h>
#include <folly/synchronization/Hazptr.h>
#include <algorithm>
#include <atomic>
#include <mutex>
#if FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
#include <nmmintrin.h>
#endif
namespace folly {
namespace detail {
......@@ -197,6 +204,10 @@ template <
class Mutex = std::mutex>
class alignas(64) BucketTable {
public:
// Slightly higher than 1.0, in case hashing to shards isn't
// perfectly balanced, reserve(size) will still work without
// rehashing.
static constexpr float kDefaultLoadFactor = 1.05;
typedef std::pair<const KeyType, ValueType> value_type;
using Node =
......@@ -245,101 +256,21 @@ class alignas(64) BucketTable {
const KeyType& k,
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch,
Args&&... args) {
auto h = HashFn()(k);
std::unique_lock<Mutex> g(m_);
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
auto buckets = buckets_.load(std::memory_order_relaxed);
// Check for rehash needed for DOES_NOT_EXIST
if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) {
if (max_size_ && size_ << 1 > max_size_) {
// Would exceed max size.
throw std::bad_alloc();
}
rehash(bcount << 1, batch);
buckets = buckets_.load(std::memory_order_relaxed);
bcount = bucket_count_.load(std::memory_order_relaxed);
}
auto idx = getIdx(bcount, h);
auto head = &buckets->buckets_[idx]();
auto node = head->load(std::memory_order_relaxed);
auto headnode = node;
auto prev = head;
auto& hazbuckets = it.hazptrs_[0];
auto& haznode = it.hazptrs_[1];
hazbuckets.reset(buckets);
while (node) {
// Is the key found?
if (KeyEqual()(k, node->getItem().first)) {
it.setNode(node, buckets, bcount, idx);
haznode.reset(node);
if (type == InsertType::MATCH) {
if (!match(node->getItem().second)) {
return false;
}
}
if (type == InsertType::DOES_NOT_EXIST) {
return false;
} else {
if (!cur) {
cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
}
auto next = node->next_.load(std::memory_order_relaxed);
cur->next_.store(next, std::memory_order_relaxed);
if (next) {
next->acquire_link(); // defined in hazptr_obj_base_linked
}
prev->store(cur, std::memory_order_release);
g.unlock();
// Release not under lock.
node->release();
return true;
}
}
prev = &node->next_;
node = node->next_.load(std::memory_order_relaxed);
}
if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
haznode.reset();
hazbuckets.reset();
return false;
}
// Node not found, check for rehash on ANY
if (size_ >= load_factor_nodes_ && type == InsertType::ANY) {
if (max_size_ && size_ << 1 > max_size_) {
// Would exceed max size.
throw std::bad_alloc();
}
rehash(bcount << 1, batch);
// Reload correct bucket.
buckets = buckets_.load(std::memory_order_relaxed);
bcount <<= 1;
hazbuckets.reset(buckets);
idx = getIdx(bcount, h);
head = &buckets->buckets_[idx]();
headnode = head->load(std::memory_order_relaxed);
return doInsert(
it, k, type, match, nullptr, batch, std::forward<Args>(args)...);
}
// We found a slot to put the node.
size_++;
if (!cur) {
// InsertType::ANY
// OR DOES_NOT_EXIST, but only in the try_emplace case
DCHECK(type == InsertType::ANY || type == InsertType::DOES_NOT_EXIST);
cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
}
cur->next_.store(headnode, std::memory_order_relaxed);
head->store(cur, std::memory_order_release);
it.setNode(cur, buckets, bcount, idx);
return true;
template <typename MatchFunc, typename... Args>
bool insert(
Iterator& it,
const KeyType& k,
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch) {
return doInsert(it, k, type, match, cur, batch, cur);
}
// Must hold lock.
......@@ -675,6 +606,109 @@ class alignas(64) BucketTable {
DCHECK(buckets);
}
template <typename MatchFunc, typename... Args>
bool doInsert(
Iterator& it,
const KeyType& k,
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch,
Args&&... args) {
auto h = HashFn()(k);
std::unique_lock<Mutex> g(m_);
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
auto buckets = buckets_.load(std::memory_order_relaxed);
// Check for rehash needed for DOES_NOT_EXIST
if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) {
if (max_size_ && size_ << 1 > max_size_) {
// Would exceed max size.
throw std::bad_alloc();
}
rehash(bcount << 1, batch);
buckets = buckets_.load(std::memory_order_relaxed);
bcount = bucket_count_.load(std::memory_order_relaxed);
}
auto idx = getIdx(bcount, h);
auto head = &buckets->buckets_[idx]();
auto node = head->load(std::memory_order_relaxed);
auto headnode = node;
auto prev = head;
auto& hazbuckets = it.hazptrs_[0];
auto& haznode = it.hazptrs_[1];
hazbuckets.reset(buckets);
while (node) {
// Is the key found?
if (KeyEqual()(k, node->getItem().first)) {
it.setNode(node, buckets, bcount, idx);
haznode.reset(node);
if (type == InsertType::MATCH) {
if (!match(node->getItem().second)) {
return false;
}
}
if (type == InsertType::DOES_NOT_EXIST) {
return false;
} else {
if (!cur) {
cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
}
auto next = node->next_.load(std::memory_order_relaxed);
cur->next_.store(next, std::memory_order_relaxed);
if (next) {
next->acquire_link(); // defined in hazptr_obj_base_linked
}
prev->store(cur, std::memory_order_release);
g.unlock();
// Release not under lock.
node->release();
return true;
}
}
prev = &node->next_;
node = node->next_.load(std::memory_order_relaxed);
}
if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
haznode.reset();
hazbuckets.reset();
return false;
}
// Node not found, check for rehash on ANY
if (size_ >= load_factor_nodes_ && type == InsertType::ANY) {
if (max_size_ && size_ << 1 > max_size_) {
// Would exceed max size.
throw std::bad_alloc();
}
rehash(bcount << 1, batch);
// Reload correct bucket.
buckets = buckets_.load(std::memory_order_relaxed);
bcount <<= 1;
hazbuckets.reset(buckets);
idx = getIdx(bcount, h);
head = &buckets->buckets_[idx]();
headnode = head->load(std::memory_order_relaxed);
}
// We found a slot to put the node.
size_++;
if (!cur) {
// InsertType::ANY
// OR DOES_NOT_EXIST, but only in the try_emplace case
DCHECK(type == InsertType::ANY || type == InsertType::DOES_NOT_EXIST);
cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
}
cur->next_.store(headnode, std::memory_order_relaxed);
head->store(cur, std::memory_order_release);
it.setNode(cur, buckets, bcount, idx);
return true;
}
Mutex m_;
float load_factor_;
size_t load_factor_nodes_;
......@@ -689,61 +723,906 @@ class alignas(64) BucketTable {
} // namespace bucket
} // namespace concurrenthashmap
#if FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
namespace simd {
using folly::f14::detail::DenseMaskIter;
using folly::f14::detail::FirstEmptyInMask;
using folly::f14::detail::FullMask;
using folly::f14::detail::MaskType;
using folly::f14::detail::SparseMaskIter;
using folly::hazptr_obj_base;
using folly::hazptr_obj_batch;
/* A Segment is a single shard of the ConcurrentHashMap.
* All writes take the lock, while readers are all wait-free.
* Readers always proceed in parallel with the single writer.
*
*
* Possible additional optimizations:
*
* * insert / erase could be lock / wait free. Would need to be
* careful that assign and rehash don't conflict (possibly with
* reader/writer lock, or microlock per node or per bucket, etc).
* Java 8 goes halfway, and does lock per bucket, except for the
* first item, that is inserted with a CAS (which is somewhat
* specific to java having a lock per object)
*
* * I tried using trylock() and find() to warm the cache for insert()
* and erase() similar to Java 7, but didn't have much luck.
*
* * We could order elements using split ordering, for faster rehash,
* and no need to ever copy nodes. Note that a full split ordering
* including dummy nodes increases the memory usage by 2x, but we
* could split the difference and still require a lock to set bucket
* pointers.
*/
template <
typename KeyType,
typename ValueType,
uint8_t ShardBits = 0,
typename HashFn = std::hash<KeyType>,
typename KeyEqual = std::equal_to<KeyType>,
typename Allocator = std::allocator<uint8_t>,
template <typename> class Atom = std::atomic,
class Mutex = std::mutex,
// TODO is there a better way to do this
template <
typename,
typename,
uint8_t,
typename,
typename,
typename,
template <typename> class,
class> class Impl = concurrenthashmap::bucket::BucketTable>
class alignas(64) ConcurrentHashMapSegment {
using ImplT = Impl<
KeyType,
ValueType,
ShardBits,
HashFn,
KeyEqual,
Allocator,
Atom,
Mutex>;
template <
typename KeyType,
typename ValueType,
typename Allocator,
template <typename> class Atom = std::atomic>
class NodeT : public hazptr_obj_base<
NodeT<KeyType, ValueType, Allocator, Atom>,
Atom,
HazptrDeleter<Allocator>> {
public:
typedef std::pair<const KeyType, ValueType> value_type;
template <typename Arg, typename... Args>
NodeT(hazptr_obj_batch<Atom>* batch, Arg&& k, Args&&... args)
: item_(
std::piecewise_construct,
std::forward_as_tuple(std::forward<Arg>(k)),
std::forward_as_tuple(std::forward<Args>(args)...)) {
init(batch);
}
value_type& getItem() {
return item_;
}
private:
void init(hazptr_obj_batch<Atom>* batch) {
DCHECK(batch);
this->set_deleter( // defined in hazptr_obj
HazptrDeleter<Allocator>());
this->set_batch_tag(batch); // defined in hazptr_obj
}
value_type item_;
};
constexpr std::size_t kRequiredVectorAlignment =
constexpr_max(std::size_t{16}, alignof(max_align_t));
template <
typename KeyType,
typename ValueType,
uint8_t ShardBits = 0,
typename HashFn = std::hash<KeyType>,
typename KeyEqual = std::equal_to<KeyType>,
typename Allocator = std::allocator<uint8_t>,
template <typename> class Atom = std::atomic,
class Mutex = std::mutex>
class alignas(64) SIMDTable {
public:
using Node =
concurrenthashmap::simd::NodeT<KeyType, ValueType, Allocator, Atom>;
private:
using HashPair = std::pair<std::size_t, std::size_t>;
struct alignas(kRequiredVectorAlignment) Chunk {
static constexpr unsigned kCapacity = 14;
static constexpr unsigned kDesiredCapacity = 12;
static constexpr MaskType kFullMask = FullMask<kCapacity>::value;
private:
// Non-empty tags have their top bit set.
// tags [0,8)
Atom<uint64_t> tags_low_;
// tags_hi_ holds tags [8,14), hostedOverflowCount and outboundOverflowCount
// hostedOverflowCount: the number of values in this chunk that were placed
// because they overflowed their desired chunk.
// outboundOverflowCount: num values that would have been placed into this
// chunk if there had been space, including values that also overflowed
// previous full chunks. This value saturates; once it becomes 255 it no
// longer increases nor decreases.
// Note: more bits can be used for outboundOverflowCount if this
// optimization becomes useful
Atom<uint64_t> tags_hi_;
std::array<aligned_storage_for_t<Atom<Node*>>, kCapacity> rawItems_;
public:
void clear() {
for (size_t i = 0; i < kCapacity; i++) {
item(i).store(nullptr, std::memory_order_relaxed);
}
tags_low_.store(0, std::memory_order_relaxed);
tags_hi_.store(0, std::memory_order_relaxed);
}
std::size_t tag(std::size_t index) const {
std::size_t off = index % 8;
const Atom<uint64_t>& tag_src = off == index ? tags_low_ : tags_hi_;
uint64_t tags = tag_src.load(std::memory_order_relaxed);
tags >>= (off * 8);
return tags & 0xff;
}
void setTag(std::size_t index, std::size_t tag) {
std::size_t off = index % 8;
Atom<uint64_t>& old_tags = off == index ? tags_low_ : tags_hi_;
uint64_t new_tags = old_tags.load(std::memory_order_relaxed);
uint64_t mask = 0xffUL << (off * 8);
new_tags = (new_tags & ~mask) | (tag << (off * 8));
old_tags.store(new_tags, std::memory_order_release);
}
void setNodeAndTag(std::size_t index, Node* node, std::size_t tag) {
FOLLY_SAFE_DCHECK(
index < kCapacity && (tag == 0x0 || (tag >= 0x80 && tag <= 0xff)),
"");
item(index).store(node, std::memory_order_relaxed);
setTag(index, tag);
}
void clearNodeAndTag(std::size_t index) {
setNodeAndTag(index, nullptr, 0);
}
////////
// Tag filtering using SSE2 intrinsics
SparseMaskIter tagMatchIter(std::size_t needle) const {
FOLLY_SAFE_DCHECK(needle >= 0x80 && needle < 0x100, "");
uint64_t low = tags_low_.load(std::memory_order_acquire);
uint64_t hi = tags_hi_.load(std::memory_order_acquire);
auto tagV = _mm_set_epi64x(hi, low);
auto needleV = _mm_set1_epi8(static_cast<uint8_t>(needle));
auto eqV = _mm_cmpeq_epi8(tagV, needleV);
auto mask = _mm_movemask_epi8(eqV) & kFullMask;
return SparseMaskIter{mask};
}
MaskType occupiedMask() const {
uint64_t low = tags_low_.load(std::memory_order_relaxed);
uint64_t hi = tags_hi_.load(std::memory_order_relaxed);
auto tagV = _mm_set_epi64x(hi, low);
return _mm_movemask_epi8(tagV) & kFullMask;
}
DenseMaskIter occupiedIter() const {
// Currently only invoked when relaxed semantics are sufficient.
return DenseMaskIter{nullptr /*unused*/, occupiedMask()};
}
FirstEmptyInMask firstEmpty() const {
return FirstEmptyInMask{occupiedMask() ^ kFullMask};
}
Atom<Node*>* itemAddr(std::size_t i) const {
return static_cast<Atom<Node*>*>(
const_cast<void*>(static_cast<void const*>(&rawItems_[i])));
}
Atom<Node*>& item(size_t i) {
return *launder(itemAddr(i));
}
static constexpr uint64_t kOutboundOverflowIndex = 7 * 8;
static constexpr uint64_t kSaturatedOutboundOverflowCount = 0xffUL
<< kOutboundOverflowIndex;
static constexpr uint64_t kOutboundOverflowOperand = 0x1UL
<< kOutboundOverflowIndex;
unsigned outboundOverflowCount() const {
uint64_t count = tags_hi_.load(std::memory_order_relaxed);
return count >> kOutboundOverflowIndex;
}
void incrOutboundOverflowCount() {
uint64_t count = tags_hi_.load(std::memory_order_relaxed);
if (count < kSaturatedOutboundOverflowCount) {
tags_hi_.store(
count + kOutboundOverflowOperand, std::memory_order_relaxed);
}
}
void decrOutboundOverflowCount() {
uint64_t count = tags_hi_.load(std::memory_order_relaxed);
if (count < kSaturatedOutboundOverflowCount) {
tags_hi_.store(
count - kOutboundOverflowOperand, std::memory_order_relaxed);
}
}
static constexpr uint64_t kHostedOverflowIndex = 6 * 8;
static constexpr uint64_t kHostedOverflowOperand = 0x10UL
<< kHostedOverflowIndex;
unsigned hostedOverflowCount() const {
uint64_t control = tags_hi_.load(std::memory_order_relaxed);
return (control >> 52) & 0xf;
}
void incrHostedOverflowCount() {
tags_hi_.fetch_add(kHostedOverflowOperand, std::memory_order_relaxed);
}
void decrHostedOverflowCount() {
tags_hi_.fetch_sub(kHostedOverflowOperand, std::memory_order_relaxed);
}
};
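For illustration, a standalone sketch (not part of the diff) of the tag-filtering trick used by tagMatchIter and occupiedMask above: pack the 16 tag bytes into one SSE register, compare them against the needle byte-wise in a single instruction, and movemask the result into a bitmask of candidate slots. The concrete tag values and variable names below are made up.

#include <emmintrin.h> // SSE2: _mm_set_epi64x, _mm_cmpeq_epi8, _mm_movemask_epi8
#include <cstdint>
#include <cstdio>

int main() {
  // Two 64-bit words holding 16 one-byte tags, as in tags_low_ / tags_hi_.
  // Occupied tags have the top bit set (0x80..0xff); 0 means empty.
  uint64_t tags_low = 0x00000000009a8380ull; // slots 0, 1, 2 occupied
  uint64_t tags_hi = 0x0000000000000083ull;  // slot 8 occupied
  uint8_t needle = 0x83;                     // tag we are probing for

  __m128i tagV = _mm_set_epi64x(tags_hi, tags_low);
  __m128i needleV = _mm_set1_epi8(static_cast<char>(needle));
  // Byte-wise equality: matching lanes become 0xff, others 0x00.
  __m128i eqV = _mm_cmpeq_epi8(tagV, needleV);
  // One bit per lane; bit i set means slot i holds a matching tag.
  unsigned match = _mm_movemask_epi8(eqV) & ((1u << 14) - 1); // kFullMask for 14 slots
  std::printf("match mask = 0x%04x\n", match); // slots 1 and 8 match: 0x0102
  // occupiedMask works the same way without the compare, because occupied
  // tags already have their most significant bit set.
  unsigned occupied = _mm_movemask_epi8(tagV) & ((1u << 14) - 1);
  std::printf("occupied mask = 0x%04x\n", occupied); // slots 0, 1, 2, 8: 0x0107
  return 0;
}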
class Chunks : public hazptr_obj_base<Chunks, Atom, HazptrTableDeleter> {
Chunks() {}
~Chunks() {}
public:
static Chunks* create(size_t count, hazptr_obj_batch<Atom>* batch) {
auto buf = Allocator().allocate(sizeof(Chunks) + sizeof(Chunk) * count);
auto chunks = new (buf) Chunks();
DCHECK(batch);
chunks->set_batch_tag(batch); // defined in hazptr_obj
for (size_t i = 0; i < count; i++) {
new (&chunks->chunks_[i]) Chunk;
chunks->chunks_[i].clear();
}
return chunks;
}
void destroy(size_t count) {
for (size_t i = 0; i < count; i++) {
chunks_[i].~Chunk();
}
this->~Chunks();
Allocator().deallocate(
(uint8_t*)this, sizeof(Chunk) * count + sizeof(*this));
}
void reclaim_nodes(size_t count) {
for (size_t i = 0; i < count; i++) {
Chunk& chunk = chunks_[i];
auto occupied = chunk.occupiedIter();
while (occupied.hasNext()) {
auto idx = occupied.next();
chunk.setTag(idx, 0);
Node* node =
chunk.item(idx).exchange(nullptr, std::memory_order_relaxed);
// Tags and node ptrs should be in sync at this point.
DCHECK(node);
node->retire();
}
}
}
Chunk* getChunk(size_t index, size_t ccount) {
DCHECK(isPowTwo(ccount));
return &chunks_[index & (ccount - 1)];
}
private:
Chunk chunks_[0];
};
public:
static constexpr float kDefaultLoadFactor =
Chunk::kDesiredCapacity / (float)Chunk::kCapacity;
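// i.e. 12 / 14 ~= 0.857, so grow_threshold_ defaults to roughly twelve
// occupied slots per fourteen-slot chunk.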
typedef std::pair<const KeyType, ValueType> value_type;
using InsertType = concurrenthashmap::InsertType;
class Iterator {
public:
FOLLY_ALWAYS_INLINE Iterator() {}
FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
FOLLY_ALWAYS_INLINE ~Iterator() {}
void setNode(
Node* node,
Chunks* chunks,
size_t chunk_count,
uint64_t chunk_idx,
uint64_t tag_idx) {
DCHECK(chunk_idx < chunk_count || chunk_idx == 0);
DCHECK(isPowTwo(chunk_count));
node_ = node;
chunks_ = chunks;
chunk_count_ = chunk_count;
chunk_idx_ = chunk_idx;
tag_idx_ = tag_idx;
}
const value_type& operator*() const {
DCHECK(node_);
return node_->getItem();
}
const value_type* operator->() const {
DCHECK(node_);
return &(node_->getItem());
}
const Iterator& operator++() {
DCHECK(node_);
++tag_idx_;
findNextNode();
return *this;
}
void next() {
if (node_) {
return;
}
findNextNode();
}
bool operator==(const Iterator& o) const {
return node_ == o.node_;
}
bool operator!=(const Iterator& o) const {
return !(*this == o);
}
Iterator& operator=(const Iterator& o) = delete;
Iterator& operator=(Iterator&& o) noexcept {
if (this != &o) {
hazptrs_ = std::move(o.hazptrs_);
node_ = std::exchange(o.node_, nullptr);
chunks_ = std::exchange(o.chunks_, nullptr);
chunk_count_ = std::exchange(o.chunk_count_, 0);
chunk_idx_ = std::exchange(o.chunk_idx_, 0);
tag_idx_ = std::exchange(o.tag_idx_, 0);
}
return *this;
}
Iterator(const Iterator& o) = delete;
Iterator(Iterator&& o) noexcept
: hazptrs_(std::move(o.hazptrs_)),
node_(std::exchange(o.node_, nullptr)),
chunks_(std::exchange(o.chunks_, nullptr)),
chunk_count_(std::exchange(o.chunk_count_, 0)),
chunk_idx_(std::exchange(o.chunk_idx_, 0)),
tag_idx_(std::exchange(o.tag_idx_, 0)) {}
// These are accessed directly from the functions above
hazptr_array<2, Atom> hazptrs_;
private:
void findNextNode() {
do {
if (tag_idx_ >= Chunk::kCapacity) {
tag_idx_ = 0;
++chunk_idx_;
}
if (chunk_idx_ >= chunk_count_) {
break;
}
DCHECK(chunks_);
// Note that iteration could also be implemented with tag filtering
node_ = hazptrs_[1].get_protected(
chunks_->getChunk(chunk_idx_, chunk_count_)->item(tag_idx_));
if (node_) {
break;
}
++tag_idx_;
} while (true);
}
Node* node_{nullptr};
Chunks* chunks_{nullptr};
size_t chunk_count_{0};
uint64_t chunk_idx_{0};
uint64_t tag_idx_{0};
};
SIMDTable(
size_t initial_size,
float load_factor,
size_t max_size,
hazptr_obj_batch<Atom>* batch)
: load_factor_(load_factor),
max_size_(max_size),
chunks_(nullptr),
chunk_count_(0) {
DCHECK(batch);
DCHECK(
max_size_ == 0 ||
(isPowTwo(max_size_) &&
(folly::popcount(max_size_ - 1) + ShardBits <= 32)));
DCHECK(load_factor_ > 0.0);
load_factor_ = std::min<float>(load_factor_, 1.0);
rehash(initial_size, batch);
}
~SIMDTable() {
auto chunks = chunks_.load(std::memory_order_relaxed);
// We can delete and not retire() here, since users must have
// their own synchronization around destruction.
auto count = chunk_count_.load(std::memory_order_relaxed);
chunks->reclaim_nodes(count);
chunks->destroy(count);
}
size_t size() {
return size_;
}
bool empty() {
return size() == 0;
}
template <typename MatchFunc, typename... Args>
bool insert(
Iterator& it,
const KeyType& k,
InsertType type,
MatchFunc match,
hazptr_obj_batch<Atom>* batch,
Args&&... args) {
Node* node;
Chunks* chunks;
size_t ccount, chunk_idx, tag_idx;
auto h = HashFn()(k);
auto hp = splitHash(h);
std::unique_lock<Mutex> g(m_);
if (!prepare_insert(
it,
k,
type,
match,
batch,
chunk_idx,
tag_idx,
node,
chunks,
ccount,
hp)) {
return false;
}
auto cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
if (!node) {
std::tie(chunk_idx, tag_idx) =
findEmptyInsertLocation(chunks, ccount, hp);
it.setNode(cur, chunks, ccount, chunk_idx, tag_idx);
size_++;
}
Chunk* chunk = chunks->getChunk(chunk_idx, ccount);
chunk->setNodeAndTag(tag_idx, cur, hp.second);
g.unlock();
// Retire not under lock
if (node) {
node->retire();
}
return true;
}
template <typename MatchFunc, typename... Args>
bool insert(
Iterator& it,
const KeyType& k,
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch) {
DCHECK(cur != nullptr);
Node* node;
Chunks* chunks;
size_t ccount, chunk_idx, tag_idx;
auto h = HashFn()(k);
auto hp = splitHash(h);
std::unique_lock<Mutex> g(m_);
if (!prepare_insert(
it,
k,
type,
match,
batch,
chunk_idx,
tag_idx,
node,
chunks,
ccount,
hp)) {
return false;
}
if (!node) {
std::tie(chunk_idx, tag_idx) =
findEmptyInsertLocation(chunks, ccount, hp);
it.setNode(cur, chunks, ccount, chunk_idx, tag_idx);
size_++;
}
Chunk* chunk = chunks->getChunk(chunk_idx, ccount);
chunk->setNodeAndTag(tag_idx, cur, hp.second);
g.unlock();
// Retire not under lock
if (node) {
node->retire();
}
return true;
}
void rehash(size_t size, hazptr_obj_batch<Atom>* batch) {
size_t new_chunk_count = size == 0 ? 0 : (size - 1) / Chunk::kCapacity + 1;
rehash_internal(folly::nextPowTwo(new_chunk_count), batch);
}
bool find(Iterator& res, const KeyType& k) {
auto& hazz = res.hazptrs_[1];
auto h = HashFn()(k);
auto hp = splitHash(h);
size_t ccount;
Chunks* chunks;
getChunksAndCount(ccount, chunks, res.hazptrs_[0]);
size_t step = probeDelta(hp);
auto& chunk_idx = hp.first;
for (size_t tries = 0; tries < ccount; ++tries) {
Chunk* chunk = chunks->getChunk(chunk_idx, ccount);
auto hits = chunk->tagMatchIter(hp.second);
while (hits.hasNext()) {
size_t tag_idx = hits.next();
Node* node = hazz.get_protected(chunk->item(tag_idx));
if (LIKELY(node && KeyEqual()(k, node->getItem().first))) {
chunk_idx = chunk_idx & (ccount - 1);
res.setNode(node, chunks, ccount, chunk_idx, tag_idx);
return true;
}
hazz.reset();
}
if (LIKELY(chunk->outboundOverflowCount() == 0)) {
break;
}
chunk_idx += step;
}
return false;
}
template <typename MatchFunc>
std::size_t erase(const KeyType& key, Iterator* iter, MatchFunc match) {
auto h = HashFn()(key);
const HashPair hp = splitHash(h);
std::unique_lock<Mutex> g(m_);
size_t ccount = chunk_count_.load(std::memory_order_relaxed);
auto chunks = chunks_.load(std::memory_order_relaxed);
size_t chunk_idx, tag_idx;
Node* node = find_internal(key, hp, chunks, ccount, chunk_idx, tag_idx);
if (!node) {
return 0;
}
if (!match(node->getItem().second)) {
return 0;
}
Chunk* chunk = chunks->getChunk(chunk_idx, ccount);
// Decrement any overflow counters
if (chunk->hostedOverflowCount() != 0) {
size_t index = hp.first;
size_t delta = probeDelta(hp);
bool preferredChunk = true;
while (true) {
Chunk* overflowChunk = chunks->getChunk(index, ccount);
if (chunk == overflowChunk) {
if (!preferredChunk) {
overflowChunk->decrHostedOverflowCount();
}
break;
}
overflowChunk->decrOutboundOverflowCount();
preferredChunk = false;
index += delta;
}
}
chunk->clearNodeAndTag(tag_idx);
size_--;
if (iter) {
iter->hazptrs_[0].reset(chunks);
iter->setNode(nullptr, chunks, ccount, chunk_idx, tag_idx + 1);
iter->next();
}
// Retire the node while not under the lock.
g.unlock();
node->retire();
return 1;
}
void clear(hazptr_obj_batch<Atom>* batch) {
size_t ccount = chunk_count_.load(std::memory_order_relaxed);
Chunks* chunks;
auto newchunks = Chunks::create(ccount, batch);
{
std::lock_guard<Mutex> g(m_);
chunks = chunks_.load(std::memory_order_relaxed);
chunks_.store(newchunks, std::memory_order_release);
size_ = 0;
}
chunks->retire(HazptrTableDeleter(ccount));
chunks->reclaim_nodes(ccount);
}
void max_load_factor(float factor) {
DCHECK(factor > 0.0);
if (factor > 1.0) {
throw std::invalid_argument("load factor must be <= 1.0");
}
std::lock_guard<Mutex> g(m_);
load_factor_ = factor;
auto ccount = chunk_count_.load(std::memory_order_relaxed);
grow_threshold_ = ccount * Chunk::kCapacity * load_factor_;
}
Iterator cbegin() {
Iterator res;
size_t ccount;
Chunks* chunks;
getChunksAndCount(ccount, chunks, res.hazptrs_[0]);
res.setNode(nullptr, chunks, ccount, 0, 0);
res.next();
return res;
}
Iterator cend() {
return Iterator(nullptr);
}
private:
static HashPair splitHash(std::size_t hash) {
std::size_t c = _mm_crc32_u64(0, hash);
size_t tag = (c >> 24) | 0x80;
hash += c;
return std::make_pair(hash, tag);
}
static size_t probeDelta(HashPair hp) {
return 2 * hp.second + 1;
}
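For illustration, a sketch (not part of the diff) of how splitHash and probeDelta above cooperate: the CRC32 of the hash supplies a one-byte tag with the top bit forced on (so an empty slot, tag 0, can never match), the perturbed hash picks the starting chunk, and the probe stride 2 * tag + 1 is always odd, so with a power-of-two chunk count every chunk is eventually visited. The variable names and sample values are assumptions.

#include <nmmintrin.h> // SSE4.2: _mm_crc32_u64
#include <cstddef>
#include <cstdio>

int main() {
  std::size_t hash = 0x9e3779b97f4a7c15ull; // some hash value
  std::size_t ccount = 8;                   // chunk count, always a power of two

  // splitHash: derive (starting position, tag) from a single hash value.
  std::size_t c = _mm_crc32_u64(0, hash);
  std::size_t tag = (c >> 24) | 0x80; // in [0x80, 0xff]
  std::size_t pos = hash + c;         // hp.first

  // probeDelta: odd stride, so pos, pos + step, pos + 2 * step, ... mod ccount
  // covers all chunks before repeating (gcd(step, 2^k) == 1).
  std::size_t step = 2 * tag + 1;
  for (std::size_t tries = 0; tries < ccount; ++tries) {
    std::printf("try %zu -> chunk %zu (tag 0x%zx)\n", tries, pos & (ccount - 1), tag);
    pos += step;
  }
  return 0;
}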
// Must hold lock.
Node* find_internal(
const KeyType& k,
const HashPair& hp,
Chunks* chunks,
size_t ccount,
size_t& chunk_idx,
size_t& tag_idx) {
// must be called with mutex held
size_t step = probeDelta(hp);
chunk_idx = hp.first;
for (size_t tries = 0; tries < ccount; ++tries) {
Chunk* chunk = chunks->getChunk(chunk_idx, ccount);
auto hits = chunk->tagMatchIter(hp.second);
while (hits.hasNext()) {
tag_idx = hits.next();
Node* node = chunk->item(tag_idx).load(std::memory_order_relaxed);
if (LIKELY(node && KeyEqual()(k, node->getItem().first))) {
chunk_idx = chunk_idx & (ccount - 1);
return node;
}
}
if (LIKELY(chunk->outboundOverflowCount() == 0)) {
break;
}
chunk_idx += step;
}
return nullptr;
}
template <typename MatchFunc, typename... Args>
bool prepare_insert(
Iterator& it,
const KeyType& k,
InsertType type,
MatchFunc match,
hazptr_obj_batch<Atom>* batch,
size_t& chunk_idx,
size_t& tag_idx,
Node*& node,
Chunks*& chunks,
size_t& ccount,
const HashPair& hp) {
ccount = chunk_count_.load(std::memory_order_relaxed);
chunks = chunks_.load(std::memory_order_relaxed);
if (size_ >= grow_threshold_ && type == InsertType::DOES_NOT_EXIST) {
if (max_size_ && size_ << 1 > max_size_) {
// Would exceed max size.
throw std::bad_alloc();
}
rehash_internal(ccount << 1, batch);
ccount = chunk_count_.load(std::memory_order_relaxed);
chunks = chunks_.load(std::memory_order_relaxed);
}
node = find_internal(k, hp, chunks, ccount, chunk_idx, tag_idx);
it.hazptrs_[0].reset(chunks);
if (node) {
it.hazptrs_[1].reset(node);
it.setNode(node, chunks, ccount, chunk_idx, tag_idx);
if (type == InsertType::MATCH) {
if (!match(node->getItem().second)) {
return false;
}
} else if (type == InsertType::DOES_NOT_EXIST) {
return false;
}
} else {
if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
it.hazptrs_[0].reset();
return false;
}
// Already checked for rehash on DOES_NOT_EXIST, now check on ANY
if (size_ >= grow_threshold_ && type == InsertType::ANY) {
if (max_size_ && size_ << 1 > max_size_) {
// Would exceed max size.
throw std::bad_alloc();
}
rehash_internal(ccount << 1, batch);
ccount = chunk_count_.load(std::memory_order_relaxed);
chunks = chunks_.load(std::memory_order_relaxed);
it.hazptrs_[0].reset(chunks);
}
}
return true;
}
void rehash_internal(size_t new_chunk_count, hazptr_obj_batch<Atom>* batch) {
DCHECK(isPowTwo(new_chunk_count));
auto old_chunk_count = chunk_count_.load(std::memory_order_relaxed);
if (old_chunk_count >= new_chunk_count) {
return;
}
auto new_chunks = Chunks::create(new_chunk_count, batch);
auto old_chunks = chunks_.load(std::memory_order_relaxed);
grow_threshold_ = new_chunk_count * Chunk::kCapacity * load_factor_;
for (size_t i = 0; i < old_chunk_count; i++) {
Chunk* oldchunk = old_chunks->getChunk(i, old_chunk_count);
auto occupied = oldchunk->occupiedIter();
while (occupied.hasNext()) {
auto idx = occupied.next();
Node* node = oldchunk->item(idx).load(std::memory_order_relaxed);
size_t new_chunk_idx;
size_t new_tag_idx;
auto h = HashFn()(node->getItem().first);
auto hp = splitHash(h);
std::tie(new_chunk_idx, new_tag_idx) =
findEmptyInsertLocation(new_chunks, new_chunk_count, hp);
Chunk* newchunk = new_chunks->getChunk(new_chunk_idx, new_chunk_count);
newchunk->setNodeAndTag(new_tag_idx, node, hp.second);
}
}
seqlock_.fetch_add(1, std::memory_order_release);
chunk_count_.store(new_chunk_count, std::memory_order_release);
chunks_.store(new_chunks, std::memory_order_release);
seqlock_.fetch_add(1, std::memory_order_release);
if (old_chunks) {
old_chunks->retire(HazptrTableDeleter(old_chunk_count));
}
}
void getChunksAndCount(
size_t& ccount,
Chunks*& chunks,
hazptr_holder<Atom>& hazptr) {
while (true) {
auto seqlock = seqlock_.load(std::memory_order_acquire);
ccount = chunk_count_.load(std::memory_order_acquire);
chunks = hazptr.get_protected(chunks_);
auto seqlock2 = seqlock_.load(std::memory_order_acquire);
if (!(seqlock & 1) && (seqlock == seqlock2)) {
break;
}
}
DCHECK(chunks);
}
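For illustration, the pairing between rehash_internal (writer) and getChunksAndCount (reader) above is a classic seqlock. A minimal standalone sketch of that protocol follows, with hazard-pointer protection omitted and all names assumed; it is not folly's API.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <utility>

struct Table; // stand-in for Chunks

std::atomic<uint64_t> seqlock{0};
std::atomic<std::size_t> chunk_count{0};
std::atomic<Table*> chunks{nullptr};

// Writer (holds the segment mutex, as rehash_internal does): the counter is
// odd while the two fields are being swapped, even once they are consistent.
void publish(std::size_t new_count, Table* new_chunks) {
  seqlock.fetch_add(1, std::memory_order_release); // odd: update in progress
  chunk_count.store(new_count, std::memory_order_release);
  chunks.store(new_chunks, std::memory_order_release);
  seqlock.fetch_add(1, std::memory_order_release); // even: consistent again
}

// Reader (lock-free): retry until the counter is even and unchanged across
// the two loads, so the count and the pointer describe the same table.
std::pair<std::size_t, Table*> snapshot() {
  while (true) {
    uint64_t s1 = seqlock.load(std::memory_order_acquire);
    std::size_t count = chunk_count.load(std::memory_order_acquire);
    Table* table = chunks.load(std::memory_order_acquire);
    uint64_t s2 = seqlock.load(std::memory_order_acquire);
    if (!(s1 & 1) && s1 == s2) {
      return {count, table};
    }
  }
}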
std::pair<size_t, size_t>
findEmptyInsertLocation(Chunks* chunks, size_t ccount, const HashPair& hp) {
size_t chunk_idx = hp.first;
Chunk* dst_chunk = chunks->getChunk(chunk_idx, ccount);
auto firstEmpty = dst_chunk->firstEmpty();
if (!firstEmpty.hasIndex()) {
size_t delta = probeDelta(hp);
do {
dst_chunk->incrOutboundOverflowCount();
chunk_idx += delta;
dst_chunk = chunks->getChunk(chunk_idx, ccount);
firstEmpty = dst_chunk->firstEmpty();
} while (!firstEmpty.hasIndex());
dst_chunk->incrHostedOverflowCount();
}
size_t dst_tag_idx = firstEmpty.index();
return std::make_pair(chunk_idx & (ccount - 1), dst_tag_idx);
}
Mutex m_;
float load_factor_; // ceil of 1.0
size_t grow_threshold_;
size_t size_{0};
size_t const max_size_;
// Fields needed for read-only access, on separate cacheline.
alignas(64) Atom<Chunks*> chunks_{nullptr};
std::atomic<uint64_t> seqlock_{0};
Atom<size_t> chunk_count_;
};
} // namespace simd
#endif // FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
} // namespace concurrenthashmap
/* A Segment is a single shard of the ConcurrentHashMap.
* All writes take the lock, while readers are all wait-free.
* Readers always proceed in parallel with the single writer.
*
*
* Possible additional optimizations:
*
* * insert / erase could be lock / wait free. Would need to be
* careful that assign and rehash don't conflict (possibly with
* reader/writer lock, or microlock per node or per bucket, etc).
* Java 8 goes halfway, and does lock per bucket, except for the
* first item, that is inserted with a CAS (which is somewhat
* specific to java having a lock per object)
*
* * I tried using trylock() and find() to warm the cache for insert()
* and erase() similar to Java 7, but didn't have much luck.
*
* * We could order elements using split ordering, for faster rehash,
* and no need to ever copy nodes. Note that a full split ordering
* including dummy nodes increases the memory usage by 2x, but we
* could split the difference and still require a lock to set bucket
* pointers.
*/
template <
typename KeyType,
typename ValueType,
uint8_t ShardBits = 0,
typename HashFn = std::hash<KeyType>,
typename KeyEqual = std::equal_to<KeyType>,
typename Allocator = std::allocator<uint8_t>,
template <typename> class Atom = std::atomic,
class Mutex = std::mutex,
template <
typename,
typename,
uint8_t,
typename,
typename,
typename,
template <typename> class,
class> class Impl = concurrenthashmap::bucket::BucketTable>
class alignas(64) ConcurrentHashMapSegment {
using ImplT = Impl<
KeyType,
ValueType,
ShardBits,
HashFn,
KeyEqual,
Allocator,
Atom,
Mutex>;
public:
typedef KeyType key_type;
typedef ValueType mapped_type;
......@@ -753,6 +1632,7 @@ class alignas(64) ConcurrentHashMapSegment {
using InsertType = concurrenthashmap::InsertType;
using Iterator = typename ImplT::Iterator;
using Node = typename ImplT::Node;
static constexpr float kDefaultLoadFactor = ImplT::kDefaultLoadFactor;
ConcurrentHashMapSegment(
size_t initial_buckets,
......@@ -786,7 +1666,6 @@ class alignas(64) ConcurrentHashMapSegment {
node->getItem().first,
InsertType::DOES_NOT_EXIST,
[](const ValueType&) { return false; },
node,
node);
if (!res) {
node->~Node();
......@@ -804,7 +1683,6 @@ class alignas(64) ConcurrentHashMapSegment {
std::forward<Key>(k),
InsertType::DOES_NOT_EXIST,
[](const ValueType&) { return false; },
nullptr,
std::forward<Key>(k),
std::forward<Args>(args)...);
}
......@@ -816,7 +1694,6 @@ class alignas(64) ConcurrentHashMapSegment {
k,
InsertType::DOES_NOT_EXIST,
[](const ValueType&) { return false; },
node,
node);
}
......@@ -829,7 +1706,6 @@ class alignas(64) ConcurrentHashMapSegment {
node->getItem().first,
InsertType::ANY,
[](const ValueType&) { return false; },
node,
node);
if (!res) {
node->~Node();
......@@ -847,7 +1723,6 @@ class alignas(64) ConcurrentHashMapSegment {
node->getItem().first,
InsertType::MUST_EXIST,
[](const ValueType&) { return false; },
node,
node);
if (!res) {
node->~Node();
......@@ -869,7 +1744,6 @@ class alignas(64) ConcurrentHashMapSegment {
node->getItem().first,
InsertType::MATCH,
[&expected](const ValueType& v) { return v == expected; },
node,
node);
if (!res) {
node->~Node();
......@@ -884,10 +1758,19 @@ class alignas(64) ConcurrentHashMapSegment {
const KeyType& k,
InsertType type,
MatchFunc match,
Node* cur,
Args&&... args) {
return impl_.insert(
it, k, type, match, cur, batch_, std::forward<Args>(args)...);
it, k, type, match, batch_, std::forward<Args>(args)...);
}
template <typename MatchFunc, typename... Args>
bool insert_internal(
Iterator& it,
const KeyType& k,
InsertType type,
MatchFunc match,
Node* cur) {
return impl_.insert(it, k, type, match, cur, batch_);
}
// Must hold lock.
......
......@@ -31,9 +31,45 @@ using namespace std;
DEFINE_int64(seed, 0, "Seed for random number generators");
TEST(ConcurrentHashMap, MapTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(3);
foomap.max_load_factor(1.05);
template <typename T>
class ConcurrentHashMapTest : public ::testing::Test {};
TYPED_TEST_CASE_P(ConcurrentHashMapTest);
template <template <
typename,
typename,
uint8_t,
typename,
typename,
typename,
template <typename> class,
class> class Impl>
struct MapFactory {
template <
typename KeyType,
typename ValueType,
typename HashFn = std::hash<KeyType>,
typename KeyEqual = std::equal_to<KeyType>,
typename Allocator = std::allocator<uint8_t>,
uint8_t ShardBits = 8,
template <typename> class Atom = std::atomic,
class Mutex = std::mutex>
using MapT = ConcurrentHashMap<
KeyType,
ValueType,
HashFn,
KeyEqual,
Allocator,
ShardBits,
Atom,
Mutex,
Impl>;
};
#define CHM typename TypeParam::template MapT
TYPED_TEST_P(ConcurrentHashMapTest, MapTest) {
CHM<uint64_t, uint64_t> foomap(3);
EXPECT_TRUE(foomap.empty());
EXPECT_EQ(foomap.find(1), foomap.cend());
auto r = foomap.insert(1, 0);
......@@ -49,6 +85,7 @@ TEST(ConcurrentHashMap, MapTest) {
EXPECT_FALSE(foomap.empty());
EXPECT_TRUE(foomap.insert(std::make_pair(2, 0)).second);
EXPECT_TRUE(foomap.insert_or_assign(2, 0).second);
EXPECT_EQ(foomap.size(), 2);
EXPECT_TRUE(foomap.assign_if_equal(2, 0, 3));
EXPECT_TRUE(foomap.insert(3, 0).second);
EXPECT_FALSE(foomap.erase_if_equal(3, 1));
......@@ -73,8 +110,8 @@ TEST(ConcurrentHashMap, MapTest) {
EXPECT_TRUE(foomap.empty());
}
TEST(ConcurrentHashMap, MaxSizeTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(2, 16);
TYPED_TEST_P(ConcurrentHashMapTest, MaxSizeTest) {
CHM<uint64_t, uint64_t> foomap(2, 16);
bool insert_failed = false;
for (int i = 0; i < 32; i++) {
auto res = foomap.insert(0, 0);
......@@ -85,8 +122,8 @@ TEST(ConcurrentHashMap, MaxSizeTest) {
EXPECT_TRUE(insert_failed);
}
TEST(ConcurrentHashMap, MoveTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(2, 16);
TYPED_TEST_P(ConcurrentHashMapTest, MoveTest) {
CHM<uint64_t, uint64_t> foomap(2, 16);
auto other = std::move(foomap);
auto other2 = std::move(other);
other = std::move(other2);
......@@ -114,8 +151,8 @@ struct foo {
int foo::moved{0};
int foo::copied{0};
TEST(ConcurrentHashMap, EmplaceTest) {
ConcurrentHashMap<uint64_t, foo> foomap(200);
TYPED_TEST_P(ConcurrentHashMapTest, EmplaceTest) {
CHM<uint64_t, foo> foomap(200);
foo bar; // Make sure to test copy
foomap.insert(1, bar);
EXPECT_EQ(foo::moved, 0);
......@@ -131,8 +168,17 @@ TEST(ConcurrentHashMap, EmplaceTest) {
EXPECT_EQ(foo::copied, 0);
}
TEST(ConcurrentHashMap, MapResizeTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(2);
TYPED_TEST_P(ConcurrentHashMapTest, MapInsertIteratorValueTest) {
CHM<uint64_t, uint64_t> foomap(2);
for (uint64_t i = 0; i < 1 << 16; i++) {
auto ret = foomap.insert(i, i + 1);
EXPECT_TRUE(ret.second);
EXPECT_EQ(ret.first->second, i + 1);
}
}
TYPED_TEST_P(ConcurrentHashMapTest, MapResizeTest) {
CHM<uint64_t, uint64_t> foomap(2);
EXPECT_EQ(foomap.find(1), foomap.cend());
EXPECT_TRUE(foomap.insert(1, 0).second);
EXPECT_TRUE(foomap.insert(2, 0).second);
......@@ -152,7 +198,7 @@ TEST(ConcurrentHashMap, MapResizeTest) {
}
// Ensure we can insert objects without copy constructors.
TEST(ConcurrentHashMap, MapNoCopiesTest) {
TYPED_TEST_P(ConcurrentHashMapTest, MapNoCopiesTest) {
struct Uncopyable {
int i_;
Uncopyable(int i) {
......@@ -168,7 +214,7 @@ TEST(ConcurrentHashMap, MapNoCopiesTest) {
return 0;
}
};
ConcurrentHashMap<Uncopyable, Uncopyable, Hasher> foomap(2);
CHM<Uncopyable, Uncopyable, Hasher> foomap(2);
EXPECT_TRUE(foomap.try_emplace(1, 1).second);
EXPECT_TRUE(foomap.try_emplace(2, 2).second);
auto res = foomap.find(2);
......@@ -181,7 +227,7 @@ TEST(ConcurrentHashMap, MapNoCopiesTest) {
EXPECT_EQ(&(res->second), &(res2->second));
}
TEST(ConcurrentHashMap, MapMovableKeysTest) {
TYPED_TEST_P(ConcurrentHashMapTest, MapMovableKeysTest) {
struct Movable {
int i_;
Movable(int i) {
......@@ -201,7 +247,7 @@ TEST(ConcurrentHashMap, MapMovableKeysTest) {
return 0;
}
};
ConcurrentHashMap<Movable, Movable, Hasher> foomap(2);
CHM<Movable, Movable, Hasher> foomap(2);
EXPECT_TRUE(foomap.insert(std::make_pair(Movable(10), Movable(1))).second);
EXPECT_TRUE(foomap.assign(Movable(10), Movable(2)));
EXPECT_TRUE(foomap.insert(Movable(11), Movable(1)).second);
......@@ -212,8 +258,8 @@ TEST(ConcurrentHashMap, MapMovableKeysTest) {
EXPECT_TRUE(foomap.try_emplace(Movable(13), Movable(3)).second);
}
TEST(ConcurrentHashMap, MapUpdateTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(2);
TYPED_TEST_P(ConcurrentHashMapTest, MapUpdateTest) {
CHM<uint64_t, uint64_t> foomap(2);
EXPECT_TRUE(foomap.insert(1, 10).second);
EXPECT_TRUE(bool(foomap.assign(1, 11)));
auto res = foomap.find(1);
......@@ -221,15 +267,15 @@ TEST(ConcurrentHashMap, MapUpdateTest) {
EXPECT_EQ(11, res->second);
}
TEST(ConcurrentHashMap, MapIterateTest2) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(2);
TYPED_TEST_P(ConcurrentHashMapTest, MapIterateTest2) {
CHM<uint64_t, uint64_t> foomap(2);
auto begin = foomap.cbegin();
auto end = foomap.cend();
EXPECT_EQ(begin, end);
}
TEST(ConcurrentHashMap, MapIterateTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(2);
TYPED_TEST_P(ConcurrentHashMapTest, MapIterateTest) {
CHM<uint64_t, uint64_t> foomap(2);
EXPECT_EQ(foomap.cbegin(), foomap.cend());
EXPECT_TRUE(foomap.insert(1, 1).second);
EXPECT_TRUE(foomap.insert(2, 2).second);
......@@ -251,29 +297,29 @@ TEST(ConcurrentHashMap, MapIterateTest) {
EXPECT_EQ(count, 2);
}
TEST(ConcurrentHashMap, MoveIterateAssignIterate) {
using Map = ConcurrentHashMap<int, int>;
TYPED_TEST_P(ConcurrentHashMapTest, MoveIterateAssignIterate) {
using Map = CHM<int, int>;
Map tmp;
Map map{std::move(tmp)};
map.insert(0, 0);
++map.cbegin();
ConcurrentHashMap<int, int> other;
CHM<int, int> other;
other.insert(0, 0);
map = std::move(other);
++map.cbegin();
}
TEST(ConcurrentHashMap, EraseTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(3);
TYPED_TEST_P(ConcurrentHashMapTest, EraseTest) {
CHM<uint64_t, uint64_t> foomap(3);
foomap.insert(1, 0);
auto f1 = foomap.find(1);
EXPECT_EQ(1, foomap.erase(1));
foomap.erase(f1);
}
TEST(ConcurrentHashMap, EraseIfEqualTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(3);
TYPED_TEST_P(ConcurrentHashMapTest, EraseIfEqualTest) {
CHM<uint64_t, uint64_t> foomap(3);
foomap.insert(1, 0);
EXPECT_FALSE(foomap.erase_if_equal(1, 1));
auto f1 = foomap.find(1);
......@@ -282,8 +328,8 @@ TEST(ConcurrentHashMap, EraseIfEqualTest) {
EXPECT_EQ(foomap.find(1), foomap.cend());
}
TEST(ConcurrentHashMap, CopyIterator) {
ConcurrentHashMap<int, int> map;
TYPED_TEST_P(ConcurrentHashMapTest, CopyIterator) {
CHM<int, int> map;
map.insert(0, 0);
for (auto cit = map.cbegin(); cit != map.cend(); ++cit) {
std::pair<int const, int> const ckv{0, 0};
......@@ -291,8 +337,8 @@ TEST(ConcurrentHashMap, CopyIterator) {
}
}
TEST(ConcurrentHashMap, EraseInIterateTest) {
ConcurrentHashMap<uint64_t, uint64_t> foomap(3);
TYPED_TEST_P(ConcurrentHashMapTest, EraseInIterateTest) {
CHM<uint64_t, uint64_t> foomap(3);
for (uint64_t k = 0; k < 10; ++k) {
foomap.insert(k, k);
}
......@@ -321,14 +367,13 @@ TEST(ConcurrentHashMap, EraseInIterateTest) {
// #define lib DeterministicSchedule
// #define join DeterministicSchedule::join(t)
TEST(ConcurrentHashMap, UpdateStressTest) {
TYPED_TEST_P(ConcurrentHashMapTest, UpdateStressTest) {
DeterministicSchedule sched(DeterministicSchedule::uniform(FLAGS_seed));
// size must match iters for this test.
unsigned size = 128 * 128;
unsigned iters = size;
ConcurrentHashMap<
unsigned long,
CHM<unsigned long,
unsigned long,
std::hash<unsigned long>,
std::equal_to<unsigned long>,
......@@ -374,13 +419,12 @@ TEST(ConcurrentHashMap, UpdateStressTest) {
}
}
TEST(ConcurrentHashMap, EraseStressTest) {
TYPED_TEST_P(ConcurrentHashMapTest, EraseStressTest) {
DeterministicSchedule sched(DeterministicSchedule::uniform(FLAGS_seed));
unsigned size = 2;
unsigned iters = size * 128 * 2;
ConcurrentHashMap<
unsigned long,
CHM<unsigned long,
unsigned long,
std::hash<unsigned long>,
std::equal_to<unsigned long>,
......@@ -438,13 +482,12 @@ TEST(ConcurrentHashMap, EraseStressTest) {
}
}
TEST(ConcurrentHashMap, IterateStressTest) {
TYPED_TEST_P(ConcurrentHashMapTest, IterateStressTest) {
DeterministicSchedule sched(DeterministicSchedule::uniform(FLAGS_seed));
unsigned size = 2;
unsigned iters = size * 128 * 2;
ConcurrentHashMap<
unsigned long,
CHM<unsigned long,
unsigned long,
std::hash<unsigned long>,
std::equal_to<unsigned long>,
......@@ -497,13 +540,12 @@ TEST(ConcurrentHashMap, IterateStressTest) {
}
}
TEST(ConcurrentHashMap, insertStressTest) {
TYPED_TEST_P(ConcurrentHashMapTest, insertStressTest) {
DeterministicSchedule sched(DeterministicSchedule::uniform(FLAGS_seed));
unsigned size = 2;
unsigned iters = size * 64 * 4;
ConcurrentHashMap<
unsigned long,
CHM<unsigned long,
unsigned long,
std::hash<unsigned long>,
std::equal_to<unsigned long>,
......@@ -532,7 +574,7 @@ TEST(ConcurrentHashMap, insertStressTest) {
}
}
TEST(ConcurrentHashMap, assignStressTest) {
TYPED_TEST_P(ConcurrentHashMapTest, assignStressTest) {
DeterministicSchedule sched(DeterministicSchedule::uniform(FLAGS_seed));
unsigned size = 2;
......@@ -560,8 +602,7 @@ TEST(ConcurrentHashMap, assignStressTest) {
EXPECT_EQ(v, v2);
}
};
ConcurrentHashMap<
unsigned long,
CHM<unsigned long,
big_value,
std::hash<unsigned long>,
std::equal_to<unsigned long>,
......@@ -596,14 +637,13 @@ TEST(ConcurrentHashMap, assignStressTest) {
}
}
TEST(ConcurrentHashMap, RefcountTest) {
TYPED_TEST_P(ConcurrentHashMapTest, RefcountTest) {
struct badhash {
size_t operator()(uint64_t) const {
return 0;
}
};
ConcurrentHashMap<
uint64_t,
CHM<uint64_t,
uint64_t,
badhash,
std::equal_to<uint64_t>,
......@@ -627,11 +667,11 @@ struct Wrapper {
bool& del;
};
TEST(ConcurrentHashMap, Deletion) {
TYPED_TEST_P(ConcurrentHashMapTest, Deletion) {
bool del{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
CHM<int, std::shared_ptr<Wrapper>> map;
map.insert(0, std::make_shared<Wrapper>(del));
}
......@@ -641,11 +681,11 @@ TEST(ConcurrentHashMap, Deletion) {
EXPECT_TRUE(del);
}
TEST(ConcurrentHashMap, DeletionWithErase) {
TYPED_TEST_P(ConcurrentHashMapTest, DeletionWithErase) {
bool del{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
CHM<int, std::shared_ptr<Wrapper>> map;
map.insert(0, std::make_shared<Wrapper>(del));
map.erase(0);
......@@ -656,11 +696,11 @@ TEST(ConcurrentHashMap, DeletionWithErase) {
EXPECT_TRUE(del);
}
TEST(ConcurrentHashMap, DeletionWithIterator) {
TYPED_TEST_P(ConcurrentHashMapTest, DeletionWithIterator) {
bool del{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
CHM<int, std::shared_ptr<Wrapper>> map;
map.insert(0, std::make_shared<Wrapper>(del));
auto it = map.find(0);
......@@ -672,11 +712,11 @@ TEST(ConcurrentHashMap, DeletionWithIterator) {
EXPECT_TRUE(del);
}
TEST(ConcurrentHashMap, DeletionWithForLoop) {
TYPED_TEST_P(ConcurrentHashMapTest, DeletionWithForLoop) {
bool del{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
CHM<int, std::shared_ptr<Wrapper>> map;
map.insert(0, std::make_shared<Wrapper>(del));
for (auto it = map.cbegin(); it != map.cend(); ++it) {
......@@ -689,11 +729,11 @@ TEST(ConcurrentHashMap, DeletionWithForLoop) {
EXPECT_TRUE(del);
}
TEST(ConcurrentHashMap, DeletionMultiple) {
TYPED_TEST_P(ConcurrentHashMapTest, DeletionMultiple) {
bool del1{false}, del2{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
CHM<int, std::shared_ptr<Wrapper>> map;
map.insert(0, std::make_shared<Wrapper>(del1));
map.insert(1, std::make_shared<Wrapper>(del2));
......@@ -705,11 +745,11 @@ TEST(ConcurrentHashMap, DeletionMultiple) {
EXPECT_TRUE(del2);
}
TEST(ConcurrentHashMap, DeletionAssigned) {
TYPED_TEST_P(ConcurrentHashMapTest, DeletionAssigned) {
bool del1{false}, del2{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
CHM<int, std::shared_ptr<Wrapper>> map;
map.insert(0, std::make_shared<Wrapper>(del1));
map.insert_or_assign(0, std::make_shared<Wrapper>(del2));
......@@ -721,12 +761,12 @@ TEST(ConcurrentHashMap, DeletionAssigned) {
EXPECT_TRUE(del2);
}
TEST(ConcurrentHashMap, DeletionMultipleMaps) {
TYPED_TEST_P(ConcurrentHashMapTest, DeletionMultipleMaps) {
bool del1{false}, del2{false};
{
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map1;
ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map2;
CHM<int, std::shared_ptr<Wrapper>> map1;
CHM<int, std::shared_ptr<Wrapper>> map2;
map1.insert(0, std::make_shared<Wrapper>(del1));
map2.insert(0, std::make_shared<Wrapper>(del2));
......@@ -738,8 +778,8 @@ TEST(ConcurrentHashMap, DeletionMultipleMaps) {
EXPECT_TRUE(del2);
}
TEST(ConcurrentHashMap, ForEachLoop) {
ConcurrentHashMap<int, int> map;
TYPED_TEST_P(ConcurrentHashMapTest, ForEachLoop) {
CHM<int, int> map;
map.insert(1, 2);
size_t iters = 0;
for (const auto& kv : map) {
......@@ -750,16 +790,14 @@ TEST(ConcurrentHashMap, ForEachLoop) {
EXPECT_EQ(iters, 1);
}
TEST(ConcurrentHashMap, IteratorMove) {
using CHM = ConcurrentHashMap<int, int>;
using Iter = CHM::ConstIterator;
TYPED_TEST_P(ConcurrentHashMapTest, IteratorMove) {
struct Foo {
Iter it;
explicit Foo(Iter&& it_) : it(std::move(it_)) {}
CHM<int, int>::ConstIterator it;
explicit Foo(CHM<int, int>::ConstIterator&& it_) : it(std::move(it_)) {}
Foo(Foo&&) = default;
Foo& operator=(Foo&&) = default;
};
CHM map;
CHM<int, int> map;
int k = 111;
int v = 999999;
map.insert(k, v);
......@@ -769,3 +807,52 @@ TEST(ConcurrentHashMap, IteratorMove) {
foo2 = std::move(foo);
ASSERT_EQ(foo2.it->second, v);
}
REGISTER_TYPED_TEST_CASE_P(
ConcurrentHashMapTest,
MapTest,
MaxSizeTest,
MoveTest,
EmplaceTest,
MapResizeTest,
MapNoCopiesTest,
MapMovableKeysTest,
MapUpdateTest,
MapIterateTest2,
MapIterateTest,
MoveIterateAssignIterate,
MapInsertIteratorValueTest,
CopyIterator,
Deletion,
DeletionAssigned,
DeletionMultiple,
DeletionMultipleMaps,
DeletionWithErase,
DeletionWithForLoop,
DeletionWithIterator,
EraseIfEqualTest,
EraseInIterateTest,
EraseStressTest,
EraseTest,
ForEachLoop,
IterateStressTest,
RefcountTest,
UpdateStressTest,
assignStressTest,
insertStressTest,
IteratorMove);
using folly::detail::concurrenthashmap::bucket::BucketTable;
#if FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
using folly::detail::concurrenthashmap::simd::SIMDTable;
typedef ::testing::Types<MapFactory<BucketTable>, MapFactory<SIMDTable>>
MapFactoryTypes;
#else
typedef ::testing::Types<MapFactory<BucketTable>> MapFactoryTypes;
#endif
INSTANTIATE_TYPED_TEST_CASE_P(
MapFactoryTypesInstantiation,
ConcurrentHashMapTest,
MapFactoryTypes);