Commit aeac6f59 authored by Dave Watson, committed by Facebook GitHub Bot

Add a buckets + size seqlock

Summary:
There is currently an unnecessary data dependency when loading the
current backing buckets + size: we load the buckets pointer, read the
size from it, and only then index into the array.

Rework the Buckets struct to make a single allocation, and store the bucket_count_ in the Segment directly.  The bucket pointer and bucket count are protected by a seqlock.
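A minimal standalone sketch of the seqlock protocol the diff adopts. This is a simplification, not the folly API: Table, read_snapshot, and publish are illustrative names, and the real code additionally protects the pointer with a hazard pointer before dereferencing it.

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Table; // stand-in for the Buckets array

std::atomic<uint64_t> seqlock{0};     // odd while a writer is mid-update
std::atomic<size_t> bucket_count{0};
std::atomic<Table*> buckets{nullptr};

// Reader: retry until both values come from the same generation.
void read_snapshot(size_t& count, Table*& table) {
  while (true) {
    uint64_t seq1 = seqlock.load(std::memory_order_acquire);
    count = bucket_count.load(std::memory_order_acquire);
    table = buckets.load(std::memory_order_acquire);
    uint64_t seq2 = seqlock.load(std::memory_order_acquire);
    // Even and unchanged: no rehash ran between the two loads, so
    // count and table form a consistent pair.
    if (!(seq1 & 1) && seq1 == seq2) {
      return;
    }
  }
}

// Writer (serialized by the segment mutex in the real code):
// bump to odd, update both fields, bump back to even.
void publish(Table* new_table, size_t new_count) {
  seqlock.fetch_add(1, std::memory_order_release); // now odd
  bucket_count.store(new_count, std::memory_order_release);
  buckets.store(new_table, std::memory_order_release);
  seqlock.fetch_add(1, std::memory_order_release); // even again
}

Because the count no longer has to be loaded through the buckets pointer, the reader's two loads are independent and can issue in parallel, which is the data dependency the summary refers to.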

Reviewed By: ot

Differential Revision: D7167919

fbshipit-source-id: 10ddceffad19f54c790b3ab7a87260445571f81d
parent b6a27e14
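For reference, a minimal sketch of the single-allocation layout the reworked Buckets in the diff below uses: one block holds the header and a variable-length trailing array, and since the count is no longer stored in the object, the caller passes it back in at destruction time. BucketArray and its members are hypothetical simplifications (malloc in place of the Allocator, no hazptr base class), not folly APIs.

#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <new>

struct Node; // stand-in for the hash map node type

struct BucketArray {
  // Trailing variable-length array: the diff uses a zero-length array
  // member (buckets_[0]); a one-element array is the portable spelling.
  std::atomic<Node*> buckets[1];

  static BucketArray* create(size_t count) {
    void* buf = std::malloc(
        sizeof(BucketArray) + sizeof(std::atomic<Node*>) * (count - 1));
    // Treat the raw block as the header, as the diff effectively does,
    // then placement-new each bucket slot.
    auto* b = static_cast<BucketArray*>(buf);
    for (size_t i = 0; i < count; i++) {
      new (&b->buckets[i]) std::atomic<Node*>(nullptr);
    }
    return b;
  }

  // The caller supplies count, mirroring HazptrBucketDeleter(count).
  void destroy(size_t count) {
    using Element = std::atomic<Node*>;
    for (size_t i = 0; i < count; i++) {
      buckets[i].~Element();
    }
    std::free(this);
  }
};

A deleter that carries the count (HazptrBucketDeleter in the diff) is what lets hazard-pointer reclamation call destroy(count) later, after the segment's bucket_count_ has already moved on.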
@@ -36,6 +36,19 @@ class HazptrDeleter {
   }
 };
 
+template <typename Allocator>
+class HazptrBucketDeleter {
+  size_t count_;
+
+ public:
+  HazptrBucketDeleter(size_t count) : count_(count) {}
+  HazptrBucketDeleter() = default;
+  template <typename Bucket>
+  void operator()(Bucket* bucket) {
+    bucket->destroy(count_);
+  }
+};
+
 template <
     typename KeyType,
     typename ValueType,
@@ -221,23 +234,22 @@ class alignas(64) ConcurrentHashMapSegment {
       float load_factor,
       size_t max_size)
       : load_factor_(load_factor), max_size_(max_size) {
-    auto buckets = (Buckets*)Allocator().allocate(sizeof(Buckets));
     initial_buckets = folly::nextPowTwo(initial_buckets);
     DCHECK(
         max_size_ == 0 ||
         (isPowTwo(max_size_) &&
          (folly::popcount(max_size_ - 1) + ShardBits <= 32)));
-    new (buckets) Buckets(initial_buckets);
+    auto buckets = Buckets::create(initial_buckets);
     buckets_.store(buckets, std::memory_order_release);
     load_factor_nodes_ = initial_buckets * load_factor_;
+    bucket_count_.store(initial_buckets, std::memory_order_relaxed);
   }
 
   ~ConcurrentHashMapSegment() {
     auto buckets = buckets_.load(std::memory_order_relaxed);
     // We can delete and not retire() here, since users must have
     // their own synchronization around destruction.
-    buckets->~Buckets();
-    Allocator().deallocate((uint8_t*)buckets, sizeof(Buckets));
+    buckets->destroy(bucket_count_.load(std::memory_order_relaxed));
   }
 
   size_t size() {
@@ -364,6 +376,7 @@ class alignas(64) ConcurrentHashMapSegment {
     auto h = HashFn()(k);
     std::unique_lock<Mutex> g(m_);
 
+    size_t bcount = bucket_count_.load(std::memory_order_relaxed);
     auto buckets = buckets_.load(std::memory_order_relaxed);
     // Check for rehash needed for DOES_NOT_EXIST
     if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) {
@@ -371,11 +384,12 @@ class alignas(64) ConcurrentHashMapSegment {
         // Would exceed max size.
         throw std::bad_alloc();
       }
-      rehash(buckets->bucket_count_ << 1);
+      rehash(bcount << 1);
       buckets = buckets_.load(std::memory_order_relaxed);
+      bcount = bucket_count_.load(std::memory_order_relaxed);
     }
 
-    auto idx = getIdx(buckets, h);
+    auto idx = getIdx(bcount, h);
     auto head = &buckets->buckets_[idx];
     auto node = head->load(std::memory_order_relaxed);
     auto headnode = node;
@@ -386,7 +400,7 @@ class alignas(64) ConcurrentHashMapSegment {
     while (node) {
       // Is the key found?
       if (KeyEqual()(k, node->getItem().first)) {
-        it.setNode(node, buckets, idx);
+        it.setNode(node, buckets, bcount, idx);
         haznode.reset(node);
         if (type == InsertType::MATCH) {
           if (!match(node->getItem().second)) {
@@ -427,12 +441,13 @@ class alignas(64) ConcurrentHashMapSegment {
         // Would exceed max size.
         throw std::bad_alloc();
       }
-      rehash(buckets->bucket_count_ << 1);
+      rehash(bcount << 1);
 
       // Reload correct bucket.
       buckets = buckets_.load(std::memory_order_relaxed);
+      bcount <<= 1;
       hazbuckets.reset(buckets);
-      idx = getIdx(buckets, h);
+      idx = getIdx(bcount, h);
       head = &buckets->buckets_[idx];
       headnode = head->load(std::memory_order_relaxed);
     }
@@ -448,26 +463,26 @@ class alignas(64) ConcurrentHashMapSegment {
     }
 
     cur->next_.store(headnode, std::memory_order_relaxed);
     head->store(cur, std::memory_order_release);
-    it.setNode(cur, buckets, idx);
+    it.setNode(cur, buckets, bcount, idx);
     return true;
   }
 
   // Must hold lock.
   void rehash(size_t bucket_count) {
     auto buckets = buckets_.load(std::memory_order_relaxed);
-    auto newbuckets = (Buckets*)Allocator().allocate(sizeof(Buckets));
-    new (newbuckets) Buckets(bucket_count);
+    auto newbuckets = Buckets::create(bucket_count);
     load_factor_nodes_ = bucket_count * load_factor_;
-    for (size_t i = 0; i < buckets->bucket_count_; i++) {
+    auto oldcount = bucket_count_.load(std::memory_order_relaxed);
+    for (size_t i = 0; i < oldcount; i++) {
       auto bucket = &buckets->buckets_[i];
       auto node = bucket->load(std::memory_order_relaxed);
       if (!node) {
         continue;
       }
       auto h = HashFn()(node->getItem().first);
-      auto idx = getIdx(newbuckets, h);
+      auto idx = getIdx(bucket_count, h);
       // Reuse as long a chain as possible from the end.  Since the
       // nodes don't have previous pointers, the longest last chain
       // will be the same for both the previous hashmap and the new one,
@@ -478,7 +493,7 @@ class alignas(64) ConcurrentHashMapSegment {
       auto last = node->next_.load(std::memory_order_relaxed);
       for (; last != nullptr;
            last = last->next_.load(std::memory_order_relaxed)) {
-        auto k = getIdx(newbuckets, HashFn()(last->getItem().first));
+        auto k = getIdx(bucket_count, HashFn()(last->getItem().first));
         if (k != lastidx) {
           lastidx = k;
           lastrun = last;
@@ -494,7 +509,7 @@ class alignas(64) ConcurrentHashMapSegment {
            node = node->next_.load(std::memory_order_relaxed)) {
         auto newnode = (Node*)Allocator().allocate(sizeof(Node));
         new (newnode) Node(node);
-        auto k = getIdx(newbuckets, HashFn()(node->getItem().first));
+        auto k = getIdx(bucket_count, HashFn()(node->getItem().first));
         auto prevhead = &newbuckets->buckets_[k];
         newnode->next_.store(prevhead->load(std::memory_order_relaxed));
         prevhead->store(newnode, std::memory_order_relaxed);
@@ -502,10 +517,13 @@ class alignas(64) ConcurrentHashMapSegment {
     }
 
     auto oldbuckets = buckets_.load(std::memory_order_relaxed);
+    seqlock_.fetch_add(1, std::memory_order_release);
+    bucket_count_.store(bucket_count, std::memory_order_release);
     buckets_.store(newbuckets, std::memory_order_release);
+    seqlock_.fetch_add(1, std::memory_order_release);
     oldbuckets->retire(
         folly::hazptr::default_hazptr_domain(),
-        concurrenthashmap::HazptrDeleter<Allocator>());
+        concurrenthashmap::HazptrBucketDeleter<Allocator>(oldcount));
   }
 
   bool find(Iterator& res, const KeyType& k) {
@@ -513,15 +531,18 @@ class alignas(64) ConcurrentHashMapSegment {
     folly::hazptr::hazptr_local<1> hlocal;
     auto haznext = &hlocal[0];
     auto h = HashFn()(k);
-    auto buckets = res.hazptrs_[0].get_protected(buckets_);
-    auto idx = getIdx(buckets, h);
+    size_t bcount;
+    Buckets* buckets;
+    getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
+
+    auto idx = getIdx(bcount, h);
     auto prev = &buckets->buckets_[idx];
     auto node = hazcurr->get_protected(*prev);
     while (node) {
       if (KeyEqual()(k, node->getItem().first)) {
         // We may be using hlocal, make sure we are using hazptrs_
         res.hazptrs_[1].reset(node);
-        res.setNode(node, buckets, idx);
+        res.setNode(node, buckets, bcount, idx);
         return true;
       }
       node = haznext[0].get_protected(node->next_);
@@ -541,8 +562,9 @@ class alignas(64) ConcurrentHashMapSegment {
     {
       std::lock_guard<Mutex> g(m_);
 
+      size_t bcount = bucket_count_.load(std::memory_order_relaxed);
       auto buckets = buckets_.load(std::memory_order_relaxed);
-      auto idx = getIdx(buckets, h);
+      auto idx = getIdx(bcount, h);
       auto head = &buckets->buckets_[idx];
       node = head->load(std::memory_order_relaxed);
       Node* prev = nullptr;
@@ -562,7 +584,10 @@ class alignas(64) ConcurrentHashMapSegment {
           if (iter) {
             iter->hazptrs_[0].reset(buckets);
             iter->setNode(
-                node->next_.load(std::memory_order_acquire), buckets, idx);
+                node->next_.load(std::memory_order_acquire),
+                buckets,
+                bcount,
+                idx);
             iter->next();
           }
           size_--;
@@ -594,30 +619,33 @@ class alignas(64) ConcurrentHashMapSegment {
   }
 
   void clear() {
-    auto buckets = buckets_.load(std::memory_order_relaxed);
-    auto newbuckets = (Buckets*)Allocator().allocate(sizeof(Buckets));
-    new (newbuckets) Buckets(buckets->bucket_count_);
+    size_t bcount = bucket_count_.load(std::memory_order_relaxed);
+    Buckets* buckets;
+    auto newbuckets = Buckets::create(bcount);
     {
       std::lock_guard<Mutex> g(m_);
+      buckets = buckets_.load(std::memory_order_relaxed);
       buckets_.store(newbuckets, std::memory_order_release);
       size_ = 0;
     }
     buckets->retire(
         folly::hazptr::default_hazptr_domain(),
-        concurrenthashmap::HazptrDeleter<Allocator>());
+        concurrenthashmap::HazptrBucketDeleter<Allocator>(bcount));
   }
 
   void max_load_factor(float factor) {
     std::lock_guard<Mutex> g(m_);
     load_factor_ = factor;
-    auto buckets = buckets_.load(std::memory_order_relaxed);
-    load_factor_nodes_ = buckets->bucket_count_ * load_factor_;
+    load_factor_nodes_ =
+        bucket_count_.load(std::memory_order_relaxed) * load_factor_;
   }
 
   Iterator cbegin() {
     Iterator res;
-    auto buckets = res.hazptrs_[0].get_protected(buckets_);
-    res.setNode(nullptr, buckets, 0);
+    size_t bcount;
+    Buckets* buckets;
+    getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
+    res.setNode(nullptr, buckets, bcount, 0);
     res.next();
     return res;
   }
@@ -630,29 +658,37 @@ class alignas(64) ConcurrentHashMapSegment {
   // allocating buckets_ at the same time.
   class Buckets : public folly::hazptr::hazptr_obj_base<
                       Buckets,
-                      concurrenthashmap::HazptrDeleter<Allocator>> {
+                      concurrenthashmap::HazptrBucketDeleter<Allocator>> {
+    Buckets() {}
+    ~Buckets() {}
+
    public:
-    explicit Buckets(size_t count) : bucket_count_(count) {
-      buckets_ =
-          (Atom<Node*>*)Allocator().allocate(sizeof(Atom<Node*>) * count);
-      new (buckets_) Atom<Node*>[ count ];
+    static Buckets* create(size_t count) {
+      auto buf =
+          Allocator().allocate(sizeof(Buckets) + sizeof(Atom<Node*>) * count);
+      auto buckets = new (buf) Buckets();
       for (size_t i = 0; i < count; i++) {
-        buckets_[i].store(nullptr, std::memory_order_relaxed);
+        auto bucket = new (&buckets->buckets_[i]) Atom<Node*>();
+        bucket->store(nullptr, std::memory_order_relaxed);
       }
+      return buckets;
     }
-    ~Buckets() {
-      for (size_t i = 0; i < bucket_count_; i++) {
+
+    void destroy(size_t count) {
+      for (size_t i = 0; i < count; i++) {
         auto elem = buckets_[i].load(std::memory_order_relaxed);
         if (elem) {
          elem->release();
        }
+        typedef Atom<Node*> Element;
+        buckets_[i].~Element();
       }
+      this->~Buckets();
       Allocator().deallocate(
-          (uint8_t*)buckets_, sizeof(Atom<Node*>) * bucket_count_);
+          (uint8_t*)this, sizeof(Atom<Node*>) * count + sizeof(*this));
     }
 
-    size_t bucket_count_;
-    Atom<Node*>* buckets_{nullptr};
+    Atom<Node*> buckets_[0];
   };
 
  public:
@@ -662,10 +698,12 @@ class alignas(64) ConcurrentHashMapSegment {
     FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
     FOLLY_ALWAYS_INLINE ~Iterator() {}
 
-    void setNode(Node* node, Buckets* buckets, uint64_t idx) {
+    void
+    setNode(Node* node, Buckets* buckets, size_t bucket_count, uint64_t idx) {
      node_ = node;
      buckets_ = buckets;
      idx_ = idx;
+      bucket_count_ = bucket_count;
    }
 
    const value_type& operator*() const {
@@ -690,11 +728,10 @@ class alignas(64) ConcurrentHashMapSegment {
     void next() {
       while (!node_) {
-        if (idx_ >= buckets_->bucket_count_) {
+        if (idx_ >= bucket_count_) {
           break;
         }
         DCHECK(buckets_);
-        DCHECK(buckets_->buckets_);
         node_ = hazptrs_[1].get_protected(buckets_->buckets_[idx_]);
         if (node_) {
           break;
@@ -723,6 +760,7 @@ class alignas(64) ConcurrentHashMapSegment {
       idx_ = o.idx_;
       buckets_ = o.buckets_;
       hazptrs_[0].reset(buckets_);
+      bucket_count_ = o.bucket_count_;
       return *this;
     }
@@ -732,6 +770,7 @@ class alignas(64) ConcurrentHashMapSegment {
       idx_ = o.idx_;
       buckets_ = o.buckets_;
       hazptrs_[0].reset(buckets_);
+      bucket_count_ = o.bucket_count_;
     }
 
     /* implicit */ Iterator(Iterator&& o) noexcept
@@ -739,6 +778,7 @@ class alignas(64) ConcurrentHashMapSegment {
       node_ = o.node_;
       buckets_ = o.buckets_;
       idx_ = o.idx_;
+      bucket_count_ = o.bucket_count_;
     }
 
     // These are accessed directly from the functions above
@@ -747,22 +787,42 @@ class alignas(64) ConcurrentHashMapSegment {
    private:
     Node* node_{nullptr};
     Buckets* buckets_{nullptr};
-    uint64_t idx_;
+    size_t bucket_count_{0};
+    uint64_t idx_{0};
   };
 
  private:
  // Shards have already used low ShardBits of the hash.
  // Shift it over to use fresh bits.
-  uint64_t getIdx(Buckets* buckets, size_t hash) {
-    return (hash >> ShardBits) & (buckets->bucket_count_ - 1);
+  uint64_t getIdx(size_t bucket_count, size_t hash) {
+    return (hash >> ShardBits) & (bucket_count - 1);
   }
+
+  void getBucketsAndCount(
+      size_t& bcount,
+      Buckets*& buckets,
+      folly::hazptr::hazptr_holder& hazptr) {
+    while (true) {
+      auto seqlock = seqlock_.load(std::memory_order_acquire);
+      bcount = bucket_count_.load(std::memory_order_acquire);
+      buckets = hazptr.get_protected(buckets_);
+      auto seqlock2 = seqlock_.load(std::memory_order_acquire);
+      if (!(seqlock & 1) && (seqlock == seqlock2)) {
+        break;
+      }
+    }
+    DCHECK(buckets);
+  }
 
-  Mutex m_;
   float load_factor_;
   size_t load_factor_nodes_;
   size_t size_{0};
   size_t const max_size_;
-  Atom<Buckets*> buckets_{nullptr};
+
+  Mutex m_;
+
+  // Fields needed for read-only access, on separate cacheline.
+  alignas(64) Atom<Buckets*> buckets_{nullptr};
+  std::atomic<uint64_t> seqlock_{0};
+  Atom<size_t> bucket_count_;
 };
 
 } // namespace detail
 } // namespace folly