Commit aeac6f59 authored by Dave Watson's avatar Dave Watson Committed by Facebook Github Bot

Add a buckets + size seqlock

Summary:
There is currently an unnecessary data dependency: we load the
current backing buckets pointer, read the size from it, and then index
into the array through the same pointer.

Rework the Buckets struct to make a single allocation, and store the bucket_count_ in the Segment directly.  The bucket pointer and bucket count are protected by a seqlock.

Reviewed By: ot

Differential Revision: D7167919

fbshipit-source-id: 10ddceffad19f54c790b3ab7a87260445571f81d
parent b6a27e14
...@@ -36,6 +36,19 @@ class HazptrDeleter { ...@@ -36,6 +36,19 @@ class HazptrDeleter {
} }
}; };
template <typename Allocator>
class HazptrBucketDeleter {
size_t count_;
public:
HazptrBucketDeleter(size_t count) : count_(count) {}
HazptrBucketDeleter() = default;
template <typename Bucket>
void operator()(Bucket* bucket) {
bucket->destroy(count_);
}
};
template < template <
typename KeyType, typename KeyType,
typename ValueType, typename ValueType,
...@@ -221,23 +234,22 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -221,23 +234,22 @@ class alignas(64) ConcurrentHashMapSegment {
float load_factor, float load_factor,
size_t max_size) size_t max_size)
: load_factor_(load_factor), max_size_(max_size) { : load_factor_(load_factor), max_size_(max_size) {
auto buckets = (Buckets*)Allocator().allocate(sizeof(Buckets));
initial_buckets = folly::nextPowTwo(initial_buckets); initial_buckets = folly::nextPowTwo(initial_buckets);
DCHECK( DCHECK(
max_size_ == 0 || max_size_ == 0 ||
(isPowTwo(max_size_) && (isPowTwo(max_size_) &&
(folly::popcount(max_size_ - 1) + ShardBits <= 32))); (folly::popcount(max_size_ - 1) + ShardBits <= 32)));
new (buckets) Buckets(initial_buckets); auto buckets = Buckets::create(initial_buckets);
buckets_.store(buckets, std::memory_order_release); buckets_.store(buckets, std::memory_order_release);
load_factor_nodes_ = initial_buckets * load_factor_; load_factor_nodes_ = initial_buckets * load_factor_;
bucket_count_.store(initial_buckets, std::memory_order_relaxed);
} }
~ConcurrentHashMapSegment() { ~ConcurrentHashMapSegment() {
auto buckets = buckets_.load(std::memory_order_relaxed); auto buckets = buckets_.load(std::memory_order_relaxed);
// We can delete and not retire() here, since users must have // We can delete and not retire() here, since users must have
// their own synchronization around destruction. // their own synchronization around destruction.
buckets->~Buckets(); buckets->destroy(bucket_count_.load(std::memory_order_relaxed));
Allocator().deallocate((uint8_t*)buckets, sizeof(Buckets));
} }
size_t size() { size_t size() {
...@@ -364,6 +376,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -364,6 +376,7 @@ class alignas(64) ConcurrentHashMapSegment {
auto h = HashFn()(k); auto h = HashFn()(k);
std::unique_lock<Mutex> g(m_); std::unique_lock<Mutex> g(m_);
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
auto buckets = buckets_.load(std::memory_order_relaxed); auto buckets = buckets_.load(std::memory_order_relaxed);
// Check for rehash needed for DOES_NOT_EXIST // Check for rehash needed for DOES_NOT_EXIST
if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) { if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) {
...@@ -371,11 +384,12 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -371,11 +384,12 @@ class alignas(64) ConcurrentHashMapSegment {
// Would exceed max size. // Would exceed max size.
throw std::bad_alloc(); throw std::bad_alloc();
} }
rehash(buckets->bucket_count_ << 1); rehash(bcount << 1);
buckets = buckets_.load(std::memory_order_relaxed); buckets = buckets_.load(std::memory_order_relaxed);
bcount = bucket_count_.load(std::memory_order_relaxed);
} }
auto idx = getIdx(buckets, h); auto idx = getIdx(bcount, h);
auto head = &buckets->buckets_[idx]; auto head = &buckets->buckets_[idx];
auto node = head->load(std::memory_order_relaxed); auto node = head->load(std::memory_order_relaxed);
auto headnode = node; auto headnode = node;
...@@ -386,7 +400,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -386,7 +400,7 @@ class alignas(64) ConcurrentHashMapSegment {
while (node) { while (node) {
// Is the key found? // Is the key found?
if (KeyEqual()(k, node->getItem().first)) { if (KeyEqual()(k, node->getItem().first)) {
it.setNode(node, buckets, idx); it.setNode(node, buckets, bcount, idx);
haznode.reset(node); haznode.reset(node);
if (type == InsertType::MATCH) { if (type == InsertType::MATCH) {
if (!match(node->getItem().second)) { if (!match(node->getItem().second)) {
...@@ -427,12 +441,13 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -427,12 +441,13 @@ class alignas(64) ConcurrentHashMapSegment {
// Would exceed max size. // Would exceed max size.
throw std::bad_alloc(); throw std::bad_alloc();
} }
rehash(buckets->bucket_count_ << 1); rehash(bcount << 1);
// Reload correct bucket. // Reload correct bucket.
buckets = buckets_.load(std::memory_order_relaxed); buckets = buckets_.load(std::memory_order_relaxed);
bcount <<= 1;
hazbuckets.reset(buckets); hazbuckets.reset(buckets);
idx = getIdx(buckets, h); idx = getIdx(bcount, h);
head = &buckets->buckets_[idx]; head = &buckets->buckets_[idx];
headnode = head->load(std::memory_order_relaxed); headnode = head->load(std::memory_order_relaxed);
} }
...@@ -448,26 +463,26 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -448,26 +463,26 @@ class alignas(64) ConcurrentHashMapSegment {
} }
cur->next_.store(headnode, std::memory_order_relaxed); cur->next_.store(headnode, std::memory_order_relaxed);
head->store(cur, std::memory_order_release); head->store(cur, std::memory_order_release);
it.setNode(cur, buckets, idx); it.setNode(cur, buckets, bcount, idx);
return true; return true;
} }
// Must hold lock. // Must hold lock.
void rehash(size_t bucket_count) { void rehash(size_t bucket_count) {
auto buckets = buckets_.load(std::memory_order_relaxed); auto buckets = buckets_.load(std::memory_order_relaxed);
auto newbuckets = (Buckets*)Allocator().allocate(sizeof(Buckets)); auto newbuckets = Buckets::create(bucket_count);
new (newbuckets) Buckets(bucket_count);
load_factor_nodes_ = bucket_count * load_factor_; load_factor_nodes_ = bucket_count * load_factor_;
for (size_t i = 0; i < buckets->bucket_count_; i++) { auto oldcount = bucket_count_.load(std::memory_order_relaxed);
for (size_t i = 0; i < oldcount; i++) {
auto bucket = &buckets->buckets_[i]; auto bucket = &buckets->buckets_[i];
auto node = bucket->load(std::memory_order_relaxed); auto node = bucket->load(std::memory_order_relaxed);
if (!node) { if (!node) {
continue; continue;
} }
auto h = HashFn()(node->getItem().first); auto h = HashFn()(node->getItem().first);
auto idx = getIdx(newbuckets, h); auto idx = getIdx(bucket_count, h);
// Reuse as long a chain as possible from the end. Since the // Reuse as long a chain as possible from the end. Since the
// nodes don't have previous pointers, the longest last chain // nodes don't have previous pointers, the longest last chain
// will be the same for both the previous hashmap and the new one, // will be the same for both the previous hashmap and the new one,
...@@ -478,7 +493,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -478,7 +493,7 @@ class alignas(64) ConcurrentHashMapSegment {
auto last = node->next_.load(std::memory_order_relaxed); auto last = node->next_.load(std::memory_order_relaxed);
for (; last != nullptr; for (; last != nullptr;
last = last->next_.load(std::memory_order_relaxed)) { last = last->next_.load(std::memory_order_relaxed)) {
auto k = getIdx(newbuckets, HashFn()(last->getItem().first)); auto k = getIdx(bucket_count, HashFn()(last->getItem().first));
if (k != lastidx) { if (k != lastidx) {
lastidx = k; lastidx = k;
lastrun = last; lastrun = last;
...@@ -494,7 +509,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -494,7 +509,7 @@ class alignas(64) ConcurrentHashMapSegment {
node = node->next_.load(std::memory_order_relaxed)) { node = node->next_.load(std::memory_order_relaxed)) {
auto newnode = (Node*)Allocator().allocate(sizeof(Node)); auto newnode = (Node*)Allocator().allocate(sizeof(Node));
new (newnode) Node(node); new (newnode) Node(node);
auto k = getIdx(newbuckets, HashFn()(node->getItem().first)); auto k = getIdx(bucket_count, HashFn()(node->getItem().first));
auto prevhead = &newbuckets->buckets_[k]; auto prevhead = &newbuckets->buckets_[k];
newnode->next_.store(prevhead->load(std::memory_order_relaxed)); newnode->next_.store(prevhead->load(std::memory_order_relaxed));
prevhead->store(newnode, std::memory_order_relaxed); prevhead->store(newnode, std::memory_order_relaxed);
...@@ -502,10 +517,13 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -502,10 +517,13 @@ class alignas(64) ConcurrentHashMapSegment {
} }
auto oldbuckets = buckets_.load(std::memory_order_relaxed); auto oldbuckets = buckets_.load(std::memory_order_relaxed);
seqlock_.fetch_add(1, std::memory_order_release);
bucket_count_.store(bucket_count, std::memory_order_release);
buckets_.store(newbuckets, std::memory_order_release); buckets_.store(newbuckets, std::memory_order_release);
seqlock_.fetch_add(1, std::memory_order_release);
oldbuckets->retire( oldbuckets->retire(
folly::hazptr::default_hazptr_domain(), folly::hazptr::default_hazptr_domain(),
concurrenthashmap::HazptrDeleter<Allocator>()); concurrenthashmap::HazptrBucketDeleter<Allocator>(oldcount));
} }
bool find(Iterator& res, const KeyType& k) { bool find(Iterator& res, const KeyType& k) {
...@@ -513,15 +531,18 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -513,15 +531,18 @@ class alignas(64) ConcurrentHashMapSegment {
folly::hazptr::hazptr_local<1> hlocal; folly::hazptr::hazptr_local<1> hlocal;
auto haznext = &hlocal[0]; auto haznext = &hlocal[0];
auto h = HashFn()(k); auto h = HashFn()(k);
auto buckets = res.hazptrs_[0].get_protected(buckets_); size_t bcount;
auto idx = getIdx(buckets, h); Buckets* buckets;
getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
auto idx = getIdx(bcount, h);
auto prev = &buckets->buckets_[idx]; auto prev = &buckets->buckets_[idx];
auto node = hazcurr->get_protected(*prev); auto node = hazcurr->get_protected(*prev);
while (node) { while (node) {
if (KeyEqual()(k, node->getItem().first)) { if (KeyEqual()(k, node->getItem().first)) {
// We may be using hlocal, make sure we are using hazptrs_ // We may be using hlocal, make sure we are using hazptrs_
res.hazptrs_[1].reset(node); res.hazptrs_[1].reset(node);
res.setNode(node, buckets, idx); res.setNode(node, buckets, bcount, idx);
return true; return true;
} }
node = haznext[0].get_protected(node->next_); node = haznext[0].get_protected(node->next_);
...@@ -541,8 +562,9 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -541,8 +562,9 @@ class alignas(64) ConcurrentHashMapSegment {
{ {
std::lock_guard<Mutex> g(m_); std::lock_guard<Mutex> g(m_);
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
auto buckets = buckets_.load(std::memory_order_relaxed); auto buckets = buckets_.load(std::memory_order_relaxed);
auto idx = getIdx(buckets, h); auto idx = getIdx(bcount, h);
auto head = &buckets->buckets_[idx]; auto head = &buckets->buckets_[idx];
node = head->load(std::memory_order_relaxed); node = head->load(std::memory_order_relaxed);
Node* prev = nullptr; Node* prev = nullptr;
...@@ -562,7 +584,10 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -562,7 +584,10 @@ class alignas(64) ConcurrentHashMapSegment {
if (iter) { if (iter) {
iter->hazptrs_[0].reset(buckets); iter->hazptrs_[0].reset(buckets);
iter->setNode( iter->setNode(
node->next_.load(std::memory_order_acquire), buckets, idx); node->next_.load(std::memory_order_acquire),
buckets,
bcount,
idx);
iter->next(); iter->next();
} }
size_--; size_--;
...@@ -594,30 +619,33 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -594,30 +619,33 @@ class alignas(64) ConcurrentHashMapSegment {
} }
void clear() { void clear() {
auto buckets = buckets_.load(std::memory_order_relaxed); size_t bcount = bucket_count_.load(std::memory_order_relaxed);
auto newbuckets = (Buckets*)Allocator().allocate(sizeof(Buckets)); Buckets* buckets;
new (newbuckets) Buckets(buckets->bucket_count_); auto newbuckets = Buckets::create(bcount);
{ {
std::lock_guard<Mutex> g(m_); std::lock_guard<Mutex> g(m_);
buckets = buckets_.load(std::memory_order_relaxed);
buckets_.store(newbuckets, std::memory_order_release); buckets_.store(newbuckets, std::memory_order_release);
size_ = 0; size_ = 0;
} }
buckets->retire( buckets->retire(
folly::hazptr::default_hazptr_domain(), folly::hazptr::default_hazptr_domain(),
concurrenthashmap::HazptrDeleter<Allocator>()); concurrenthashmap::HazptrBucketDeleter<Allocator>(bcount));
} }
void max_load_factor(float factor) { void max_load_factor(float factor) {
std::lock_guard<Mutex> g(m_); std::lock_guard<Mutex> g(m_);
load_factor_ = factor; load_factor_ = factor;
auto buckets = buckets_.load(std::memory_order_relaxed); load_factor_nodes_ =
load_factor_nodes_ = buckets->bucket_count_ * load_factor_; bucket_count_.load(std::memory_order_relaxed) * load_factor_;
} }
Iterator cbegin() { Iterator cbegin() {
Iterator res; Iterator res;
auto buckets = res.hazptrs_[0].get_protected(buckets_); size_t bcount;
res.setNode(nullptr, buckets, 0); Buckets* buckets;
getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
res.setNode(nullptr, buckets, bcount, 0);
res.next(); res.next();
return res; return res;
} }
...@@ -630,29 +658,37 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -630,29 +658,37 @@ class alignas(64) ConcurrentHashMapSegment {
// allocating buckets_ at the same time. // allocating buckets_ at the same time.
class Buckets : public folly::hazptr::hazptr_obj_base< class Buckets : public folly::hazptr::hazptr_obj_base<
Buckets, Buckets,
concurrenthashmap::HazptrDeleter<Allocator>> { concurrenthashmap::HazptrBucketDeleter<Allocator>> {
Buckets() {}
~Buckets() {}
public: public:
explicit Buckets(size_t count) : bucket_count_(count) { static Buckets* create(size_t count) {
buckets_ = auto buf =
(Atom<Node*>*)Allocator().allocate(sizeof(Atom<Node*>) * count); Allocator().allocate(sizeof(Buckets) + sizeof(Atom<Node*>) * count);
new (buckets_) Atom<Node*>[ count ]; auto buckets = new (buf) Buckets();
for (size_t i = 0; i < count; i++) { for (size_t i = 0; i < count; i++) {
buckets_[i].store(nullptr, std::memory_order_relaxed); auto bucket = new (&buckets->buckets_[i]) Atom<Node*>();
bucket->store(nullptr, std::memory_order_relaxed);
} }
return buckets;
} }
~Buckets() {
for (size_t i = 0; i < bucket_count_; i++) { void destroy(size_t count) {
for (size_t i = 0; i < count; i++) {
auto elem = buckets_[i].load(std::memory_order_relaxed); auto elem = buckets_[i].load(std::memory_order_relaxed);
if (elem) { if (elem) {
elem->release(); elem->release();
} }
typedef Atom<Node*> Element;
buckets_[i].~Element();
} }
this->~Buckets();
Allocator().deallocate( Allocator().deallocate(
(uint8_t*)buckets_, sizeof(Atom<Node*>) * bucket_count_); (uint8_t*)this, sizeof(Atom<Node*>) * count + sizeof(*this));
} }
size_t bucket_count_; Atom<Node*> buckets_[0];
Atom<Node*>* buckets_{nullptr};
}; };
public: public:
...@@ -662,10 +698,12 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -662,10 +698,12 @@ class alignas(64) ConcurrentHashMapSegment {
FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {} FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
FOLLY_ALWAYS_INLINE ~Iterator() {} FOLLY_ALWAYS_INLINE ~Iterator() {}
void setNode(Node* node, Buckets* buckets, uint64_t idx) { void
setNode(Node* node, Buckets* buckets, size_t bucket_count, uint64_t idx) {
node_ = node; node_ = node;
buckets_ = buckets; buckets_ = buckets;
idx_ = idx; idx_ = idx;
bucket_count_ = bucket_count;
} }
const value_type& operator*() const { const value_type& operator*() const {
...@@ -690,11 +728,10 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -690,11 +728,10 @@ class alignas(64) ConcurrentHashMapSegment {
void next() { void next() {
while (!node_) { while (!node_) {
if (idx_ >= buckets_->bucket_count_) { if (idx_ >= bucket_count_) {
break; break;
} }
DCHECK(buckets_); DCHECK(buckets_);
DCHECK(buckets_->buckets_);
node_ = hazptrs_[1].get_protected(buckets_->buckets_[idx_]); node_ = hazptrs_[1].get_protected(buckets_->buckets_[idx_]);
if (node_) { if (node_) {
break; break;
...@@ -723,6 +760,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -723,6 +760,7 @@ class alignas(64) ConcurrentHashMapSegment {
idx_ = o.idx_; idx_ = o.idx_;
buckets_ = o.buckets_; buckets_ = o.buckets_;
hazptrs_[0].reset(buckets_); hazptrs_[0].reset(buckets_);
bucket_count_ = o.bucket_count_;
return *this; return *this;
} }
...@@ -732,6 +770,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -732,6 +770,7 @@ class alignas(64) ConcurrentHashMapSegment {
idx_ = o.idx_; idx_ = o.idx_;
buckets_ = o.buckets_; buckets_ = o.buckets_;
hazptrs_[0].reset(buckets_); hazptrs_[0].reset(buckets_);
bucket_count_ = o.bucket_count_;
} }
/* implicit */ Iterator(Iterator&& o) noexcept /* implicit */ Iterator(Iterator&& o) noexcept
...@@ -739,6 +778,7 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -739,6 +778,7 @@ class alignas(64) ConcurrentHashMapSegment {
node_ = o.node_; node_ = o.node_;
buckets_ = o.buckets_; buckets_ = o.buckets_;
idx_ = o.idx_; idx_ = o.idx_;
bucket_count_ = o.bucket_count_;
} }
// These are accessed directly from the functions above // These are accessed directly from the functions above
...@@ -747,22 +787,42 @@ class alignas(64) ConcurrentHashMapSegment { ...@@ -747,22 +787,42 @@ class alignas(64) ConcurrentHashMapSegment {
private: private:
Node* node_{nullptr}; Node* node_{nullptr};
Buckets* buckets_{nullptr}; Buckets* buckets_{nullptr};
uint64_t idx_; size_t bucket_count_{0};
uint64_t idx_{0};
}; };
private: private:
// Shards have already used low ShardBits of the hash. // Shards have already used low ShardBits of the hash.
// Shift it over to use fresh bits. // Shift it over to use fresh bits.
uint64_t getIdx(Buckets* buckets, size_t hash) { uint64_t getIdx(size_t bucket_count, size_t hash) {
return (hash >> ShardBits) & (buckets->bucket_count_ - 1); return (hash >> ShardBits) & (bucket_count - 1);
}
void getBucketsAndCount(
size_t& bcount,
Buckets*& buckets,
folly::hazptr::hazptr_holder& hazptr) {
while (true) {
auto seqlock = seqlock_.load(std::memory_order_acquire);
bcount = bucket_count_.load(std::memory_order_acquire);
buckets = hazptr.get_protected(buckets_);
auto seqlock2 = seqlock_.load(std::memory_order_acquire);
if (!(seqlock & 1) && (seqlock == seqlock2)) {
break;
}
}
DCHECK(buckets);
} }
Mutex m_;
float load_factor_; float load_factor_;
size_t load_factor_nodes_; size_t load_factor_nodes_;
size_t size_{0}; size_t size_{0};
size_t const max_size_; size_t const max_size_;
Atom<Buckets*> buckets_{nullptr};
Mutex m_; // Fields needed for read-only access, on separate cacheline.
alignas(64) Atom<Buckets*> buckets_{nullptr};
std::atomic<uint64_t> seqlock_{0};
Atom<size_t> bucket_count_;
}; };
} // namespace detail } // namespace detail
} // namespace folly } // namespace folly
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment