Commit 6100907a authored by Maged Michael, committed by Facebook Github Bot

ConcurrentHashMap: Use hazptr_obj_batch.

Summary: Use hazptr_obj_batch and tagged objects for buckets and nodes. After this diff, higher-level users no longer need to call hazptr_cleanup. The ConcurrentHashMap destructor guarantees the destruction of all key and value objects that were stored in that instance.

Reviewed By: djwatson

Differential Revision: D10150474

fbshipit-source-id: 99530971d157ce4ca0bb0a983b3f7eb40666885e
parent d4b4982c
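To make the summary's guarantee concrete, here is a minimal usage sketch (assuming only the public folly/concurrency/ConcurrentHashMap.h API; illustrative, not part of the commit):

#include <folly/concurrency/ConcurrentHashMap.h>
#include <string>

int main() {
  {
    folly::ConcurrentHashMap<int, std::string> map;
    map.insert(1, "one");
    map.insert_or_assign(2, "two");
  } // After this diff, the destructor alone guarantees that every key and
    // value stored in this instance has been destroyed; callers no longer
    // need to invoke hazptr_cleanup() for that.
  return 0;
}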
@@ -134,6 +134,8 @@ class ConcurrentHashMap {
std::memory_order_relaxed);
o.segments_[i].store(nullptr, std::memory_order_relaxed);
}
+    batch_.store(o.batch(), std::memory_order_relaxed);
+    o.batch_.store(nullptr, std::memory_order_relaxed);
}
ConcurrentHashMap& operator=(ConcurrentHashMap&& o) {
@@ -150,14 +152,12 @@ class ConcurrentHashMap {
}
size_ = o.size_;
max_size_ = o.max_size_;
+    batch_shutdown_cleanup();
+    batch_.store(o.batch(), std::memory_order_relaxed);
+    o.batch_.store(nullptr, std::memory_order_relaxed);
return *this;
}
-  /* Note that some objects stored in ConcurrentHashMap may outlive the
-   * ConcurrentHashMap's destructor, if you need immediate cleanup, call
-   * hazptr_cleanup(), which guarantees all objects are destructed after
-   * it completes.
-   */
~ConcurrentHashMap() {
for (uint64_t i = 0; i < NumShards; i++) {
auto seg = segments_[i].load(std::memory_order_relaxed);
@@ -166,6 +166,7 @@ class ConcurrentHashMap {
Allocator().deallocate((uint8_t*)seg, sizeof(SegmentT));
}
}
+    batch_shutdown_cleanup();
}
bool empty() const noexcept {
@@ -245,7 +246,7 @@ class ConcurrentHashMap {
std::pair<ConstIterator, bool> emplace(Args&&... args) {
using Node = typename SegmentT::Node;
auto node = (Node*)Allocator().allocate(sizeof(Node));
-    new (node) Node(std::forward<Args>(args)...);
+    new (node) Node(ensureBatch(), std::forward<Args>(args)...);
auto segment = pickSegment(node->getItem().first);
std::pair<ConstIterator, bool> res(
std::piecewise_construct,
@@ -493,9 +494,10 @@ class ConcurrentHashMap {
SegmentT* ensureSegment(uint64_t i) const {
SegmentT* seg = segments_[i].load(std::memory_order_acquire);
if (!seg) {
+      auto b = ensureBatch();
SegmentT* newseg = (SegmentT*)Allocator().allocate(sizeof(SegmentT));
newseg = new (newseg)
-          SegmentT(size_ >> ShardBits, load_factor_, max_size_ >> ShardBits);
+          SegmentT(size_ >> ShardBits, load_factor_, max_size_ >> ShardBits, b);
if (!segments_[i].compare_exchange_strong(seg, newseg)) {
// seg is updated with new value, delete ours.
newseg->~SegmentT();
@@ -507,9 +509,40 @@ class ConcurrentHashMap {
return seg;
}
+  hazptr_obj_batch<Atom>* batch() const noexcept {
+    return batch_.load(std::memory_order_acquire);
+  }
+  hazptr_obj_batch<Atom>* ensureBatch() const {
+    auto b = batch();
+    if (!b) {
+      auto storage = Allocator().allocate(sizeof(hazptr_obj_batch<Atom>));
+      auto newbatch = new (storage) hazptr_obj_batch<Atom>();
+      if (batch_.compare_exchange_strong(b, newbatch)) {
+        b = newbatch;
+      } else {
+        newbatch->shutdown_and_reclaim();
+        newbatch->~hazptr_obj_batch<Atom>();
+        Allocator().deallocate(storage, sizeof(hazptr_obj_batch<Atom>));
+      }
+    }
+    return b;
+  }
+  void batch_shutdown_cleanup() {
+    auto b = batch();
+    if (b) {
+      b->shutdown_and_reclaim();
+      hazptr_cleanup_batch_tag(b);
+      b->~hazptr_obj_batch<Atom>();
+      Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_batch<Atom>));
+    }
+  }
mutable Atom<SegmentT*> segments_[NumShards];
size_t size_{0};
size_t max_size_{0};
+  mutable Atom<hazptr_obj_batch<Atom>*> batch_{nullptr};
};
} // namespace folly
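The ensureBatch() helper above lazily allocates the batch on first use and publishes it with compare_exchange_strong; the thread that loses the race must shut down and destroy its unused candidate. A self-contained sketch of that install-or-discard pattern (generic std::atomic stand-ins, not the folly types):

#include <atomic>

struct Batch {}; // stand-in for hazptr_obj_batch<Atom>

std::atomic<Batch*> g_batch{nullptr};

Batch* ensure_batch() {
  Batch* b = g_batch.load(std::memory_order_acquire);
  if (!b) {
    Batch* candidate = new Batch();
    // On CAS failure, b is refreshed with the pointer the winner installed.
    if (g_batch.compare_exchange_strong(b, candidate)) {
      b = candidate; // we won; our candidate is now the shared batch
    } else {
      delete candidate; // we lost; discard our candidate
    }
  }
  return b;
}

batch_shutdown_cleanup() is the other half of the lifecycle: invoked from the destructor and from move assignment, it shuts the batch down, reclaims every object still tagged with it via hazptr_cleanup_batch_tag, and then destroys and deallocates the batch itself.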
@@ -130,21 +130,18 @@ class NodeT : public hazptr_obj_base_linked<
public:
typedef std::pair<const KeyType, ValueType> value_type;
-  explicit NodeT(NodeT* other) : item_(other->item_) {
-    this->set_deleter( // defined in hazptr_obj
-        concurrenthashmap::HazptrDeleter<Allocator>());
-    this->acquire_link_safe(); // defined in hazptr_obj_base_linked
+  explicit NodeT(hazptr_obj_batch<Atom>* batch, NodeT* other)
+      : item_(other->item_) {
+    init(batch);
}
template <typename Arg, typename... Args>
-  NodeT(Arg&& k, Args&&... args)
+  NodeT(hazptr_obj_batch<Atom>* batch, Arg&& k, Args&&... args)
: item_(
std::piecewise_construct,
std::forward<Arg>(k),
std::forward<Args>(args)...) {
-    this->set_deleter( // defined in hazptr_obj
-        concurrenthashmap::HazptrDeleter<Allocator>());
-    this->acquire_link_safe(); // defined in hazptr_obj_base_linked
+    init(batch);
}
void release() {
@@ -168,6 +165,14 @@ class NodeT : public hazptr_obj_base_linked<
Atom<NodeT*> next_{nullptr};
private:
+  void init(hazptr_obj_batch<Atom>* batch) {
+    DCHECK(batch);
+    this->set_deleter( // defined in hazptr_obj
+        concurrenthashmap::HazptrDeleter<Allocator>());
+    this->set_batch_tag(batch); // defined in hazptr_obj
+    this->acquire_link_safe(); // defined in hazptr_obj_base_linked
+  }
ValueHolder<KeyType, ValueType, Allocator> item_;
};
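Both NodeT constructors now route their shared hazptr setup through the single private init(batch) helper, which also tags the node with its owning map's batch; the segment hunks below then thread batch_ into every Node construction and Buckets::create call. A trimmed illustration of that consolidation (hypothetical Node/Tag names, not the folly classes):

#include <cassert>

struct Tag {}; // stand-in for hazptr_obj_batch<Atom>

struct Node {
  explicit Node(Tag* tag) { init(tag); }
  Node(Tag* tag, const Node& other) : value_(other.value_) { init(tag); }

 private:
  // One place for the setup every constructor must perform; the real
  // init() also sets the deleter and calls acquire_link_safe().
  void init(Tag* tag) {
    assert(tag != nullptr); // mirrors DCHECK(batch) above
    tag_ = tag;
  }
  Tag* tag_{nullptr};
  int value_{0};
};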
@@ -227,14 +232,16 @@ class alignas(64) ConcurrentHashMapSegment {
ConcurrentHashMapSegment(
size_t initial_buckets,
float load_factor,
-      size_t max_size)
-      : load_factor_(load_factor), max_size_(max_size) {
+      size_t max_size,
+      hazptr_obj_batch<Atom>* batch)
+      : load_factor_(load_factor), max_size_(max_size), batch_(batch) {
+    DCHECK(batch);
initial_buckets = folly::nextPowTwo(initial_buckets);
DCHECK(
max_size_ == 0 ||
(isPowTwo(max_size_) &&
(folly::popcount(max_size_ - 1) + ShardBits <= 32)));
-    auto buckets = Buckets::create(initial_buckets);
+    auto buckets = Buckets::create(initial_buckets, batch);
buckets_.store(buckets, std::memory_order_release);
load_factor_nodes_ = initial_buckets * load_factor_;
bucket_count_.store(initial_buckets, std::memory_order_relaxed);
@@ -264,7 +271,7 @@ class alignas(64) ConcurrentHashMapSegment {
template <typename Key, typename Value>
bool insert(Iterator& it, Key&& k, Value&& v) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
-    new (node) Node(std::forward<Key>(k), std::forward<Value>(v));
+    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
auto res = insert_internal(
it,
node->getItem().first,
@@ -307,7 +314,7 @@ class alignas(64) ConcurrentHashMapSegment {
template <typename Key, typename Value>
bool insert_or_assign(Iterator& it, Key&& k, Value&& v) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
-    new (node) Node(std::forward<Key>(k), std::forward<Value>(v));
+    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
auto res = insert_internal(
it,
node->getItem().first,
@@ -325,7 +332,7 @@ class alignas(64) ConcurrentHashMapSegment {
template <typename Key, typename Value>
bool assign(Iterator& it, Key&& k, Value&& v) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
-    new (node) Node(std::forward<Key>(k), std::forward<Value>(v));
+    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
auto res = insert_internal(
it,
node->getItem().first,
@@ -347,7 +354,7 @@ class alignas(64) ConcurrentHashMapSegment {
const ValueType& expected,
Value&& desired) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
-    new (node) Node(std::forward<Key>(k), std::forward<Value>(desired));
+    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(desired));
auto res = insert_internal(
it,
node->getItem().first,
@@ -409,7 +416,7 @@ class alignas(64) ConcurrentHashMapSegment {
} else {
if (!cur) {
cur = (Node*)Allocator().allocate(sizeof(Node));
-          new (cur) Node(std::forward<Args>(args)...);
+          new (cur) Node(batch_, std::forward<Args>(args)...);
}
auto next = node->next_.load(std::memory_order_relaxed);
cur->next_.store(next, std::memory_order_relaxed);
@@ -456,7 +463,7 @@ class alignas(64) ConcurrentHashMapSegment {
// OR DOES_NOT_EXIST, but only in the try_emplace case
DCHECK(type == InsertType::ANY || type == InsertType::DOES_NOT_EXIST);
cur = (Node*)Allocator().allocate(sizeof(Node));
-      new (cur) Node(std::forward<Args>(args)...);
+      new (cur) Node(batch_, std::forward<Args>(args)...);
}
cur->next_.store(headnode, std::memory_order_relaxed);
head->store(cur, std::memory_order_release);
@@ -467,7 +474,7 @@ class alignas(64) ConcurrentHashMapSegment {
// Must hold lock.
void rehash(size_t bucket_count) {
auto buckets = buckets_.load(std::memory_order_relaxed);
-    auto newbuckets = Buckets::create(bucket_count);
+    auto newbuckets = Buckets::create(bucket_count, batch_);
load_factor_nodes_ = bucket_count * load_factor_;
@@ -505,7 +512,7 @@ class alignas(64) ConcurrentHashMapSegment {
for (; node != lastrun;
node = node->next_.load(std::memory_order_relaxed)) {
auto newnode = (Node*)Allocator().allocate(sizeof(Node));
-        new (newnode) Node(node);
+        new (newnode) Node(batch_, node);
auto k = getIdx(bucket_count, HashFn()(node->getItem().first));
auto prevhead = &newbuckets->buckets_[k]();
newnode->next_.store(prevhead->load(std::memory_order_relaxed));
@@ -614,7 +621,7 @@ class alignas(64) ConcurrentHashMapSegment {
void clear() {
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
Buckets* buckets;
-    auto newbuckets = Buckets::create(bcount);
+    auto newbuckets = Buckets::create(bcount, batch_);
{
std::lock_guard<Mutex> g(m_);
buckets = buckets_.load(std::memory_order_relaxed);
@@ -657,10 +664,11 @@ class alignas(64) ConcurrentHashMapSegment {
~Buckets() {}
public:
-    static Buckets* create(size_t count) {
+    static Buckets* create(size_t count, hazptr_obj_batch<Atom>* batch) {
auto buf =
Allocator().allocate(sizeof(Buckets) + sizeof(BucketRoot) * count);
auto buckets = new (buf) Buckets();
+      buckets->set_batch_tag(batch); // defined in hazptr_obj
for (size_t i = 0; i < count; i++) {
new (&buckets->buckets_[i]) BucketRoot;
}
@@ -818,6 +826,7 @@ class alignas(64) ConcurrentHashMapSegment {
alignas(64) Atom<Buckets*> buckets_{nullptr};
std::atomic<uint64_t> seqlock_{0};
Atom<size_t> bucket_count_;
+  hazptr_obj_batch<Atom>* batch_;
};
} // namespace detail
} // namespace folly
@@ -301,8 +301,8 @@ class shared_head_only_list {
Node* pop_all_lock() noexcept {
folly::detail::Sleeper s;
-    auto oldval = head();
    while (true) {
+      auto oldval = head();
auto lockbit = oldval & kLockBit;
if (lockbit == kUnlocked) {
auto newval = reinterpret_cast<uintptr_t>(nullptr) + kLockBit;
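The final hunk fixes a subtle spin-loop bug in pop_all_lock(): head() was read once before the loop, so a thread that observed the lock bit set would retry forever against a stale snapshot. Moving the read inside the loop makes every retry see the current head. A self-contained sketch of the corrected loop (generic std::atomic/uintptr_t stand-ins; the real code backs off with folly::detail::Sleeper):

#include <atomic>
#include <cstdint>

constexpr uintptr_t kLockBit = 1;
constexpr uintptr_t kUnlocked = 0;

// Detach the entire list and leave the lock bit set, retrying while
// another thread holds the lock bit.
uintptr_t pop_all_lock(std::atomic<uintptr_t>& head) {
  while (true) {
    // Must be (re)loaded on every iteration; a single pre-loop read can
    // keep showing a lock bit that has long since been released.
    uintptr_t oldval = head.load(std::memory_order_acquire);
    if ((oldval & kLockBit) == kUnlocked) {
      uintptr_t newval = kLockBit; // empty list, lock bit held
      if (head.compare_exchange_weak(oldval, newval)) {
        return oldval; // caller owns the detached nodes
      }
    }
    // else: locked by another thread; back off and retry.
  }
}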