Commit 2c1539c1 authored by Maged Michael, committed by Facebook Github Bot

hazptr: Rename hazptr_obj_batch to hazptr_obj_cohort

Summary: Rename hazptr_obj_batch to hazptr_obj_cohort, a more accurate name for the abstraction.

Reviewed By: davidtgoldblatt

Differential Revision: D19518771

fbshipit-source-id: f50a8a481f260dde4fce10fb9664d4f86c263b60
parent 8221b02b
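
For readers skimming the diff, here is a minimal sketch of how call sites change under this rename, modeled on the test code further down in the commit. It assumes the folly/synchronization/Hazptr.h umbrella header; the Widget type and example() function are hypothetical.

#include <folly/synchronization/Hazptr.h>

struct Widget : folly::hazptr_obj_base<Widget> {
  int value{0};
};

void example() {
  folly::hazptr_obj_cohort<> cohort;   // previously folly::hazptr_obj_batch<>
  auto* w = new Widget;
  w->set_cohort_tag(&cohort);          // previously set_batch_tag(&batch)
  w->retire();                         // retired into the cohort
}                                      // ~hazptr_obj_cohort reclaims objects retired to it
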
......@@ -193,8 +193,8 @@ class ConcurrentHashMap {
std::memory_order_relaxed);
o.segments_[i].store(nullptr, std::memory_order_relaxed);
}
batch_.store(o.batch(), std::memory_order_relaxed);
o.batch_.store(nullptr, std::memory_order_relaxed);
cohort_.store(o.cohort(), std::memory_order_relaxed);
o.cohort_.store(nullptr, std::memory_order_relaxed);
}
ConcurrentHashMap& operator=(ConcurrentHashMap&& o) {
......@@ -211,9 +211,9 @@ class ConcurrentHashMap {
}
size_ = o.size_;
max_size_ = o.max_size_;
batch_shutdown_cleanup();
batch_.store(o.batch(), std::memory_order_relaxed);
o.batch_.store(nullptr, std::memory_order_relaxed);
cohort_shutdown_cleanup();
cohort_.store(o.cohort(), std::memory_order_relaxed);
o.cohort_.store(nullptr, std::memory_order_relaxed);
return *this;
}
......@@ -225,7 +225,7 @@ class ConcurrentHashMap {
Allocator().deallocate((uint8_t*)seg, sizeof(SegmentT));
}
}
batch_shutdown_cleanup();
cohort_shutdown_cleanup();
}
bool empty() const noexcept {
......@@ -305,7 +305,7 @@ class ConcurrentHashMap {
std::pair<ConstIterator, bool> emplace(Args&&... args) {
using Node = typename SegmentT::Node;
auto node = (Node*)Allocator().allocate(sizeof(Node));
new (node) Node(ensureBatch(), std::forward<Args>(args)...);
new (node) Node(ensureCohort(), std::forward<Args>(args)...);
auto segment = pickSegment(node->getItem().first);
std::pair<ConstIterator, bool> res(
std::piecewise_construct,
......@@ -568,7 +568,7 @@ class ConcurrentHashMap {
SegmentT* ensureSegment(uint64_t i) const {
SegmentT* seg = segments_[i].load(std::memory_order_acquire);
if (!seg) {
auto b = ensureBatch();
auto b = ensureCohort();
SegmentT* newseg = (SegmentT*)Allocator().allocate(sizeof(SegmentT));
newseg = new (newseg)
SegmentT(size_ >> ShardBits, load_factor_, max_size_ >> ShardBits, b);
......@@ -583,37 +583,37 @@ class ConcurrentHashMap {
return seg;
}
hazptr_obj_batch<Atom>* batch() const noexcept {
return batch_.load(std::memory_order_acquire);
hazptr_obj_cohort<Atom>* cohort() const noexcept {
return cohort_.load(std::memory_order_acquire);
}
hazptr_obj_batch<Atom>* ensureBatch() const {
auto b = batch();
hazptr_obj_cohort<Atom>* ensureCohort() const {
auto b = cohort();
if (!b) {
auto storage = Allocator().allocate(sizeof(hazptr_obj_batch<Atom>));
auto newbatch = new (storage) hazptr_obj_batch<Atom>();
if (batch_.compare_exchange_strong(b, newbatch)) {
b = newbatch;
auto storage = Allocator().allocate(sizeof(hazptr_obj_cohort<Atom>));
auto newcohort = new (storage) hazptr_obj_cohort<Atom>();
if (cohort_.compare_exchange_strong(b, newcohort)) {
b = newcohort;
} else {
newbatch->~hazptr_obj_batch<Atom>();
Allocator().deallocate(storage, sizeof(hazptr_obj_batch<Atom>));
newcohort->~hazptr_obj_cohort<Atom>();
Allocator().deallocate(storage, sizeof(hazptr_obj_cohort<Atom>));
}
}
return b;
}
void batch_shutdown_cleanup() {
auto b = batch();
void cohort_shutdown_cleanup() {
auto b = cohort();
if (b) {
b->~hazptr_obj_batch<Atom>();
Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_batch<Atom>));
b->~hazptr_obj_cohort<Atom>();
Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_cohort<Atom>));
}
}
mutable Atom<SegmentT*> segments_[NumShards];
size_t size_{0};
size_t max_size_{0};
mutable Atom<hazptr_obj_batch<Atom>*> batch_{nullptr};
mutable Atom<hazptr_obj_cohort<Atom>*> cohort_{nullptr};
};
#if FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
......
......@@ -237,9 +237,9 @@ class UnboundedQueue {
struct Consumer {
Atom<Segment*> head;
Atom<Ticket> ticket;
hazptr_obj_batch<Atom> batch;
hazptr_obj_cohort<Atom> cohort;
explicit Consumer(Segment* s) : head(s), ticket(0) {
s->set_batch_no_tag(&batch); // defined in hazptr_obj
s->set_cohort_no_tag(&cohort); // defined in hazptr_obj
}
};
struct Producer {
......@@ -559,7 +559,7 @@ class UnboundedQueue {
Segment* allocNextSegment(Segment* s) {
auto t = s->minTicket() + SegmentSize;
Segment* next = new Segment(t);
next->set_batch_no_tag(&c_.batch); // defined in hazptr_obj
next->set_cohort_no_tag(&c_.cohort); // defined in hazptr_obj
next->acquire_ref_safe(); // defined in hazptr_obj_base_linked
if (!s->casNextSegment(next)) {
delete next;
......
......@@ -148,18 +148,18 @@ class NodeT : public hazptr_obj_base_linked<
public:
typedef std::pair<const KeyType, ValueType> value_type;
explicit NodeT(hazptr_obj_batch<Atom>* batch, NodeT* other)
explicit NodeT(hazptr_obj_cohort<Atom>* cohort, NodeT* other)
: item_(other->item_) {
init(batch);
init(cohort);
}
template <typename Arg, typename... Args>
NodeT(hazptr_obj_batch<Atom>* batch, Arg&& k, Args&&... args)
NodeT(hazptr_obj_cohort<Atom>* cohort, Arg&& k, Args&&... args)
: item_(
std::piecewise_construct,
std::forward<Arg>(k),
std::forward<Args>(args)...) {
init(batch);
init(cohort);
}
void release() {
......@@ -183,11 +183,11 @@ class NodeT : public hazptr_obj_base_linked<
Atom<NodeT*> next_{nullptr};
private:
void init(hazptr_obj_batch<Atom>* batch) {
DCHECK(batch);
void init(hazptr_obj_cohort<Atom>* cohort) {
DCHECK(cohort);
this->set_deleter( // defined in hazptr_obj
concurrenthashmap::HazptrDeleter<Allocator>());
this->set_batch_tag(batch); // defined in hazptr_obj
this->set_cohort_tag(cohort); // defined in hazptr_obj
this->acquire_link_safe(); // defined in hazptr_obj_base_linked
}
......@@ -220,15 +220,15 @@ class alignas(64) BucketTable {
size_t initial_buckets,
float load_factor,
size_t max_size,
hazptr_obj_batch<Atom>* batch)
hazptr_obj_cohort<Atom>* cohort)
: load_factor_(load_factor), max_size_(max_size) {
DCHECK(batch);
DCHECK(cohort);
initial_buckets = folly::nextPowTwo(initial_buckets);
DCHECK(
max_size_ == 0 ||
(isPowTwo(max_size_) &&
(folly::popcount(max_size_ - 1) + ShardBits <= 32)));
auto buckets = Buckets::create(initial_buckets, batch);
auto buckets = Buckets::create(initial_buckets, cohort);
buckets_.store(buckets, std::memory_order_release);
load_factor_nodes_ = initial_buckets * load_factor_;
bucket_count_.store(initial_buckets, std::memory_order_relaxed);
......@@ -272,10 +272,10 @@ class alignas(64) BucketTable {
const KeyType& k,
InsertType type,
MatchFunc match,
hazptr_obj_batch<Atom>* batch,
hazptr_obj_cohort<Atom>* cohort,
Args&&... args) {
return doInsert(
it, k, type, match, nullptr, batch, std::forward<Args>(args)...);
it, k, type, match, nullptr, cohort, std::forward<Args>(args)...);
}
template <typename MatchFunc, typename... Args>
......@@ -285,14 +285,14 @@ class alignas(64) BucketTable {
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch) {
return doInsert(it, k, type, match, cur, batch, cur);
hazptr_obj_cohort<Atom>* cohort) {
return doInsert(it, k, type, match, cur, cohort, cur);
}
// Must hold lock.
void rehash(size_t bucket_count, hazptr_obj_batch<Atom>* batch) {
void rehash(size_t bucket_count, hazptr_obj_cohort<Atom>* cohort) {
auto buckets = buckets_.load(std::memory_order_relaxed);
auto newbuckets = Buckets::create(bucket_count, batch);
auto newbuckets = Buckets::create(bucket_count, cohort);
load_factor_nodes_ = bucket_count * load_factor_;
......@@ -330,7 +330,7 @@ class alignas(64) BucketTable {
for (; node != lastrun;
node = node->next_.load(std::memory_order_relaxed)) {
auto newnode = (Node*)Allocator().allocate(sizeof(Node));
new (newnode) Node(batch, node);
new (newnode) Node(cohort, node);
auto k = getIdx(bucket_count, HashFn()(node->getItem().first));
auto prevhead = &newbuckets->buckets_[k]();
newnode->next_.store(prevhead->load(std::memory_order_relaxed));
......@@ -422,10 +422,10 @@ class alignas(64) BucketTable {
return 0;
}
void clear(hazptr_obj_batch<Atom>* batch) {
void clear(hazptr_obj_cohort<Atom>* cohort) {
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
Buckets* buckets;
auto newbuckets = Buckets::create(bcount, batch);
auto newbuckets = Buckets::create(bcount, cohort);
{
std::lock_guard<Mutex> g(m_);
buckets = buckets_.load(std::memory_order_relaxed);
......@@ -469,12 +469,12 @@ class alignas(64) BucketTable {
~Buckets() {}
public:
static Buckets* create(size_t count, hazptr_obj_batch<Atom>* batch) {
static Buckets* create(size_t count, hazptr_obj_cohort<Atom>* cohort) {
auto buf =
Allocator().allocate(sizeof(Buckets) + sizeof(BucketRoot) * count);
auto buckets = new (buf) Buckets();
DCHECK(batch);
buckets->set_batch_tag(batch); // defined in hazptr_obj
DCHECK(cohort);
buckets->set_cohort_tag(cohort); // defined in hazptr_obj
for (size_t i = 0; i < count; i++) {
new (&buckets->buckets_[i]) BucketRoot;
}
......@@ -629,7 +629,7 @@ class alignas(64) BucketTable {
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch,
hazptr_obj_cohort<Atom>* cohort,
Args&&... args) {
auto h = HashFn()(k);
std::unique_lock<Mutex> g(m_);
......@@ -642,7 +642,7 @@ class alignas(64) BucketTable {
// Would exceed max size.
throw std::bad_alloc();
}
rehash(bcount << 1, batch);
rehash(bcount << 1, cohort);
buckets = buckets_.load(std::memory_order_relaxed);
bcount = bucket_count_.load(std::memory_order_relaxed);
}
......@@ -670,7 +670,7 @@ class alignas(64) BucketTable {
} else {
if (!cur) {
cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
new (cur) Node(cohort, std::forward<Args>(args)...);
}
auto next = node->next_.load(std::memory_order_relaxed);
cur->next_.store(next, std::memory_order_relaxed);
......@@ -699,7 +699,7 @@ class alignas(64) BucketTable {
// Would exceed max size.
throw std::bad_alloc();
}
rehash(bcount << 1, batch);
rehash(bcount << 1, cohort);
// Reload correct bucket.
buckets = buckets_.load(std::memory_order_relaxed);
......@@ -717,7 +717,7 @@ class alignas(64) BucketTable {
// OR DOES_NOT_EXIST, but only in the try_emplace case
DCHECK(type == InsertType::ANY || type == InsertType::DOES_NOT_EXIST);
cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
new (cur) Node(cohort, std::forward<Args>(args)...);
}
cur->next_.store(headnode, std::memory_order_relaxed);
head->store(cur, std::memory_order_release);
......@@ -750,7 +750,7 @@ using folly::f14::detail::MaskType;
using folly::f14::detail::SparseMaskIter;
using folly::hazptr_obj_base;
using folly::hazptr_obj_batch;
using folly::hazptr_obj_cohort;
template <
typename KeyType,
......@@ -765,12 +765,12 @@ class NodeT : public hazptr_obj_base<
typedef std::pair<const KeyType, ValueType> value_type;
template <typename Arg, typename... Args>
NodeT(hazptr_obj_batch<Atom>* batch, Arg&& k, Args&&... args)
NodeT(hazptr_obj_cohort<Atom>* cohort, Arg&& k, Args&&... args)
: item_(
std::piecewise_construct,
std::forward_as_tuple(std::forward<Arg>(k)),
std::forward_as_tuple(std::forward<Args>(args)...)) {
init(batch);
init(cohort);
}
value_type& getItem() {
......@@ -778,11 +778,11 @@ class NodeT : public hazptr_obj_base<
}
private:
void init(hazptr_obj_batch<Atom>* batch) {
DCHECK(batch);
void init(hazptr_obj_cohort<Atom>* cohort) {
DCHECK(cohort);
this->set_deleter( // defined in hazptr_obj
HazptrDeleter<Allocator>());
this->set_batch_tag(batch); // defined in hazptr_obj
this->set_cohort_tag(cohort); // defined in hazptr_obj
}
value_type item_;
......@@ -962,11 +962,11 @@ class alignas(64) SIMDTable {
~Chunks() {}
public:
static Chunks* create(size_t count, hazptr_obj_batch<Atom>* batch) {
static Chunks* create(size_t count, hazptr_obj_cohort<Atom>* cohort) {
auto buf = Allocator().allocate(sizeof(Chunks) + sizeof(Chunk) * count);
auto chunks = new (buf) Chunks();
DCHECK(batch);
chunks->set_batch_tag(batch); // defined in hazptr_obj
DCHECK(cohort);
chunks->set_cohort_tag(cohort); // defined in hazptr_obj
for (size_t i = 0; i < count; i++) {
new (&chunks->chunks_[i]) Chunk;
chunks->chunks_[i].clear();
......@@ -1129,19 +1129,19 @@ class alignas(64) SIMDTable {
size_t initial_size,
float load_factor,
size_t max_size,
hazptr_obj_batch<Atom>* batch)
hazptr_obj_cohort<Atom>* cohort)
: load_factor_(load_factor),
max_size_(max_size),
chunks_(nullptr),
chunk_count_(0) {
DCHECK(batch);
DCHECK(cohort);
DCHECK(
max_size_ == 0 ||
(isPowTwo(max_size_) &&
(folly::popcount(max_size_ - 1) + ShardBits <= 32)));
DCHECK(load_factor_ > 0.0);
load_factor_ = std::min<float>(load_factor_, 1.0);
rehash(initial_size, batch);
rehash(initial_size, cohort);
}
~SIMDTable() {
......@@ -1182,7 +1182,7 @@ class alignas(64) SIMDTable {
const KeyType& k,
InsertType type,
MatchFunc match,
hazptr_obj_batch<Atom>* batch,
hazptr_obj_cohort<Atom>* cohort,
Args&&... args) {
Node* node;
Chunks* chunks;
......@@ -1198,7 +1198,7 @@ class alignas(64) SIMDTable {
k,
type,
match,
batch,
cohort,
chunk_idx,
tag_idx,
node,
......@@ -1209,7 +1209,7 @@ class alignas(64) SIMDTable {
}
auto cur = (Node*)Allocator().allocate(sizeof(Node));
new (cur) Node(batch, std::forward<Args>(args)...);
new (cur) Node(cohort, std::forward<Args>(args)...);
if (!node) {
std::tie(chunk_idx, tag_idx) =
......@@ -1236,7 +1236,7 @@ class alignas(64) SIMDTable {
InsertType type,
MatchFunc match,
Node* cur,
hazptr_obj_batch<Atom>* batch) {
hazptr_obj_cohort<Atom>* cohort) {
DCHECK(cur != nullptr);
Node* node;
Chunks* chunks;
......@@ -1252,7 +1252,7 @@ class alignas(64) SIMDTable {
k,
type,
match,
batch,
cohort,
chunk_idx,
tag_idx,
node,
......@@ -1280,9 +1280,9 @@ class alignas(64) SIMDTable {
return true;
}
void rehash(size_t size, hazptr_obj_batch<Atom>* batch) {
void rehash(size_t size, hazptr_obj_cohort<Atom>* cohort) {
size_t new_chunk_count = size == 0 ? 0 : (size - 1) / Chunk::kCapacity + 1;
rehash_internal(folly::nextPowTwo(new_chunk_count), batch);
rehash_internal(folly::nextPowTwo(new_chunk_count), cohort);
}
bool find(Iterator& res, const KeyType& k) {
......@@ -1373,10 +1373,10 @@ class alignas(64) SIMDTable {
return 1;
}
void clear(hazptr_obj_batch<Atom>* batch) {
void clear(hazptr_obj_cohort<Atom>* cohort) {
size_t ccount = chunk_count_.load(std::memory_order_relaxed);
Chunks* chunks;
auto newchunks = Chunks::create(ccount, batch);
auto newchunks = Chunks::create(ccount, cohort);
{
std::lock_guard<Mutex> g(m_);
chunks = chunks_.load(std::memory_order_relaxed);
......@@ -1461,7 +1461,7 @@ class alignas(64) SIMDTable {
const KeyType& k,
InsertType type,
MatchFunc match,
hazptr_obj_batch<Atom>* batch,
hazptr_obj_cohort<Atom>* cohort,
size_t& chunk_idx,
size_t& tag_idx,
Node*& node,
......@@ -1476,7 +1476,7 @@ class alignas(64) SIMDTable {
// Would exceed max size.
throw std::bad_alloc();
}
rehash_internal(ccount << 1, batch);
rehash_internal(ccount << 1, cohort);
ccount = chunk_count_.load(std::memory_order_relaxed);
chunks = chunks_.load(std::memory_order_relaxed);
}
......@@ -1505,7 +1505,7 @@ class alignas(64) SIMDTable {
// Would exceed max size.
throw std::bad_alloc();
}
rehash_internal(ccount << 1, batch);
rehash_internal(ccount << 1, cohort);
ccount = chunk_count_.load(std::memory_order_relaxed);
chunks = chunks_.load(std::memory_order_relaxed);
it.hazptrs_[0].reset(chunks);
......@@ -1514,13 +1514,15 @@ class alignas(64) SIMDTable {
return true;
}
void rehash_internal(size_t new_chunk_count, hazptr_obj_batch<Atom>* batch) {
void rehash_internal(
size_t new_chunk_count,
hazptr_obj_cohort<Atom>* cohort) {
DCHECK(isPowTwo(new_chunk_count));
auto old_chunk_count = chunk_count_.load(std::memory_order_relaxed);
if (old_chunk_count >= new_chunk_count) {
return;
}
auto new_chunks = Chunks::create(new_chunk_count, batch);
auto new_chunks = Chunks::create(new_chunk_count, cohort);
auto old_chunks = chunks_.load(std::memory_order_relaxed);
grow_threshold_ = new_chunk_count * Chunk::kCapacity * load_factor_;
......@@ -1670,9 +1672,9 @@ class alignas(64) ConcurrentHashMapSegment {
size_t initial_buckets,
float load_factor,
size_t max_size,
hazptr_obj_batch<Atom>* batch)
: impl_(initial_buckets, load_factor, max_size, batch), batch_(batch) {
DCHECK(batch);
hazptr_obj_cohort<Atom>* cohort)
: impl_(initial_buckets, load_factor, max_size, cohort), cohort_(cohort) {
DCHECK(cohort);
}
~ConcurrentHashMapSegment() = default;
......@@ -1692,7 +1694,7 @@ class alignas(64) ConcurrentHashMapSegment {
template <typename Key, typename Value>
bool insert(Iterator& it, Key&& k, Value&& v) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
new (node) Node(cohort_, std::forward<Key>(k), std::forward<Value>(v));
auto res = insert_internal(
it,
node->getItem().first,
......@@ -1732,7 +1734,7 @@ class alignas(64) ConcurrentHashMapSegment {
template <typename Key, typename Value>
bool insert_or_assign(Iterator& it, Key&& k, Value&& v) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
new (node) Node(cohort_, std::forward<Key>(k), std::forward<Value>(v));
auto res = insert_internal(
it,
node->getItem().first,
......@@ -1749,7 +1751,7 @@ class alignas(64) ConcurrentHashMapSegment {
template <typename Key, typename Value>
bool assign(Iterator& it, Key&& k, Value&& v) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
new (node) Node(cohort_, std::forward<Key>(k), std::forward<Value>(v));
auto res = insert_internal(
it,
node->getItem().first,
......@@ -1770,7 +1772,8 @@ class alignas(64) ConcurrentHashMapSegment {
const ValueType& expected,
Value&& desired) {
auto node = (Node*)Allocator().allocate(sizeof(Node));
new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(desired));
new (node)
Node(cohort_, std::forward<Key>(k), std::forward<Value>(desired));
auto res = insert_internal(
it,
node->getItem().first,
......@@ -1792,7 +1795,7 @@ class alignas(64) ConcurrentHashMapSegment {
MatchFunc match,
Args&&... args) {
return impl_.insert(
it, k, type, match, batch_, std::forward<Args>(args)...);
it, k, type, match, cohort_, std::forward<Args>(args)...);
}
template <typename MatchFunc, typename... Args>
......@@ -1802,12 +1805,12 @@ class alignas(64) ConcurrentHashMapSegment {
InsertType type,
MatchFunc match,
Node* cur) {
return impl_.insert(it, k, type, match, cur, batch_);
return impl_.insert(it, k, type, match, cur, cohort_);
}
// Must hold lock.
void rehash(size_t bucket_count) {
impl_.rehash(bucket_count, batch_);
impl_.rehash(bucket_count, cohort_);
}
bool find(Iterator& res, const KeyType& k) {
......@@ -1844,7 +1847,7 @@ class alignas(64) ConcurrentHashMapSegment {
}
void clear() {
impl_.clear(batch_);
impl_.clear(cohort_);
}
void max_load_factor(float factor) {
......@@ -1861,7 +1864,7 @@ class alignas(64) ConcurrentHashMapSegment {
private:
ImplT impl_;
hazptr_obj_batch<Atom>* batch_;
hazptr_obj_cohort<Atom>* cohort_;
};
} // namespace detail
} // namespace folly
......@@ -190,7 +190,7 @@ RequestContext::StateHazptr::StateHazptr(const StateHazptr& o) {
}
RequestContext::StateHazptr::~StateHazptr() {
batch_.shutdown_and_reclaim();
cohort_.shutdown_and_reclaim();
auto p = combined();
if (p) {
delete p;
......@@ -214,7 +214,7 @@ RequestContext::StateHazptr::ensureCombined() {
}
void RequestContext::StateHazptr::setCombined(Combined* p) {
p->set_batch_tag(&batch_);
p->set_cohort_tag(&cohort_);
combined_.store(p, std::memory_order_release);
}
......
......@@ -366,7 +366,7 @@ class RequestContext {
// Hazard pointer-protected combined structure for request data
// and callbacks.
struct Combined;
hazptr_obj_batch<> batch_; // For destruction order
hazptr_obj_cohort<> cohort_; // For destruction order
std::atomic<Combined*> combined_{nullptr};
std::mutex mutex_;
......
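
The RequestContext change above shows the typical embedding pattern: the owning object holds a hazptr_obj_cohort member, tags the objects it publishes, and drains the cohort in its destructor before tearing down remaining state. Below is a minimal sketch of that pattern, assuming the folly/synchronization/Hazptr.h umbrella header; Owner, Data, and publish() are hypothetical names.

#include <folly/synchronization/Hazptr.h>

#include <atomic>

struct Data : folly::hazptr_obj_base<Data> {};

class Owner {
 public:
  ~Owner() {
    cohort_.shutdown_and_reclaim();  // reclaim objects retired to the cohort
    delete data_.load();             // then release anything still owned
  }
  void publish(Data* d) {
    d->set_cohort_tag(&cohort_);     // tag before the object can be retired
    data_.store(d);
  }

 private:
  folly::hazptr_obj_cohort<> cohort_;  // cf. cohort_ in RequestContext above
  std::atomic<Data*> data_{nullptr};
};
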
......@@ -59,9 +59,9 @@ class hazptr_obj;
template <template <typename> class Atom = std::atomic>
class hazptr_obj_list;
/** hazptr_obj_batch */
/** hazptr_obj_cohort */
template <template <typename> class Atom = std::atomic>
class hazptr_obj_batch;
class hazptr_obj_cohort;
/** hazptr_obj_retired_list */
template <template <typename> class Atom = std::atomic>
......
......@@ -53,7 +53,7 @@ constexpr int hazptr_domain_rcount_threshold() {
* Notes on destruction order, tagged objects, locking and deadlock
* avoidance:
* - Tagged objects support reclamation order guarantees. A call to
* cleanup_batch_tag(tag) guarantees that all objects with the
* cleanup_cohort_tag(tag) guarantees that all objects with the
* specified tag are reclaimed before the function returns.
* - Due to the strict order, access to the set of tagged objects
* needs synchronization and care must be taken to avoid deadlock.
......@@ -66,7 +66,7 @@ constexpr int hazptr_domain_rcount_threshold() {
* reclaimed objects. This type is needed to guarantee an upper
* bound on unreclaimed reclaimable objects.
* - Type B: A Type B reclamation operation is triggered by a call
* to the function cleanup_batch_tag for a specific tag. All
* to the function cleanup_cohort_tag for a specific tag. All
* objects with the specified tag must be reclaimed
* unconditionally before returning from such a function
* call. Hazard pointers are not checked. This type of reclamation
......@@ -187,9 +187,9 @@ class hazptr_domain {
wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s
}
/** cleanup_batch_tag */
void cleanup_batch_tag(const hazptr_obj_batch<Atom>* batch) noexcept {
auto tag = reinterpret_cast<uintptr_t>(batch) + kTagBit;
/** cleanup_cohort_tag */
void cleanup_cohort_tag(const hazptr_obj_cohort<Atom>* cohort) noexcept {
auto tag = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
auto obj = tagged_.pop_all(RetiredList::kAlsoLock);
ObjList match, nomatch;
list_match_tag(tag, obj, match, nomatch);
......@@ -219,7 +219,7 @@ class hazptr_domain {
void
list_match_tag(uintptr_t tag, Obj* obj, ObjList& match, ObjList& nomatch) {
list_match_condition(
obj, match, nomatch, [tag](Obj* o) { return o->batch_tag() == tag; });
obj, match, nomatch, [tag](Obj* o) { return o->cohort_tag() == tag; });
}
private:
......@@ -236,7 +236,7 @@ class hazptr_domain {
hazptr_domain<Atom>&) noexcept;
friend class hazptr_holder<Atom>;
friend class hazptr_obj<Atom>;
friend class hazptr_obj_batch<Atom>;
friend class hazptr_obj_cohort<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
friend class hazptr_tc<Atom>;
#endif
......@@ -277,7 +277,7 @@ class hazptr_domain {
if (l.empty()) {
return;
}
uintptr_t btag = l.head()->batch_tag();
uintptr_t btag = l.head()->cohort_tag();
bool tagged = ((btag & kTagBit) == kTagBit);
RetiredList& rlist = tagged ? tagged_ : untagged_;
Atom<uint64_t>& sync_time =
......@@ -285,7 +285,7 @@ class hazptr_domain {
/*** Full fence ***/ asymmetricLightBarrier();
/* Only tagged lists need to be locked because tagging is used to
* guarantee the identification of all objects with a specific
* tag. Locking pcrotects against concurrent hazptr_cleanup_tag()
* tag. Locking protects against concurrent hazptr_cleanup_tag()
* calls missing tagged objects. */
bool lock =
tagged ? RetiredList::kMayBeLocked : RetiredList::kMayNotBeLocked;
......@@ -342,7 +342,7 @@ class hazptr_domain {
for (; hprec; hprec = hprec->next()) {
hs.insert(hprec->hazptr());
}
/* Check objets against hazard pointer values */
/* Check objects against hazard pointer values */
ObjList match, nomatch;
list_match_condition(obj, match, nomatch, [&](Obj* o) {
return hs.count(o->raw_ptr()) > 0;
......
......@@ -42,42 +42,42 @@ namespace folly {
* Data members:
* - next_: link to next object in private singly linked lists.
* - reclaim_: reclamation function for this object.
* - batch_tag_: A pointer to a batch (a linked list where the object
* is to be pushed when retired). It can also be used as a tag (see
* below). See details below.
* - cohort_tag_: A pointer to a cohort (a linked list where the
* object is to be pushed when retired). It can also be used as a
* tag (see below). See details below.
*
* Batches, Tags, Tagged Objects, and Untagged Objects:
* Cohorts, Tags, Tagged Objects, and Untagged Objects:
*
* - Batches: Batches (instances of hazptr_obj_batch) are sets of
* retired hazptr_obj-s. Batches are used to keep related objects
* - Cohorts: Cohorts (instances of hazptr_obj_cohort) are sets of
* retired hazptr_obj-s. Cohorts are used to keep related objects
* together instead of being spread across thread local structures
* and/or mixed with unrelated objects.
*
* - Tags: A tag is a unique identifier used for fast identification
* of related objects. Tags are implemented as addresses of
* batches, with the lowest bit set (to save the space of separate
* batch and tag data members and to differentiate from batches of
* cohorts, with the lowest bit set (to save the space of separate
* cohort and tag data members and to differentiate from cohorts of
* untagged objects).
*
* - Tagged objects: Objects are tagged for fast identification. The
* primary use case is for guaranteeing the destruction of all
* objects with a certain tag (e.g., the destruction of all Key and
* Value objects that were part of a Folly ConcurrentHashMap
* instance). Member function set_batch_tag makes an object tagged.
* instance). Member function set_cohort_tag makes an object tagged.
*
* - Untagged objects: Objects that do not need to be identified
* separately from unrelated objects are not tagged (to keep tagged
* objects uncluttered). Untagged objects may or may not be
* associated with batches. An example of untagged objects
* associated with batches are Segment-s of Folly UnboundedQueue.
* associated with cohorts. An example of untagged objects
* associated with cohorts are Segment-s of Folly UnboundedQueue.
* Although such objects do not need to be tagged, keeping them in
* batches helps avoid cases of a few missing objects delaying the
* cohorts helps avoid cases of a few missing objects delaying the
* reclamation of large numbers of link-counted objects. Objects
* are untagged either by default or after calling
* set_batch_no_tag.
* set_cohort_no_tag.
*
* - Thread Safety: Member functions set_batch_tag and
* set_batch_no_tag are not thread-safe. Thread safety must be
* - Thread Safety: Member functions set_cohort_tag and
* set_cohort_no_tag are not thread-safe. Thread safety must be
* ensured by the calling thread.
*/
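
To make the tagged/untagged distinction above concrete, a minimal sketch (assuming the folly/synchronization/Hazptr.h umbrella header; Node and associate() are hypothetical names):

#include <folly/synchronization/Hazptr.h>

struct Node : folly::hazptr_obj_base<Node> {};

void associate(folly::hazptr_obj_cohort<>& cohort) {
  // Tagged: carries the cohort's tag, so cleanup_cohort_tag (and cohort
  // destruction) guarantees this object is reclaimed.
  auto* tagged = new Node;
  tagged->set_cohort_tag(&cohort);
  tagged->retire();

  // Untagged: grouped with the cohort to keep related retirements together,
  // but not covered by the tag-based cleanup guarantee.
  auto* untagged = new Node;
  untagged->set_cohort_no_tag(&cohort);
  untagged->retire();
}
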
template <template <typename> class Atom>
......@@ -102,21 +102,21 @@ class hazptr_obj {
ReclaimFnPtr reclaim_;
hazptr_obj<Atom>* next_;
uintptr_t batch_tag_;
uintptr_t cohort_tag_;
public:
/** Constructors */
/* All constructors set next_ to this in order to catch misuse bugs
such as double retire. By default, objects are untagged and not
associated with a batch. */
associated with a cohort. */
hazptr_obj() noexcept : next_(this), batch_tag_(0) {}
hazptr_obj() noexcept : next_(this), cohort_tag_(0) {}
hazptr_obj(const hazptr_obj<Atom>& o) noexcept
: next_(this), batch_tag_(o.batch_tag_) {}
: next_(this), cohort_tag_(o.cohort_tag_) {}
hazptr_obj(hazptr_obj<Atom>&& o) noexcept
: next_(this), batch_tag_(o.batch_tag_) {}
: next_(this), cohort_tag_(o.cohort_tag_) {}
/** Copy operator */
hazptr_obj<Atom>& operator=(const hazptr_obj<Atom>&) noexcept {
......@@ -128,31 +128,31 @@ class hazptr_obj {
return *this;
}
/** batch_tag */
uintptr_t batch_tag() {
return batch_tag_;
/** cohort_tag */
uintptr_t cohort_tag() {
return cohort_tag_;
}
/** batch */
hazptr_obj_batch<Atom>* batch() {
uintptr_t btag = batch_tag_;
/** cohort */
hazptr_obj_cohort<Atom>* cohort() {
uintptr_t btag = cohort_tag_;
btag -= btag & kTagBit;
return reinterpret_cast<hazptr_obj_batch<Atom>*>(btag);
return reinterpret_cast<hazptr_obj_cohort<Atom>*>(btag);
}
/** tagged */
bool tagged() {
return (batch_tag_ & kTagBit) == kTagBit;
return (cohort_tag_ & kTagBit) == kTagBit;
}
/** set_batch_tag: Set batch and make object tagged. */
void set_batch_tag(hazptr_obj_batch<Atom>* batch) {
batch_tag_ = reinterpret_cast<uintptr_t>(batch) + kTagBit;
/** set_cohort_tag: Set cohort and make object tagged. */
void set_cohort_tag(hazptr_obj_cohort<Atom>* cohort) {
cohort_tag_ = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
}
/** set_batch_no_tag: Set batch and make object untagged. */
void set_batch_no_tag(hazptr_obj_batch<Atom>* batch) {
batch_tag_ = reinterpret_cast<uintptr_t>(batch);
/** set_cohort_no_tag: Set cohort and make object untagged. */
void set_cohort_no_tag(hazptr_obj_cohort<Atom>* cohort) {
cohort_tag_ = reinterpret_cast<uintptr_t>(cohort);
}
private:
......@@ -161,7 +161,7 @@ class hazptr_obj {
friend class hazptr_obj_base;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base_refcounted;
friend class hazptr_obj_batch<Atom>;
friend class hazptr_obj_cohort<Atom>;
friend class hazptr_priv<Atom>;
hazptr_obj<Atom>* next() const noexcept {
......@@ -188,10 +188,10 @@ class hazptr_obj {
}
void push_obj(hazptr_domain<Atom>& domain) {
auto b = batch();
if (b) {
auto coh = cohort();
if (coh) {
DCHECK_EQ(&domain, &default_hazptr_domain<Atom>());
b->push_obj(this);
coh->push_obj(this);
} else {
push_to_retired(domain);
}
......@@ -277,19 +277,19 @@ class hazptr_obj_list {
}; // hazptr_obj_list
/**
* hazptr_obj_batch
* hazptr_obj_cohort
*
* List of retired objects. For objects to be retired to a batch,
* either of the hazptr_obj member functions set_batch_tag or
* set_batch_no_tag needs to be called before the object is retired.
* List of retired objects. For objects to be retired to a cohort,
* either of the hazptr_obj member functions set_cohort_tag or
* set_cohort_no_tag needs to be called before the object is retired.
*
* See description of hazptr_obj for notes on batches, tags, and
* See description of hazptr_obj for notes on cohorts, tags, and
* tagged and untagged objects.
*
* [Note: For now supports only the default domain.]
*/
template <template <typename> class Atom>
class hazptr_obj_batch {
class hazptr_obj_cohort {
using Obj = hazptr_obj<Atom>;
using List = hazptr_detail::linked_list<Obj>;
using SharedList = hazptr_detail::shared_head_tail_list<Obj, Atom>;
......@@ -303,17 +303,17 @@ class hazptr_obj_batch {
public:
/** Constructor */
hazptr_obj_batch() noexcept
hazptr_obj_cohort() noexcept
: l_(), count_(0), active_(true), pushed_to_domain_tagged_{false} {}
/** Not copyable or moveable */
hazptr_obj_batch(const hazptr_obj_batch& o) = delete;
hazptr_obj_batch(hazptr_obj_batch&& o) = delete;
hazptr_obj_batch& operator=(const hazptr_obj_batch&& o) = delete;
hazptr_obj_batch& operator=(hazptr_obj_batch&& o) = delete;
hazptr_obj_cohort(const hazptr_obj_cohort& o) = delete;
hazptr_obj_cohort(hazptr_obj_cohort&& o) = delete;
hazptr_obj_cohort& operator=(const hazptr_obj_cohort&& o) = delete;
hazptr_obj_cohort& operator=(hazptr_obj_cohort&& o) = delete;
/** Destructor */
~hazptr_obj_batch() {
~hazptr_obj_cohort() {
if (active_) {
shutdown_and_reclaim();
}
......@@ -332,7 +332,7 @@ class hazptr_obj_batch {
reclaim_list(obj);
}
if (pushed_to_domain_tagged_.load(std::memory_order_relaxed)) {
default_hazptr_domain<Atom>().cleanup_batch_tag(this);
default_hazptr_domain<Atom>().cleanup_cohort_tag(this);
}
DCHECK(l_.empty());
}
......@@ -403,7 +403,7 @@ class hazptr_obj_batch {
}
}
}
}; // hazptr_obj_batch
}; // hazptr_obj_cohort
/**
* hazptr_obj_retired_list
......
......@@ -42,7 +42,7 @@ using folly::hazptr_holder;
using folly::hazptr_local;
using folly::hazptr_obj_base;
using folly::hazptr_obj_base_linked;
using folly::hazptr_obj_batch;
using folly::hazptr_obj_cohort;
using folly::hazptr_retire;
using folly::hazptr_root;
using folly::hazptr_tc;
......@@ -813,16 +813,16 @@ void priv_dtor_test() {
}
template <template <typename> class Atom = std::atomic>
void batch_test() {
void cohort_test() {
int num = 10001;
using NodeT = Node<Atom>;
c_.clear();
{
hazptr_obj_batch<Atom> batch;
hazptr_obj_cohort<Atom> cohort;
auto thr = DSched::thread([&]() {
for (int i = 0; i < num; ++i) {
auto p = new NodeT;
p->set_batch_no_tag(&batch);
p->set_cohort_no_tag(&cohort);
p->retire();
}
});
......@@ -833,11 +833,11 @@ void batch_test() {
hazptr_cleanup<Atom>();
c_.clear();
{
hazptr_obj_batch<Atom> batch;
hazptr_obj_cohort<Atom> cohort;
auto thr = DSched::thread([&]() {
for (int i = 0; i < num; ++i) {
auto p = new NodeT;
p->set_batch_tag(&batch);
p->set_cohort_tag(&cohort);
p->retire();
}
});
......@@ -851,10 +851,10 @@ void batch_test() {
template <template <typename> class Atom = std::atomic>
void recursive_destruction_test() {
struct Foo : public hazptr_obj_base<Foo, Atom> {
hazptr_obj_batch<Atom> batch_;
hazptr_obj_cohort<Atom> cohort_;
Foo* foo_{nullptr};
explicit Foo(hazptr_obj_batch<Atom>* b) {
this->set_batch_tag(b);
explicit Foo(hazptr_obj_cohort<Atom>* b) {
this->set_cohort_tag(b);
c_.inc_ctors();
}
~Foo() {
......@@ -867,8 +867,8 @@ void recursive_destruction_test() {
}
foo_ = foo;
}
hazptr_obj_batch<Atom>* batch() {
return &batch_;
hazptr_obj_cohort<Atom>* cohort() {
return &cohort_;
}
};
......@@ -879,13 +879,13 @@ void recursive_destruction_test() {
std::vector<std::thread> threads(nthr);
for (int tid = 0; tid < nthr; ++tid) {
threads[tid] = DSched::thread([&, tid]() {
hazptr_obj_batch<Atom> b0;
hazptr_obj_cohort<Atom> b0;
Foo* foo0 = new Foo(&b0);
for (int i = tid; i < num1; i += nthr) {
Foo* foo1 = new Foo(foo0->batch());
Foo* foo1 = new Foo(foo0->cohort());
foo0->set(foo1);
for (int j = 0; j < num2; ++j) {
foo1->set(new Foo(foo1->batch()));
foo1->set(new Foo(foo1->cohort()));
}
}
foo0->retire();
......@@ -902,10 +902,10 @@ void recursive_destruction_test() {
void fork_test() {
folly::start_hazptr_thread_pool_executor();
auto trigger_reclamation = [] {
hazptr_obj_batch b;
hazptr_obj_cohort b;
for (int i = 0; i < 2001; ++i) {
auto p = new Node;
p->set_batch_no_tag(&b);
p->set_cohort_no_tag(&b);
p->retire();
}
};
......@@ -1187,13 +1187,13 @@ TEST_F(HazptrPreInitTest, dsched_priv_dtor) {
priv_dtor_test<DeterministicAtomic>();
}
TEST(HazptrTest, batch) {
batch_test();
TEST(HazptrTest, cohort) {
cohort_test();
}
TEST(HazptrTest, dsched_batch) {
TEST(HazptrTest, dsched_cohort) {
DSched sched(DSched::uniform(0));
batch_test<DeterministicAtomic>();
cohort_test<DeterministicAtomic>();
}
TEST(HazptrTest, recursive_destruction) {
......@@ -1446,20 +1446,20 @@ uint64_t cleanup_bench(std::string name, int nthreads) {
return bench(name, ops, repFn);
}
uint64_t batch_bench(std::string name, int nthreads) {
uint64_t cohort_bench(std::string name, int nthreads) {
struct Foo : public hazptr_obj_base<Foo> {};
// Push unrelated objects into the domain tagged list
hazptr_obj_batch batch;
hazptr_obj_cohort cohort;
for (int i = 0; i < 999; ++i) {
auto p = new Foo;
p->set_batch_tag(&batch);
p->set_cohort_tag(&cohort);
p->retire();
}
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < ops; j += nthreads) {
hazptr_obj_batch b;
hazptr_obj_cohort b;
}
};
auto endFn = [] {};
......@@ -1504,8 +1504,8 @@ void benches() {
}
std::cout << "1/1000 hazptr_cleanup ";
cleanup_bench("", i);
std::cout << "Life cycle of unused tagged obj batch ";
batch_bench("", i);
std::cout << "Life cycle of unused tagged obj cohort ";
cohort_bench("", i);
}
}
......@@ -1536,7 +1536,7 @@ allocate/retire/reclaim object 70 ns 68 ns 67 ns
20-item list protect all - own hazptr 28 ns 28 ns 28 ns
20-item list protect all 31 ns 29 ns 29 ns
1/1000 hazptr_cleanup 2 ns 1 ns 1 ns
Life cycle of unused tagged obj batch 1 ns 1 ns 1 ns
Life cycle of unused tagged obj cohort 1 ns 1 ns 1 ns
================================ 10 threads ================================
10x construct/destruct hazptr_holder 11 ns 8 ns 8 ns
10x construct/destruct hazptr_array<1> 8 ns 7 ns 7 ns
......@@ -1555,5 +1555,5 @@ allocate/retire/reclaim object 20 ns 17 ns 16 ns
20-item list protect all - own hazptr 4 ns 4 ns 4 ns
20-item list protect all 5 ns 4 ns 4 ns
1/1000 hazptr_cleanup 119 ns 113 ns 97 ns
Life cycle of unused tagged obj batch 0 ns 0 ns 0 ns
Life cycle of unused tagged obj cohort 0 ns 0 ns 0 ns
*/