Commit 2c1539c1 authored by Maged Michael, committed by Facebook GitHub Bot

hazptr: Rename hazptr_obj_batch to hazptr_obj_cohort

Summary: Rename hazptr_obj_batch to hazptr_obj_cohort, a more accurate description.

Reviewed By: davidtgoldblatt

Differential Revision: D19518771

fbshipit-source-id: f50a8a481f260dde4fce10fb9664d4f86c263b60
parent 8221b02b
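
For readers unfamiliar with the renamed API, here is a minimal sketch of how a cohort is used after this change, modeled on the tests further down in this diff; the Node type and the loop bound are illustrative:

#include <folly/synchronization/Hazptr.h>

struct Node : folly::hazptr_obj_base<Node> {};

int main() {
  folly::hazptr_obj_cohort<> cohort; // formerly hazptr_obj_batch<>
  for (int i = 0; i < 1000; ++i) {
    auto* p = new Node;
    p->set_cohort_tag(&cohort); // formerly set_batch_tag
    p->retire();                // retired objects collect in the cohort
  }
  // Destroying the cohort reclaims all tagged objects before returning.
  return 0;
}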
@@ -193,8 +193,8 @@ class ConcurrentHashMap {
           std::memory_order_relaxed);
       o.segments_[i].store(nullptr, std::memory_order_relaxed);
     }
-    batch_.store(o.batch(), std::memory_order_relaxed);
-    o.batch_.store(nullptr, std::memory_order_relaxed);
+    cohort_.store(o.cohort(), std::memory_order_relaxed);
+    o.cohort_.store(nullptr, std::memory_order_relaxed);
   }

   ConcurrentHashMap& operator=(ConcurrentHashMap&& o) {
@@ -211,9 +211,9 @@ class ConcurrentHashMap {
     }
     size_ = o.size_;
     max_size_ = o.max_size_;
-    batch_shutdown_cleanup();
-    batch_.store(o.batch(), std::memory_order_relaxed);
-    o.batch_.store(nullptr, std::memory_order_relaxed);
+    cohort_shutdown_cleanup();
+    cohort_.store(o.cohort(), std::memory_order_relaxed);
+    o.cohort_.store(nullptr, std::memory_order_relaxed);
     return *this;
   }
@@ -225,7 +225,7 @@ class ConcurrentHashMap {
         Allocator().deallocate((uint8_t*)seg, sizeof(SegmentT));
       }
     }
-    batch_shutdown_cleanup();
+    cohort_shutdown_cleanup();
   }

   bool empty() const noexcept {
@@ -305,7 +305,7 @@ class ConcurrentHashMap {
   std::pair<ConstIterator, bool> emplace(Args&&... args) {
     using Node = typename SegmentT::Node;
     auto node = (Node*)Allocator().allocate(sizeof(Node));
-    new (node) Node(ensureBatch(), std::forward<Args>(args)...);
+    new (node) Node(ensureCohort(), std::forward<Args>(args)...);
     auto segment = pickSegment(node->getItem().first);
     std::pair<ConstIterator, bool> res(
         std::piecewise_construct,
@@ -568,7 +568,7 @@ class ConcurrentHashMap {
   SegmentT* ensureSegment(uint64_t i) const {
     SegmentT* seg = segments_[i].load(std::memory_order_acquire);
     if (!seg) {
-      auto b = ensureBatch();
+      auto b = ensureCohort();
       SegmentT* newseg = (SegmentT*)Allocator().allocate(sizeof(SegmentT));
       newseg = new (newseg)
           SegmentT(size_ >> ShardBits, load_factor_, max_size_ >> ShardBits, b);
@@ -583,37 +583,37 @@ class ConcurrentHashMap {
     return seg;
   }

-  hazptr_obj_batch<Atom>* batch() const noexcept {
-    return batch_.load(std::memory_order_acquire);
+  hazptr_obj_cohort<Atom>* cohort() const noexcept {
+    return cohort_.load(std::memory_order_acquire);
   }

-  hazptr_obj_batch<Atom>* ensureBatch() const {
-    auto b = batch();
+  hazptr_obj_cohort<Atom>* ensureCohort() const {
+    auto b = cohort();
     if (!b) {
-      auto storage = Allocator().allocate(sizeof(hazptr_obj_batch<Atom>));
-      auto newbatch = new (storage) hazptr_obj_batch<Atom>();
-      if (batch_.compare_exchange_strong(b, newbatch)) {
-        b = newbatch;
+      auto storage = Allocator().allocate(sizeof(hazptr_obj_cohort<Atom>));
+      auto newcohort = new (storage) hazptr_obj_cohort<Atom>();
+      if (cohort_.compare_exchange_strong(b, newcohort)) {
+        b = newcohort;
       } else {
-        newbatch->~hazptr_obj_batch<Atom>();
-        Allocator().deallocate(storage, sizeof(hazptr_obj_batch<Atom>));
+        newcohort->~hazptr_obj_cohort<Atom>();
+        Allocator().deallocate(storage, sizeof(hazptr_obj_cohort<Atom>));
       }
     }
     return b;
   }

-  void batch_shutdown_cleanup() {
-    auto b = batch();
+  void cohort_shutdown_cleanup() {
+    auto b = cohort();
     if (b) {
-      b->~hazptr_obj_batch<Atom>();
-      Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_batch<Atom>));
+      b->~hazptr_obj_cohort<Atom>();
+      Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_cohort<Atom>));
     }
   }

   mutable Atom<SegmentT*> segments_[NumShards];
   size_t size_{0};
   size_t max_size_{0};
-  mutable Atom<hazptr_obj_batch<Atom>*> batch_{nullptr};
+  mutable Atom<hazptr_obj_cohort<Atom>*> cohort_{nullptr};
 };

 #if FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
......
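
The ensureCohort() change above preserves the map's lazy, CAS-based cohort creation. A minimal sketch of that pattern, using plain std::atomic and operator new in place of the map's Allocator; CohortHolder is an illustrative name:

#include <atomic>
#include <folly/synchronization/Hazptr.h>

class CohortHolder {
  mutable std::atomic<folly::hazptr_obj_cohort<>*> cohort_{nullptr};

 public:
  folly::hazptr_obj_cohort<>* ensureCohort() const {
    auto* c = cohort_.load(std::memory_order_acquire);
    if (!c) {
      auto* newc = new folly::hazptr_obj_cohort<>();
      // Exactly one thread's CAS succeeds; losers destroy their copy.
      if (cohort_.compare_exchange_strong(c, newc)) {
        c = newc;
      } else {
        delete newc;
      }
    }
    return c;
  }

  ~CohortHolder() { delete cohort_.load(std::memory_order_relaxed); }
};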
@@ -237,9 +237,9 @@ class UnboundedQueue {
   struct Consumer {
     Atom<Segment*> head;
     Atom<Ticket> ticket;
-    hazptr_obj_batch<Atom> batch;
+    hazptr_obj_cohort<Atom> cohort;
     explicit Consumer(Segment* s) : head(s), ticket(0) {
-      s->set_batch_no_tag(&batch); // defined in hazptr_obj
+      s->set_cohort_no_tag(&cohort); // defined in hazptr_obj
     }
   };
   struct Producer {
@@ -559,7 +559,7 @@ class UnboundedQueue {
   Segment* allocNextSegment(Segment* s) {
     auto t = s->minTicket() + SegmentSize;
     Segment* next = new Segment(t);
-    next->set_batch_no_tag(&c_.batch); // defined in hazptr_obj
+    next->set_cohort_no_tag(&c_.cohort); // defined in hazptr_obj
     next->acquire_ref_safe(); // defined in hazptr_obj_base_linked
     if (!s->casNextSegment(next)) {
       delete next;
......
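
In the queue above, segments join the consumer's cohort untagged. A minimal sketch of that association, with an ordinary hazptr_obj_base standing in for the queue's link-counted Segment; Seg and retire_segment are illustrative:

#include <folly/synchronization/Hazptr.h>

struct Seg : folly::hazptr_obj_base<Seg> {};

struct Consumer {
  folly::hazptr_obj_cohort<> cohort;
};

void retire_segment(Consumer& c) {
  auto* s = new Seg;
  s->set_cohort_no_tag(&c.cohort); // associate without tagging
  s->retire(); // stays grouped with the consumer's other retired objects
}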
@@ -190,7 +190,7 @@ RequestContext::StateHazptr::StateHazptr(const StateHazptr& o) {
 }

 RequestContext::StateHazptr::~StateHazptr() {
-  batch_.shutdown_and_reclaim();
+  cohort_.shutdown_and_reclaim();
   auto p = combined();
   if (p) {
     delete p;
@@ -214,7 +214,7 @@ RequestContext::StateHazptr::ensureCombined() {
 }

 void RequestContext::StateHazptr::setCombined(Combined* p) {
-  p->set_batch_tag(&batch_);
+  p->set_cohort_tag(&cohort_);
   combined_.store(p, std::memory_order_release);
 }
......
@@ -366,7 +366,7 @@ class RequestContext {
   // Hazard pointer-protected combined structure for request data
   // and callbacks.
   struct Combined;
-  hazptr_obj_batch<> batch_; // For destruction order
+  hazptr_obj_cohort<> cohort_; // For destruction order
   std::atomic<Combined*> combined_{nullptr};
   std::mutex mutex_;
......
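
The two RequestContext hunks above implement a destruction-order guarantee: the combined data is tagged with the owner's cohort, and the destructor shuts the cohort down before anything else. A minimal sketch of that pattern; Owner and Data are illustrative:

#include <atomic>
#include <folly/synchronization/Hazptr.h>

struct Data : folly::hazptr_obj_base<Data> {};

class Owner {
  folly::hazptr_obj_cohort<> cohort_;
  std::atomic<Data*> combined_{nullptr};

 public:
  void setCombined(Data* p) {
    p->set_cohort_tag(&cohort_); // tag so cohort shutdown reclaims it
    combined_.store(p, std::memory_order_release);
  }

  ~Owner() {
    cohort_.shutdown_and_reclaim(); // reclaim all retired tagged objects
    delete combined_.load(std::memory_order_relaxed); // never retired, so plain delete
  }
};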
@@ -59,9 +59,9 @@ class hazptr_obj;
 template <template <typename> class Atom = std::atomic>
 class hazptr_obj_list;

-/** hazptr_obj_batch */
+/** hazptr_obj_cohort */
 template <template <typename> class Atom = std::atomic>
-class hazptr_obj_batch;
+class hazptr_obj_cohort;

 /** hazptr_obj_retired_list */
 template <template <typename> class Atom = std::atomic>
......
@@ -53,7 +53,7 @@ constexpr int hazptr_domain_rcount_threshold() {
  * Notes on destruction order, tagged objects, locking and deadlock
  * avoidance:
  * - Tagged objects support reclamation order guarantees. A call to
- *   cleanup_batch_tag(tag) guarantees that all objects with the
+ *   cleanup_cohort_tag(tag) guarantees that all objects with the
  *   specified tag are reclaimed before the function returns.
  * - Due to the strict order, access to the set of tagged objects
  *   needs synchronization and care must be taken to avoid deadlock.
@@ -66,7 +66,7 @@ constexpr int hazptr_domain_rcount_threshold() {
  *   reclaimed objects. This type is needed to guarantee an upper
  *   bound on unreclaimed reclaimable objects.
  * - Type B: A Type B reclamation operation is triggered by a call
- *   to the function cleanup_batch_tag for a specific tag. All
+ *   to the function cleanup_cohort_tag for a specific tag. All
  *   objects with the specified tag must be reclaimed
  *   unconditionally before returning from such a function
  *   call. Hazard pointers are not checked. This type of reclamation
@@ -187,9 +187,9 @@ class hazptr_domain {
     wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s
   }

-  /** cleanup_batch_tag */
-  void cleanup_batch_tag(const hazptr_obj_batch<Atom>* batch) noexcept {
-    auto tag = reinterpret_cast<uintptr_t>(batch) + kTagBit;
+  /** cleanup_cohort_tag */
+  void cleanup_cohort_tag(const hazptr_obj_cohort<Atom>* cohort) noexcept {
+    auto tag = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
     auto obj = tagged_.pop_all(RetiredList::kAlsoLock);
     ObjList match, nomatch;
     list_match_tag(tag, obj, match, nomatch);
@@ -219,7 +219,7 @@ class hazptr_domain {
   void
   list_match_tag(uintptr_t tag, Obj* obj, ObjList& match, ObjList& nomatch) {
     list_match_condition(
-        obj, match, nomatch, [tag](Obj* o) { return o->batch_tag() == tag; });
+        obj, match, nomatch, [tag](Obj* o) { return o->cohort_tag() == tag; });
   }

  private:
@@ -236,7 +236,7 @@ class hazptr_domain {
       hazptr_domain<Atom>&) noexcept;
   friend class hazptr_holder<Atom>;
   friend class hazptr_obj<Atom>;
-  friend class hazptr_obj_batch<Atom>;
+  friend class hazptr_obj_cohort<Atom>;
 #if FOLLY_HAZPTR_THR_LOCAL
   friend class hazptr_tc<Atom>;
 #endif
@@ -277,7 +277,7 @@ class hazptr_domain {
     if (l.empty()) {
       return;
     }
-    uintptr_t btag = l.head()->batch_tag();
+    uintptr_t btag = l.head()->cohort_tag();
     bool tagged = ((btag & kTagBit) == kTagBit);
     RetiredList& rlist = tagged ? tagged_ : untagged_;
     Atom<uint64_t>& sync_time =
@@ -285,7 +285,7 @@ class hazptr_domain {
     /*** Full fence ***/ asymmetricLightBarrier();
     /* Only tagged lists need to be locked because tagging is used to
      * guarantee the identification of all objects with a specific
-     * tag. Locking pcrotects against concurrent hazptr_cleanup_tag()
+     * tag. Locking protects against concurrent hazptr_cleanup_tag()
      * calls missing tagged objects. */
     bool lock =
         tagged ? RetiredList::kMayBeLocked : RetiredList::kMayNotBeLocked;
@@ -342,7 +342,7 @@ class hazptr_domain {
     for (; hprec; hprec = hprec->next()) {
       hs.insert(hprec->hazptr());
     }
-    /* Check objets against hazard pointer values */
+    /* Check objects against hazard pointer values */
     ObjList match, nomatch;
     list_match_condition(obj, match, nomatch, [&](Obj* o) {
       return hs.count(o->raw_ptr()) > 0;
......
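
The Type A / Type B distinction documented above can be seen from user code: Type A reclamation is amortized and checks hazard pointers, while Type B is triggered by cohort shutdown and is unconditional for the tag. A minimal sketch, with an illustrative Node type:

#include <folly/synchronization/Hazptr.h>

struct Node : folly::hazptr_obj_base<Node> {};

int main() {
  {
    folly::hazptr_obj_cohort<> cohort;
    auto* p = new Node;
    p->set_cohort_tag(&cohort); // tagged object
    p->retire();
  } // Type B: destroying the cohort reclaims all its tagged objects
  folly::hazptr_cleanup(); // Type A-style pass over unprotected objects
  return 0;
}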
@@ -42,42 +42,42 @@ namespace folly {
  * Data members:
  * - next_: link to next object in private singly linked lists.
  * - reclaim_: reclamation function for this object.
- * - batch_tag_: A pointer to a batch (a linked list where the object
- *   is to be pushed when retired). It can also be used as a tag (see
- *   below). See details below.
+ * - cohort_tag_: A pointer to a cohort (a linked list where the
+ *   object is to be pushed when retired). It can also be used as a
+ *   tag (see below). See details below.
  *
- * Batches, Tags, Tagged Objects, and Untagged Objects:
+ * Cohorts, Tags, Tagged Objects, and Untagged Objects:
  *
- * - Batches: Batches (instances of hazptr_obj_batch) are sets of
- *   retired hazptr_obj-s. Batches are used to keep related objects
+ * - Cohorts: Cohorts (instances of hazptr_obj_cohort) are sets of
+ *   retired hazptr_obj-s. Cohorts are used to keep related objects
  *   together instead of being spread across thread local structures
  *   and/or mixed with unrelated objects.
  *
  * - Tags: A tag is a unique identifier used for fast identification
  *   of related objects. Tags are implemented as addresses of
- *   batches, with the lowest bit set (to save the space of separate
- *   batch and tag data members and to differentiate from batches of
+ *   cohorts, with the lowest bit set (to save the space of separate
+ *   cohort and tag data members and to differentiate from cohorts of
  *   untagged objects).
  *
  * - Tagged objects: Objects are tagged for fast identification. The
  *   primary use case is for guaranteeing the destruction of all
  *   objects with a certain tag (e.g., the destruction of all Key and
  *   Value objects that were part of a Folly ConcurrentHashMap
- *   instance). Member function set_batch_tag makes an object tagged.
+ *   instance). Member function set_cohort_tag makes an object tagged.
  *
  * - Untagged objects: Objects that do not need to be identified
  *   separately from unrelated objects are not tagged (to keep tagged
  *   objects uncluttered). Untagged objects may or may not be
- *   associated with batches. An example of untagged objects
- *   associated with batches are Segment-s of Folly UnboundedQueue.
+ *   associated with cohorts. An example of untagged objects
+ *   associated with cohorts are Segment-s of Folly UnboundedQueue.
  *   Although such objects do not need to be tagged, keeping them in
- *   batches helps avoid cases of a few missing objects delaying the
+ *   cohorts helps avoid cases of a few missing objects delaying the
  *   reclamation of large numbers of link-counted objects. Objects
  *   are untagged either by default or after calling
- *   set_batch_no_tag.
+ *   set_cohort_no_tag.
  *
- * - Thread Safety: Member functions set_batch_tag and
- *   set_batch_no_tag are not thread-safe. Thread safety must be
+ * - Thread Safety: Member functions set_cohort_tag and
+ *   set_cohort_no_tag are not thread-safe. Thread safety must be
  *   ensured by the calling thread.
  */
 template <template <typename> class Atom>
@@ -102,21 +102,21 @@ class hazptr_obj {
   ReclaimFnPtr reclaim_;
   hazptr_obj<Atom>* next_;
-  uintptr_t batch_tag_;
+  uintptr_t cohort_tag_;

  public:
   /** Constructors */
   /* All constructors set next_ to this in order to catch misuse bugs
      such as double retire. By default, objects are untagged and not
-     associated with a batch. */
+     associated with a cohort. */

-  hazptr_obj() noexcept : next_(this), batch_tag_(0) {}
+  hazptr_obj() noexcept : next_(this), cohort_tag_(0) {}

   hazptr_obj(const hazptr_obj<Atom>& o) noexcept
-      : next_(this), batch_tag_(o.batch_tag_) {}
+      : next_(this), cohort_tag_(o.cohort_tag_) {}

   hazptr_obj(hazptr_obj<Atom>&& o) noexcept
-      : next_(this), batch_tag_(o.batch_tag_) {}
+      : next_(this), cohort_tag_(o.cohort_tag_) {}

   /** Copy operator */
   hazptr_obj<Atom>& operator=(const hazptr_obj<Atom>&) noexcept {
@@ -128,31 +128,31 @@ class hazptr_obj {
     return *this;
   }

-  /** batch_tag */
-  uintptr_t batch_tag() {
-    return batch_tag_;
+  /** cohort_tag */
+  uintptr_t cohort_tag() {
+    return cohort_tag_;
   }

-  /** batch */
-  hazptr_obj_batch<Atom>* batch() {
-    uintptr_t btag = batch_tag_;
+  /** cohort */
+  hazptr_obj_cohort<Atom>* cohort() {
+    uintptr_t btag = cohort_tag_;
     btag -= btag & kTagBit;
-    return reinterpret_cast<hazptr_obj_batch<Atom>*>(btag);
+    return reinterpret_cast<hazptr_obj_cohort<Atom>*>(btag);
   }

   /** tagged */
   bool tagged() {
-    return (batch_tag_ & kTagBit) == kTagBit;
+    return (cohort_tag_ & kTagBit) == kTagBit;
   }

-  /** set_batch_tag: Set batch and make object tagged. */
-  void set_batch_tag(hazptr_obj_batch<Atom>* batch) {
-    batch_tag_ = reinterpret_cast<uintptr_t>(batch) + kTagBit;
+  /** set_cohort_tag: Set cohort and make object tagged. */
+  void set_cohort_tag(hazptr_obj_cohort<Atom>* cohort) {
+    cohort_tag_ = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
   }

-  /** set_batch_no_tag: Set batch and make object untagged. */
-  void set_batch_no_tag(hazptr_obj_batch<Atom>* batch) {
-    batch_tag_ = reinterpret_cast<uintptr_t>(batch);
+  /** set_cohort_no_tag: Set cohort and make object untagged. */
+  void set_cohort_no_tag(hazptr_obj_cohort<Atom>* cohort) {
+    cohort_tag_ = reinterpret_cast<uintptr_t>(cohort);
   }

  private:
@@ -161,7 +161,7 @@ class hazptr_obj {
   friend class hazptr_obj_base;
   template <typename, template <typename> class, typename>
   friend class hazptr_obj_base_refcounted;
-  friend class hazptr_obj_batch<Atom>;
+  friend class hazptr_obj_cohort<Atom>;
   friend class hazptr_priv<Atom>;

   hazptr_obj<Atom>* next() const noexcept {
@@ -188,10 +188,10 @@ class hazptr_obj {
   }

   void push_obj(hazptr_domain<Atom>& domain) {
-    auto b = batch();
-    if (b) {
+    auto coh = cohort();
+    if (coh) {
       DCHECK_EQ(&domain, &default_hazptr_domain<Atom>());
-      b->push_obj(this);
+      coh->push_obj(this);
     } else {
       push_to_retired(domain);
     }
@@ -277,19 +277,19 @@ class hazptr_obj_list {
 }; // hazptr_obj_list

 /**
- * hazptr_obj_batch
+ * hazptr_obj_cohort
  *
- * List of retired objects. For objects to be retired to a batch,
- * either of the hazptr_obj member functions set_batch_tag or
- * set_batch_no_tag needs to be called before the object is retired.
+ * List of retired objects. For objects to be retired to a cohort,
+ * either of the hazptr_obj member functions set_cohort_tag or
+ * set_cohort_no_tag needs to be called before the object is retired.
  *
- * See description of hazptr_obj for notes on batches, tags, and
+ * See description of hazptr_obj for notes on cohorts, tags, and
  * tagged and untagged objects.
  *
  * [Note: For now supports only the default domain.]
  */
 template <template <typename> class Atom>
-class hazptr_obj_batch {
+class hazptr_obj_cohort {
   using Obj = hazptr_obj<Atom>;
   using List = hazptr_detail::linked_list<Obj>;
   using SharedList = hazptr_detail::shared_head_tail_list<Obj, Atom>;
@@ -303,17 +303,17 @@ class hazptr_obj_batch {
  public:
   /** Constructor */
-  hazptr_obj_batch() noexcept
+  hazptr_obj_cohort() noexcept
       : l_(), count_(0), active_(true), pushed_to_domain_tagged_{false} {}

   /** Not copyable or moveable */
-  hazptr_obj_batch(const hazptr_obj_batch& o) = delete;
-  hazptr_obj_batch(hazptr_obj_batch&& o) = delete;
-  hazptr_obj_batch& operator=(const hazptr_obj_batch&& o) = delete;
-  hazptr_obj_batch& operator=(hazptr_obj_batch&& o) = delete;
+  hazptr_obj_cohort(const hazptr_obj_cohort& o) = delete;
+  hazptr_obj_cohort(hazptr_obj_cohort&& o) = delete;
+  hazptr_obj_cohort& operator=(const hazptr_obj_cohort&& o) = delete;
+  hazptr_obj_cohort& operator=(hazptr_obj_cohort&& o) = delete;

   /** Destructor */
-  ~hazptr_obj_batch() {
+  ~hazptr_obj_cohort() {
     if (active_) {
       shutdown_and_reclaim();
     }
@@ -332,7 +332,7 @@ class hazptr_obj_batch {
       reclaim_list(obj);
     }
     if (pushed_to_domain_tagged_.load(std::memory_order_relaxed)) {
-      default_hazptr_domain<Atom>().cleanup_batch_tag(this);
+      default_hazptr_domain<Atom>().cleanup_cohort_tag(this);
     }
     DCHECK(l_.empty());
   }
@@ -403,7 +403,7 @@ class hazptr_obj_batch {
       }
     }
   }
-}; // hazptr_obj_batch
+}; // hazptr_obj_cohort

 /**
  * hazptr_obj_retired_list
......
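
The hunks above show that cohort_tag_ doubles as a pointer and a flag: the tag is the cohort's address with the low bit (kTagBit) set. A standalone sketch of that encoding, mirroring set_cohort_tag, tagged(), and cohort(); the free functions are illustrative:

#include <cstdint>

constexpr uintptr_t kTagBit = 1u;

// set_cohort_tag: store the cohort address with the tag bit set.
uintptr_t tag_of(const void* cohort) {
  return reinterpret_cast<uintptr_t>(cohort) + kTagBit;
}

// tagged(): the object is tagged iff the low bit is set.
bool is_tagged(uintptr_t cohort_tag) {
  return (cohort_tag & kTagBit) == kTagBit;
}

// cohort(): clear the tag bit (if any) to recover the cohort pointer.
void* cohort_of(uintptr_t cohort_tag) {
  return reinterpret_cast<void*>(cohort_tag - (cohort_tag & kTagBit));
}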
@@ -42,7 +42,7 @@ using folly::hazptr_holder;
 using folly::hazptr_local;
 using folly::hazptr_obj_base;
 using folly::hazptr_obj_base_linked;
-using folly::hazptr_obj_batch;
+using folly::hazptr_obj_cohort;
 using folly::hazptr_retire;
 using folly::hazptr_root;
 using folly::hazptr_tc;
@@ -813,16 +813,16 @@ void priv_dtor_test() {
 }

 template <template <typename> class Atom = std::atomic>
-void batch_test() {
+void cohort_test() {
   int num = 10001;
   using NodeT = Node<Atom>;
   c_.clear();
   {
-    hazptr_obj_batch<Atom> batch;
+    hazptr_obj_cohort<Atom> cohort;
     auto thr = DSched::thread([&]() {
       for (int i = 0; i < num; ++i) {
         auto p = new NodeT;
-        p->set_batch_no_tag(&batch);
+        p->set_cohort_no_tag(&cohort);
         p->retire();
       }
     });
@@ -833,11 +833,11 @@ void batch_test() {
   hazptr_cleanup<Atom>();
   c_.clear();
   {
-    hazptr_obj_batch<Atom> batch;
+    hazptr_obj_cohort<Atom> cohort;
     auto thr = DSched::thread([&]() {
       for (int i = 0; i < num; ++i) {
         auto p = new NodeT;
-        p->set_batch_tag(&batch);
+        p->set_cohort_tag(&cohort);
         p->retire();
       }
     });
@@ -851,10 +851,10 @@
 template <template <typename> class Atom = std::atomic>
 void recursive_destruction_test() {
   struct Foo : public hazptr_obj_base<Foo, Atom> {
-    hazptr_obj_batch<Atom> batch_;
+    hazptr_obj_cohort<Atom> cohort_;
     Foo* foo_{nullptr};
-    explicit Foo(hazptr_obj_batch<Atom>* b) {
-      this->set_batch_tag(b);
+    explicit Foo(hazptr_obj_cohort<Atom>* b) {
+      this->set_cohort_tag(b);
       c_.inc_ctors();
     }
     ~Foo() {
@@ -867,8 +867,8 @@ void recursive_destruction_test() {
       }
       foo_ = foo;
     }
-    hazptr_obj_batch<Atom>* batch() {
-      return &batch_;
+    hazptr_obj_cohort<Atom>* cohort() {
+      return &cohort_;
     }
   };
@@ -879,13 +879,13 @@ void recursive_destruction_test() {
   std::vector<std::thread> threads(nthr);
   for (int tid = 0; tid < nthr; ++tid) {
     threads[tid] = DSched::thread([&, tid]() {
-      hazptr_obj_batch<Atom> b0;
+      hazptr_obj_cohort<Atom> b0;
       Foo* foo0 = new Foo(&b0);
       for (int i = tid; i < num1; i += nthr) {
-        Foo* foo1 = new Foo(foo0->batch());
+        Foo* foo1 = new Foo(foo0->cohort());
         foo0->set(foo1);
         for (int j = 0; j < num2; ++j) {
-          foo1->set(new Foo(foo1->batch()));
+          foo1->set(new Foo(foo1->cohort()));
         }
       }
       foo0->retire();
@@ -902,10 +902,10 @@ void recursive_destruction_test() {
 void fork_test() {
   folly::start_hazptr_thread_pool_executor();
   auto trigger_reclamation = [] {
-    hazptr_obj_batch b;
+    hazptr_obj_cohort b;
     for (int i = 0; i < 2001; ++i) {
       auto p = new Node;
-      p->set_batch_no_tag(&b);
+      p->set_cohort_no_tag(&b);
       p->retire();
     }
   };
@@ -1187,13 +1187,13 @@ TEST_F(HazptrPreInitTest, dsched_priv_dtor) {
   priv_dtor_test<DeterministicAtomic>();
 }

-TEST(HazptrTest, batch) {
-  batch_test();
+TEST(HazptrTest, cohort) {
+  cohort_test();
 }

-TEST(HazptrTest, dsched_batch) {
+TEST(HazptrTest, dsched_cohort) {
   DSched sched(DSched::uniform(0));
-  batch_test<DeterministicAtomic>();
+  cohort_test<DeterministicAtomic>();
 }

 TEST(HazptrTest, recursive_destruction) {
@@ -1446,20 +1446,20 @@ uint64_t cleanup_bench(std::string name, int nthreads) {
   return bench(name, ops, repFn);
 }

-uint64_t batch_bench(std::string name, int nthreads) {
+uint64_t cohort_bench(std::string name, int nthreads) {
   struct Foo : public hazptr_obj_base<Foo> {};
   // Push unrelated objects into the domain tagged list
-  hazptr_obj_batch batch;
+  hazptr_obj_cohort cohort;
   for (int i = 0; i < 999; ++i) {
     auto p = new Foo;
-    p->set_batch_tag(&batch);
+    p->set_cohort_tag(&cohort);
     p->retire();
   }
   auto repFn = [&] {
     auto init = [] {};
     auto fn = [&](int tid) {
       for (int j = tid; j < ops; j += nthreads) {
-        hazptr_obj_batch b;
+        hazptr_obj_cohort b;
       }
     };
     auto endFn = [] {};
@@ -1504,8 +1504,8 @@ void benches() {
     }
     std::cout << "1/1000 hazptr_cleanup ";
     cleanup_bench("", i);
-    std::cout << "Life cycle of unused tagged obj batch ";
-    batch_bench("", i);
+    std::cout << "Life cycle of unused tagged obj cohort ";
+    cohort_bench("", i);
   }
 }
@@ -1536,7 +1536,7 @@ allocate/retire/reclaim object 70 ns 68 ns 67 ns
 20-item list protect all - own hazptr           28 ns   28 ns   28 ns
 20-item list protect all                        31 ns   29 ns   29 ns
 1/1000 hazptr_cleanup                            2 ns    1 ns    1 ns
-Life cycle of unused tagged obj batch            1 ns    1 ns    1 ns
+Life cycle of unused tagged obj cohort           1 ns    1 ns    1 ns
 ================================ 10 threads ================================
 10x construct/destruct hazptr_holder            11 ns    8 ns    8 ns
 10x construct/destruct hazptr_array<1>           8 ns    7 ns    7 ns
@@ -1555,5 +1555,5 @@ allocate/retire/reclaim object 20 ns 17 ns 16 ns
 20-item list protect all - own hazptr            4 ns    4 ns    4 ns
 20-item list protect all                         5 ns    4 ns    4 ns
 1/1000 hazptr_cleanup                          119 ns  113 ns   97 ns
-Life cycle of unused tagged obj batch            0 ns    0 ns    0 ns
+Life cycle of unused tagged obj cohort           0 ns    0 ns    0 ns
 */