Commit 2c1539c1 authored by Maged Michael, committed by Facebook Github Bot

hazptr: Rename hazptr_obj_batch to hazptr_obj_cohort

Summary: Rename hazptr_obj_batch to hazptr_obj_cohort, a more accurate name.

Reviewed By: davidtgoldblatt

Differential Revision: D19518771

fbshipit-source-id: f50a8a481f260dde4fce10fb9664d4f86c263b60
parent 8221b02b
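
For readers unfamiliar with the API being renamed, a minimal sketch of the tagged use case (not part of this diff; Widget is a hypothetical type, and the include path assumes folly's umbrella Hazptr.h header):

#include <folly/synchronization/Hazptr.h>

// Hypothetical object type protected by hazard pointers.
struct Widget : folly::hazptr_obj_base<Widget> {
  int value{0};
};

void cohort_example() {
  folly::hazptr_obj_cohort<> cohort;  // set of related retired objects
  for (int i = 0; i < 10; ++i) {
    auto* w = new Widget;
    w->set_cohort_tag(&cohort);       // associate and tag before retiring
    w->retire();                      // reclamation goes through the cohort
  }
}  // destroying the cohort guarantees the tagged Widgets are reclaimed
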
......@@ -193,8 +193,8 @@ class ConcurrentHashMap {
std::memory_order_relaxed);
o.segments_[i].store(nullptr, std::memory_order_relaxed);
}
batch_.store(o.batch(), std::memory_order_relaxed);
o.batch_.store(nullptr, std::memory_order_relaxed);
cohort_.store(o.cohort(), std::memory_order_relaxed);
o.cohort_.store(nullptr, std::memory_order_relaxed);
}
ConcurrentHashMap& operator=(ConcurrentHashMap&& o) {
......@@ -211,9 +211,9 @@ class ConcurrentHashMap {
}
size_ = o.size_;
max_size_ = o.max_size_;
batch_shutdown_cleanup();
batch_.store(o.batch(), std::memory_order_relaxed);
o.batch_.store(nullptr, std::memory_order_relaxed);
cohort_shutdown_cleanup();
cohort_.store(o.cohort(), std::memory_order_relaxed);
o.cohort_.store(nullptr, std::memory_order_relaxed);
return *this;
}
......@@ -225,7 +225,7 @@ class ConcurrentHashMap {
Allocator().deallocate((uint8_t*)seg, sizeof(SegmentT));
}
}
batch_shutdown_cleanup();
cohort_shutdown_cleanup();
}
bool empty() const noexcept {
......@@ -305,7 +305,7 @@ class ConcurrentHashMap {
std::pair<ConstIterator, bool> emplace(Args&&... args) {
using Node = typename SegmentT::Node;
auto node = (Node*)Allocator().allocate(sizeof(Node));
new (node) Node(ensureBatch(), std::forward<Args>(args)...);
new (node) Node(ensureCohort(), std::forward<Args>(args)...);
auto segment = pickSegment(node->getItem().first);
std::pair<ConstIterator, bool> res(
std::piecewise_construct,
......@@ -568,7 +568,7 @@ class ConcurrentHashMap {
SegmentT* ensureSegment(uint64_t i) const {
SegmentT* seg = segments_[i].load(std::memory_order_acquire);
if (!seg) {
auto b = ensureBatch();
auto b = ensureCohort();
SegmentT* newseg = (SegmentT*)Allocator().allocate(sizeof(SegmentT));
newseg = new (newseg)
SegmentT(size_ >> ShardBits, load_factor_, max_size_ >> ShardBits, b);
......@@ -583,37 +583,37 @@ class ConcurrentHashMap {
return seg;
}
hazptr_obj_batch<Atom>* batch() const noexcept {
return batch_.load(std::memory_order_acquire);
hazptr_obj_cohort<Atom>* cohort() const noexcept {
return cohort_.load(std::memory_order_acquire);
}
hazptr_obj_batch<Atom>* ensureBatch() const {
auto b = batch();
hazptr_obj_cohort<Atom>* ensureCohort() const {
auto b = cohort();
if (!b) {
auto storage = Allocator().allocate(sizeof(hazptr_obj_batch<Atom>));
auto newbatch = new (storage) hazptr_obj_batch<Atom>();
if (batch_.compare_exchange_strong(b, newbatch)) {
b = newbatch;
auto storage = Allocator().allocate(sizeof(hazptr_obj_cohort<Atom>));
auto newcohort = new (storage) hazptr_obj_cohort<Atom>();
if (cohort_.compare_exchange_strong(b, newcohort)) {
b = newcohort;
} else {
newbatch->~hazptr_obj_batch<Atom>();
Allocator().deallocate(storage, sizeof(hazptr_obj_batch<Atom>));
newcohort->~hazptr_obj_cohort<Atom>();
Allocator().deallocate(storage, sizeof(hazptr_obj_cohort<Atom>));
}
}
return b;
}
void batch_shutdown_cleanup() {
auto b = batch();
void cohort_shutdown_cleanup() {
auto b = cohort();
if (b) {
b->~hazptr_obj_batch<Atom>();
Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_batch<Atom>));
b->~hazptr_obj_cohort<Atom>();
Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_cohort<Atom>));
}
}
mutable Atom<SegmentT*> segments_[NumShards];
size_t size_{0};
size_t max_size_{0};
mutable Atom<hazptr_obj_batch<Atom>*> batch_{nullptr};
mutable Atom<hazptr_obj_cohort<Atom>*> cohort_{nullptr};
};
#if FOLLY_SSE_PREREQ(4, 2) && !FOLLY_MOBILE
......
......@@ -237,9 +237,9 @@ class UnboundedQueue {
struct Consumer {
Atom<Segment*> head;
Atom<Ticket> ticket;
hazptr_obj_batch<Atom> batch;
hazptr_obj_cohort<Atom> cohort;
explicit Consumer(Segment* s) : head(s), ticket(0) {
s->set_batch_no_tag(&batch); // defined in hazptr_obj
s->set_cohort_no_tag(&cohort); // defined in hazptr_obj
}
};
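
As a hedged aside on the untagged case (hypothetical Item/Owner types, not part of this diff): an owner can group its retired objects in a cohort without tagging them, the way Consumer above attaches queue Segments:

#include <folly/synchronization/Hazptr.h>

struct Item : folly::hazptr_obj_base<Item> {};  // hypothetical retired-object type

struct Owner {
  folly::hazptr_obj_cohort<> cohort;  // lives as long as the owner
  void retireItem(Item* p) {
    p->set_cohort_no_tag(&cohort);    // grouped reclamation, no tag-based cleanup guarantee
    p->retire();
  }
};
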
struct Producer {
......@@ -559,7 +559,7 @@ class UnboundedQueue {
Segment* allocNextSegment(Segment* s) {
auto t = s->minTicket() + SegmentSize;
Segment* next = new Segment(t);
next->set_batch_no_tag(&c_.batch); // defined in hazptr_obj
next->set_cohort_no_tag(&c_.cohort); // defined in hazptr_obj
next->acquire_ref_safe(); // defined in hazptr_obj_base_linked
if (!s->casNextSegment(next)) {
delete next;
......
......@@ -190,7 +190,7 @@ RequestContext::StateHazptr::StateHazptr(const StateHazptr& o) {
}
RequestContext::StateHazptr::~StateHazptr() {
batch_.shutdown_and_reclaim();
cohort_.shutdown_and_reclaim();
auto p = combined();
if (p) {
delete p;
......@@ -214,7 +214,7 @@ RequestContext::StateHazptr::ensureCombined() {
}
void RequestContext::StateHazptr::setCombined(Combined* p) {
p->set_batch_tag(&batch_);
p->set_cohort_tag(&cohort_);
combined_.store(p, std::memory_order_release);
}
......
......@@ -366,7 +366,7 @@ class RequestContext {
// Hazard pointer-protected combined structure for request data
// and callbacks.
struct Combined;
hazptr_obj_batch<> batch_; // For destruction order
hazptr_obj_cohort<> cohort_; // For destruction order
std::atomic<Combined*> combined_{nullptr};
std::mutex mutex_;
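
A sketch of the destruction-order idiom above (hypothetical Holder/Data names; only set_cohort_tag(), retire(), and cohort_.shutdown_and_reclaim() are taken from this diff): the owner tags its hazptr-managed state to a private cohort and drains the cohort first in its destructor, so tagged objects are reclaimed before the rest of the owner is torn down.

#include <atomic>
#include <folly/synchronization/Hazptr.h>

struct Data : folly::hazptr_obj_base<Data> {};  // hypothetical protected state

class Holder {
 public:
  void replace(Data* p) {
    p->set_cohort_tag(&cohort_);        // tie Data reclamation to this Holder
    Data* old = data_.exchange(p, std::memory_order_acq_rel);
    if (old) {
      old->retire();                    // reclaimed via the cohort
    }
  }
  ~Holder() {
    cohort_.shutdown_and_reclaim();     // reclaim all tagged, retired Data first
    delete data_.load(std::memory_order_relaxed);  // current value was never retired
  }

 private:
  folly::hazptr_obj_cohort<> cohort_;   // drained explicitly for destruction order
  std::atomic<Data*> data_{nullptr};
};
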
......
......@@ -59,9 +59,9 @@ class hazptr_obj;
template <template <typename> class Atom = std::atomic>
class hazptr_obj_list;
/** hazptr_obj_batch */
/** hazptr_obj_cohort */
template <template <typename> class Atom = std::atomic>
class hazptr_obj_batch;
class hazptr_obj_cohort;
/** hazptr_obj_retired_list */
template <template <typename> class Atom = std::atomic>
......
......@@ -53,7 +53,7 @@ constexpr int hazptr_domain_rcount_threshold() {
* Notes on destruction order, tagged objects, locking and deadlock
* avoidance:
* - Tagged objects support reclamation order guarantees. A call to
* cleanup_batch_tag(tag) guarantees that all objects with the
* cleanup_cohort_tag(tag) guarantees that all objects with the
* specified tag are reclaimed before the function returns.
* - Due to the strict order, access to the set of tagged objects
* needs synchronization and care must be taken to avoid deadlock.
......@@ -66,7 +66,7 @@ constexpr int hazptr_domain_rcount_threshold() {
* reclaimed objects. This type is needed to guarantee an upper
* bound on unreclaimed reclaimable objects.
* - Type B: A Type B reclamation operation is triggered by a call
* to the function cleanup_batch_tag for a specific tag. All
* to the function cleanup_cohort_tag for a specific tag. All
* objects with the specified tag must be reclaimed
* unconditionally before returning from such a function
* call. Hazard pointers are not checked. This type of reclamation
......@@ -187,9 +187,9 @@ class hazptr_domain {
wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s
}
/** cleanup_batch_tag */
void cleanup_batch_tag(const hazptr_obj_batch<Atom>* batch) noexcept {
auto tag = reinterpret_cast<uintptr_t>(batch) + kTagBit;
/** cleanup_cohort_tag */
void cleanup_cohort_tag(const hazptr_obj_cohort<Atom>* cohort) noexcept {
auto tag = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
auto obj = tagged_.pop_all(RetiredList::kAlsoLock);
ObjList match, nomatch;
list_match_tag(tag, obj, match, nomatch);
......@@ -219,7 +219,7 @@ class hazptr_domain {
void
list_match_tag(uintptr_t tag, Obj* obj, ObjList& match, ObjList& nomatch) {
list_match_condition(
obj, match, nomatch, [tag](Obj* o) { return o->batch_tag() == tag; });
obj, match, nomatch, [tag](Obj* o) { return o->cohort_tag() == tag; });
}
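
list_match_condition is outside this hunk; as a rough standalone illustration (hypothetical Node type and function name, not folly's internals), the partition amounts to splitting an intrusive singly linked list by a predicate on each object's tag:

#include <cstdint>

struct Node {               // hypothetical stand-in for hazptr_obj
  Node* next{nullptr};
  uintptr_t cohort_tag{0};
};

// Move every node from 'head' into 'match' or 'nomatch' based on pred.
template <typename Pred>
void partition_by_tag(Node* head, Node*& match, Node*& nomatch, Pred pred) {
  match = nullptr;
  nomatch = nullptr;
  while (head != nullptr) {
    Node* next = head->next;
    Node*& out = pred(head) ? match : nomatch;
    head->next = out;       // push front onto the selected output list
    out = head;
    head = next;
  }
}

Called with a predicate like [tag](Node* n) { return n->cohort_tag == tag; }, this mirrors what list_match_tag above asks of list_match_condition.
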
private:
......@@ -236,7 +236,7 @@ class hazptr_domain {
hazptr_domain<Atom>&) noexcept;
friend class hazptr_holder<Atom>;
friend class hazptr_obj<Atom>;
friend class hazptr_obj_batch<Atom>;
friend class hazptr_obj_cohort<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
friend class hazptr_tc<Atom>;
#endif
......@@ -277,7 +277,7 @@ class hazptr_domain {
if (l.empty()) {
return;
}
uintptr_t btag = l.head()->batch_tag();
uintptr_t btag = l.head()->cohort_tag();
bool tagged = ((btag & kTagBit) == kTagBit);
RetiredList& rlist = tagged ? tagged_ : untagged_;
Atom<uint64_t>& sync_time =
......@@ -285,7 +285,7 @@ class hazptr_domain {
/*** Full fence ***/ asymmetricLightBarrier();
/* Only tagged lists need to be locked because tagging is used to
* guarantee the identification of all objects with a specific
* tag. Locking pcrotects against concurrent hazptr_cleanup_tag()
* tag. Locking protects against concurrent hazptr_cleanup_tag()
* calls missing tagged objects. */
bool lock =
tagged ? RetiredList::kMayBeLocked : RetiredList::kMayNotBeLocked;
......@@ -342,7 +342,7 @@ class hazptr_domain {
for (; hprec; hprec = hprec->next()) {
hs.insert(hprec->hazptr());
}
/* Check objets against hazard pointer values */
/* Check objects against hazard pointer values */
ObjList match, nomatch;
list_match_condition(obj, match, nomatch, [&](Obj* o) {
return hs.count(o->raw_ptr()) > 0;
......
......@@ -42,42 +42,42 @@ namespace folly {
* Data members:
* - next_: link to next object in private singly linked lists.
* - reclaim_: reclamation function for this object.
* - batch_tag_: A pointer to a batch (a linked list where the object
* is to be pushed when retired). It can also be used as a tag (see
* below). See details below.
* - cohort_tag_: A pointer to a cohort (a linked list where the
* object is to be pushed when retired). It can also be used as a
* tag (see below). See details below.
*
* Batches, Tags, Tagged Objects, and Untagged Objects:
* Cohorts, Tags, Tagged Objects, and Untagged Objects:
*
* - Batches: Batches (instances of hazptr_obj_batch) are sets of
* retired hazptr_obj-s. Batches are used to keep related objects
* - Cohorts: Cohorts (instances of hazptr_obj_cohort) are sets of
* retired hazptr_obj-s. Cohorts are used to keep related objects
* together instead of being spread across thread local structures
* and/or mixed with unrelated objects.
*
* - Tags: A tag is a unique identifier used for fast identification
* of related objects. Tags are implemented as addresses of
* batches, with the lowest bit set (to save the space of separate
* batch and tag data members and to differentiate from batches of
* cohorts, with the lowest bit set (to save the space of separate
* cohort and tag data members and to differentiate from cohorts of
* untagged objects).
*
* - Tagged objects: Objects are tagged for fast identification. The
* primary use case is for guaranteeing the destruction of all
* objects with a certain tag (e.g., the destruction of all Key and
* Value objects that were part of a Folly ConcurrentHashMap
* instance). Member function set_batch_tag makes an object tagged.
* instance). Member function set_cohort_tag makes an object tagged.
*
* - Untagged objects: Objects that do not need to be identified
* separately from unrelated objects are not tagged (to keep tagged
* objects uncluttered). Untagged objects may or may not be
* associated with batches. An example of untagged objects
* associated with batches are Segment-s of Folly UnboundedQueue.
* associated with cohorts. An example of untagged objects
* associated with cohorts are Segment-s of Folly UnboundedQueue.
* Although such objects do not need to be tagged, keeping them in
* batches helps avoid cases of a few missing objects delaying the
* cohorts helps avoid cases of a few missing objects delaying the
* reclamation of large numbers of link-counted objects. Objects
* are untagged either by default or after calling
* set_batch_no_tag.
* set_cohort_no_tag.
*
* - Thread Safety: Member functions set_batch_tag and
* set_batch_no_tag are not thread-safe. Thread safety must be
* - Thread Safety: Member functions set_cohort_tag and
* set_cohort_no_tag are not thread-safe. Thread safety must be
* ensured by the calling thread.
*/
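
The tag encoding described above can be shown with a tiny standalone sketch (assumes, per the comment, that kTagBit is the lowest pointer bit; Cohort is a stand-in type):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kTagBit = 1;  // assumed value: the lowest pointer bit

struct Cohort {};                 // stand-in for hazptr_obj_cohort

int main() {
  alignas(2) Cohort c;            // any real cohort address has its low bit clear
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&c) + kTagBit;  // as in set_cohort_tag
  uintptr_t untagged = reinterpret_cast<uintptr_t>(&c);          // as in set_cohort_no_tag

  // Recovering the cohort pointer works for both forms (mirrors cohort()).
  assert(reinterpret_cast<Cohort*>(tagged - (tagged & kTagBit)) == &c);
  assert(reinterpret_cast<Cohort*>(untagged - (untagged & kTagBit)) == &c);
  // The low bit distinguishes tagged from untagged (mirrors tagged()).
  assert((tagged & kTagBit) == kTagBit);
  assert((untagged & kTagBit) == 0);
  return 0;
}
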
template <template <typename> class Atom>
......@@ -102,21 +102,21 @@ class hazptr_obj {
ReclaimFnPtr reclaim_;
hazptr_obj<Atom>* next_;
uintptr_t batch_tag_;
uintptr_t cohort_tag_;
public:
/** Constructors */
/* All constructors set next_ to this in order to catch misuse bugs
such as double retire. By default, objects are untagged and not
associated with a batch. */
associated with a cohort. */
hazptr_obj() noexcept : next_(this), batch_tag_(0) {}
hazptr_obj() noexcept : next_(this), cohort_tag_(0) {}
hazptr_obj(const hazptr_obj<Atom>& o) noexcept
: next_(this), batch_tag_(o.batch_tag_) {}
: next_(this), cohort_tag_(o.cohort_tag_) {}
hazptr_obj(hazptr_obj<Atom>&& o) noexcept
: next_(this), batch_tag_(o.batch_tag_) {}
: next_(this), cohort_tag_(o.cohort_tag_) {}
/** Copy operator */
hazptr_obj<Atom>& operator=(const hazptr_obj<Atom>&) noexcept {
......@@ -128,31 +128,31 @@ class hazptr_obj {
return *this;
}
/** batch_tag */
uintptr_t batch_tag() {
return batch_tag_;
/** cohort_tag */
uintptr_t cohort_tag() {
return cohort_tag_;
}
/** batch */
hazptr_obj_batch<Atom>* batch() {
uintptr_t btag = batch_tag_;
/** cohort */
hazptr_obj_cohort<Atom>* cohort() {
uintptr_t btag = cohort_tag_;
btag -= btag & kTagBit;
return reinterpret_cast<hazptr_obj_batch<Atom>*>(btag);
return reinterpret_cast<hazptr_obj_cohort<Atom>*>(btag);
}
/** tagged */
bool tagged() {
return (batch_tag_ & kTagBit) == kTagBit;
return (cohort_tag_ & kTagBit) == kTagBit;
}
/** set_batch_tag: Set batch and make object tagged. */
void set_batch_tag(hazptr_obj_batch<Atom>* batch) {
batch_tag_ = reinterpret_cast<uintptr_t>(batch) + kTagBit;
/** set_cohort_tag: Set cohort and make object tagged. */
void set_cohort_tag(hazptr_obj_cohort<Atom>* cohort) {
cohort_tag_ = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
}
/** set_batch_no_tag: Set batch and make object untagged. */
void set_batch_no_tag(hazptr_obj_batch<Atom>* batch) {
batch_tag_ = reinterpret_cast<uintptr_t>(batch);
/** set_cohort_no_tag: Set cohort and make object untagged. */
void set_cohort_no_tag(hazptr_obj_cohort<Atom>* cohort) {
cohort_tag_ = reinterpret_cast<uintptr_t>(cohort);
}
private:
......@@ -161,7 +161,7 @@ class hazptr_obj {
friend class hazptr_obj_base;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base_refcounted;
friend class hazptr_obj_batch<Atom>;
friend class hazptr_obj_cohort<Atom>;
friend class hazptr_priv<Atom>;
hazptr_obj<Atom>* next() const noexcept {
......@@ -188,10 +188,10 @@ class hazptr_obj {
}
void push_obj(hazptr_domain<Atom>& domain) {
auto b = batch();
if (b) {
auto coh = cohort();
if (coh) {
DCHECK_EQ(&domain, &default_hazptr_domain<Atom>());
b->push_obj(this);
coh->push_obj(this);
} else {
push_to_retired(domain);
}
......@@ -277,19 +277,19 @@ class hazptr_obj_list {
}; // hazptr_obj_list
/**
* hazptr_obj_batch
* hazptr_obj_cohort
*
* List of retired objects. For objects to be retired to a batch,
* either of the hazptr_obj member functions set_batch_tag or
* set_batch_no_tag needs to be called before the object is retired.
* List of retired objects. For objects to be retired to a cohort,
* either of the hazptr_obj member functions set_cohort_tag or
* set_cohort_no_tag needs to be called before the object is retired.
*
* See description of hazptr_obj for notes on batches, tags, and
* See description of hazptr_obj for notes on cohorts, tags, and
* tagged and untagged objects.
*
* [Note: For now supports only the default domain.]
*/
template <template <typename> class Atom>
class hazptr_obj_batch {
class hazptr_obj_cohort {
using Obj = hazptr_obj<Atom>;
using List = hazptr_detail::linked_list<Obj>;
using SharedList = hazptr_detail::shared_head_tail_list<Obj, Atom>;
......@@ -303,17 +303,17 @@ class hazptr_obj_batch {
public:
/** Constructor */
hazptr_obj_batch() noexcept
hazptr_obj_cohort() noexcept
: l_(), count_(0), active_(true), pushed_to_domain_tagged_{false} {}
/** Not copyable or moveable */
hazptr_obj_batch(const hazptr_obj_batch& o) = delete;
hazptr_obj_batch(hazptr_obj_batch&& o) = delete;
hazptr_obj_batch& operator=(const hazptr_obj_batch&& o) = delete;
hazptr_obj_batch& operator=(hazptr_obj_batch&& o) = delete;
hazptr_obj_cohort(const hazptr_obj_cohort& o) = delete;
hazptr_obj_cohort(hazptr_obj_cohort&& o) = delete;
hazptr_obj_cohort& operator=(const hazptr_obj_cohort&& o) = delete;
hazptr_obj_cohort& operator=(hazptr_obj_cohort&& o) = delete;
/** Destructor */
~hazptr_obj_batch() {
~hazptr_obj_cohort() {
if (active_) {
shutdown_and_reclaim();
}
......@@ -332,7 +332,7 @@ class hazptr_obj_batch {
reclaim_list(obj);
}
if (pushed_to_domain_tagged_.load(std::memory_order_relaxed)) {
default_hazptr_domain<Atom>().cleanup_batch_tag(this);
default_hazptr_domain<Atom>().cleanup_cohort_tag(this);
}
DCHECK(l_.empty());
}
......@@ -403,7 +403,7 @@ class hazptr_obj_batch {
}
}
}
}; // hazptr_obj_batch
}; // hazptr_obj_cohort
/**
* hazptr_obj_retired_list
......
......@@ -42,7 +42,7 @@ using folly::hazptr_holder;
using folly::hazptr_local;
using folly::hazptr_obj_base;
using folly::hazptr_obj_base_linked;
using folly::hazptr_obj_batch;
using folly::hazptr_obj_cohort;
using folly::hazptr_retire;
using folly::hazptr_root;
using folly::hazptr_tc;
......@@ -813,16 +813,16 @@ void priv_dtor_test() {
}
template <template <typename> class Atom = std::atomic>
void batch_test() {
void cohort_test() {
int num = 10001;
using NodeT = Node<Atom>;
c_.clear();
{
hazptr_obj_batch<Atom> batch;
hazptr_obj_cohort<Atom> cohort;
auto thr = DSched::thread([&]() {
for (int i = 0; i < num; ++i) {
auto p = new NodeT;
p->set_batch_no_tag(&batch);
p->set_cohort_no_tag(&cohort);
p->retire();
}
});
......@@ -833,11 +833,11 @@ void batch_test() {
hazptr_cleanup<Atom>();
c_.clear();
{
hazptr_obj_batch<Atom> batch;
hazptr_obj_cohort<Atom> cohort;
auto thr = DSched::thread([&]() {
for (int i = 0; i < num; ++i) {
auto p = new NodeT;
p->set_batch_tag(&batch);
p->set_cohort_tag(&cohort);
p->retire();
}
});
......@@ -851,10 +851,10 @@ void batch_test() {
template <template <typename> class Atom = std::atomic>
void recursive_destruction_test() {
struct Foo : public hazptr_obj_base<Foo, Atom> {
hazptr_obj_batch<Atom> batch_;
hazptr_obj_cohort<Atom> cohort_;
Foo* foo_{nullptr};
explicit Foo(hazptr_obj_batch<Atom>* b) {
this->set_batch_tag(b);
explicit Foo(hazptr_obj_cohort<Atom>* b) {
this->set_cohort_tag(b);
c_.inc_ctors();
}
~Foo() {
......@@ -867,8 +867,8 @@ void recursive_destruction_test() {
}
foo_ = foo;
}
hazptr_obj_batch<Atom>* batch() {
return &batch_;
hazptr_obj_cohort<Atom>* cohort() {
return &cohort_;
}
};
......@@ -879,13 +879,13 @@ void recursive_destruction_test() {
std::vector<std::thread> threads(nthr);
for (int tid = 0; tid < nthr; ++tid) {
threads[tid] = DSched::thread([&, tid]() {
hazptr_obj_batch<Atom> b0;
hazptr_obj_cohort<Atom> b0;
Foo* foo0 = new Foo(&b0);
for (int i = tid; i < num1; i += nthr) {
Foo* foo1 = new Foo(foo0->batch());
Foo* foo1 = new Foo(foo0->cohort());
foo0->set(foo1);
for (int j = 0; j < num2; ++j) {
foo1->set(new Foo(foo1->batch()));
foo1->set(new Foo(foo1->cohort()));
}
}
foo0->retire();
......@@ -902,10 +902,10 @@ void recursive_destruction_test() {
void fork_test() {
folly::start_hazptr_thread_pool_executor();
auto trigger_reclamation = [] {
hazptr_obj_batch b;
hazptr_obj_cohort b;
for (int i = 0; i < 2001; ++i) {
auto p = new Node;
p->set_batch_no_tag(&b);
p->set_cohort_no_tag(&b);
p->retire();
}
};
......@@ -1187,13 +1187,13 @@ TEST_F(HazptrPreInitTest, dsched_priv_dtor) {
priv_dtor_test<DeterministicAtomic>();
}
TEST(HazptrTest, batch) {
batch_test();
TEST(HazptrTest, cohort) {
cohort_test();
}
TEST(HazptrTest, dsched_batch) {
TEST(HazptrTest, dsched_cohort) {
DSched sched(DSched::uniform(0));
batch_test<DeterministicAtomic>();
cohort_test<DeterministicAtomic>();
}
TEST(HazptrTest, recursive_destruction) {
......@@ -1446,20 +1446,20 @@ uint64_t cleanup_bench(std::string name, int nthreads) {
return bench(name, ops, repFn);
}
uint64_t batch_bench(std::string name, int nthreads) {
uint64_t cohort_bench(std::string name, int nthreads) {
struct Foo : public hazptr_obj_base<Foo> {};
// Push unrelated objects into the domain tagged list
hazptr_obj_batch batch;
hazptr_obj_cohort cohort;
for (int i = 0; i < 999; ++i) {
auto p = new Foo;
p->set_batch_tag(&batch);
p->set_cohort_tag(&cohort);
p->retire();
}
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < ops; j += nthreads) {
hazptr_obj_batch b;
hazptr_obj_cohort b;
}
};
auto endFn = [] {};
......@@ -1504,8 +1504,8 @@ void benches() {
}
std::cout << "1/1000 hazptr_cleanup ";
cleanup_bench("", i);
std::cout << "Life cycle of unused tagged obj batch ";
batch_bench("", i);
std::cout << "Life cycle of unused tagged obj cohort ";
cohort_bench("", i);
}
}
......@@ -1536,7 +1536,7 @@ allocate/retire/reclaim object 70 ns 68 ns 67 ns
20-item list protect all - own hazptr 28 ns 28 ns 28 ns
20-item list protect all 31 ns 29 ns 29 ns
1/1000 hazptr_cleanup 2 ns 1 ns 1 ns
Life cycle of unused tagged obj batch 1 ns 1 ns 1 ns
Life cycle of unused tagged obj cohort 1 ns 1 ns 1 ns
================================ 10 threads ================================
10x construct/destruct hazptr_holder 11 ns 8 ns 8 ns
10x construct/destruct hazptr_array<1> 8 ns 7 ns 7 ns
......@@ -1555,5 +1555,5 @@ allocate/retire/reclaim object 20 ns 17 ns 16 ns
20-item list protect all - own hazptr 4 ns 4 ns 4 ns
20-item list protect all 5 ns 4 ns 4 ns
1/1000 hazptr_cleanup 119 ns 113 ns 97 ns
Life cycle of unused tagged obj batch 0 ns 0 ns 0 ns
Life cycle of unused tagged obj cohort 0 ns 0 ns 0 ns
*/