Commit e56afab1 authored by Maged Michael, committed by Facebook Github Bot

hazptr: Link counting. Protecting descendants of protected objects. Mutable and immutable links. Automatic retirement.

Summary:
The class hazptr_obj_linked includes the link counting, reference counting, and automatic retirement mechanisms. It supports:
- Mutable and immutable links.
- Certain and uncertain removal. With certain removal, users call retire() on removed objects. With uncertain removal, users call unlink() when an inbound link to an object is removed; the library automatically determines when to retire such objects.
- E.g., UnboundedQueue uses immutable links with certain removal.
- E.g., ConcurrentHashMap uses mutable links with uncertain removal. (See the sketch below.)
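
A rough usage sketch of the two patterns (illustrative only, not part of this diff; the Node type and the call sites are hypothetical, based on the hazptr_obj_base_linked interface added in HazptrObjLinked.h below):

```cpp
#include <folly/synchronization/Hazptr.h>

#include <atomic>

// Hypothetical link counted node with one outbound mutable link.
struct Node : folly::hazptr_obj_base_linked<Node> {
  int value;
  std::atomic<Node*> next{nullptr};

  explicit Node(int v) : value(v) {}

  // Required by hazptr_obj_base_linked: enumerate outbound links.
  // m == true asks for mutable links, m == false for immutable links.
  template <typename S>
  void push_links(bool m, S& s) {
    if (m) {
      if (auto* p = next.load(std::memory_order_relaxed)) {
        s.push(p);
      }
    }
  }
};

int main() {
  auto* a = new Node(1);
  auto* b = new Node(2);

  // Count b's new inbound mutable link before publishing it; b is not
  // shared yet, so the _safe variant suffices.
  b->acquire_link_safe();
  a->next.store(b, std::memory_order_release);
  // A reader that protects a with a hazard pointer may also access b:
  // link counting protects descendants of protected objects.

  // Uncertain removal (ConcurrentHashMap style): release b's inbound link;
  // the library retires b automatically when its link count reaches zero.
  a->next.store(nullptr, std::memory_order_release);
  b->unlink();

  // Certain removal (UnboundedQueue style): the caller knows a is no
  // longer reachable and retires it explicitly.
  a->retire();

  folly::hazptr_cleanup(); // reclaim retired objects before exiting
  return 0;
}
```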

Reviewed By: djwatson

Differential Revision: D7674658

fbshipit-source-id: 948f5c3690367deaa0e2023a2def3bed2c22b9f0
parent f8651741
@@ -471,6 +471,7 @@ nobase_follyinclude_HEADERS = \
synchronization/HazptrDomain.h \
synchronization/HazptrHolder.h \
synchronization/HazptrObj.h \
synchronization/HazptrObjLinked.h \
synchronization/HazptrRec.h \
synchronization/HazptrThrLocal.h \
synchronization/LifoSem.h \
......
@@ -50,19 +50,40 @@ class hazptr_rec;
template <template <typename> class Atom = std::atomic>
class hazptr_obj;
/** hazptr_obj_list */
template <template <typename> class Atom = std::atomic>
class hazptr_obj_list;
/** hazptr_deleter */
template <typename T, typename D>
class hazptr_deleter;
/** hazptr_obj_base */
template <
typename T,
template <typename> class Atom = std::atomic,
typename Deleter = std::default_delete<T>>
typename D = std::default_delete<T>>
class hazptr_obj_base;
/** hazptr_obj_base_refcounted */
///
/// Classes related to link counted objects and automatic retirement.
/// Defined in HazptrObjLinked.h
///
/** hazptr_root */
template <typename T, template <typename> class Atom = std::atomic>
class hazptr_root;
/** hazptr_obj_linked */
template <template <typename> class Atom = std::atomic>
class hazptr_obj_linked;
/** hazptr_obj_base_linked */
template <
typename T,
template <typename> class Atom = std::atomic,
typename Deleter = std::default_delete<T>>
class hazptr_obj_base_refcounted;
class hazptr_obj_base_linked;
///
/// Classes and functions related to thread local structures.
@@ -105,9 +126,7 @@ hazptr_domain<Atom>& default_hazptr_domain();
/** hazptr_domain_push_retired */
template <template <typename> class Atom = std::atomic>
void hazptr_domain_push_retired(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int rcount,
hazptr_obj_list<Atom>& l,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** hazptr_retire */
......
@@ -19,6 +19,7 @@
#include <folly/synchronization/HazptrDomain.h>
#include <folly/synchronization/HazptrHolder.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrObjLinked.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>
@@ -125,6 +126,15 @@
/// typically is linear in the number of threads using hazard
/// pointers.
///
/// Protecting Linked Structures and Automatic Retirement
/// -----------------------------------------------------
/// The hazard pointer library provides a link counting API to protect
/// linked structures. It supports automatic retirement of objects even
/// when the removal of objects is uncertain, and optimizations when
/// links are known to be immutable. The link counting features incur
/// no extra overhead for readers.
/// See HazptrObjLinked.h for more details.
///
/// Alternative Safe Reclamation Methods
/// ------------------------------------
/// - Locking (exclusive or shared):
......
@@ -82,10 +82,11 @@ class hazptr_domain {
};
auto node = new hazptr_retire_node(obj, std::move(reclaim));
node->reclaim_ = [](hazptr_obj<Atom>* p) {
node->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>&) {
delete static_cast<hazptr_retire_node*>(p);
};
push_retired(node, node, 1);
hazptr_obj_list<Atom> l(node);
push_retired(l);
}
/** cleanup */
@@ -96,9 +97,7 @@ class hazptr_domain {
private:
friend void hazptr_domain_push_retired<Atom>(
hazptr_obj<Atom>*,
hazptr_obj<Atom>*,
int,
hazptr_obj_list<Atom>&,
hazptr_domain<Atom>&) noexcept;
friend class hazptr_holder<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
@@ -117,22 +116,22 @@ class hazptr_domain {
}
/** push_retired */
void push_retired(hazptr_obj<Atom>* head, hazptr_obj<Atom>* tail, int count) {
void push_retired(hazptr_obj_list<Atom>& l, bool check = true) {
/*** Full fence ***/ asymmetricLightBarrier();
while (true) {
auto r = retired();
tail->set_next(r);
l.tail()->set_next(r);
if (retired_.compare_exchange_weak(
r, head, std::memory_order_release, std::memory_order_acquire)) {
r,
l.head(),
std::memory_order_release,
std::memory_order_acquire)) {
break;
}
}
rcount_.fetch_add(count, std::memory_order_release);
if (try_timed_cleanup()) {
return;
}
if (reached_threshold(rcount(), hcount())) {
try_bulk_reclaim();
rcount_.fetch_add(l.count(), std::memory_order_release);
if (check) {
check_cleanup_and_reclaim();
}
}
@@ -160,12 +159,16 @@ class hazptr_domain {
auto retired = retired_.exchange(nullptr);
while (retired) {
auto obj = retired;
hazptr_obj_list<Atom> l;
while (obj) {
auto next = obj->next();
DCHECK(obj != next);
(*(obj->reclaim()))(obj);
(*(obj->reclaim()))(obj, l);
obj = next;
}
if (l.count()) {
push_retired(l);
}
retired = retired_.exchange(nullptr);
}
}
@@ -185,6 +188,15 @@
}
}
void check_cleanup_and_reclaim() {
if (try_timed_cleanup()) {
return;
}
if (reached_threshold(rcount(), hcount())) {
try_bulk_reclaim();
}
}
void relaxed_cleanup() noexcept {
#if FOLLY_HAZPTR_THR_LOCAL
hazptr_obj<Atom>* h = nullptr;
@@ -195,11 +207,12 @@ class hazptr_domain {
}
if (h) {
DCHECK(t);
push_retired(h, t, 0);
hazptr_obj_list<Atom> l(h, t, 0);
push_retired(l);
}
#endif
rcount_.store(0, std::memory_order_release);
bulk_reclaim();
bulk_reclaim(true);
}
void await_zero_bulk_reclaims() {
@@ -223,45 +236,46 @@ class hazptr_domain {
bulk_reclaim();
}
void bulk_reclaim() {
void bulk_reclaim(bool transitive = false) {
num_bulk_reclaims_.fetch_add(1, std::memory_order_acquire);
auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
/*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
auto rec = hazptrs_.load(std::memory_order_acquire);
/* Part 1 - read hazard pointer values into private search structure */
std::unordered_set<const void*> hashset; // TODO: lock-free fixed hash set
for (; rec; rec = rec->next()) {
hashset.insert(rec->hazptr());
while (true) {
auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
/*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
auto rec = hazptrs_.load(std::memory_order_acquire);
/* Part 1 - read hazard pointer values into private search structure */
std::unordered_set<const void*> hashset; // TODO: lock-free fixed hash set
for (; rec; rec = rec->next()) {
hashset.insert(rec->hazptr());
}
/* Part 2 - for each retired object, reclaim if no match */
if (bulk_lookup_and_reclaim(obj, hashset) || !transitive) {
break;
}
}
/* Part 2 - for each retired object, reclaim if no match */
bulk_lookup_and_reclaim(obj, hashset);
num_bulk_reclaims_.fetch_sub(1, std::memory_order_release);
}
void bulk_lookup_and_reclaim(
bool bulk_lookup_and_reclaim(
hazptr_obj<Atom>* obj,
const std::unordered_set<const void*>& hashset) {
int rcount = 0;
hazptr_obj<Atom>* head = nullptr;
hazptr_obj<Atom>* tail = nullptr;
hazptr_obj_list<Atom> children;
hazptr_obj_list<Atom> matched;
while (obj) {
auto next = obj->next();
DCHECK_NE(obj, next);
if (hashset.count(obj->raw_ptr()) == 0) {
(*(obj->reclaim()))(obj);
(*(obj->reclaim()))(obj, children);
} else {
obj->set_next(head);
head = obj;
if (tail == nullptr) {
tail = obj;
}
++rcount;
matched.push(obj);
}
obj = next;
}
if (tail) {
push_retired(head, tail, rcount);
bool done = (children.count() == 0);
matched.splice(children);
if (matched.count() > 0) {
push_retired(matched, false /* don't call bulk_reclaim recursively */);
}
return done;
}
bool try_timed_cleanup() {
@@ -335,13 +349,12 @@ FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& default_hazptr_domain() {
/** hazptr_domain_push_retired: push a list of retired objects into a domain */
template <template <typename> class Atom>
void hazptr_domain_push_retired(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int rcount,
hazptr_obj_list<Atom>& l,
hazptr_domain<Atom>& domain) noexcept {
domain.push_retired(head, tail, rcount);
domain.push_retired(l);
}
/** hazptr_retire */
template <template <typename> class Atom, typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
default_hazptr_domain<Atom>().retire(obj, std::move(reclaim));
......
@@ -38,7 +38,18 @@ namespace folly {
*/
template <template <typename> class Atom>
class hazptr_obj {
using ReclaimFnPtr = void (*)(hazptr_obj*);
using ReclaimFnPtr = void (*)(hazptr_obj<Atom>*, hazptr_obj_list<Atom>&);
template <template <typename> class>
friend class hazptr_domain;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base_linked;
template <template <typename> class>
friend class hazptr_obj_list;
template <template <typename> class>
friend class hazptr_priv;
ReclaimFnPtr reclaim_;
hazptr_obj<Atom>* next_;
@@ -95,14 +106,15 @@ class hazptr_obj {
}
}
void do_retire(hazptr_domain<Atom>& domain) {
void push_to_retired(hazptr_domain<Atom>& domain) {
#if FOLLY_HAZPTR_THR_LOCAL
if (&domain == &default_hazptr_domain<Atom>()) {
hazptr_priv_tls<Atom>().push(this);
return;
}
#endif
hazptr_domain_push_retired(this, this, 1, domain);
hazptr_obj_list<Atom> l(this);
hazptr_domain_push_retired(l, domain);
}
FOLLY_NOINLINE void pre_retire_check_fail() noexcept {
@@ -111,113 +123,134 @@ class hazptr_obj {
}; // hazptr_obj
/**
* hazptr_obj_base
* hazptr_obj_list
*
* Base template for objects protected by hazard pointers.
* List of hazptr_obj-s.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base : public hazptr_obj<Atom> {
D deleter_; // TODO: EBO
template <template <typename> class Atom>
class hazptr_obj_list {
hazptr_obj<Atom>* head_;
hazptr_obj<Atom>* tail_;
int count_;
public:
/* Retire a removed object and pass the responsibility for
* reclaiming it to the hazptr library */
void retire(
D deleter = {},
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
pre_retire(std::move(deleter));
set_reclaim();
this->do_retire(domain); // defined in hazptr_obj
hazptr_obj_list() noexcept : head_(nullptr), tail_(nullptr), count_(0) {}
explicit hazptr_obj_list(hazptr_obj<Atom>* obj) noexcept
: head_(obj), tail_(obj), count_(1) {}
explicit hazptr_obj_list(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int count) noexcept
: head_(head), tail_(tail), count_(count) {}
hazptr_obj<Atom>* head() {
return head_;
}
void retire(hazptr_domain<Atom>& domain) {
retire({}, domain);
hazptr_obj<Atom>* tail() {
return tail_;
}
private:
void pre_retire(D deleter) {
this->pre_retire_check(); // defined in hazptr_obj
deleter_ = std::move(deleter);
int count() {
return count_;
}
void set_reclaim() {
this->reclaim_ = [](hazptr_obj<Atom>* p) {
auto hobp = static_cast<hazptr_obj_base<T, Atom, D>*>(p);
auto obj = static_cast<T*>(hobp);
hobp->deleter_(obj);
};
void push(hazptr_obj<Atom>* obj) {
obj->set_next(head_);
head_ = obj;
if (tail_ == nullptr) {
tail_ = obj;
}
++count_;
}
}; // hazptr_obj_base
void splice(hazptr_obj_list<Atom>& l) {
if (l.count() == 0) {
return;
}
if (count() == 0) {
head_ = l.head();
} else {
tail_->set_next(l.head());
}
tail_ = l.tail();
count_ += l.count();
l.clear();
}
void clear() {
head_ = nullptr;
tail_ = nullptr;
count_ = 0;
}
}; // hazptr_obj_list
/**
* hazptr_obj_base_refcounted
* hazptr_deleter
*
* Base template for reference counted objects protected by hazard
* pointers.
* For empty base optimization.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base_refcounted : public hazptr_obj<Atom> {
Atom<uint32_t> refcount_{0};
template <typename T, typename D>
class hazptr_deleter {
D deleter_;
public:
void set_deleter(D d = {}) {
deleter_ = std::move(d);
}
void delete_obj(T* p) {
deleter_(p);
}
};
template <typename T>
class hazptr_deleter<T, std::default_delete<T>> {
public:
void set_deleter(std::default_delete<T> = {}) {}
void delete_obj(T* p) {
delete p;
}
};
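
The specialization for std::default_delete is empty so that hazptr_obj_base (below), which inherits from it, pays no space cost in the common case thanks to the empty base optimization. A rough standalone check (not part of this diff; exact sizes are implementation dependent):

```cpp
#include <folly/synchronization/Hazptr.h>

#include <cstdio>

struct Widget : folly::hazptr_obj_base<Widget> {};

int main() {
  // With the empty deleter base optimized away, Widget is typically just
  // the hazptr_obj bookkeeping (the reclaim_ and next_ pointers).
  std::printf("sizeof(Widget)    = %zu\n", sizeof(Widget));
  std::printf("2 * sizeof(void*) = %zu\n", 2 * sizeof(void*));
  return 0;
}
```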
/**
* hazptr_obj_base
*
* Base template for objects protected by hazard pointers.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base : public hazptr_obj<Atom>, public hazptr_deleter<T, D> {
public:
/* Retire a removed object and pass the responsibility for
* reclaiming it to the hazptr library */
void retire(
D deleter = {},
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
this->pre_retire(std::move(deleter)); // defined in hazptr_obj
pre_retire(std::move(deleter));
set_reclaim();
this->do_retire(domain); // defined in hazptr_obj
this->push_to_retired(domain); // defined in hazptr_obj
}
void retire(hazptr_domain<Atom>& domain) {
retire({}, domain);
}
/* Increments the reference count. */
void acquire_ref() noexcept {
refcount_.fetch_add(1u, std::memory_order_acq_rel);
}
/* The same as acquire_ref() except that in addition the caller
* guarantees that the call is made in a thread-safe context, e.g.,
* the object is not yet shared. This is just an optimization to
* save an atomic read-modify-write operation. */
void acquire_ref_safe() noexcept {
auto oldval = refcount_.load(std::memory_order_acquire);
refcount_.store(oldval + 1u, std::memory_order_release);
}
/* Decrements the reference count and returns true if the object is
* safe to reclaim. */
bool release_ref() noexcept {
auto oldval = refcount_.load(std::memory_order_acquire);
if (oldval > 0u) {
oldval = refcount_.fetch_sub(1u, std::memory_order_acq_rel);
} else {
if (kIsDebug) {
refcount_.store(~0u);
}
}
return oldval == 0;
}
private:
void pre_retire(D deleter) {
this->pre_retire_check(); // defined in hazptr_obj
deleter_ = std::move(deleter);
this->set_deleter(std::move(deleter));
}
void set_reclaim() {
this->reclaim_ = [](hazptr_obj<Atom>* p) {
auto hrobp = static_cast<hazptr_obj_base_refcounted<T, Atom, D>*>(p);
if (hrobp->release_ref()) {
auto obj = static_cast<T*>(hrobp);
hrobp->deleter_(obj);
}
this->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>&) {
auto hobp = static_cast<hazptr_obj_base<T, Atom, D>*>(p);
auto obj = static_cast<T*>(hobp);
hobp->delete_obj(obj);
};
}
}; // hazptr_obj_base_refcounted
}; // hazptr_obj_base
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrObj.h>
#include <glog/logging.h>
#include <atomic>
#include <stack>
///
/// Classes related to link counted objects and automatic retirement.
///
namespace folly {
/**
* hazptr_root
*
* Link to counted objects. When destroyed, unlinks the linked object,
* if any.
*
* Template parameter T must support a member function unlink(),
* inherited from hazptr_obj_base_linked.
*
* Use example: Bucket heads in ConcurrentHashMap.
*/
template <typename T, template <typename> class Atom>
class hazptr_root {
Atom<T*> link_;
public:
explicit hazptr_root(T* p = nullptr) noexcept : link_(p) {}
~hazptr_root() {
auto p = link_.load(std::memory_order_relaxed);
if (p) {
p->unlink();
}
}
const Atom<T*>& operator()() const noexcept {
return link_;
}
Atom<T*>& operator()() noexcept {
return link_;
}
}; // hazptr_root
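
A rough bucket-head sketch (hypothetical Bucket and Node types, not from this diff), showing a hazptr_root holding the single counted inbound link to the first node of a chain:

```cpp
#include <folly/synchronization/Hazptr.h>

#include <atomic>

struct Node : folly::hazptr_obj_base_linked<Node> {
  int key{0};
  std::atomic<Node*> next{nullptr};

  template <typename S>
  void push_links(bool m, S& s) {
    if (m) { // next is a mutable link
      if (auto* p = next.load(std::memory_order_relaxed)) {
        s.push(p);
      }
    }
  }
};

struct Bucket {
  // Holds one counted mutable link to the first node; ~hazptr_root calls
  // unlink() on it, which retires the chain automatically as the link
  // counts drop to zero.
  folly::hazptr_root<Node> head;
};

// Publish a node into an empty bucket.
void insert_first(Bucket& b, Node* n) {
  n->acquire_link_safe(); // n gains one inbound link; n is not yet shared
  b.head().store(n, std::memory_order_release);
}
```

Readers would protect b.head() with a hazptr_holder before dereferencing; the link counts then cover the rest of the chain.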
/**
* hazptr_obj_linked
*
* Base class template for link counted objects.
* Supports:
* - Protecting descendants of protected objects.
* - One-pass reclamation of long immutable chains of objects.
* - Automatic reclamation of acyclic structures.
*
* Two inbound link counts are maintained per object:
* - Link count: Represents the number of links from mutable paths.
* - Ref count: Represents the number of links from immutable paths.
* [Note: The ref count is one less than such links plus one if
* the object hasn't gone through matching with hazard pointers
* without finding a match. That is, a new object without inbound
* links has a ref count of 0 and an about-to-be-reclaimed object
* can be viewed to have a ref count of -1.]
*
* User code can increment the link and ref counts by calling
* acquire_link and acquire_ref or their variants that require the
* user to guarantee thread safety. There are no public functions to
* decrement the counts explicitly. Counts are decremented implicitly
* as described in hazptr_obj_base_linked.
*/
template <template <typename> class Atom>
class hazptr_obj_linked : public hazptr_obj<Atom> {
using Count = uint32_t;
static constexpr Count kRef = 1u;
static constexpr Count kLink = 1u << 16;
static constexpr Count kRefMask = kLink - 1u;
static constexpr Count kLinkMask = ~kRefMask;
Atom<Count> count_{0};
public:
void acquire_link() noexcept {
count_inc(kLink);
}
void acquire_link_safe() noexcept {
count_inc_safe(kLink);
}
void acquire_ref() noexcept {
count_inc(kRef);
}
void acquire_ref_safe() noexcept {
count_inc_safe(kRef);
}
private:
template <typename, template <typename> class, typename>
friend class hazptr_obj_base_linked;
void count_inc(Count add) {
auto oldval = count_.fetch_add(add, std::memory_order_acq_rel);
DCHECK_LT(oldval & kLinkMask, kLinkMask);
DCHECK_LT(oldval & kRefMask, kRefMask);
}
void count_inc_safe(Count add) {
auto oldval = count_.load(std::memory_order_relaxed);
count_.store(oldval + add, std::memory_order_release);
DCHECK_LT(oldval & kLinkMask, kLinkMask);
DCHECK_LT(oldval & kRefMask, kRefMask);
}
bool count_cas(Count& oldval, Count newval) {
return count_.compare_exchange_weak(
oldval, newval, std::memory_order_acq_rel, std::memory_order_acquire);
}
bool release_link() noexcept {
auto sub = kLink;
auto oldval = count_.load(std::memory_order_acquire);
while (true) {
DCHECK_GT(oldval & kLinkMask, 0u);
if (oldval == kLink) {
count_.store(0u, std::memory_order_release);
return true;
}
if (count_cas(oldval, oldval - sub)) {
return false;
}
}
}
bool release_ref() noexcept {
auto sub = kRef;
auto oldval = count_.load(std::memory_order_acquire);
while (true) {
if (oldval == 0u) {
if (kIsDebug) {
count_.store(kRefMask);
}
return true;
}
DCHECK_GT(oldval & kRefMask, 0u);
if (count_cas(oldval, oldval - sub)) {
return false;
}
}
}
bool downgrade_link() noexcept {
auto oldval = count_.load(std::memory_order_acquire);
auto sub = kLink - kRef;
while (true) {
if (oldval == kLink) {
count_.store(kRef, std::memory_order_release);
return true;
}
if (count_cas(oldval, oldval - sub)) {
return (oldval & kLinkMask) == kLink;
}
}
}
}; // hazptr_obj_linked
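
The two counts share one 32-bit word: the low 16 bits hold the ref count and the high 16 bits hold the link count. A small standalone illustration of the encoding (mirrors the constants above; not part of the library):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Same values as in hazptr_obj_linked above.
  constexpr uint32_t kRef = 1u;
  constexpr uint32_t kLink = 1u << 16;
  constexpr uint32_t kRefMask = kLink - 1u;

  // An object with 2 inbound mutable links and 1 inbound immutable link:
  uint32_t count = 2 * kLink + 1 * kRef;
  std::printf("count = 0x%08x links = %u refs = %u\n",
              count,
              count >> 16,
              count & kRefMask);
  // Prints: count = 0x00020001 links = 2 refs = 1
  return 0;
}
```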
/**
* hazptr_obj_base_linked
*
* Base class template for link counted objects.
*
* Supports both *explicit* and *implicit* object retirement, depending
* on whether object removal is *certain* or *uncertain*.
*
* A derived object's removal is certain when it is always possible
* to reason based only on the local state of user code when an
* object is removed, i.e., becomes unreachable from static
* roots. Otherwise, removal is uncertain.
*
* For example, removal in UnboundedQueue is certain, whereas removal
* in ConcurrentHashMap is uncertain.
*
* If removal is certain, user code can call retire() explicitly.
* Otherwise, user code should call unlink() whenever an inbound
* link to the object is changed. Calls to unlink() automatically
* retire the object when the link count is decremented to 0. [Note:
* A ref count greater than 0 does not delay retiring an object.]
*
* Derived type T must define a member function template
* template <typename S>
* void push_links(bool m, S& s) {
* if (m) { // m stands for mutable links
* // for each outbound mutable pointer p call
* // s.push(p);
* } else {
* // for each outbound immutable pointer p call
* // s.push(p);
* }
* }
*
* T may have both, either, or none of the two types of outbound
* links. For example, UnboundedQueue Segment has an immutable
* link, and ConcurrentHashMap NodeT has a mutable link.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base_linked : public hazptr_obj_linked<Atom>,
public hazptr_deleter<T, D> {
using Stack = std::stack<hazptr_obj_base_linked<T, Atom, D>*>;
public:
void retire() {
this->pre_retire_check(); // defined in hazptr_obj
set_reclaim();
this->push_to_retired(
default_hazptr_domain<Atom>()); // defined in hazptr_obj
}
/* unlink: Retire object if last link is released. */
void unlink() {
if (this->release_link()) { // defined in hazptr_obj_linked
downgrade_retire_immutable_descendants();
retire();
}
}
private:
void set_reclaim() noexcept {
this->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>& l) {
auto obj = static_cast<hazptr_obj_base_linked<T, Atom, D>*>(p);
if (obj->release_ref()) { // defined in hazptr_obj_linked
obj->release_delete_immutable_descendants();
obj->release_retire_mutable_children(l);
obj->delete_self();
}
};
}
void downgrade_retire_immutable_descendants() {
Stack s;
call_push_links(false, s);
while (!s.empty()) {
auto p = s.top();
s.pop();
if (p && p->downgrade_link()) {
p->call_push_links(false, s);
p->retire();
}
}
}
void release_delete_immutable_descendants() {
Stack s;
call_push_links(false, s);
while (!s.empty()) {
auto p = s.top();
s.pop();
if (p && p->release_ref()) {
p->call_push_links(false, s);
p->delete_self();
}
}
}
void release_retire_mutable_children(hazptr_obj_list<Atom>& l) {
Stack s;
call_push_links(true, s);
while (!s.empty()) {
auto p = s.top();
s.pop();
if (p->release_link()) {
p->pre_retire_check(); // defined in hazptr_obj
p->set_reclaim();
l.push(p); // treated as if retired immediately
}
}
}
void call_push_links(bool m, Stack& s) {
static_cast<T*>(this)->push_links(m, s); // to be defined in T
}
void delete_self() {
this->delete_obj(static_cast<T*>(this)); // defined in hazptr_deleter
}
}; // hazptr_obj_base_linked
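
For a type with both kinds of outbound links, push_links dispatches on the flag. A rough sketch (hypothetical type; the placement of the acquire_link/acquire_ref calls is an assumption based on the count semantics documented for hazptr_obj_linked above, not something this diff prescribes):

```cpp
#include <folly/synchronization/Hazptr.h>

#include <atomic>

struct Mixed : folly::hazptr_obj_base_linked<Mixed> {
  Mixed* fixed{nullptr};                // outbound immutable link, set once
  std::atomic<Mixed*> current{nullptr}; // outbound mutable link

  template <typename S>
  void push_links(bool m, S& s) {
    if (m) { // mutable links
      if (auto* p = current.load(std::memory_order_relaxed)) {
        s.push(p);
      }
    } else { // immutable links
      if (fixed) {
        s.push(fixed);
      }
    }
  }
};

// Assumed counting discipline: a new inbound immutable link bumps the
// target's ref count; a new inbound mutable link bumps its link count.
// The corresponding decrements happen inside the library when the linking
// object is reclaimed or when unlink() is called.
void set_fixed(Mixed* from, Mixed* to) {
  to->acquire_ref(); // 'to' may already be shared; use the non-_safe variant
  from->fixed = to;  // assumed to happen before 'from' is shared
}

void set_current(Mixed* from, Mixed* to) {
  to->acquire_link(); // assumes from->current was previously null
  from->current.store(to, std::memory_order_release);
}
```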
} // namespace folly
@@ -207,7 +207,8 @@ class hazptr_priv {
collect(h, t);
if (h) {
DCHECK(t);
hazptr_domain_push_retired<Atom>(h, t, rcount_);
hazptr_obj_list<Atom> l(h, t, rcount_);
hazptr_domain_push_retired<Atom>(l);
rcount_ = 0;
}
}
......