Commit b2df58ec authored by Maged Michael, committed by Facebook Github Bot

hazptr: Batches and tags for managing destruction order.

Summary:
Add batches and tags for objects protectable by hazard pointers.
Add fine-grained cleanup of tagged objects (lower overhead than calling hazptr_cleanup).

Design sketch:
- The domain structure contains an untagged list and one or more tagged lists (more than one to reduce contention).
- These domain lists support lock-free push and lock-free unlocked pop all. Tagged lists are locked by pop all operations and unlocked by push_unlock operations. Push operations are lock-free even when a list is locked for pop all operations.
- Batches are lists of retired objects.
- Each object contains the address of a batch (or null). The lowest bit indicates whether the batch address is also used as a tag (see the sketch after this list).
- hazptr_cleanup_batch_tag(tag) reclaims all objects with that tag without checking hazard pointers.
- Each domain tagged list contains tagged objects whose tags hash to the index of that list.
- Untagged objects are those that are safe to reclaim at shutdown time, i.e., their deleters only reclaim memory without other dependencies.
- For example, retired UnboundedQueue segments do not depend on user-defined destructors and their deleters merely reclaim memory using the default deleter, so they can be untagged.
- On the other hand, the deleters of ConcurrentHashMap buckets and nodes depend on user-defined code, so their destruction must be managed explicitly; therefore they are tagged.
- Batches can be used even without tagging. For example, retired UnboundedQueue segments are collected in batches because they have link-counting interdependencies and batches help keep related segments together.
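
A minimal sketch of the tag encoding and tagged-list selection described above, for illustration only. The helper names make_tag and tagged_list_index are hypothetical; in the diff the equivalent logic lives in hazptr_obj_batch and hazptr_domain::hash_tag, with 2^6 tagged lists and the lowest address bit used as the tag bit.

#include <cstdint>
#include <functional>

constexpr uintptr_t kTagBit = 1u;             // lowest bit marks "batch address is a tag"
constexpr uint16_t kNumTaggedLists = 1 << 6;  // power of two, to spread contention
constexpr uint16_t kTaggedListIDMask = kNumTaggedLists - 1;

// A tag is simply the batch address with the tag bit set.
inline uintptr_t make_tag(const void* batch) {
  return reinterpret_cast<uintptr_t>(batch) + kTagBit;
}

// All objects carrying a given tag are pushed to the tagged list selected by
// this hash, so hazptr_cleanup_batch_tag(tag) only has to scan one list.
inline uint16_t tagged_list_index(uintptr_t tag) {
  return std::hash<uintptr_t>{}(tag) & kTaggedListIDMask;
}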

Reviewed By: davidtgoldblatt

Differential Revision: D10147101

fbshipit-source-id: 8820cf46ad8942c7362d91543bfbd9fe07e27b7a
parent 77f06ada
......@@ -54,6 +54,14 @@ class hazptr_obj;
template <template <typename> class Atom = std::atomic>
class hazptr_obj_list;
/** hazptr_obj_batch */
template <template <typename> class Atom = std::atomic>
class hazptr_obj_batch;
/** hazptr_obj_retired_list */
template <template <typename> class Atom = std::atomic>
class hazptr_obj_retired_list;
/** hazptr_deleter */
template <typename T, typename D>
class hazptr_deleter;
......@@ -123,6 +131,12 @@ class hazptr_domain;
template <template <typename> class Atom = std::atomic>
hazptr_domain<Atom>& default_hazptr_domain();
/** hazptr_domain_push_list */
template <template <typename> class Atom = std::atomic>
void hazptr_domain_push_list(
hazptr_obj_list<Atom>& l,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** hazptr_domain_push_retired */
template <template <typename> class Atom = std::atomic>
void hazptr_domain_push_retired(
......@@ -142,6 +156,12 @@ template <template <typename> class Atom = std::atomic>
void hazptr_cleanup(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** hazptr_cleanup_batch_tag */
template <template <typename> class Atom = std::atomic>
void hazptr_cleanup_batch_tag(
const hazptr_obj_batch<Atom>* batch,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** Global default domain defined in Hazptr.cpp */
extern hazptr_domain<std::atomic> default_domain;
......@@ -25,6 +25,7 @@
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
#include <atomic>
#include <functional>
#include <unordered_set> // for hash set in bulk_reclaim
///
......@@ -50,9 +51,20 @@ constexpr int hazptr_domain_rcount_threshold() {
*/
template <template <typename> class Atom>
class hazptr_domain {
using Obj = hazptr_obj<Atom>;
using ObjList = hazptr_obj_list<Atom>;
using RetiredList = hazptr_obj_retired_list<Atom>;
using Set = std::unordered_set<const void*>;
static constexpr int kThreshold = detail::hazptr_domain_rcount_threshold();
static constexpr int kMultiplier = 2;
static constexpr uint64_t kSyncTimePeriod{2000000000}; // nanoseconds
static constexpr uint8_t kLogNumTaggedLists = 6;
static constexpr uint16_t kNumTaggedLists = 1 << kLogNumTaggedLists;
static constexpr uint16_t kTaggedListIDMask = kNumTaggedLists - 1;
static constexpr uintptr_t kTagBit = hazptr_obj<Atom>::kTagBit;
static_assert(kNumTaggedLists <= 1024, "Too many tagged lists.");
Atom<hazptr_rec<Atom>*> hazptrs_{nullptr};
Atom<hazptr_obj<Atom>*> retired_{nullptr};
......@@ -65,6 +77,9 @@ class hazptr_domain {
Atom<uint16_t> num_bulk_reclaims_{0};
bool shutdown_{false};
RetiredList untagged_;
RetiredList tagged_[kNumTaggedLists];
public:
/** Constructor */
hazptr_domain() = default;
......@@ -74,6 +89,9 @@ class hazptr_domain {
shutdown_ = true;
reclaim_all_objects();
free_hazptr_recs();
for (uint16_t i = 0; i < kNumTaggedLists; ++i) {
DCHECK(tagged_[i].empty());
}
}
hazptr_domain(const hazptr_domain&) = delete;
......@@ -81,7 +99,6 @@ class hazptr_domain {
hazptr_domain& operator=(const hazptr_domain&) = delete;
hazptr_domain& operator=(hazptr_domain&&) = delete;
public:
/** retire - nonintrusive - allocates memory */
template <typename T, typename D = std::default_delete<T>>
void retire(T* obj, D reclaim = {}) {
......@@ -105,13 +122,30 @@ class hazptr_domain {
wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s
}
/** cleanup_batch_tag */
void cleanup_batch_tag(const hazptr_obj_batch<Atom>* batch) noexcept {
auto tag = reinterpret_cast<uintptr_t>(batch) + kTagBit;
RetiredList& rlist = tagged_[hash_tag(tag)];
ObjList match, nomatch;
auto obj = rlist.pop_all(RetiredList::kAlsoLock);
list_match_condition(
obj, match, nomatch, [tag](Obj* o) { return o->batch_tag() == tag; });
rlist.push_unlock(nomatch);
obj = match.head();
reclaim_list_transitive(obj);
}
private:
friend void hazptr_domain_push_list<Atom>(
hazptr_obj_list<Atom>&,
hazptr_domain<Atom>&) noexcept;
friend void hazptr_domain_push_retired<Atom>(
hazptr_obj_list<Atom>&,
bool check,
hazptr_domain<Atom>&) noexcept;
friend class hazptr_holder<Atom>;
friend class hazptr_obj<Atom>;
friend class hazptr_obj_batch<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
friend class hazptr_tc<Atom>;
#endif
......@@ -147,6 +181,113 @@ class hazptr_domain {
}
}
/** push_list */
void push_list(ObjList& l) {
if (l.empty()) {
return;
}
uintptr_t btag = l.head()->batch_tag();
bool tagged = ((btag & kTagBit) == kTagBit);
RetiredList& rlist = tagged ? tagged_[hash_tag(btag)] : untagged_;
/*** Full fence ***/ asymmetricLightBarrier();
/* Only tagged lists need to be locked because tagging is used to
* guarantee the identification of all objects with a specific
* tag. Locking protects against concurrent hazptr_cleanup_batch_tag()
* calls missing tagged objects. */
bool lock =
tagged ? RetiredList::kMayBeLocked : RetiredList::kMayNotBeLocked;
rlist.push(l, lock);
check_threshold_and_reclaim(rlist, lock);
}
uint16_t hash_tag(uintptr_t tag) {
size_t h = std::hash<uintptr_t>{}(tag);
return h & kTaggedListIDMask;
}
/** threshold */
int threshold() {
auto thresh = kThreshold;
return std::max(thresh, kMultiplier * hcount());
}
/** check_threshold_and_reclaim */
void check_threshold_and_reclaim(RetiredList& rlist, bool lock) {
if (!(lock && rlist.check_lock()) &&
rlist.check_threshold_try_zero_count(threshold())) {
do_reclamation(rlist, lock);
}
}
/** do_reclamation */
void do_reclamation(RetiredList& rlist, bool lock) {
auto obj = rlist.pop_all(lock == RetiredList::kAlsoLock);
/*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
auto hprec = hazptrs_.load(std::memory_order_acquire);
/* Read hazard pointer values into private search structure */
Set hs;
for (; hprec; hprec = hprec->next()) {
hs.insert(hprec->hazptr());
}
/* Check objects against hazard pointer values */
ObjList match, nomatch;
list_match_condition(obj, match, nomatch, [&](Obj* o) {
return hs.count(o->raw_ptr()) > 0;
});
/* Reclaim unmatched objects */
hazptr_obj_list<Atom> children;
reclaim_list(nomatch.head(), children);
match.splice(children);
/* Push back matched and children of unmatched objects */
if (lock) {
rlist.push_unlock(match);
} else {
rlist.push(match, false);
}
}
/** lookup_and_reclaim */
void lookup_and_reclaim(Obj* obj, const Set& hs, ObjList& keep) {
while (obj) {
auto next = obj->next();
DCHECK_NE(obj, next);
if (hs.count(obj->raw_ptr()) == 0) {
(*(obj->reclaim()))(obj, keep);
} else {
keep.push(obj);
}
obj = next;
}
}
/** list_match_condition */
template <typename Cond>
void list_match_condition(
Obj* obj,
ObjList& match,
ObjList& nomatch,
const Cond& cond) {
while (obj) {
auto next = obj->next();
DCHECK_NE(obj, next);
if (cond(obj)) {
match.push(obj);
} else {
nomatch.push(obj);
}
obj = next;
}
}
/** reclaim_list */
void reclaim_list(Obj* head, ObjList& children) {
while (head) {
auto next = head->next();
(*(head->reclaim()))(head, children);
head = next;
}
}
hazptr_rec<Atom>* head() const noexcept {
return hazptrs_.load(std::memory_order_acquire);
}
......@@ -168,20 +309,17 @@ class hazptr_domain {
}
void reclaim_all_objects() {
  auto head = retired_.exchange(nullptr);
  reclaim_list_transitive(head);
  head = untagged_.pop_all(RetiredList::kDontLock);
  reclaim_list_transitive(head);
}

void reclaim_list_transitive(Obj* head) {
  while (head) {
    ObjList children;
    reclaim_list(head, children);
    head = children.head();
  }
}
......@@ -373,6 +511,14 @@ void hazptr_domain_push_retired(
domain.push_retired(l, check);
}
/** hazptr_domain_push_list */
template <template <typename> class Atom>
void hazptr_domain_push_list(
hazptr_obj_list<Atom>& l,
hazptr_domain<Atom>& domain) noexcept {
domain.push_list(l);
}
/** hazptr_retire */
template <template <typename> class Atom, typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
......@@ -385,4 +531,12 @@ void hazptr_cleanup(hazptr_domain<Atom>& domain) noexcept {
domain.cleanup();
}
/** hazptr_cleanup_batch_tag: Reclaims objects associated with a tag */
template <template <typename> class Atom>
void hazptr_cleanup_batch_tag(
const hazptr_obj_batch<Atom>* batch,
hazptr_domain<Atom>& domain) noexcept {
domain.cleanup_batch_tag(batch);
}
} // namespace folly
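
A usage sketch of the API added above, for illustration only. NodeT stands for a hypothetical user type that supports set_batch_tag() and retire(), like the Node type used in the tests below; the calls to shutdown_and_reclaim() and hazptr_cleanup_batch_tag() mirror the usage in those tests.

template <typename NodeT, template <typename> class Atom = std::atomic>
void cleanup_batch_tag_usage_sketch() {
  folly::hazptr_obj_batch<Atom> batch;
  auto* p = new NodeT;
  p->set_batch_tag(&batch);     // associate the object with the batch's tag
  p->retire();                  // goes to the domain's tagged list for this tag
  batch.shutdown_and_reclaim(); // flush objects still held by the batch
  // Reclaim all remaining objects carrying this tag, without a full cleanup:
  folly::hazptr_cleanup_batch_tag<Atom>(&batch);
}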
......@@ -239,8 +239,15 @@ class hazptr_obj_base_linked : public hazptr_obj_linked<Atom>,
void retire() {
  this->pre_retire_check(); // defined in hazptr_obj
  set_reclaim();
  auto& domain = default_hazptr_domain<Atom>();
  auto btag = this->batch_tag();
  if (btag == 0u) {
    this->push_to_retired(domain); // defined in hazptr_obj
  } else {
    btag -= btag & 1u;
    auto batch = reinterpret_cast<hazptr_obj_batch<Atom>*>(btag);
    batch->push_obj(this, domain);
  }
}
/* unlink: Retire object if last link is released. */
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Portability.h>
#include <folly/synchronization/detail/Sleeper.h>
#include <glog/logging.h>
#include <atomic>
/// Linked list class templates used in the hazard pointer library:
/// - linked_list: Sequential linked list that uses pre-existing
/// member functions next() and set_next().
/// - shared_head_tail_list: Thread-safe linked list that maintains
/// head and tail pointers. Supports push and pop_all.
/// - shared_head_only_list: Thread-safe linked lockable list that
/// maintains only a head pointer. Supports push and pop_all.
namespace folly {
namespace hazptr_detail {
/**
* linked_list
*
* Template parameter Node must support set_next
*
*/
template <typename Node>
class linked_list {
Node* head_;
Node* tail_;
public:
linked_list() noexcept : head_(nullptr), tail_(nullptr) {}
explicit linked_list(Node* head, Node* tail) noexcept
: head_(head), tail_(tail) {}
Node* head() const noexcept {
return head_;
}
Node* tail() const noexcept {
return tail_;
}
bool empty() const noexcept {
return head() == nullptr;
}
void push(Node* node) noexcept {
node->set_next(nullptr);
if (tail_) {
tail_->set_next(node);
} else {
head_ = node;
}
tail_ = node;
}
void splice(linked_list& l) {
if (head() == nullptr) {
head_ = l.head();
} else {
tail_->set_next(l.head());
}
tail_ = l.tail();
l.clear();
}
void clear() {
head_ = nullptr;
tail_ = nullptr;
}
}; // linked_list
/**
* shared_head_tail_list
*
* Maintains head and tail pointers. Supports push and pop all
* operations. The pop all operation is wait-free. (See the usage
* sketch after this class.)
*/
template <typename Node, template <typename> class Atom = std::atomic>
class shared_head_tail_list {
Atom<Node*> head_;
Atom<Node*> tail_;
public:
shared_head_tail_list() noexcept : head_(nullptr), tail_(nullptr) {}
shared_head_tail_list(shared_head_tail_list&& o) noexcept {
head_.store(o.head(), std::memory_order_relaxed);
tail_.store(o.tail(), std::memory_order_relaxed);
o.head_.store(nullptr, std::memory_order_relaxed);
o.tail_.store(nullptr, std::memory_order_relaxed);
}
shared_head_tail_list& operator=(shared_head_tail_list&& o) noexcept {
head_.store(o.head(), std::memory_order_relaxed);
tail_.store(o.tail(), std::memory_order_relaxed);
o.head_.store(nullptr, std::memory_order_relaxed);
o.tail_.store(nullptr, std::memory_order_relaxed);
return *this;
}
~shared_head_tail_list() {
DCHECK(head() == nullptr);
DCHECK(tail() == nullptr);
}
void push(Node* node) noexcept {
bool done = false;
while (!done) {
if (tail()) {
done = push_in_non_empty_list(node);
} else {
done = push_in_empty_list(node);
}
}
}
linked_list<Node> pop_all() noexcept {
auto h = exchange_head();
auto t = (h != nullptr) ? exchange_tail() : nullptr;
return linked_list<Node>(h, t);
}
bool empty() const noexcept {
return head() == nullptr;
}
private:
Node* head() const noexcept {
return head_.load(std::memory_order_acquire);
}
Node* tail() const noexcept {
return tail_.load(std::memory_order_acquire);
}
void set_head(Node* node) noexcept {
head_.store(node, std::memory_order_release);
}
bool cas_head(Node* expected, Node* node) noexcept {
return head_.compare_exchange_weak(
expected, node, std::memory_order_acq_rel, std::memory_order_relaxed);
}
bool cas_tail(Node* expected, Node* node) noexcept {
return tail_.compare_exchange_weak(
expected, node, std::memory_order_acq_rel, std::memory_order_relaxed);
}
Node* exchange_head() noexcept {
return head_.exchange(nullptr, std::memory_order_acq_rel);
}
Node* exchange_tail() noexcept {
return tail_.exchange(nullptr, std::memory_order_acq_rel);
}
bool push_in_non_empty_list(Node* node) noexcept {
auto h = head();
if (h) {
node->set_next(h); // Node must support set_next
if (cas_head(h, node)) {
return true;
}
}
return false;
}
bool push_in_empty_list(Node* node) noexcept {
Node* t = nullptr;
node->set_next(nullptr); // Node must support set_next
if (cas_tail(t, node)) {
set_head(node);
return true;
}
return false;
}
}; // shared_head_tail_list
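// Illustrative usage sketch (not part of this diff). DemoNode is a
// hypothetical node type providing the next()/set_next() members these
// lists require.
struct DemoNode {
  DemoNode* next_{nullptr};
  DemoNode* next() const noexcept { return next_; }
  void set_next(DemoNode* n) noexcept { next_ = n; }
};

inline void shared_head_tail_list_usage_sketch() {
  shared_head_tail_list<DemoNode> slist;
  DemoNode a, b;
  slist.push(&a); // lock-free push
  slist.push(&b);
  linked_list<DemoNode> all = slist.pop_all(); // wait-free: takes the whole list
  for (DemoNode* n = all.head(); n != nullptr; n = n->next()) {
    // process n
  }
}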
/**
* shared_head_only_list
*
* A shared singly linked list that maintains only a head pointer. It
* supports pop all and push list operations. Optionally the list may
* be locked for pop all operations. Pop all operations have locked
* and wait-free variants. Push operations are always lock-free.
*
* Not all combinations of operations are mutually compatible. The
* following are valid combinations (see the usage sketch after this class):
* - push(kMayBeLocked), pop_all(kAlsoLock), push_unlock
* - push(kMayNotBeLocked), pop_all(kDontLock)
*/
template <typename Node, template <typename> class Atom = std::atomic>
class shared_head_only_list {
Atom<uintptr_t> head_{0}; // lowest bit is a lock for pop all
static constexpr uintptr_t kLockBit = 1u;
static constexpr uintptr_t kUnlocked = 0u;
public:
static constexpr bool kAlsoLock = true;
static constexpr bool kDontLock = false;
static constexpr bool kMayBeLocked = true;
static constexpr bool kMayNotBeLocked = false;
public:
void push(linked_list<Node>& l, bool may_be_locked) noexcept {
if (l.empty()) {
return;
}
auto oldval = head();
while (true) {
auto newval = reinterpret_cast<uintptr_t>(l.head());
auto ptrval = oldval;
auto lockbit = oldval & kLockBit;
if (may_be_locked == kMayBeLocked) {
ptrval -= lockbit;
newval += lockbit;
} else {
DCHECK_EQ(lockbit, kUnlocked);
}
auto ptr = reinterpret_cast<Node*>(ptrval);
l.tail()->set_next(ptr); // Node must support set_next
if (cas_head(oldval, newval)) {
break;
}
}
}
Node* pop_all(bool lock) noexcept {
return lock == kAlsoLock ? pop_all_lock() : pop_all_no_lock();
}
void push_unlock(linked_list<Node>& l) noexcept {
auto oldval = head();
DCHECK_EQ(oldval & kLockBit, kLockBit); // Should be already locked
auto ptrval = oldval - kLockBit;
auto ptr = reinterpret_cast<Node*>(ptrval);
auto t = l.tail();
if (t) {
t->set_next(ptr); // Node must support set_next
}
auto newval =
(t == nullptr) ? ptrval : reinterpret_cast<uintptr_t>(l.head());
set_head(newval);
}
bool check_lock() const noexcept {
return (head() & kLockBit) == kLockBit;
}
bool empty() const noexcept {
return head() == 0u;
}
private:
uintptr_t head() const noexcept {
return head_.load(std::memory_order_acquire);
}
void set_head(uintptr_t val) noexcept {
head_.store(val, std::memory_order_release);
}
uintptr_t exchange_head() noexcept {
auto newval = reinterpret_cast<uintptr_t>(nullptr);
auto oldval = head_.exchange(newval, std::memory_order_acq_rel);
return oldval;
}
bool cas_head(uintptr_t& oldval, uintptr_t newval) noexcept {
return head_.compare_exchange_weak(
oldval, newval, std::memory_order_acq_rel, std::memory_order_acquire);
}
Node* pop_all_no_lock() noexcept {
auto oldval = exchange_head();
DCHECK_EQ(oldval & kLockBit, kUnlocked);
return reinterpret_cast<Node*>(oldval);
}
Node* pop_all_lock() noexcept {
folly::detail::Sleeper s;
auto oldval = head();
while (true) {
auto lockbit = oldval & kLockBit;
if (lockbit == kUnlocked) {
auto newval = reinterpret_cast<uintptr_t>(nullptr) + kLockBit;
if (cas_head(oldval, newval)) {
return reinterpret_cast<Node*>(oldval);
}
}
s.sleep();
}
}
}; // shared_head_only_list
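// Illustrative usage sketch of the lockable variant (not part of this diff),
// exercising the valid combination push(kMayBeLocked), pop_all(kAlsoLock),
// push_unlock noted above. LockDemoNode is a hypothetical node type with the
// required next()/set_next() members.
struct LockDemoNode {
  LockDemoNode* next_{nullptr};
  LockDemoNode* next() const noexcept { return next_; }
  void set_next(LockDemoNode* n) noexcept { next_ = n; }
};

inline void shared_head_only_list_usage_sketch() {
  using List = shared_head_only_list<LockDemoNode>;
  List rlist;
  LockDemoNode a, b;
  linked_list<LockDemoNode> l;
  l.push(&a);
  l.push(&b);
  rlist.push(l, List::kMayBeLocked); // lock-free push; tolerates a locked list
  // Pop everything and leave the list locked against concurrent pop_all-s.
  LockDemoNode* popped = rlist.pop_all(List::kAlsoLock);
  linked_list<LockDemoNode> keep;
  while (popped != nullptr) {
    LockDemoNode* next = popped->next();
    keep.push(popped); // e.g., keep nodes that cannot be processed yet
    popped = next;
  }
  rlist.push_unlock(keep); // push survivors back and release the lock
}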
} // namespace hazptr_detail
} // namespace folly
......@@ -35,11 +35,13 @@ DEFINE_int64(num_ops, 1003, "Number of ops or pairs of ops per rep");
using folly::default_hazptr_domain;
using folly::hazptr_array;
using folly::hazptr_cleanup;
using folly::hazptr_cleanup_batch_tag;
using folly::hazptr_domain;
using folly::hazptr_holder;
using folly::hazptr_local;
using folly::hazptr_obj_base;
using folly::hazptr_obj_base_linked;
using folly::hazptr_obj_batch;
using folly::hazptr_retire;
using folly::hazptr_root;
using folly::hazptr_tc;
......@@ -809,6 +811,45 @@ void priv_dtor_test() {
ASSERT_EQ(c_.dtors(), 1);
}
template <template <typename> class Atom = std::atomic>
void batch_test() {
int num = 10001;
using NodeT = Node<Atom>;
c_.clear();
{
hazptr_obj_batch<Atom> batch;
auto thr = DSched::thread([&]() {
for (int i = 0; i < num; ++i) {
auto p = new NodeT;
p->set_batch_no_tag(&batch);
p->retire();
}
});
DSched::join(thr);
batch.shutdown_and_reclaim();
}
ASSERT_EQ(c_.ctors(), num);
// ASSERT_GT(c_.dtors(), 0);
hazptr_cleanup<Atom>();
c_.clear();
{
hazptr_obj_batch<Atom> batch;
auto thr = DSched::thread([&]() {
for (int i = 0; i < num; ++i) {
auto p = new NodeT;
p->set_batch_tag(&batch);
p->retire();
}
});
DSched::join(thr);
batch.shutdown_and_reclaim();
hazptr_cleanup_batch_tag<Atom>(&batch);
}
ASSERT_EQ(c_.ctors(), num);
ASSERT_GT(c_.dtors(), 0);
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void lifo_test() {
for (int i = 0; i < FLAGS_num_reps; ++i) {
......@@ -1064,6 +1105,15 @@ TEST_F(HazptrPreInitTest, dsched_priv_dtor) {
priv_dtor_test<DeterministicAtomic>();
}
TEST(HazptrTest, batch) {
batch_test();
}
TEST(HazptrTest, dsched_batch) {
DSched sched(DSched::uniform(0));
batch_test<DeterministicAtomic>();
}
TEST(HazptrTest, lifo) {
lifo_test();
}