Commit b0e23aef authored by Maged Michael, committed by Facebook GitHub Bot

hazptr_domain: Add tagged list sharding

Summary:
Changes:
- Make the tagged list sharded (see the sketch after this list).
- Manage a common count for both tagged and untagged cohort objects.
- Combine timed asynchronous reclamation for both tagged and untagged cohort objects.
- Integrate the threshold check for asynchronous reclamation with the management of timed asynchronous reclamation.
- Combine the asynchronous reclamation of both tagged and untagged objects.
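For illustration, here is a minimal standalone sketch (hypothetical names, not folly's actual API) of the first two changes: a cohort tag hashes to one of a power-of-two number of shards, and a single count shared by tagged and untagged objects is claimed by a CAS to zero, so exactly one thread performs reclamation once the threshold is reached:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <functional>

    constexpr int kNumShards = 8; // must be a power of 2 (mirrors the diff)
    constexpr int kShardMask = kNumShards - 1;
    static_assert(
        (kNumShards & kShardMask) == 0, "kNumShards must be a power of 2");

    // Hash a cohort tag to a shard index, as calc_shard() does in the diff.
    std::size_t calc_shard(std::uintptr_t tag) {
      return std::hash<std::uintptr_t>{}(tag) & kShardMask;
    }

    // One count shared by tagged and untagged retired objects. The thread
    // whose CAS zeroes the count is the one that claims the pending objects.
    std::atomic<int> count{0};

    int check_count_threshold(int threshold) {
      int rcount = count.load(std::memory_order_acquire);
      while (rcount >= threshold) {
        if (count.compare_exchange_weak(
                rcount, 0, std::memory_order_acq_rel, std::memory_order_relaxed)) {
          return rcount; // this thread reclaims rcount objects
        }
      }
      return 0; // below threshold, or another thread claimed the count
    }

    int main() {
      count.fetch_add(1500, std::memory_order_release);
      std::printf("tag 0x1234 -> shard %zu\n", calc_shard(0x1234));
      std::printf("claimed %d\n", check_count_threshold(1000));
    }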

Reviewed By: davidtgoldblatt

Differential Revision: D24136818

fbshipit-source-id: 8df1b8eeef1df7f14b5d68ed5ad82d0459a381f5
parent 61788481
@@ -106,8 +106,9 @@ constexpr int hazptr_domain_rcount_threshold() {
 template <template <typename> class Atom>
 class hazptr_domain {
   using Obj = hazptr_obj<Atom>;
+  using List = hazptr_detail::linked_list<Obj>;
   using ObjList = hazptr_obj_list<Atom>;
-  using RetiredList = hazptr_obj_retired_list<Atom>;
+  using RetiredList = hazptr_detail::shared_head_only_list<Obj, Atom>;
   using Set = std::unordered_set<const void*>;
   using ExecFn = folly::Executor* (*)();
@@ -117,6 +118,12 @@ class hazptr_domain {
   static constexpr uint64_t kSyncTimePeriod{2000000000}; // nanoseconds
   static constexpr uintptr_t kTagBit = hazptr_obj<Atom>::kTagBit;
+  static constexpr int kNumShards = 8;
+  static constexpr int kShardMask = kNumShards - 1;
+  static_assert(
+      (kNumShards & kShardMask) == 0,
+      "kNumShards must be a power of 2");
   static folly::Executor* get_default_executor() {
     return &folly::QueuedImmediateExecutor::instance();
   }
@@ -132,11 +139,11 @@ class hazptr_domain {
   Atom<uint16_t> num_bulk_reclaims_{0};
   bool shutdown_{false};
   RetiredList untagged_;
-  RetiredList tagged_;
+  RetiredList tagged_[kNumShards];
+  Atom<int> count_{0};
   Obj* unprotected_; // List of unprotected objects being reclaimed
   ObjList children_; // Children of unprotected objects being reclaimed
-  Atom<uint64_t> tagged_sync_time_{0};
-  Atom<uint64_t> untagged_sync_time_{0};
+  Atom<uint64_t> due_time_{0};
   Atom<ExecFn> exec_fn_{nullptr};
   Atom<int> exec_backlog_{0};
@@ -149,7 +156,7 @@ class hazptr_domain {
     shutdown_ = true;
     reclaim_all_objects();
     free_hazptr_recs();
-    DCHECK(tagged_.empty());
+    DCHECK(tagged_empty());
   }
   hazptr_domain(const hazptr_domain&) = delete;
@@ -191,29 +198,18 @@ class hazptr_domain {
   /** cleanup_cohort_tag */
   void cleanup_cohort_tag(const hazptr_obj_cohort<Atom>* cohort) noexcept {
     auto tag = reinterpret_cast<uintptr_t>(cohort) + kTagBit;
-    auto obj = tagged_.pop_all(RetiredList::kAlsoLock);
+    auto shard = calc_shard(tag);
+    auto obj = tagged_[shard].pop_all(RetiredList::kAlsoLock);
     ObjList match, nomatch;
     list_match_tag(tag, obj, match, nomatch);
-    if (unprotected_) { // There must be ongoing do_reclamation
-      ObjList match2, nomatch2;
-      list_match_tag(tag, unprotected_, match2, nomatch2);
-      match.splice(match2);
-      unprotected_ = nomatch2.head();
-    }
-    if (children_.head()) {
-      ObjList match2, nomatch2;
-      list_match_tag(tag, children_.head(), match2, nomatch2);
-      match.splice(match2);
-      children_ = std::move(nomatch2);
-    }
-    auto count = nomatch.count();
-    nomatch.set_count(0);
-    tagged_.push_unlock(nomatch);
+    List l(nomatch.head(), nomatch.tail());
+    tagged_[shard].push_unlock(l);
+    add_count(-match.count());
     obj = match.head();
     reclaim_list_transitive(obj);
-    if (count >= threshold()) {
-      check_threshold_and_reclaim(
-          tagged_, RetiredList::kAlsoLock, tagged_sync_time_);
+    int count = match.count() + nomatch.count();
+    if (count > kListTooLarge) {
+      hazptr_warning_list_too_large(tag, shard, count);
     }
   }
@@ -242,6 +238,39 @@ class hazptr_domain {
   friend class hazptr_tc<Atom>;
 #endif
+  int load_count() {
+    return count_.load(std::memory_order_acquire);
+  }
+  void add_count(int val) {
+    count_.fetch_add(val, std::memory_order_release);
+  }
+  int exchange_count(int val) {
+    return count_.exchange(val, std::memory_order_acq_rel);
+  }
+  bool cas_count(int& expected, int newval) {
+    return count_.compare_exchange_weak(
+        expected, newval, std::memory_order_acq_rel, std::memory_order_relaxed);
+  }
+  uint64_t load_due_time() {
+    return due_time_.load(std::memory_order_acquire);
+  }
+  void set_due_time() {
+    uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
+                        std::chrono::steady_clock::now().time_since_epoch())
+                        .count();
+    due_time_.store(time + kSyncTimePeriod, std::memory_order_release);
+  }
+  bool cas_due_time(uint64_t& expected, uint64_t newval) {
+    return due_time_.compare_exchange_strong(
+        expected, newval, std::memory_order_acq_rel, std::memory_order_relaxed);
+  }
   /** hprec_acquire */
   hazptr_rec<Atom>* hprec_acquire() {
     auto rec = try_acquire_existing_hprec();
@@ -280,18 +309,15 @@ class hazptr_domain {
     }
     uintptr_t btag = l.head()->cohort_tag();
     bool tagged = ((btag & kTagBit) == kTagBit);
-    RetiredList& rlist = tagged ? tagged_ : untagged_;
-    Atom<uint64_t>& sync_time =
-        tagged ? tagged_sync_time_ : untagged_sync_time_;
     /*** Full fence ***/ asymmetricLightBarrier();
-    /* Only tagged lists need to be locked because tagging is used to
-     * guarantee the identification of all objects with a specific
-     * tag. Locking protects against concurrent hazptr_cleanup_tag()
-     * calls missing tagged objects. */
-    bool lock =
-        tagged ? RetiredList::kMayBeLocked : RetiredList::kMayNotBeLocked;
-    rlist.push(l, lock);
-    check_threshold_and_reclaim(rlist, lock, sync_time);
+    List ll(l.head(), l.tail());
+    if (!tagged) {
+      untagged_.push(ll, RetiredList::kMayNotBeLocked);
+    } else {
+      tagged_[calc_shard(btag)].push(ll, RetiredList::kMayBeLocked);
+    }
+    add_count(l.count());
+    check_threshold_and_reclaim();
   }
   /** threshold */
@@ -301,61 +327,109 @@ class hazptr_domain {
   }
   /** check_threshold_and_reclaim */
-  void check_threshold_and_reclaim(
-      RetiredList& rlist,
-      bool lock,
-      Atom<uint64_t>& sync_time) {
-    int rcount = rlist.count();
-    if (rcount > kListTooLarge) {
-      hazptr_warning_list_too_large(rlist, lock, rcount);
-    }
-    if (!(lock && rlist.check_lock()) &&
-        (rlist.check_threshold_try_zero_count(threshold()) ||
-         check_sync_time(sync_time))) {
+  void check_threshold_and_reclaim() {
+    int rcount = check_count_threshold();
+    if (rcount == 0) {
+      rcount = check_due_time();
+      if (rcount == 0)
+        return;
+    }
     if (std::is_same<Atom<int>, std::atomic<int>>{} &&
         this == &default_hazptr_domain<Atom>() &&
         FLAGS_folly_hazptr_use_executor) {
-      invoke_reclamation_in_executor(rlist, lock);
+      invoke_reclamation_in_executor(rcount);
     } else {
-      do_reclamation(rlist, lock);
+      do_reclamation(rcount);
     }
   }
+  /** calc_shard */
+  size_t calc_shard(uintptr_t tag) {
+    size_t shard = std::hash<uintptr_t>{}(tag) & kShardMask;
+    DCHECK(shard < kNumShards);
+    return shard;
+  }
-  /** check_sync_time_and_reclaim **/
-  void check_sync_time_and_reclaim() {
-    if (!tagged_.check_lock() && check_sync_time()) {
-      do_reclamation(tagged_, RetiredList::kAlsoLock);
-      do_reclamation(untagged_, RetiredList::kDontLock);
-    }
-  }
+  /** check_due_time */
+  int check_due_time() {
+    uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
+                        std::chrono::steady_clock::now().time_since_epoch())
+                        .count();
+    auto due = load_due_time();
+    if (time < due || !cas_due_time(due, time + kSyncTimePeriod))
+      return 0;
+    return exchange_count(0);
+  }
+  /** check_count_threshold */
+  int check_count_threshold() {
+    int rcount = load_count();
+    while (rcount >= threshold()) {
+      if (cas_count(rcount, 0)) {
+        set_due_time();
+        return rcount;
+      }
+    }
+    return 0;
+  }
-  /** do_reclamation */
-  void do_reclamation(RetiredList& rlist, bool lock) {
-    auto obj = rlist.pop_all(lock == RetiredList::kAlsoLock);
-    if (!obj) {
-      if (lock) {
-        ObjList l;
-        rlist.push_unlock(l);
-      }
-      return;
-    }
-    /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
-    auto hprec = hazptrs_.load(std::memory_order_acquire);
-    /* Read hazard pointer values into private search structure */
+  /** tagged_empty */
+  bool tagged_empty() {
+    for (int s = 0; s < kNumShards; ++s) {
+      if (!tagged_[s].empty())
+        return false;
+    }
+    return true;
+  }
+  /** extract_retired_objects */
+  bool extract_retired_objects(Obj*& untagged, Obj* tagged[]) {
+    bool empty = true;
+    untagged = untagged_.pop_all(RetiredList::kDontLock);
+    if (untagged) {
+      empty = false;
+    }
+    for (int s = 0; s < kNumShards; ++s) {
+      /* Tagged lists need to be locked because tagging is used to
+       * guarantee the identification of all objects with a specific
+       * tag. Locking protects against concurrent hazptr_cleanup_tag()
+       * calls missing tagged objects. */
+      if (tagged_[s].check_lock()) {
+        tagged[s] = nullptr;
+      } else {
+        tagged[s] = tagged_[s].pop_all(RetiredList::kAlsoLock);
+        if (tagged[s]) {
+          empty = false;
+        } else {
+          List l;
+          tagged_[s].push_unlock(l);
+        }
+      }
+    }
+    return !empty;
+  }
+  /** load_hazptr_vals */
+  Set load_hazptr_vals() {
     Set hs;
+    auto hprec = hazptrs_.load(std::memory_order_acquire);
     for (; hprec; hprec = hprec->next()) {
       hs.insert(hprec->hazptr());
     }
-    /* Check objects against hazard pointer values */
+    return hs;
+  }
+  /** match_tagged */
+  int match_tagged(Obj* tagged[], Set& hs) {
+    int count = 0;
+    for (int s = 0; s < kNumShards; ++s) {
+      if (tagged[s]) {
-    ObjList match, nomatch;
-    list_match_condition(obj, match, nomatch, [&](Obj* o) {
-      return hs.count(o->raw_ptr()) > 0;
-    });
-    if (lock) {
-      /* Push unprotected objects into their cohorts and push protected
-         objects back into the list and unlock it */
-      obj = nomatch.head();
+        ObjList match, nomatch;
+        list_match_condition(tagged[s], match, nomatch, [&](Obj* o) {
+          return hs.count(o->raw_ptr()) > 0;
+        });
+        count += nomatch.count();
+        auto obj = nomatch.head();
         while (obj) {
           auto next = obj->next();
           auto cohort = obj->cohort();
@@ -363,14 +437,46 @@ class hazptr_domain {
           cohort->push_safe_obj(obj);
           obj = next;
         }
-      rlist.push_unlock(match);
-    } else {
-      /* Reclaim unprotected objects and push protected objects and
-         children of reclaimed objects into the list */
-      ObjList children;
-      reclaim_unprotected_unsafe(nomatch.head(), children);
-      match.splice(children);
-      rlist.push(match, false);
-    }
-  }
+        List l(match.head(), match.tail());
+        tagged_[s].push_unlock(l);
+      }
+    }
+    return count;
+  }
+  /** match_reclaim_untagged */
+  int match_reclaim_untagged(Obj* untagged, Set& hs) {
+    ObjList match, nomatch;
+    list_match_condition(untagged, match, nomatch, [&](Obj* o) {
+      return hs.count(o->raw_ptr()) > 0;
+    });
+    ObjList children;
+    int count = nomatch.count();
+    reclaim_unprotected_unsafe(nomatch.head(), children);
+    count -= children.count();
+    match.splice(children);
+    List l(match.head(), match.tail());
+    untagged_.push(l, RetiredList::kMayNotBeLocked);
+    return count;
+  }
+  /** do_reclamation */
+  void do_reclamation(int rcount) {
+    while (true) {
+      Obj* untagged;
+      Obj* tagged[kNumShards];
+      if (extract_retired_objects(untagged, tagged)) {
+        /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
+        Set hs = load_hazptr_vals();
+        rcount -= match_tagged(tagged, hs);
+        rcount -= match_reclaim_untagged(untagged, hs);
+      }
+      if (rcount) {
+        add_count(rcount);
+      }
+      rcount = check_count_threshold();
+      if (rcount == 0)
+        return;
+    }
+  }
@@ -630,7 +736,7 @@ class hazptr_domain {
     return rec;
   }
-  void invoke_reclamation_in_executor(RetiredList& rlist, bool lock) {
+  void invoke_reclamation_in_executor(int rcount) {
     auto fn = exec_fn_.load(std::memory_order_acquire);
     auto ex = fn ? fn() : get_default_executor();
     if (ex == get_default_executor()) {
@@ -638,9 +744,9 @@ class hazptr_domain {
     }
     auto backlog = exec_backlog_.fetch_add(1, std::memory_order_relaxed);
     if (ex) {
-      ex->add([this, &rlist, lock] {
+      ex->add([this, rcount] {
         exec_backlog_.store(0, std::memory_order_relaxed);
-        do_reclamation(rlist, lock);
+        do_reclamation(rcount);
       });
     } else {
       LOG(INFO) << "Skip asynchronous reclamation by hazptr executor";
@@ -651,12 +757,12 @@ class hazptr_domain {
   }
   FOLLY_EXPORT FOLLY_NOINLINE void
-  hazptr_warning_list_too_large(RetiredList& rlist, bool lock, int rcount) {
+  hazptr_warning_list_too_large(uintptr_t tag, int shard, int count) {
     static std::atomic<uint64_t> warning_count{0};
     if ((warning_count++ % 10000) == 0) {
       LOG(WARNING) << "Hazptr retired list too large:"
-                   << " rlist=" << &rlist << " lock=" << lock
-                   << " rcount=" << rcount;
+                   << " tag=" << tag << " shard=" << shard
+                   << " count=" << count;
     }
   }
@@ -684,7 +790,7 @@ class hazptr_domain {
         "a call to folly::init or an alternative.";
     }
   }
-}; // namespace folly
+}; // hazptr_domain
 /**
  * Free functions related to hazptr domains
...
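As context for the timed path above: check_due_time() lets at most one thread per kSyncTimePeriod claim the pending count even when the threshold has not been reached, because only the thread that wins the CAS on the due time proceeds. A minimal standalone sketch of that pattern (hypothetical names, same assumptions as the sketch in the summary):

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t kSyncTimePeriod = 2000000000; // 2 s in nanoseconds
    std::atomic<std::uint64_t> due_time{0};
    std::atomic<int> count{0};

    // Returns the pending count to reclaim, or 0 if the period has not
    // elapsed or another thread already advanced due_time with its CAS.
    int check_due_time() {
      std::uint64_t now = std::chrono::duration_cast<std::chrono::nanoseconds>(
                              std::chrono::steady_clock::now().time_since_epoch())
                              .count();
      std::uint64_t due = due_time.load(std::memory_order_acquire);
      if (now < due ||
          !due_time.compare_exchange_strong(
              due, now + kSyncTimePeriod, std::memory_order_acq_rel)) {
        return 0; // not due yet, or another thread won the CAS
      }
      return count.exchange(0, std::memory_order_acq_rel); // claim pending objects
    }

    int main() {
      count.fetch_add(42, std::memory_order_release);
      std::printf("claimed %d\n", check_due_time()); // 42: due_time starts at 0
    }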