Commit aee5ab34 authored by Maged Michael's avatar Maged Michael Committed by Facebook Github Bot

Remove obsolete version of hazard pointers under folly/experimental

Summary:
Remove obsolete version of hazard pointers under folly/experimental.
Current version is under folly/synchronization.

Reviewed By: djwatson

Differential Revision: D10441558

fbshipit-source-id: 36ef42f83a857ce4bdfff8b64cf9dcc179e0c804
parent 3bd7d93d
@@ -104,8 +104,6 @@ REMOVE_MATCHES_FROM_LISTS(files hfiles
  MATCHES
    "^${FOLLY_DIR}/build/"
    "^${FOLLY_DIR}/experimental/exception_tracer/"
-    "^${FOLLY_DIR}/experimental/hazptr/bench/"
-    "^${FOLLY_DIR}/experimental/hazptr/example/"
    "^${FOLLY_DIR}/experimental/pushmi/"
    "^${FOLLY_DIR}/futures/exercises/"
    "^${FOLLY_DIR}/logging/example/"
......
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Benchmark driver: AMB enabled, thread caching (TC) and private retired
// lists (PRIV) disabled. The HAZPTR_* switches must be defined before the
// first include of the hazptr headers, which only supply #ifndef defaults
// for them (see hazptr-impl.h).
#define HAZPTR_AMB true
#define HAZPTR_TC false
#define HAZPTR_PRIV false
#define HAZPTR_ENABLE_TLS true
#include <folly/experimental/hazptr/bench/HazptrBench.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
using namespace folly::hazptr;
// Initializes gtest/gflags, then runs the full benchmark suite with a
// label matching this file's macro configuration.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  benches(" amb - no tc");
}
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Benchmark driver: AMB, thread caching, and private retired lists all
// enabled. Macros must precede the hazptr includes (#ifndef defaults).
#define HAZPTR_AMB true
#define HAZPTR_TC true
#define HAZPTR_PRIV true
#define HAZPTR_ENABLE_TLS true
#include <folly/experimental/hazptr/bench/HazptrBench.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
using namespace folly::hazptr;
// Runs the suite with a label matching this configuration.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  benches(" amb - tc");
}
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Benchmark driver: AMB, thread caching, and private retired lists all
// disabled. Macros must precede the hazptr includes (#ifndef defaults).
#define HAZPTR_AMB false
#define HAZPTR_TC false
#define HAZPTR_PRIV false
#define HAZPTR_ENABLE_TLS true
#include <folly/experimental/hazptr/bench/HazptrBench.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
using namespace folly::hazptr;
// Runs the suite with a label matching this configuration.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  benches("no amb - no tc");
}
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Benchmark driver: AMB disabled; thread caching and private retired
// lists enabled. Macros must precede the hazptr includes.
#define HAZPTR_AMB false
#define HAZPTR_TC true
#define HAZPTR_PRIV true
#define HAZPTR_ENABLE_TLS true
#include <folly/experimental/hazptr/bench/HazptrBench.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
using namespace folly::hazptr;
// Runs the suite with a label matching this configuration.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  benches("no amb - tc");
}
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Benchmark driver: all optimizations enabled plus HAZPTR_ONE_DOMAIN
// (a single global hazptr domain). Macros must precede the hazptr
// includes (#ifndef defaults).
#define HAZPTR_AMB true
#define HAZPTR_TC true
#define HAZPTR_PRIV true
#define HAZPTR_ONE_DOMAIN true
#define HAZPTR_ENABLE_TLS true
#include <folly/experimental/hazptr/bench/HazptrBench.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
using namespace folly::hazptr;
// Runs the suite with a label matching this configuration.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  benches(" one domain");
}
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Benchmark.h>
#include <folly/experimental/hazptr/example/SWMRList.h>
#include <folly/portability/GTest.h>
#include <glog/logging.h>

#include <atomic>
#include <limits>
#include <thread>
namespace folly {
namespace hazptr {
/** Runs one benchmark repetition.
 *
 *  Spawns nthreads worker threads, waits until every worker has checked
 *  in, releases them all at once, and returns the elapsed wall-clock time
 *  in nanoseconds measured from release to the last join. The folly
 *  benchmark clock is suspended outside the measured region.
 *
 *  init:  called once before the workers start.
 *  fn:    per-thread work; receives the thread index [0, nthreads).
 *  endFn: called once after all workers have joined.
 */
template <typename InitFunc, typename Func, typename EndFunc>
inline uint64_t run_once(
    int nthreads,
    const InitFunc& init,
    const Func& fn,
    const EndFunc& endFn) {
  folly::BenchmarkSuspender susp;
  std::atomic<bool> go{false};
  std::atomic<int> ready{0};
  init();
  std::vector<std::thread> workers;
  workers.reserve(nthreads);
  for (int tid = 0; tid < nthreads; ++tid) {
    workers.emplace_back([&, tid] {
      ready.fetch_add(1);
      while (!go.load()) {
        // spin until the main thread releases all workers simultaneously
      }
      fn(tid);
    });
  }
  // Do not start the clock until every worker is spinning at the gate.
  while (ready.load() < nthreads) {
  }
  auto tbegin = std::chrono::steady_clock::now(); // begin time measurement
  susp.dismiss();
  go.store(true);
  for (auto& w : workers) {
    w.join();
  }
  susp.rehire();
  auto tend = std::chrono::steady_clock::now(); // end time measurement
  endFn();
  return std::chrono::duration_cast<std::chrono::nanoseconds>(tend - tbegin)
      .count();
}
/** Runs repFn 10 times (after one untimed warm-up run, since the first
 *  run is sometimes an outlier) and prints max/avg/min per-operation
 *  times in nanoseconds under the given label.
 *
 *  name:  label printed at the start of the result line. Taken by const
 *         reference; the previous by-value parameter copied the string on
 *         every call.
 *  ops:   number of operations per repetition (normalizes the output).
 *  repFn: callable returning one repetition's total duration in ns.
 *
 *  Returns the minimum total repetition duration in ns.
 */
template <typename RepFunc>
inline uint64_t bench(const std::string& name, int ops, const RepFunc& repFn) {
  const int reps = 10;
  // numeric_limits is exact for uint64_t; UINTMAX_MAX is the bound of
  // uintmax_t, which need not be the same type.
  uint64_t min = std::numeric_limits<uint64_t>::max();
  uint64_t max = 0;
  uint64_t sum = 0;
  repFn(); // warm-up run, not counted
  for (int r = 0; r < reps; ++r) {
    uint64_t dur = repFn();
    sum += dur;
    min = std::min(min, dur);
    max = std::max(max, dur);
  }
  const std::string unit = " ns";
  uint64_t avg = sum / reps;
  uint64_t res = min;
  std::cout << name;
  std::cout << " " << std::setw(4) << max / ops << unit;
  std::cout << " " << std::setw(4) << avg / ops << unit;
  std::cout << " " << std::setw(4) << res / ops << unit;
  std::cout << std::endl;
  return res;
}
const int ops = 1000000;
inline uint64_t listBench(std::string name, int nthreads, int size) {
auto repFn = [&] {
SWMRListSet<uint64_t> s;
auto init = [&] {
for (int i = 0; i < size; ++i) {
s.add(i);
}
};
auto fn = [&](int tid) {
for (int j = tid; j < ops; j += nthreads) {
s.contains(size);
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
/** Benchmarks construction/destruction of 10 hazptr_holder objects per
 *  iteration, ops iterations split across nthreads threads. */
inline uint64_t holderBench(std::string name, int nthreads) {
  auto repFn = [&] {
    auto setup = [] {};
    auto work = [&](int tid) {
      for (int i = tid; i < ops; i += nthreads) {
        hazptr_holder holders[10];
      }
    };
    auto teardown = [] {};
    return run_once(nthreads, setup, work, teardown);
  };
  return bench(name, ops, repFn);
}
template <size_t M>
inline uint64_t arrayBench(std::string name, int nthreads) {
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < 10 * ops; j += nthreads) {
hazptr_array<M> a;
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
/** Benchmarks construction/destruction of a hazptr_local<M> per
 *  iteration, 10*ops iterations split across nthreads threads (results
 *  are normalized by ops, i.e. reported per 10 constructions).
 *
 *  Fix: the loop previously constructed hazptr_local<10> regardless of
 *  the template parameter, so localBench<1> — invoked by benches() under
 *  the label "hazptr_local<1>" — did not measure what its label claims.
 *  It now honors M.
 */
template <size_t M>
inline uint64_t localBench(std::string name, int nthreads) {
  auto repFn = [&] {
    auto init = [] {};
    auto fn = [&](int tid) {
      for (int j = tid; j < 10 * ops; j += nthreads) {
        hazptr_local<M> a;
      }
    };
    auto endFn = [] {};
    return run_once(nthreads, init, fn, endFn);
  };
  return bench(name, ops, repFn);
}
/** Benchmarks the allocate/retire/reclaim cycle: each iteration heap-
 *  allocates an object and retires it, handing ownership to the hazptr
 *  domain for deferred reclamation. ops iterations split across
 *  nthreads threads. */
inline uint64_t retireBench(std::string name, int nthreads) {
  struct Foo : hazptr_obj_base<Foo> {
    int x;
  };
  auto repFn = [&] {
    auto setup = [] {};
    auto work = [&](int tid) {
      for (int i = tid; i < ops; i += nthreads) {
        auto obj = new Foo;
        obj->retire(); // domain deletes it once unprotected
      }
    };
    auto teardown = [] {};
    return run_once(nthreads, setup, work, teardown);
  };
  return bench(name, ops, repFn);
}
const int nthr[] = {1, 10};
const int sizes[] = {10, 100};

/** Runs the whole benchmark suite under the given configuration label.
 *  One section per thread count in nthr; each benchmark is run twice
 *  (the second tagged "dup") back to back. */
inline void benches(std::string name) {
  std::cout << "------------------------------------------- " << name << "\n";
  for (int nt : nthr) {
    // Prints a section header of the form "<nt> threads -- <what>".
    auto header = [&](const char* what) {
      std::cout << nt << " threads -- " << what << std::endl;
    };
    header("10x construct/destruct hazptr_holder");
    holderBench(name + " ", nt);
    holderBench(name + " - dup ", nt);
    header("10x construct/destruct hazptr_array<10>");
    arrayBench<10>(name + " ", nt);
    arrayBench<10>(name + " - dup ", nt);
    header("10x construct/destruct hazptr_array<3>");
    arrayBench<3>(name + " ", nt);
    arrayBench<3>(name + " - dup ", nt);
    header("10x construct/destruct hazptr_local<10>");
    localBench<10>(name + " ", nt);
    localBench<10>(name + " - dup ", nt);
    header("10x construct/destruct hazptr_local<1>");
    localBench<1>(name + " ", nt);
    localBench<1>(name + " - dup ", nt);
    header("allocate/retire/reclaim object");
    retireBench(name + " ", nt);
    retireBench(name + " - dup ", nt);
    for (int sz : sizes) {
      std::cout << nt << " threads -- " << sz << "-item list" << std::endl;
      listBench(name + " ", nt, sz);
      listBench(name + " - dup ", nt, sz);
    }
  }
  std::cout << "----------------------------------------------------------\n";
}
} // namespace hazptr
} // namespace folly
/*
------------------------------------------- amb - tc
1 threads -- 10x construct/destruct hazptr_holder
amb - tc 49 ns 46 ns 44 ns
amb - tc - dup 47 ns 45 ns 44 ns
1 threads -- 10x construct/destruct hazptr_array<10>
amb - tc 132 ns 122 ns 117 ns
amb - tc - dup 130 ns 122 ns 117 ns
1 threads -- 10x construct/destruct hazptr_array<3>
amb - tc 66 ns 64 ns 63 ns
amb - tc - dup 64 ns 64 ns 63 ns
1 threads -- 10x construct/destruct hazptr_local<10>
amb - tc 29 ns 27 ns 27 ns
amb - tc - dup 28 ns 27 ns 27 ns
1 threads -- 10x construct/destruct hazptr_local<1>
amb - tc 27 ns 27 ns 27 ns
amb - tc - dup 28 ns 28 ns 27 ns
1 threads -- allocate/retire/reclaim object
amb - tc 65 ns 62 ns 60 ns
amb - tc - dup 65 ns 60 ns 59 ns
1 threads -- 10-item list
amb - tc 21 ns 21 ns 20 ns
amb - tc - dup 22 ns 21 ns 21 ns
1 threads -- 100-item list
amb - tc 229 ns 224 ns 220 ns
amb - tc - dup 223 ns 219 ns 216 ns
10 threads -- 10x construct/destruct hazptr_holder
amb - tc 9 ns 8 ns 7 ns
amb - tc - dup 9 ns 8 ns 8 ns
10 threads -- 10x construct/destruct hazptr_array<10>
amb - tc 27 ns 23 ns 15 ns
amb - tc - dup 26 ns 20 ns 13 ns
10 threads -- 10x construct/destruct hazptr_array<3>
amb - tc 11 ns 11 ns 7 ns
amb - tc - dup 11 ns 9 ns 7 ns
10 threads -- 10x construct/destruct hazptr_local<10>
amb - tc 5 ns 3 ns 3 ns
amb - tc - dup 3 ns 3 ns 3 ns
10 threads -- 10x construct/destruct hazptr_local<1>
amb - tc 3 ns 3 ns 3 ns
amb - tc - dup 5 ns 4 ns 3 ns
10 threads -- allocate/retire/reclaim object
amb - tc 17 ns 15 ns 14 ns
amb - tc - dup 17 ns 15 ns 14 ns
10 threads -- 10-item list
amb - tc 4 ns 4 ns 2 ns
amb - tc - dup 4 ns 4 ns 3 ns
10 threads -- 100-item list
amb - tc 33 ns 31 ns 24 ns
amb - tc - dup 33 ns 32 ns 30 ns
----------------------------------------------------------
------------------------------------------- no amb - no tc
1 threads -- construct/destruct 10 hazptr_holder-s
no amb - no tc 2518 ns 2461 ns 2431 ns
no amb - no tc - dup 2499 ns 2460 ns 2420 ns
1 threads -- allocate/retire/reclaim object
no amb - no tc 85 ns 83 ns 81 ns
no amb - no tc - dup 83 ns 82 ns 81 ns
1 threads -- 10-item list
no amb - no tc 655 ns 644 ns 639 ns
no amb - no tc - dup 658 ns 645 ns 641 ns
1 threads -- 100-item list
no amb - no tc 2175 ns 2142 ns 2124 ns
no amb - no tc - dup 2294 ns 2228 ns 2138 ns
10 threads -- construct/destruct 10 hazptr_holder-s
no amb - no tc 3893 ns 2932 ns 1391 ns
no amb - no tc - dup 3157 ns 2927 ns 2726 ns
10 threads -- allocate/retire/reclaim object
no amb - no tc 152 ns 134 ns 127 ns
no amb - no tc - dup 141 ns 133 ns 128 ns
10 threads -- 10-item list
no amb - no tc 532 ns 328 ns 269 ns
no amb - no tc - dup 597 ns 393 ns 271 ns
10 threads -- 100-item list
no amb - no tc 757 ns 573 ns 412 ns
no amb - no tc - dup 819 ns 643 ns 420 ns
----------------------------------------------------------
------------------------------------------- amb - no tc
1 threads -- construct/destruct 10 hazptr_holder-s
amb - no tc 2590 ns 2481 ns 2422 ns
amb - no tc - dup 2519 ns 2468 ns 2424 ns
1 threads -- allocate/retire/reclaim object
amb - no tc 69 ns 68 ns 67 ns
amb - no tc - dup 69 ns 68 ns 67 ns
1 threads -- 10-item list
amb - no tc 524 ns 510 ns 492 ns
amb - no tc - dup 514 ns 507 ns 496 ns
1 threads -- 100-item list
amb - no tc 761 ns 711 ns 693 ns
amb - no tc - dup 717 ns 694 ns 684 ns
10 threads -- construct/destruct 10 hazptr_holder-s
amb - no tc 3302 ns 2908 ns 1612 ns
amb - no tc - dup 3220 ns 2909 ns 1641 ns
10 threads -- allocate/retire/reclaim object
amb - no tc 129 ns 123 ns 110 ns
amb - no tc - dup 135 ns 127 ns 120 ns
10 threads -- 10-item list
amb - no tc 512 ns 288 ns 256 ns
amb - no tc - dup 275 ns 269 ns 263 ns
10 threads -- 100-item list
amb - no tc 297 ns 289 ns 284 ns
amb - no tc - dup 551 ns 358 ns 282 ns
----------------------------------------------------------
------------------------------------------- no amb - tc
1 threads -- construct/destruct 10 hazptr_holder-s
no amb - tc 56 ns 55 ns 55 ns
no amb - tc - dup 56 ns 54 ns 54 ns
1 threads -- allocate/retire/reclaim object
no amb - tc 63 ns 62 ns 62 ns
no amb - tc - dup 64 ns 63 ns 62 ns
1 threads -- 10-item list
no amb - tc 190 ns 188 ns 187 ns
no amb - tc - dup 193 ns 186 ns 182 ns
1 threads -- 100-item list
no amb - tc 1859 ns 1698 ns 1666 ns
no amb - tc - dup 1770 ns 1717 ns 1673 ns
10 threads -- construct/destruct 10 hazptr_holder-s
no amb - tc 19 ns 11 ns 7 ns
no amb - tc - dup 11 ns 8 ns 7 ns
10 threads -- allocate/retire/reclaim object
no amb - tc 9 ns 8 ns 8 ns
no amb - tc - dup 10 ns 9 ns 8 ns
10 threads -- 10-item list
no amb - tc 40 ns 25 ns 21 ns
no amb - tc - dup 24 ns 23 ns 21 ns
10 threads -- 100-item list
no amb - tc 215 ns 208 ns 188 ns
no amb - tc - dup 215 ns 209 ns 197 ns
----------------------------------------------------------
*/
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <glog/logging.h>
// HAZPTR_DEBUG_ mirrors the user-settable HAZPTR_DEBUG switch, defaulting
// to false (debug printing disabled) when HAZPTR_DEBUG is not defined.
#ifdef HAZPTR_DEBUG
#define HAZPTR_DEBUG_ HAZPTR_DEBUG
#else
#define HAZPTR_DEBUG_ false
#endif
// Logs the enclosing function name plus the given stream expression at
// VLOG level 2. The condition is a compile-time constant, so the whole
// statement can be optimized away when debugging is disabled, while the
// arguments still get type-checked.
#define HAZPTR_DEBUG_PRINT(...) \
  do { \
    if (HAZPTR_DEBUG_) { \
      VLOG(2) << __func__ << " --- " << __VA_ARGS__; \
    } \
  } while (false)
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
namespace hazptr {
/** Lock-free LIFO stack whose pop() protects the node it reads with a
 *  hazard pointer, so a concurrently popped node cannot be reclaimed
 *  while its fields are still being read. */
template <typename T>
class LockFreeLIFO {
  class Node : public hazptr_obj_base<Node> {
    friend LockFreeLIFO;

   public:
    ~Node() {
      HAZPTR_DEBUG_PRINT(this);
    }

   private:
    Node(T v, Node* n) : value_(v), next_(n) {
      HAZPTR_DEBUG_PRINT(this);
    }
    T value_;  // payload
    Node* next_;  // next node toward the bottom of the stack
  };

 public:
  LockFreeLIFO() {
    HAZPTR_DEBUG_PRINT(this);
  }
  ~LockFreeLIFO() {
    HAZPTR_DEBUG_PRINT(this);
  }
  // Pushes val. On CAS failure compare_exchange_weak stores the observed
  // head into pnode->next_, so each retry is already relinked correctly.
  void push(T val) {
    HAZPTR_DEBUG_PRINT(this);
    auto pnode = new Node(val, head_.load());
    while (!head_.compare_exchange_weak(pnode->next_, pnode)) {
      ;
    }
  }
  // Pops the top value into val; returns false if the stack is empty.
  // try_protect publishes a hazard pointer to pnode and revalidates it
  // against head_ (on failure it reloads pnode and we retry), so reading
  // pnode->next_ below is safe against concurrent reclamation.
  bool pop(T& val) {
    HAZPTR_DEBUG_PRINT(this);
    hazptr_holder hptr;
    Node* pnode = head_.load();
    do {
      if (pnode == nullptr) {
        return false;
      }
      if (!hptr.try_protect(pnode, head_)) {
        continue;
      }
      auto next = pnode->next_;
      if (head_.compare_exchange_weak(pnode, next)) {
        break;
      }
    } while (true);
    hptr.reset();
    val = pnode->value_;
    pnode->retire(); // reclaimed by the domain once no hazptr covers it
    return true;
  }

 private:
  std::atomic<Node*> head_ = {nullptr}; // top of the stack
};
} // namespace hazptr
} // namespace folly
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
namespace hazptr {
/** Set implemented as an ordered singly-linked list.
 *
 * Multiple writers may add or remove elements. Multiple reader
 * threads may search the set concurrently with each other and with
 * the writers' operations.
 *
 * Removal marks the low bit of the victim's next_ pointer (logical
 * deletion) before unlinking it; nodes are both reference-counted and
 * protected by hazard pointers, and physical reclamation goes through
 * retire() when the last reference is released.
 */
template <typename T>
class MWMRListSet {
  class Node : public hazptr_obj_base<Node> {
    friend MWMRListSet;
    T elem_;
    std::atomic<uint64_t> refcount_{1}; // starts at 1: the list's own reference
    std::atomic<Node*> next_{nullptr};  // low bit set == logically deleted

    // Node must be refcounted for wait-free access: A deleted node
    // may have hazptrs pointing at it, so the rest of the list (or at
    // least, what existed at the time of the hazptr load) must still
    // be accessible.
    // Drops one reference; the final release retires the node to the
    // hazptr domain for deferred reclamation.
    void release() {
      if (refcount_.fetch_sub(1) == 1) {
        this->retire();
      }
    }

    // Optimization in the case that we know there are no hazptrs pointing
    // at the list. Recursively deletes the tail immediately, bypassing
    // the hazptr domain (used only from the destructor).
    void releaseFast() {
      if (refcount_.load(std::memory_order_relaxed) == 1) {
        auto next = getPtr(next_.load(std::memory_order_relaxed));
        if (next) {
          next->releaseFast();
          next_.store(nullptr, std::memory_order_relaxed);
        }
        delete this;
      }
    }

    // Takes an additional reference. Caller must already hold (or
    // protect) a reference, hence the nonzero check.
    void acquire() {
      DCHECK(refcount_.load() != 0);
      refcount_.fetch_add(1);
    }

   public:
    explicit Node(T e) : elem_(e) {
      HAZPTR_DEBUG_PRINT(this << " " << e);
    }

    // Destruction releases the reference this node held on its successor.
    ~Node() {
      HAZPTR_DEBUG_PRINT(this);
      auto next = getPtr(next_.load(std::memory_order_relaxed));
      if (next) {
        next->release();
      }
    }
  };

  // True if ptr carries the logical-deletion mark (low tag bit).
  static bool getDeleted(Node* ptr) {
    return uintptr_t(ptr) & 1;
  }
  // Strips the tag bit, yielding the real node address.
  static Node* getPtr(Node* ptr) {
    return (Node*)(uintptr_t(ptr) & ~1UL);
  }

  mutable std::atomic<Node*> head_ = {nullptr};

  // Remove a single deleted item.
  // Although it doesn't have to be our item.
  //
  // Note that standard lock-free Michael linked lists put this in the
  // contains() path, while this implementation leaves it only in
  // remove(), such that contains() is wait-free.
  //
  // Re-walks the list from head_ looking for a node whose successor
  // pointer is marked, and attempts to unlink that node. On exit, prev
  // and curr are left positioned for the caller's retry.
  void fixlist(
      hazptr_holder& hptr_prev,
      hazptr_holder& hptr_curr,
      std::atomic<Node*>*& prev,
      Node*& curr) const {
    while (true) {
      prev = &head_;
      curr = hptr_curr.get_protected(*prev, getPtr);
      while (getPtr(curr)) {
        auto next = getPtr(curr)->next_.load(std::memory_order_acquire);
        if (getDeleted(next)) {
          auto nextp = getPtr(next);
          if (nextp) {
            nextp->acquire();
          }
          // Try to fix
          auto curr_no_mark = getPtr(curr);
          if (prev->compare_exchange_weak(curr_no_mark, nextp)) {
            // Physically delete
            curr_no_mark->release();
            return;
          } else {
            // Unlink failed; drop the reference taken above and restart.
            if (nextp) {
              nextp->release();
            }
            break;
          }
        }
        prev = &(getPtr(curr)->next_);
        curr = hptr_prev.get_protected(getPtr(curr)->next_, getPtr);
        swap(hptr_curr, hptr_prev);
      }
      DCHECK(getPtr(curr));
    }
  }

  /* wait-free set search */
  // Hand-over-hand traversal with two hazard pointers. Returns true iff
  // an unmarked node with elem_ == val is found; prev/curr are left at
  // the insertion position (list is sorted ascending).
  bool find(
      const T& val,
      hazptr_holder& hptr_prev,
      hazptr_holder& hptr_curr,
      std::atomic<Node*>*& prev,
      Node*& curr) const {
    prev = &head_;
    curr = hptr_curr.get_protected(*prev, getPtr);
    while (getPtr(curr)) {
      auto next = getPtr(curr)->next_.load(std::memory_order_acquire);
      if (!getDeleted(next)) {
        if (getPtr(curr)->elem_ == val) {
          return true;
        } else if (!(getPtr(curr)->elem_ < val)) {
          break; // Because the list is sorted.
        }
      }
      prev = &(getPtr(curr)->next_);
      curr = hptr_prev.get_protected(getPtr(curr)->next_, getPtr);
      /* Swap does not change the values of the owned hazard
       * pointers themselves. After the swap, The hazard pointer
       * owned by hptr_prev continues to protect the node that
       * contains the pointer *prev. The hazard pointer owned by
       * hptr_curr will continue to protect the node that contains
       * the old *prev (unless the old prev was &head), which no
       * longer needs protection, so hptr_curr's hazard pointer is
       * now free to protect *curr in the next iteration (if curr !=
       * null).
       */
      swap(hptr_curr, hptr_prev);
    }
    return false;
  }

 public:
  explicit MWMRListSet() {}

  // Destructor assumes no concurrent access; tears the list down
  // directly without going through the hazptr domain.
  ~MWMRListSet() {
    Node* next = head_.load();
    if (next) {
      next->releaseFast();
    }
  }

  // Inserts v; returns false if already present. Lock-free (retries on
  // CAS failure).
  bool add(T v) {
    hazptr_holder hptr_prev;
    hazptr_holder hptr_curr;
    std::atomic<Node*>* prev;
    Node* cur;
    // NOTE(review): folly::make_unique rather than std::make_unique;
    // relies on a transitive include for its declaration -- confirm.
    auto newnode = folly::make_unique<Node>(v);
    while (true) {
      if (find(v, hptr_prev, hptr_curr, prev, cur)) {
        return false;
      }
      newnode->next_.store(cur, std::memory_order_relaxed);
      auto cur_no_mark = getPtr(cur);
      if (prev->compare_exchange_weak(cur_no_mark, newnode.get())) {
        newnode.release(); // ownership transferred to the list
        return true;
      }
      // Ensure ~Node() destructor doesn't destroy next_
      newnode->next_.store(nullptr, std::memory_order_relaxed);
    }
  }

  // Removes v; returns false if not present. Marks the node logically
  // deleted first, then tries to unlink it; falls back to fixlist() if
  // another thread changed prev concurrently.
  bool remove(const T& v) {
    hazptr_holder hptr_prev;
    hazptr_holder hptr_curr;
    std::atomic<Node*>* prev;
    Node* curr;
    while (true) {
      if (!find(v, hptr_prev, hptr_curr, prev, curr)) {
        return false;
      }
      auto next = getPtr(curr)->next_.load(std::memory_order_acquire);
      auto next_no_mark = getPtr(next); // Ensure only one deleter wins
      // Logically delete
      if (!getPtr(curr)->next_.compare_exchange_weak(
              next_no_mark, (Node*)(uintptr_t(next_no_mark) | 1))) {
        continue;
      }
      if (next) {
        next->acquire();
      }
      // Swing prev around
      auto curr_no_mark = getPtr(curr); /* ensure not deleted */
      if (prev->compare_exchange_weak(curr_no_mark, next)) {
        // Physically delete
        curr->release();
        return true;
      }
      if (next) {
        next->release();
      }
      // Someone else modified prev. Call fixlist
      // to unlink deleted element by re-walking list.
      fixlist(hptr_prev, hptr_curr, prev, curr);
    }
  }

  // Wait-free membership test.
  bool contains(const T& v) const {
    hazptr_holder hptr_prev;
    hazptr_holder hptr_curr;
    std::atomic<Node*>* prev;
    Node* curr;
    return find(v, hptr_prev, hptr_curr, prev, curr);
  }
};
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
namespace hazptr {
/** Set implemented as an ordered singly-linked list.
 *
 * A single writer thread may add or remove elements. Multiple reader
 * threads may search the set concurrently with each other and with
 * the writer's operations.
 *
 * Readers use two hazard pointers in hand-over-hand fashion; the writer
 * never needs protection because it is the only mutator.
 */
template <typename T>
class SWMRListSet {
  // Custom reclaimer: traces the reclamation in debug builds, then
  // deletes the node.
  template <typename Node>
  struct Reclaimer {
    void operator()(Node* p) {
      HAZPTR_DEBUG_PRINT(p << " " << sizeof(Node));
      delete p;
    }
  };

  class Node : public hazptr_obj_base<Node, Reclaimer<Node>> {
    friend SWMRListSet;
    T elem_;
    std::atomic<Node*> next_;
    Node(T e, Node* n) : elem_(e), next_(n) {
      HAZPTR_DEBUG_PRINT(this << " " << e << " " << n);
    }

   public:
    ~Node() {
      HAZPTR_DEBUG_PRINT(this);
    }
  };

  std::atomic<Node*> head_ = {nullptr};

  /* Used by the single writer */
  // Advances prev to point at the link preceding the first node with
  // elem_ >= v (or the terminating null link). Single-writer only, so
  // relaxed loads suffice here.
  void locate_lower_bound(const T& v, std::atomic<Node*>*& prev) const {
    auto curr = prev->load(std::memory_order_relaxed);
    while (curr) {
      if (curr->elem_ >= v) {
        break;
      }
      prev = &(curr->next_);
      curr = curr->next_.load(std::memory_order_relaxed);
    }
    return;
  }

 public:
  // Destructor assumes no concurrent access; deletes nodes directly.
  ~SWMRListSet() {
    Node* next;
    for (auto p = head_.load(); p; p = next) {
      next = p->next_.load();
      delete p;
    }
  }

  // Writer-only. Inserts v in sorted position; returns false if present.
  bool add(T v) {
    auto prev = &head_;
    locate_lower_bound(v, prev);
    auto curr = prev->load(std::memory_order_relaxed);
    if (curr && curr->elem_ == v) {
      return false;
    }
    prev->store(new Node(std::move(v), curr));
    return true;
  }

  // Writer-only. Removes v; returns false if absent. The removed node is
  // retired (not deleted) because readers may still hold hazptrs to it.
  bool remove(const T& v) {
    auto prev = &head_;
    locate_lower_bound(v, prev);
    auto curr = prev->load(std::memory_order_relaxed);
    if (!curr || curr->elem_ != v) {
      return false;
    }
    Node* curr_next = curr->next_.load();
    // Patch up the actual list...
    prev->store(curr_next, std::memory_order_release);
    // ...and only then null out the removed node.
    curr->next_.store(nullptr, std::memory_order_release);
    curr->retire();
    return true;
  }

  /* Used by readers */
  // Wait-free-per-step membership test using two hazard pointers in
  // hand-over-hand fashion; restarts the traversal from head_ whenever
  // validation fails (try_protect or the prev recheck).
  bool contains(const T& val) const {
    /* Two hazard pointers for hand-over-hand traversal. */
    hazptr_local<2> hptr;
    hazptr_holder* hptr_prev = &hptr[0];
    hazptr_holder* hptr_curr = &hptr[1];
    while (true) {
      auto prev = &head_;
      auto curr = prev->load(std::memory_order_acquire);
      while (true) {
        if (!curr) {
          return false;
        }
        if (!hptr_curr->try_protect(curr, *prev)) {
          break; // protection failed; restart from head_
        }
        auto next = curr->next_.load(std::memory_order_acquire);
        // Revalidate: if curr was unlinked meanwhile, restart.
        if (prev->load(std::memory_order_acquire) != curr) {
          break;
        }
        if (curr->elem_ == val) {
          return true;
        } else if (!(curr->elem_ < val)) {
          return false; // because the list is sorted
        }
        prev = &(curr->next_);
        curr = next;
        // Swap roles: the old curr protector becomes the prev protector.
        std::swap(hptr_curr, hptr_prev);
      }
    }
  }
};
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/hazptr.h>
#include <string>
namespace folly {
namespace hazptr {
/** Wide CAS.
 *
 *  Emulates compare-and-swap on a value wider than a machine word (a
 *  std::string here) by CAS-ing a pointer to an immutable heap node that
 *  holds the value; hazard pointers keep the old node alive while it is
 *  being compared.
 */
class WideCAS {
  using T = std::string;

  class Node : public hazptr_obj_base<Node> {
    friend WideCAS;
    T val_;
    Node() : val_(T()) {
      HAZPTR_DEBUG_PRINT(this << " " << val_);
    }
    explicit Node(T v) : val_(v) {
      HAZPTR_DEBUG_PRINT(this << " " << v);
    }

   public:
    ~Node() {
      HAZPTR_DEBUG_PRINT(this);
    }
  };

  // Current value; never null (starts as a default-constructed T).
  std::atomic<Node*> p_ = {new Node()};

 public:
  WideCAS() = default;
  ~WideCAS() {
    HAZPTR_DEBUG_PRINT(this << " " << p_.load());
    delete p_.load();
  }

  // If the current value equals u, atomically replaces it with v and
  // returns true; otherwise returns false. The displaced node is retired
  // (not deleted) since other threads may still be reading it; the new
  // node is deleted directly on failure because it was never published.
  bool cas(T& u, T& v) {
    HAZPTR_DEBUG_PRINT(this << " " << u << " " << v);
    Node* n = new Node(v);
    hazptr_holder hptr;
    Node* p;
    do {
      p = hptr.get_protected(p_);
      if (p->val_ != u) {
        delete n;
        return false;
      }
      if (p_.compare_exchange_weak(p, n)) {
        break;
      }
    } while (true);
    hptr.reset();
    p->retire();
    HAZPTR_DEBUG_PRINT(this << " " << p << " " << u << " " << n << " " << v);
    return true;
  }
};
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* override-include-guard */
#ifndef HAZPTR_H
#error "This should only be included by hazptr.h"
#endif
/* quality of implementation switches */

// NOTE: The #ifndef pattern is prone to ODR violation. Its use for
// quality of implementation options is temporary. Eventually these
// options should be added to the API in future API extensions.

// HAZPTR_AMB: presumably selects the asymmetric memory barrier strategy
// (this header includes folly/synchronization/AsymmetricMemoryBarrier.h)
// -- confirm against hazptr_mb users.
#ifndef HAZPTR_AMB
#define HAZPTR_AMB true
#endif

// HAZPTR_TC: thread caching of hazptr_rec-s (see hazptr_tc below).
#ifndef HAZPTR_TC
// If we are targeting a mobile device, do not use thread caching as a
// precaution. Performance is potentially suboptimal without thread
// local support
#if FOLLY_MOBILE
#define HAZPTR_TC false
#else
#define HAZPTR_TC true
#endif
#endif

// Capacity of the per-thread hazptr_rec cache (hazptr_tc::entry_).
#ifndef HAZPTR_TC_SIZE
#define HAZPTR_TC_SIZE 10
#endif

// HAZPTR_PRIV: thread-private lists of retired objects (see hazptr_priv).
#ifndef HAZPTR_PRIV
// If we are targeting a mobile device, do not use thread caching as a
// precaution. Performance is potentially suboptimal without thread
// local support
#if FOLLY_MOBILE
#define HAZPTR_PRIV false
#else
#define HAZPTR_PRIV true
#endif
#endif

// TLS is needed only by the thread-caching and private-list features.
#ifndef HAZPTR_ENABLE_TLS
#if HAZPTR_TC || HAZPTR_PRIV
#define HAZPTR_ENABLE_TLS true
#else
#define HAZPTR_ENABLE_TLS false
#endif
#endif

// Max retired objects held in a thread-private list -- presumably before
// pushing to the shared domain; confirm at the use site.
#ifndef HAZPTR_PRIV_THRESHOLD
#define HAZPTR_PRIV_THRESHOLD 20
#endif

// Use a single global domain for all hazard pointers.
#ifndef HAZPTR_ONE_DOMAIN
#define HAZPTR_ONE_DOMAIN false
#endif

// Bulk-reclamation scan tuning parameters -- semantics defined at the
// use sites in the reclamation path; confirm there.
#ifndef HAZPTR_SCAN_MULT
#define HAZPTR_SCAN_MULT 2
#endif
#ifndef HAZPTR_SCAN_THRESHOLD
#define HAZPTR_SCAN_THRESHOLD 1000
#endif

/* stats switch */
#ifndef HAZPTR_STATS
#define HAZPTR_STATS false
#endif
#include <folly/SingletonThreadLocal.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/experimental/hazptr/debug.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
#include <mutex> // for thread caching
#include <unordered_set> // for hash set in bulk reclamation
namespace folly {
namespace hazptr {
/**
* Helper classes and functions
*/
/** hazptr_stats */
class hazptr_stats;
#if HAZPTR_STATS
#define INC_HAZPTR_STATS(x) hazptr_stats_.x()
#else
#define INC_HAZPTR_STATS(x)
#endif
/** hazptr_mb
 *  Memory-barrier helpers for the protection protocol. light() is used
 *  on the reader side when setting a hazard pointer; heavy() is used by
 *  reclamation before scanning hazard pointers. */
class hazptr_mb {
 public:
  static void light();
  static void heavy();
};
/**
* TLS structures
*/
/** TLS life state */
enum hazptr_tls_state { TLS_ALIVE, TLS_UNINITIALIZED, TLS_DESTROYED };
/** hazptr_tc structures
 *  Thread caching of hazptr_rec-s that belong to the default domain.
 */
struct hazptr_tc_entry {
  // Cached hazard pointer record; owned by the cache slot until evicted.
  hazptr_rec* hprec_;
  // Store hprec into this slot.
  void fill(hazptr_rec* hprec);
  // Return the cached record.
  hazptr_rec* get();
  // Release the cached record back to its domain.
  void evict();
};
static_assert(
std::is_trivial<hazptr_tc_entry>::value,
"hazptr_tc_entry must be trivial"
" to avoid a branch to check initialization");
// Per-thread cache of up to HAZPTR_TC_SIZE hazptr_rec-s from the
// default domain. Must stay trivial (see static_assert below).
struct hazptr_tc {
  hazptr_tc_entry entry_[HAZPTR_TC_SIZE];
  // Number of filled entries; valid entries are entry_[0..count_-1].
  size_t count_;
  bool local_; // for debug mode only
 public:
  hazptr_tc_entry& operator[](size_t i);
  // Pop a cached record, or nullptr if the cache is empty.
  hazptr_rec* get();
  // Cache a record; returns false if the cache is full.
  bool put(hazptr_rec* hprec);
  size_t count();
};
static_assert(
std::is_trivial<hazptr_tc>::value,
"hazptr_tc must be trivial to avoid a branch to check initialization");
bool hazptr_tc_enabled();
bool hazptr_priv_enabled();
hazptr_tc* hazptr_tc_tls();
void hazptr_tc_init(hazptr_tc& tc);
void hazptr_tc_shutdown(hazptr_tc& tc);
hazptr_rec* hazptr_tc_try_get();
bool hazptr_tc_try_put(hazptr_rec* hprec);
/** hazptr_priv structures
 *  Thread private lists of retired objects that belong to the default domain.
 */
class hazptr_priv {
  // Singly-linked list of privately retired objects: head_ is the most
  // recently pushed object, tail_ the oldest. Both are atomic because
  // hazptr_domain::cleanup() may collect() concurrently from another
  // thread via PrivList::accessAllThreads().
  std::atomic<hazptr_obj*> head_;
  std::atomic<hazptr_obj*> tail_;
  // Count of objects pushed since the last flush to the domain.
  // Only modified by the owner thread.
  int rcount_;
  bool active_;
  hazptr_priv* prev_;
  hazptr_priv* next_;

 public:
  void init() {
    head_ = nullptr;
    tail_ = nullptr;
    rcount_ = 0;
    active_ = true;
  }

  bool active() {
    return active_;
  }

  hazptr_priv* prev() {
    return prev_;
  }

  hazptr_priv* next() {
    return next_;
  }

  bool empty() {
    return head() == nullptr;
  }

  void set_prev(hazptr_priv* rec) {
    prev_ = rec;
  }

  void set_next(hazptr_priv* rec) {
    next_ = rec;
  }

  void clear_active() {
    active_ = false;
  }

  // Push a retired object onto the private list; flush the whole list
  // to the domain when the private threshold is reached.
  void push(hazptr_obj* obj) {
    while (true) {
      if (tail()) {
        if (pushInNonEmptyList(obj)) {
          break;
        }
      } else {
        if (pushInEmptyList(obj)) {
          break;
        }
      }
    }
    if (++rcount_ >= HAZPTR_PRIV_THRESHOLD) {
      push_all_to_domain();
    }
  }

  // Hand the entire private list to the default domain and trigger
  // opportunistic reclamation.
  void push_all_to_domain() {
    auto& domain = default_hazptr_domain();
    hazptr_obj* h = nullptr;
    hazptr_obj* t = nullptr;
    collect(h, t);
    if (h) {
      DCHECK(t);
      domain.pushRetired(h, t, rcount_);
    }
    rcount_ = 0;
    domain.tryBulkReclaim();
    domain.tryTimedCleanup();
  }

  // Atomically detach this thread's list and append it to the
  // caller-accumulated list [colHead, colTail].
  void collect(hazptr_obj*& colHead, hazptr_obj*& colTail) {
    // This function doesn't change rcount_.
    // The value rcount_ is accurate excluding the effects of collect().
    auto h = exchangeHead();
    if (h) {
      auto t = exchangeTail();
      DCHECK(t);
      if (colTail) {
        colTail->set_next(h);
      } else {
        colHead = h;
      }
      colTail = t;
    }
  }

 private:
  hazptr_obj* head() {
    return head_.load(std::memory_order_acquire);
  }

  hazptr_obj* tail() {
    return tail_.load(std::memory_order_acquire);
  }

  void setHead(hazptr_obj* obj) {
    head_.store(obj, std::memory_order_release);
  }

  bool casHead(hazptr_obj* expected, hazptr_obj* obj) {
    return head_.compare_exchange_weak(
        expected, obj, std::memory_order_acq_rel, std::memory_order_relaxed);
  }

  bool casTail(hazptr_obj* expected, hazptr_obj* obj) {
    return tail_.compare_exchange_weak(
        expected, obj, std::memory_order_acq_rel, std::memory_order_relaxed);
  }

  hazptr_obj* exchangeHead() {
    return head_.exchange(nullptr, std::memory_order_acq_rel);
  }

  hazptr_obj* exchangeTail() {
    return tail_.exchange(nullptr, std::memory_order_acq_rel);
  }

  // Link obj before the current head; fails (returns false) if the list
  // was concurrently emptied or the CAS loses a race.
  bool pushInNonEmptyList(hazptr_obj* obj) {
    auto h = head();
    if (h) {
      obj->set_next(h);
      if (casHead(h, obj)) {
        return true;
      }
    }
    return false;
  }

  // Install obj as the sole element; fails if another push won the race
  // to set tail_.
  bool pushInEmptyList(hazptr_obj* obj) {
    hazptr_obj* t = nullptr;
    obj->set_next(nullptr);
    if (casTail(t, obj)) {
      setHead(obj);
      return true;
    }
    return false;
  }
};
static_assert(
folly::kCpplibVer || std::is_trivial<hazptr_priv>::value,
"hazptr_priv must be trivial to avoid a branch to check initialization");
void hazptr_priv_init(hazptr_priv& priv);
void hazptr_priv_shutdown(hazptr_priv& priv);
bool hazptr_priv_try_retire(hazptr_obj* obj);
/** tls globals
 *  Per-thread bundle of the hazptr TLS state: the life-cycle flag, the
 *  hazptr_rec cache, and the private retired list. Construction and
 *  destruction bracket the thread's use of the hazptr library. */
struct hazptr_tls_globals_ {
  hazptr_tls_state tls_state{TLS_UNINITIALIZED};
  hazptr_tc tc;
  hazptr_priv priv;
  hazptr_tls_globals_() {
    HAZPTR_DEBUG_PRINT(this);
    tls_state = TLS_ALIVE;
    hazptr_tc_init(tc);
    hazptr_priv_init(priv);
  }
  ~hazptr_tls_globals_() {
    HAZPTR_DEBUG_PRINT(this);
    CHECK(tls_state == TLS_ALIVE);
    // Evict cached hazptr_recs and flush privately retired objects
    // before marking the TLS slot dead.
    hazptr_tc_shutdown(tc);
    hazptr_priv_shutdown(priv);
    tls_state = TLS_DESTROYED;
  }
};
struct HazptrTag {};
typedef folly::SingletonThreadLocal<hazptr_tls_globals_, HazptrTag> PrivList;
// Access (and lazily construct) this thread's hazptr TLS globals.
FOLLY_ALWAYS_INLINE hazptr_tls_globals_& hazptr_tls_globals() {
  return PrivList::get();
}
/**
* hazptr_domain
*/
// Construct a domain that allocates hazptr_rec-s from the given
// memory resource. All other members are zero/null-initialized in-class.
inline constexpr hazptr_domain::hazptr_domain(memory_resource* mr) noexcept
    : mr_(mr) {}
/**
* hazptr_obj_base
*/
// Retire this object: record its deleter and reclamation function, then
// either keep it on the thread-private list (default domain only, when
// HAZPTR_PRIV is enabled) or hand it to the domain.
template <typename T, typename D>
inline void hazptr_obj_base<T, D>::retire(hazptr_domain& domain, D deleter) {
  HAZPTR_DEBUG_PRINT(this << " " << &domain);
  retireCheck();
  deleter_ = std::move(deleter);
  reclaim_ = [](hazptr_obj* p) {
    auto hobp = static_cast<hazptr_obj_base*>(p);
    auto obj = static_cast<T*>(hobp);
    hobp->deleter_(obj);
  };
  if (HAZPTR_PRIV &&
      (HAZPTR_ONE_DOMAIN || (&domain == &default_hazptr_domain()))) {
    if (hazptr_priv_try_retire(this)) {
      return;
    }
  }
  domain.objRetire(this);
}
/**
* hazptr_obj_base_refcounted
*/
// Retire a reference-counted object. preRetire records the deleter and
// a reclamation function that only frees the object when the refcount
// reaches zero; the retirement path mirrors hazptr_obj_base::retire.
template <typename T, typename D>
inline void hazptr_obj_base_refcounted<T, D>::retire(
    hazptr_domain& domain,
    D deleter) {
  HAZPTR_DEBUG_PRINT(this << " " << &domain);
  preRetire(deleter);
  if (HAZPTR_PRIV &&
      (HAZPTR_ONE_DOMAIN || (&domain == &default_hazptr_domain()))) {
    if (hazptr_priv_try_retire(this)) {
      return;
    }
  }
  domain.objRetire(this);
}
// Atomically increment the reference count.
template <typename T, typename D>
inline void hazptr_obj_base_refcounted<T, D>::acquire_ref() {
  HAZPTR_DEBUG_PRINT(this);
  auto oldval = refcount_.fetch_add(1);
  // NOTE(review): refcount_ is unsigned, so this check is vacuously true;
  // it documents intent (no pre-increment underflow) rather than enforcing it.
  DCHECK(oldval >= 0);
}
// Increment the reference count without an atomic RMW. Caller guarantees
// a thread-safe context (e.g., the object is not yet shared), making the
// separate load/store pair safe and cheaper than fetch_add.
template <typename T, typename D>
inline void hazptr_obj_base_refcounted<T, D>::acquire_ref_safe() {
  HAZPTR_DEBUG_PRINT(this);
  auto oldval = refcount_.load(std::memory_order_acquire);
  DCHECK(oldval >= 0);
  refcount_.store(oldval + 1, std::memory_order_release);
}
// Decrement the reference count. Returns true iff the count was already
// zero, i.e., the caller is now responsible for reclaiming the object.
template <typename T, typename D>
inline bool hazptr_obj_base_refcounted<T, D>::release_ref() {
  HAZPTR_DEBUG_PRINT(this);
  auto oldval = refcount_.load(std::memory_order_acquire);
  if (oldval > 0) {
    oldval = refcount_.fetch_sub(1);
  } else {
    if (kIsDebug) {
      // Underflow canary for double release in debug builds. Cast -1 to
      // the atomic's value type; the previous cast to decltype(refcount_)
      // (the std::atomic type itself) constructed a temporary atomic and
      // then performed a needless atomic load to convert it for store().
      refcount_.store(static_cast<uint32_t>(-1));
    }
  }
  HAZPTR_DEBUG_PRINT(this << " " << oldval);
  DCHECK(oldval >= 0);
  return oldval == 0;
}
// Record the deleter and install a reclamation function that defers
// actual destruction until the last reference is released.
template <typename T, typename D>
inline void hazptr_obj_base_refcounted<T, D>::preRetire(D deleter) {
  deleter_ = std::move(deleter);
  retireCheck();
  reclaim_ = [](hazptr_obj* p) {
    auto hrobp = static_cast<hazptr_obj_base_refcounted*>(p);
    if (hrobp->release_ref()) {
      auto obj = static_cast<T*>(hrobp);
      hrobp->deleter_(obj);
    }
  };
}
/**
* hazptr_rec
*/
// A single hazard pointer record. Cache-line aligned to avoid false
// sharing between records owned by different threads.
class alignas(hardware_destructive_interference_size) hazptr_rec {
  friend class hazptr_domain;
  friend class hazptr_holder;
  friend struct hazptr_tc_entry;

  // The currently protected object pointer, or nullptr.
  std::atomic<const void*> hazptr_{nullptr};
  // Next record in the domain's singly-linked list of all records.
  hazptr_rec* next_{nullptr};
  // True while some holder (or thread cache) owns this record.
  std::atomic<bool> active_{false};

  void set(const void* p) noexcept;
  const void* get() const noexcept;
  void clear() noexcept;
  bool isActive() noexcept;
  bool tryAcquire() noexcept;
  void release() noexcept;
};
/**
* hazptr_holder
*/
// Automatically acquire a hazard pointer from the given domain. Tries
// the thread-local cache first when thread caching applies to this
// domain; otherwise acquires (or allocates) a record from the domain.
// Throws std::bad_alloc if the domain cannot supply a hazptr_rec.
FOLLY_ALWAYS_INLINE hazptr_holder::hazptr_holder(hazptr_domain& domain) {
  domain_ = &domain;
  if (LIKELY(
          HAZPTR_TC &&
          (HAZPTR_ONE_DOMAIN || &domain == &default_hazptr_domain()))) {
    auto hprec = hazptr_tc_try_get();
    if (LIKELY(hprec != nullptr)) {
      hazptr_ = hprec;
      HAZPTR_DEBUG_PRINT(this << " " << domain_ << " " << hazptr_);
      return;
    }
  }
  hazptr_ = domain_->hazptrAcquire();
  HAZPTR_DEBUG_PRINT(this << " " << domain_ << " " << hazptr_);
  if (hazptr_ == nullptr) {
    // Throw directly; no need to copy a named exception object.
    throw std::bad_alloc();
  }
}
// Construct an empty holder that owns no hazard pointer and belongs to
// no domain. Using such a holder (reset/try_protect) is UB until a
// non-empty holder is moved into it.
FOLLY_ALWAYS_INLINE hazptr_holder::hazptr_holder(std::nullptr_t) noexcept {
  domain_ = nullptr;
  hazptr_ = nullptr;
  HAZPTR_DEBUG_PRINT(this << " " << domain_ << " " << hazptr_);
}
// Clear the owned hazard pointer and return its record: to the
// thread-local cache when possible, otherwise back to the domain.
FOLLY_ALWAYS_INLINE hazptr_holder::~hazptr_holder() {
  HAZPTR_DEBUG_PRINT(this);
  if (LIKELY(hazptr_ != nullptr)) {
    DCHECK(domain_ != nullptr);
    hazptr_->clear();
    if (LIKELY(
            HAZPTR_TC &&
            (HAZPTR_ONE_DOMAIN || domain_ == &default_hazptr_domain()))) {
      if (LIKELY(hazptr_tc_try_put(hazptr_))) {
        return;
      }
    }
    domain_->hazptrRelease(hazptr_);
  }
}
// Move construction transfers ownership of the hazard pointer record,
// leaving rhs empty (as if constructed with nullptr).
FOLLY_ALWAYS_INLINE hazptr_holder::hazptr_holder(hazptr_holder&& rhs) noexcept {
  domain_ = rhs.domain_;
  hazptr_ = rhs.hazptr_;
  rhs.domain_ = nullptr;
  rhs.hazptr_ = nullptr;
}
// Move assignment: release the currently owned record (via the
// destructor), then steal rhs's record via placement move-construction.
FOLLY_ALWAYS_INLINE
hazptr_holder& hazptr_holder::operator=(hazptr_holder&& rhs) noexcept {
  /* Self-move is a no-op. */
  if (LIKELY(this != &rhs)) {
    this->~hazptr_holder();
    new (this) hazptr_holder(std::move(rhs));
  }
  return *this;
}
// Convenience overload of try_protect with an identity filter: protect
// ptr as-is if src still equals it.
template <typename T>
FOLLY_ALWAYS_INLINE bool hazptr_holder::try_protect(
    T*& ptr,
    const std::atomic<T*>& src) noexcept {
  auto passthrough = [](T* raw) { return raw; };
  return try_protect(ptr, src, passthrough);
}
// Publish f(ptr) as this thread's hazard pointer, then re-read src. If
// src still equals ptr the protection is established and true is
// returned; otherwise ptr is updated to the new value, the hazard
// pointer is cleared, and false is returned. The light barrier orders
// the hazard-pointer store before the validating load.
template <typename T, typename Func>
FOLLY_ALWAYS_INLINE bool hazptr_holder::try_protect(
    T*& ptr,
    const std::atomic<T*>& src,
    Func f) noexcept {
  HAZPTR_DEBUG_PRINT(this << " " << ptr << " " << &src);
  reset(f(ptr));
  /*** Full fence ***/ hazptr_mb::light();
  T* p = src.load(std::memory_order_acquire);
  if (UNLIKELY(p != ptr)) {
    ptr = p;
    reset();
    return false;
  }
  return true;
}
// Convenience overload of get_protected with an identity filter.
template <typename T>
FOLLY_ALWAYS_INLINE T* hazptr_holder::get_protected(
    const std::atomic<T*>& src) noexcept {
  auto passthrough = [](T* raw) { return raw; };
  return get_protected(src, passthrough);
}
// Repeatedly attempt try_protect until a load from src is successfully
// protected; returns the protected pointer (possibly filtered by f).
template <typename T, typename Func>
FOLLY_ALWAYS_INLINE T* hazptr_holder::get_protected(
    const std::atomic<T*>& src,
    Func f) noexcept {
  T* p = src.load(std::memory_order_relaxed);
  while (!try_protect(p, src, f)) {
  }
  HAZPTR_DEBUG_PRINT(this << " " << p << " " << &src);
  return p;
}
// Point the owned hazard pointer at ptr (unvalidated; use try_protect
// for the full protection protocol).
template <typename T>
FOLLY_ALWAYS_INLINE void hazptr_holder::reset(const T* ptr) noexcept {
  auto p = static_cast<hazptr_obj*>(const_cast<T*>(ptr));
  HAZPTR_DEBUG_PRINT(this << " " << ptr << " p:" << p);
  DCHECK(hazptr_); // UB if *this is empty
  hazptr_->set(p);
}
// Clear the owned hazard pointer (stop protecting anything).
FOLLY_ALWAYS_INLINE void hazptr_holder::reset(std::nullptr_t) noexcept {
  HAZPTR_DEBUG_PRINT(this);
  DCHECK(hazptr_); // UB if *this is empty
  hazptr_->clear();
}
// Exchange owned records (and domains, unless compiled single-domain)
// with another holder.
FOLLY_ALWAYS_INLINE void hazptr_holder::swap(hazptr_holder& rhs) noexcept {
  HAZPTR_DEBUG_PRINT(
      this << " " << this->hazptr_ << " " << this->domain_ << " -- " << &rhs
           << " " << rhs.hazptr_ << " " << rhs.domain_);
  if (!HAZPTR_ONE_DOMAIN) {
    std::swap(this->domain_, rhs.domain_);
  }
  std::swap(this->hazptr_, rhs.hazptr_);
}
// ADL-findable free swap for hazptr_holder.
FOLLY_ALWAYS_INLINE void swap(hazptr_holder& lhs, hazptr_holder& rhs) noexcept {
  lhs.swap(rhs);
}
/**
* hazptr_array
*/
// Construct M holders at once. Fast path: take the last M records from
// the thread cache in one step. Slow path: default-construct each
// holder (which acquires from the default domain individually).
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_array<M>::hazptr_array() {
  // raw_ is uninitialized storage; holders are placement-constructed.
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  if (HAZPTR_TC) {
    auto ptc = hazptr_tc_tls();
    if (LIKELY(ptc != nullptr)) {
      auto& tc = *ptc;
      auto count = tc.count();
      if (M <= count) {
        size_t offset = count - M;
        for (size_t i = 0; i < M; ++i) {
          auto hprec = tc[offset + i].hprec_;
          DCHECK(hprec != nullptr);
          HAZPTR_DEBUG_PRINT(i << " " << &h[i]);
          // Build an empty holder, then adopt the cached record directly.
          new (&h[i]) hazptr_holder(nullptr);
          h[i].hazptr_ = hprec;
          HAZPTR_DEBUG_PRINT(
              i << " " << &h[i] << " " << h[i].domain_ << " " << h[i].hazptr_);
        }
        tc.count_ = offset;
        return;
      }
    }
  }
  // slow path
  for (size_t i = 0; i < M; ++i) {
    new (&h[i]) hazptr_holder;
    HAZPTR_DEBUG_PRINT(
        i << " " << &h[i] << " " << h[i].domain_ << " " << h[i].hazptr_);
  }
}
// Move construction: move each holder element-wise and transfer the
// empty_ flag so the source array destructs as empty.
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_array<M>::hazptr_array(
    hazptr_array&& other) noexcept {
  HAZPTR_DEBUG_PRINT(this << " " << M << " " << &other);
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  auto hother = reinterpret_cast<hazptr_holder*>(&other.raw_);
  for (size_t i = 0; i < M; ++i) {
    new (&h[i]) hazptr_holder(std::move(hother[i]));
    HAZPTR_DEBUG_PRINT(i << " " << &h[i] << " " << &hother[i]);
  }
  empty_ = other.empty_;
  other.empty_ = true;
}
// Construct M empty holders (no hazard pointers acquired).
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_array<M>::hazptr_array(std::nullptr_t) noexcept {
  HAZPTR_DEBUG_PRINT(this << " " << M);
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  for (size_t i = 0; i < M; ++i) {
    new (&h[i]) hazptr_holder(nullptr);
    HAZPTR_DEBUG_PRINT(i << " " << &h[i]);
  }
  empty_ = true;
}
// Destroy the array. Fast path: clear each record and return all M to
// the thread cache in one step (when they fit). Slow path: destroy each
// holder, releasing its record to the default domain.
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_array<M>::~hazptr_array() {
  if (empty_) {
    return;
  }
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  if (HAZPTR_TC) {
    auto ptc = hazptr_tc_tls();
    if (LIKELY(ptc != nullptr)) {
      auto& tc = *ptc;
      auto count = tc.count();
      if ((M <= HAZPTR_TC_SIZE) && (count + M <= HAZPTR_TC_SIZE)) {
        for (size_t i = 0; i < M; ++i) {
          h[i].reset();
          tc[count + i].hprec_ = h[i].hazptr_;
          HAZPTR_DEBUG_PRINT(i << " " << &h[i]);
          // Re-construct as an empty holder so no record is released twice.
          new (&h[i]) hazptr_holder(nullptr);
          HAZPTR_DEBUG_PRINT(
              i << " " << &h[i] << " " << h[i].domain_ << " " << h[i].hazptr_);
        }
        tc.count_ = count + M;
        return;
      }
    }
  }
  // slow path
  for (size_t i = 0; i < M; ++i) {
    h[i].domain_ = &default_hazptr_domain();
    h[i].~hazptr_holder();
  }
}
// Move assignment: move-assign each holder and transfer the empty_ flag.
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_array<M>& hazptr_array<M>::operator=(
    hazptr_array&& other) noexcept {
  HAZPTR_DEBUG_PRINT(this << " " << M << " " << &other);
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  for (size_t i = 0; i < M; ++i) {
    h[i] = std::move(other[i]);
    HAZPTR_DEBUG_PRINT(i << " " << &h[i] << " " << &other[i]);
  }
  empty_ = other.empty_;
  other.empty_ = true;
  return *this;
}
// Access the i-th holder (debug-checked bound).
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_holder& hazptr_array<M>::operator[](
    size_t i) noexcept {
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  DCHECK(i < M);
  return h[i];
}
/**
* hazptr_local
*/
// Construct M strictly-local holders. Fast path: borrow the first M
// thread-cache records in place, without removing them from the cache
// (hence only one hazptr_local may be live per thread at a time, which
// the debug-only local_ flag checks). Slow path: default-construct.
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_local<M>::hazptr_local() {
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  if (HAZPTR_TC) {
    auto ptc = hazptr_tc_tls();
    if (LIKELY(ptc != nullptr)) {
      auto& tc = *ptc;
      auto count = tc.count();
      if (M <= count) {
        if (kIsDebug) {
          DCHECK(!tc.local_);
          tc.local_ = true;
        }
        // Fast path
        for (size_t i = 0; i < M; ++i) {
          auto hprec = tc[i].hprec_;
          DCHECK(hprec != nullptr);
          HAZPTR_DEBUG_PRINT(i << " " << &h[i]);
          new (&h[i]) hazptr_holder(nullptr);
          h[i].hazptr_ = hprec;
          HAZPTR_DEBUG_PRINT(
              i << " " << &h[i] << " " << h[i].domain_ << " " << h[i].hazptr_);
        }
        return;
      }
    }
  }
  // Slow path
  slow_path_ = true;
  for (size_t i = 0; i < M; ++i) {
    new (&h[i]) hazptr_holder;
    HAZPTR_DEBUG_PRINT(
        i << " " << &h[i] << " " << h[i].domain_ << " " << h[i].hazptr_);
  }
}
// Destroy local holders. Fast path: the records still belong to the
// thread cache, so just clear them (no holder destructor runs). Slow
// path: destroy each holder normally.
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_local<M>::~hazptr_local() {
  if (LIKELY(!slow_path_)) {
    if (kIsDebug) {
      auto ptc = hazptr_tc_tls();
      DCHECK(ptc != nullptr);
      auto& tc = *ptc;
      DCHECK(tc.local_);
      tc.local_ = false;
    }
    auto h = reinterpret_cast<hazptr_holder*>(&raw_);
    for (size_t i = 0; i < M; ++i) {
      h[i].reset();
    }
    return;
  }
  // Slow path
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  for (size_t i = 0; i < M; ++i) {
    h[i].~hazptr_holder();
  }
}
// Access the i-th holder (debug-checked bound).
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_holder& hazptr_local<M>::operator[](
    size_t i) noexcept {
  auto h = reinterpret_cast<hazptr_holder*>(&raw_);
  DCHECK(i < M);
  return h[i];
}
////////////////////////////////////////////////////////////////////////////////
// [TODO]:
// - Control of reclamation (when and by whom)
// - End-to-end lock-free implementation
/** Definition of default_hazptr_domain() */
// Return the process-wide default domain (a global defined in hazptr.cpp).
FOLLY_ALWAYS_INLINE hazptr_domain& default_hazptr_domain() {
  HAZPTR_DEBUG_PRINT(&default_domain_);
  return default_domain_;
}
// Free-function retire against the default domain.
template <typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
  default_hazptr_domain().retire(obj, std::move(reclaim));
}
// Reclaim all reclaimable objects retired to the domain before this call.
inline void hazptr_cleanup(hazptr_domain& domain) {
  domain.cleanup();
}
/** hazptr_rec */
// Publish p as the protected pointer of this record.
FOLLY_ALWAYS_INLINE void hazptr_rec::set(const void* p) noexcept {
  HAZPTR_DEBUG_PRINT(this << " " << p);
  hazptr_.store(p, std::memory_order_release);
}
// Read the currently protected pointer (used by reclamation scans).
inline const void* hazptr_rec::get() const noexcept {
  auto p = hazptr_.load(std::memory_order_acquire);
  HAZPTR_DEBUG_PRINT(this << " " << p);
  return p;
}
// Stop protecting any object.
FOLLY_ALWAYS_INLINE void hazptr_rec::clear() noexcept {
  HAZPTR_DEBUG_PRINT(this);
  hazptr_.store(nullptr, std::memory_order_release);
}
// True while the record is owned by some holder or thread cache.
inline bool hazptr_rec::isActive() noexcept {
  return active_.load(std::memory_order_acquire);
}
// Attempt to take ownership of an inactive record; returns true on
// success. The CAS may race with other acquirers.
inline bool hazptr_rec::tryAcquire() noexcept {
  bool active = isActive();
  if (!active &&
      active_.compare_exchange_strong(
          active, true, std::memory_order_release, std::memory_order_relaxed)) {
    HAZPTR_DEBUG_PRINT(this);
    return true;
  }
  return false;
}
// Return the record to the domain's free pool.
inline void hazptr_rec::release() noexcept {
  HAZPTR_DEBUG_PRINT(this);
  active_.store(false, std::memory_order_release);
}
/** hazptr_obj */
// The address compared against hazard pointers during reclamation scans.
inline const void* hazptr_obj::getObjPtr() const {
  HAZPTR_DEBUG_PRINT(this);
  return this;
}
/** hazptr_domain */
// Free-function-style retire for objects that do not derive from
// hazptr_obj_base: wraps obj in a heap-allocated node owning it via
// unique_ptr, then retires the node.
template <typename T, typename D>
void hazptr_domain::retire(T* obj, D reclaim) {
  struct hazptr_retire_node : hazptr_obj {
    std::unique_ptr<T, D> obj_;
    hazptr_retire_node(T* retireObj, D toReclaim)
        : obj_{retireObj, std::move(toReclaim)} {}
  };
  auto node = new hazptr_retire_node(obj, std::move(reclaim));
  node->reclaim_ = [](hazptr_obj* p) {
    delete static_cast<hazptr_retire_node*>(p);
  };
  objRetire(node);
}
// Destroy the domain: reclaim every remaining retired object (looping
// because a reclamation function may itself retire more objects), then
// free the hazptr_rec list — except for the default domain, whose
// records are deliberately leaked.
inline hazptr_domain::~hazptr_domain() {
  HAZPTR_DEBUG_PRINT(this);
  { /* reclaim all remaining retired objects */
    hazptr_obj* next;
    auto retired = retired_.exchange(nullptr);
    while (retired) {
      for (auto p = retired; p; p = next) {
        next = p->next_;
        DCHECK(p != next);
        HAZPTR_DEBUG_PRINT(this << " " << p);
        (*(p->reclaim_))(p);
      }
      retired = retired_.exchange(nullptr);
    }
  }
  /* Leak the data for the default domain to avoid destruction order
   * issues with thread caches.
   */
  if (this != &default_hazptr_domain()) {
    /* free all hazptr_rec-s */
    hazptr_rec* next;
    for (auto p = hazptrs_.load(std::memory_order_acquire); p; p = next) {
      next = p->next_;
      DCHECK(!p->isActive());
      mr_->deallocate(static_cast<void*>(p), sizeof(hazptr_rec));
    }
  }
}
// Run cleanup() at most once per syncTimePeriod_. The CAS elects a
// single thread to perform the cleanup for each period.
inline void hazptr_domain::tryTimedCleanup() {
  uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
                      std::chrono::steady_clock::now().time_since_epoch())
                      .count();
  auto prevtime = syncTime_.load(std::memory_order_relaxed);
  if (time < prevtime ||
      !syncTime_.compare_exchange_strong(
          prevtime, time + syncTimePeriod_, std::memory_order_relaxed)) {
    return;
  }
  cleanup();
}
// Collect every thread's private retired list into the domain, then
// scan hazard pointers and reclaim what is unprotected.
inline void hazptr_domain::cleanup() {
  hazptr_obj* h = nullptr;
  hazptr_obj* t = nullptr;
  for (hazptr_tls_globals_& tls : PrivList::accessAllThreads()) {
    tls.priv.collect(h, t);
  }
  if (h) {
    DCHECK(t);
    pushRetired(h, t, 0);
  }
  bulkReclaim();
}
// Acquire a hazptr_rec: first try to reuse an inactive record from the
// domain's list; otherwise allocate a new one from mr_ and prepend it
// to the list. Returns nullptr on allocation failure.
inline hazptr_rec* hazptr_domain::hazptrAcquire() {
  hazptr_rec* p;
  hazptr_rec* next;
  for (p = hazptrs_.load(std::memory_order_acquire); p; p = next) {
    next = p->next_;
    if (p->tryAcquire()) {
      return p;
    }
  }
  p = static_cast<hazptr_rec*>(mr_->allocate(sizeof(hazptr_rec)));
  HAZPTR_DEBUG_PRINT(this << " " << p << " " << sizeof(hazptr_rec));
  if (p == nullptr) {
    return nullptr;
  }
  p->active_.store(true, std::memory_order_relaxed);
  // Prepend with CAS; on failure p->next_ is refreshed with the current head.
  p->next_ = hazptrs_.load(std::memory_order_acquire);
  while (!hazptrs_.compare_exchange_weak(
      p->next_, p, std::memory_order_release, std::memory_order_acquire)) {
    /* keep trying */;
  }
  auto hcount = hcount_.fetch_add(1);
  HAZPTR_DEBUG_PRINT(
      this << " " << p << " " << sizeof(hazptr_rec) << " " << hcount);
  return p;
}
// Return a record to the domain's free pool (record stays on the list).
inline void hazptr_domain::hazptrRelease(hazptr_rec* p) noexcept {
  HAZPTR_DEBUG_PRINT(this << " " << p);
  p->release();
}
// Prepend the list [head, tail] to the domain's retired list and add
// count to the retired counter; returns the updated count.
inline int
hazptr_domain::pushRetired(hazptr_obj* head, hazptr_obj* tail, int count) {
  /*** Full fence ***/ hazptr_mb::light();
  tail->next_ = retired_.load(std::memory_order_acquire);
  while (!retired_.compare_exchange_weak(
      tail->next_,
      head,
      std::memory_order_release,
      std::memory_order_acquire)) {
  }
  return rcount_.fetch_add(count) + count;
}
// Return true when the retired count warrants a reclamation scan: it
// must meet the absolute threshold and also exceed the hazard-pointer
// count scaled by the scan multiplier.
inline bool hazptr_domain::reachedThreshold(int rcount) {
  if (rcount < HAZPTR_SCAN_THRESHOLD) {
    return false;
  }
  return rcount >= HAZPTR_SCAN_MULT * hcount_.load(std::memory_order_acquire);
}
// Retire a single object and trigger bulk reclamation when the retired
// count crosses the scan threshold.
inline void hazptr_domain::objRetire(hazptr_obj* p) {
  auto rcount = pushRetired(p, p, 1);
  if (reachedThreshold(rcount)) {
    tryBulkReclaim();
  }
}
// Elect one thread (via CAS of rcount_ to zero) to perform a bulk
// reclamation scan; others bail out if the threshold no longer holds.
inline void hazptr_domain::tryBulkReclaim() {
  HAZPTR_DEBUG_PRINT(this);
  do {
    auto hcount = hcount_.load(std::memory_order_acquire);
    auto rcount = rcount_.load(std::memory_order_acquire);
    if (rcount < HAZPTR_SCAN_THRESHOLD || rcount < HAZPTR_SCAN_MULT * hcount) {
      return;
    }
    if (rcount_.compare_exchange_weak(
            rcount, 0, std::memory_order_release, std::memory_order_relaxed)) {
      break;
    }
  } while (true);
  bulkReclaim();
}
// Scan: snapshot all hazard pointers into a hash set, then walk the
// detached retired list. Unprotected objects are reclaimed; protected
// ones are pushed back onto the retired list for a later scan.
inline void hazptr_domain::bulkReclaim() {
  HAZPTR_DEBUG_PRINT(this);
  // Heavy barrier pairs with the light barrier in the protection
  // protocol, ensuring hazard pointer stores are visible to this scan.
  /*** Full fence ***/ hazptr_mb::heavy();
  auto p = retired_.exchange(nullptr, std::memory_order_acquire);
  auto h = hazptrs_.load(std::memory_order_acquire);
  std::unordered_set<const void*> hs; // TODO lock-free alternative
  for (; h; h = h->next_) {
    hs.insert(h->get());
  }
  int rcount = 0;
  hazptr_obj* retired = nullptr;
  hazptr_obj* tail = nullptr;
  hazptr_obj* next;
  for (; p; p = next) {
    next = p->next_;
    DCHECK(p != next);
    if (hs.count(p->getObjPtr()) == 0) {
      HAZPTR_DEBUG_PRINT(this << " " << p);
      (*(p->reclaim_))(p);
    } else {
      // Still protected; keep for a future scan.
      p->next_ = retired;
      retired = p;
      if (tail == nullptr) {
        tail = p;
      }
      ++rcount;
    }
  }
  if (tail) {
    pushRetired(retired, tail, rcount);
  }
}
/** hazptr_stats
 *  Global counters of barrier usage, collected only when HAZPTR_STATS
 *  is enabled; reported via debug print at destruction. */
class hazptr_stats {
 public:
  ~hazptr_stats();
  void light();
  void heavy();
  void seq_cst();

 private:
  std::atomic<uint64_t> light_{0};
  std::atomic<uint64_t> heavy_{0};
  std::atomic<uint64_t> seq_cst_{0};
};
extern hazptr_stats hazptr_stats_;
// Report collected counters (debug builds only; prints are no-ops otherwise).
inline hazptr_stats::~hazptr_stats() {
  HAZPTR_DEBUG_PRINT(this << " light " << light_.load());
  HAZPTR_DEBUG_PRINT(this << " heavy " << heavy_.load());
  HAZPTR_DEBUG_PRINT(this << " seq_cst " << seq_cst_.load());
}
// Count a light (asymmetric) barrier; compiled out when stats disabled.
FOLLY_ALWAYS_INLINE void hazptr_stats::light() {
  if (HAZPTR_STATS) {
    /* atomic */ ++light_;
  }
}
// Count a heavy (asymmetric) barrier; compiled out when stats disabled.
inline void hazptr_stats::heavy() {
  if (HAZPTR_STATS) {
    /* atomic */ ++heavy_;
  }
}
// Count a seq_cst fence (fallback when asymmetric barriers are off).
inline void hazptr_stats::seq_cst() {
  if (HAZPTR_STATS) {
    /* atomic */ ++seq_cst_;
  }
}
/** hazptr_mb */
// Reader-side barrier: cheap asymmetric light barrier when HAZPTR_AMB
// is enabled, otherwise a full seq_cst fence.
FOLLY_ALWAYS_INLINE void hazptr_mb::light() {
  HAZPTR_DEBUG_PRINT("");
  if (HAZPTR_AMB) {
    folly::asymmetricLightBarrier();
    INC_HAZPTR_STATS(light);
  } else {
    atomic_thread_fence(std::memory_order_seq_cst);
    INC_HAZPTR_STATS(seq_cst);
  }
}
// Reclaimer-side barrier: expensive asymmetric heavy barrier when
// HAZPTR_AMB is enabled, otherwise a full seq_cst fence.
inline void hazptr_mb::heavy() {
  HAZPTR_DEBUG_PRINT("");
  if (HAZPTR_AMB) {
    folly::asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
    INC_HAZPTR_STATS(heavy);
  } else {
    atomic_thread_fence(std::memory_order_seq_cst);
    INC_HAZPTR_STATS(seq_cst);
  }
}
/**
* TLS structures
*/
/**
* hazptr_tc structures
*/
/** hazptr_tc_entry */
// Cache an (active) hazptr_rec in this slot.
FOLLY_ALWAYS_INLINE void hazptr_tc_entry::fill(hazptr_rec* hprec) {
  hprec_ = hprec;
  HAZPTR_DEBUG_PRINT(this << " " << hprec);
}
// Return the cached record (slot contents are unchanged).
FOLLY_ALWAYS_INLINE hazptr_rec* hazptr_tc_entry::get() {
  auto hprec = hprec_;
  HAZPTR_DEBUG_PRINT(this << " " << hprec);
  return hprec;
}
// Release the cached record back to its domain's free pool.
inline void hazptr_tc_entry::evict() {
  auto hprec = hprec_;
  hprec->release();
  HAZPTR_DEBUG_PRINT(this << " " << hprec);
}
/** hazptr_tc */
// Access the i-th cached entry. entry_ holds HAZPTR_TC_SIZE elements,
// so valid indices are [0, HAZPTR_TC_SIZE).
FOLLY_ALWAYS_INLINE hazptr_tc_entry& hazptr_tc::operator[](size_t i) {
  // Fixed off-by-one: the previous check (i <= HAZPTR_TC_SIZE) allowed a
  // debug build to index one element past the end of entry_.
  DCHECK(i < HAZPTR_TC_SIZE);
  return entry_[i];
}
// Pop the most recently cached record, or nullptr when the cache is empty.
FOLLY_ALWAYS_INLINE hazptr_rec* hazptr_tc::get() {
  if (LIKELY(count_ != 0)) {
    auto hprec = entry_[--count_].get();
    HAZPTR_DEBUG_PRINT(this << " " << hprec);
    return hprec;
  }
  HAZPTR_DEBUG_PRINT(this << " nullptr");
  return nullptr;
}
// Cache a record; returns false when the cache is already full.
FOLLY_ALWAYS_INLINE bool hazptr_tc::put(hazptr_rec* hprec) {
  if (LIKELY(count_ < HAZPTR_TC_SIZE)) {
    entry_[count_++].fill(hprec);
    HAZPTR_DEBUG_PRINT(this << " " << count_ - 1);
    return true;
  }
  return false;
}
// Number of records currently cached.
FOLLY_ALWAYS_INLINE size_t hazptr_tc::count() {
  return count_;
}
/** hazptr_tc free functions */
// Return this thread's cache, or nullptr once the TLS has been
// destroyed (thread exit). Accessing hazptr_tls_globals() constructs
// the TLS on first use, so TLS_UNINITIALIZED is handled like alive.
FOLLY_ALWAYS_INLINE hazptr_tc* hazptr_tc_tls() {
  HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
  if (LIKELY(hazptr_tls_globals().tls_state == TLS_ALIVE)) {
    HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
    return &hazptr_tls_globals().tc;
  } else if (hazptr_tls_globals().tls_state == TLS_UNINITIALIZED) {
    return &hazptr_tls_globals().tc;
  }
  return nullptr;
}
// Initialize an empty thread cache (hazptr_tc is trivial, so this is
// its explicit initialization).
inline void hazptr_tc_init(hazptr_tc& tc) {
  HAZPTR_DEBUG_PRINT(&tc);
  tc.count_ = 0;
  if (kIsDebug) {
    tc.local_ = false;
  }
}
// Release every cached record back to its domain at thread exit.
inline void hazptr_tc_shutdown(hazptr_tc& tc) {
  HAZPTR_DEBUG_PRINT(&tc);
  for (size_t i = 0; i < tc.count_; ++i) {
    tc.entry_[i].evict();
  }
}
// Pop a record from this thread's cache, or nullptr if unavailable
// (cache empty or TLS already destroyed).
FOLLY_ALWAYS_INLINE hazptr_rec* hazptr_tc_try_get() {
  HAZPTR_DEBUG_PRINT(TLS_UNINITIALIZED << TLS_ALIVE << TLS_DESTROYED);
  HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
  if (LIKELY(hazptr_tls_globals().tls_state == TLS_ALIVE)) {
    HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
    return hazptr_tls_globals().tc.get();
  } else if (hazptr_tls_globals().tls_state == TLS_UNINITIALIZED) {
    return hazptr_tls_globals().tc.get();
  }
  return nullptr;
}
// Cache a record in this thread's cache; returns false if the TLS is
// not alive or the cache is full (caller then releases to the domain).
FOLLY_ALWAYS_INLINE bool hazptr_tc_try_put(hazptr_rec* hprec) {
  HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
  if (LIKELY(hazptr_tls_globals().tls_state == TLS_ALIVE)) {
    HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
    return hazptr_tls_globals().tc.put(hprec);
  }
  return false;
}
/**
* hazptr_priv
*/
// Initialize a thread's private retired list.
inline void hazptr_priv_init(hazptr_priv& priv) {
  HAZPTR_DEBUG_PRINT(&priv);
  priv.init();
}
// Deactivate the private list at thread exit, flushing any remaining
// retired objects to the default domain.
inline void hazptr_priv_shutdown(hazptr_priv& priv) {
  HAZPTR_DEBUG_PRINT(&priv);
  DCHECK(priv.active());
  priv.clear_active();
  if (!priv.empty()) {
    priv.push_all_to_domain();
  }
}
// Try to retire obj onto this thread's private list; returns false when
// the TLS has been destroyed (caller then retires to the domain).
inline bool hazptr_priv_try_retire(hazptr_obj* obj) {
  HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
  if (hazptr_tls_globals().tls_state == TLS_ALIVE) {
    HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
    hazptr_tls_globals().priv.push(obj);
    return true;
  } else if (hazptr_tls_globals().tls_state == TLS_UNINITIALIZED) {
    HAZPTR_DEBUG_PRINT(hazptr_tls_globals().tls_state);
    hazptr_tls_globals().priv.push(obj);
    return true;
  }
  return false;
}
} // namespace hazptr
} // namespace folly
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
namespace hazptr {
FOLLY_STATIC_CTOR_PRIORITY_MAX hazptr_domain default_domain_;
hazptr_stats hazptr_stats_;
// Report the compile-time thread-caching setting.
bool hazptr_tc_enabled() {
  return HAZPTR_TC;
}
// Report the compile-time private-retired-list setting.
bool hazptr_priv_enabled() {
  return HAZPTR_PRIV;
}
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define HAZPTR_H
#include <atomic>
/* Stand-in for C++17 std::pmr::memory_resource */
#include <folly/experimental/hazptr/memory_resource.h>
namespace folly {
namespace hazptr {
/** hazptr_rec: Private class that contains hazard pointers. */
class hazptr_rec;
/** hazptr_obj: Private class for objects protected by hazard pointers. */
class hazptr_obj;
/** hazptr_obj_base: Base template for objects protected by hazard pointers. */
template <typename T, typename Deleter>
class hazptr_obj_base;
/** hazptr_obj_base_refcounted:
* Base template for reference counted objects protected by hazard pointers.
*/
template <typename T, typename Deleter>
class hazptr_obj_base_refcounted;
/** hazptr_array: Optimized template for bulk construction and destruction of
 *  hazard pointers */
template <size_t M>
class hazptr_array;
/** hazptr_local: Optimized template for locally-used hazard pointers */
template <size_t M>
class hazptr_local;
/** hazptr_priv: Per-thread list of retired objects pushed in bulk to domain */
class hazptr_priv;
/** hazptr_domain: Class of hazard pointer domains. Each domain manages a set
 *  of hazard pointers and a set of retired objects. */
class hazptr_domain {
  // Allocator for hazptr_rec-s.
  memory_resource* mr_;
  // Singly-linked list of all hazptr_rec-s ever allocated (never shrinks).
  std::atomic<hazptr_rec*> hazptrs_ = {nullptr};
  // Singly-linked list of retired, not-yet-reclaimed objects.
  std::atomic<hazptr_obj*> retired_ = {nullptr};
  /* Using signed int for rcount_ because it may transiently be
   * negative. Using signed int for all integer variables that may be
   * involved in calculations related to the value of rcount_. */
  std::atomic<int> hcount_ = {0};
  std::atomic<int> rcount_ = {0};
  // Minimum interval between timed cleanups, in nanoseconds.
  static constexpr uint64_t syncTimePeriod_{2000000000}; // in ns
  std::atomic<uint64_t> syncTime_{0};

 public:
  constexpr explicit hazptr_domain(
      memory_resource* = get_default_resource()) noexcept;
  ~hazptr_domain();

  hazptr_domain(const hazptr_domain&) = delete;
  hazptr_domain(hazptr_domain&&) = delete;
  hazptr_domain& operator=(const hazptr_domain&) = delete;
  hazptr_domain& operator=(hazptr_domain&&) = delete;

  /** Free-function retire. May allocate memory */
  template <typename T, typename D = std::default_delete<T>>
  void retire(T* obj, D reclaim = {});
  void cleanup();
  void tryTimedCleanup();

 private:
  friend class hazptr_holder;
  template <typename, typename>
  friend class hazptr_obj_base;
  template <typename, typename>
  friend class hazptr_obj_base_refcounted;
  friend class hazptr_priv;

  void objRetire(hazptr_obj*);
  hazptr_rec* hazptrAcquire();
  void hazptrRelease(hazptr_rec*) noexcept;
  int pushRetired(hazptr_obj* head, hazptr_obj* tail, int count);
  bool reachedThreshold(int rcount);
  void tryBulkReclaim();
  void bulkReclaim();
};
/** Get the default hazptr_domain */
hazptr_domain& default_hazptr_domain();
extern hazptr_domain default_domain_;
/** Free-function retire, that operates on the default domain */
template <typename T, typename D = std::default_delete<T>>
void hazptr_retire(T* obj, D reclaim = {});
/** hazptr_cleanup
* Reclaims all reclaimable objects retired to the domain before this call.
*/
void hazptr_cleanup(hazptr_domain& domain = default_hazptr_domain());
/** Definition of hazptr_obj
 *  Intrusive base for all objects protectable by hazard pointers:
 *  carries the reclamation function and the retired-list link. */
class hazptr_obj {
  friend class hazptr_domain;
  template <typename, typename>
  friend class hazptr_obj_base;
  template <typename, typename>
  friend class hazptr_obj_base_refcounted;
  friend class hazptr_priv;

  // Set at retire time; invoked by reclamation to destroy the object.
  void (*reclaim_)(hazptr_obj*);
  hazptr_obj* next_;

 public:
  // All constructors set next_ to this in order to catch misuse bugs like
  // double retire.
  hazptr_obj() noexcept : next_(this) {}
  hazptr_obj(const hazptr_obj&) noexcept : next_(this) {}
  hazptr_obj(hazptr_obj&&) noexcept : next_(this) {}

  // Copy/move assignment deliberately leave next_ alone.
  hazptr_obj& operator=(const hazptr_obj&) {
    return *this;
  }
  hazptr_obj& operator=(hazptr_obj&&) {
    return *this;
  }

 private:
  void set_next(hazptr_obj* obj) {
    next_ = obj;
  }

  void retireCheck() {
    // Only for catching misuse bugs like double retire
    if (next_ != this) {
      retireCheckFail();
    }
  }

  FOLLY_NOINLINE void retireCheckFail() {
    CHECK_EQ(next_, this);
  }

  const void* getObjPtr() const;
};
/** Definition of hazptr_obj_base */
template <typename T, typename D = std::default_delete<T>>
class hazptr_obj_base : public hazptr_obj {
 public:
  /* Retire a removed object and pass the responsibility for
   * reclaiming it to the hazptr library */
  void retire(hazptr_domain& domain = default_hazptr_domain(), D reclaim = {});

 private:
  // Deleter invoked by the reclamation function once unprotected.
  D deleter_;
};
/** Definition of hazptr_obj_base_refcounted */
template <typename T, typename D = std::default_delete<T>>
class hazptr_obj_base_refcounted : public hazptr_obj {
 public:
  /* Retire a removed object and pass the responsibility for
   * reclaiming it to the hazptr library */
  void retire(hazptr_domain& domain = default_hazptr_domain(), D reclaim = {});

  /* acquire_ref() increments the reference count
   *
   * acquire_ref_safe() is the same as acquire_ref() except that in
   * addition the caller guarantees that the call is made in a
   * thread-safe context, e.g., the object is not yet shared. This is
   * just an optimization to save an atomic operation.
   *
   * release_ref() decrements the reference count and returns true if
   * the object is safe to reclaim.
   */
  void acquire_ref();
  void acquire_ref_safe();
  bool release_ref();

 private:
  void preRetire(D deleter);

  std::atomic<uint32_t> refcount_{0};
  D deleter_;
};
/** hazptr_holder: Class for automatic acquisition and release of
 *  hazard pointers, and interface for hazard pointer operations. */
class hazptr_holder {
  template <size_t M>
  friend class hazptr_array;
  template <size_t M>
  friend class hazptr_local;

 public:
  /* Constructor automatically acquires a hazard pointer. */
  explicit hazptr_holder(hazptr_domain& domain = default_hazptr_domain());
  /* Construct an empty hazptr_holder. */
  // Note: This diverges from the proposal in P0233R4
  explicit hazptr_holder(std::nullptr_t) noexcept;
  /* Destructor automatically clears and releases the owned hazard pointer. */
  ~hazptr_holder();
  /* Copying is disallowed; a hazard pointer has exactly one owner. */
  hazptr_holder(const hazptr_holder&) = delete;
  hazptr_holder& operator=(const hazptr_holder&) = delete;
  // Note: This diverges from the proposal in P0233R4 which disallows
  // move constructor and assignment operator.
  hazptr_holder(hazptr_holder&&) noexcept;
  hazptr_holder& operator=(hazptr_holder&&) noexcept;
  /** Hazard pointer operations */
  /* Returns a protected pointer from the source */
  template <typename T>
  T* get_protected(const std::atomic<T*>& src) noexcept;
  /* Returns a protected pointer from the source, filtering
     the protected pointer through function Func. Useful for
     stealing bits of the pointer word */
  template <typename T, typename Func>
  T* get_protected(const std::atomic<T*>& src, Func f) noexcept;
  /* Return true if successful in protecting ptr if src == ptr after
   * setting the hazard pointer. Otherwise sets ptr to src. */
  template <typename T>
  bool try_protect(T*& ptr, const std::atomic<T*>& src) noexcept;
  /* Return true if successful in protecting ptr if src == ptr after
   * setting the hazard pointer, filtering the pointer through Func.
   * Otherwise sets ptr to src. */
  template <typename T, typename Func>
  bool try_protect(T*& ptr, const std::atomic<T*>& src, Func f) noexcept;
  /* Set the hazard pointer to ptr */
  template <typename T>
  void reset(const T* ptr) noexcept;
  /* Set the hazard pointer to nullptr */
  void reset(std::nullptr_t = nullptr) noexcept;
  /* Swap ownership of hazard pointers between hazptr_holder-s. */
  /* Note: The owned hazard pointers remain unmodified during the swap
   * and continue to protect the respective objects that they were
   * protecting before the swap, if any. */
  void swap(hazptr_holder&) noexcept;

 private:
  // Domain the owned hazard pointer record belongs to.
  hazptr_domain* domain_;
  // The owned hazard pointer record, if any.
  hazptr_rec* hazptr_;
};
/* Free-function swap; see hazptr_holder::swap for semantics. */
void swap(hazptr_holder&, hazptr_holder&) noexcept;

/* Uninitialized storage suitably sized and aligned for a hazptr_holder;
 * used by hazptr_array/hazptr_local for bulk construction. */
using aligned_hazptr_holder = typename std::
    aligned_storage<sizeof(hazptr_holder), alignof(hazptr_holder)>::type;
/**
 * hazptr_array: Optimized for bulk construction and destruction of
 * hazptr_holder-s.
 *
 * WARNING: Do not move from or to individual hazptr_holder-s.
 * Only move the whole hazptr_array.
 */
template <size_t M = 1>
class hazptr_array {
  static_assert(M > 0, "M must be a positive integer.");

 public:
  hazptr_array();
  explicit hazptr_array(std::nullptr_t) noexcept;
  hazptr_array(const hazptr_array&) = delete;
  hazptr_array& operator=(const hazptr_array&) = delete;
  hazptr_array(hazptr_array&& other) noexcept;
  hazptr_array& operator=(hazptr_array&& other) noexcept;
  ~hazptr_array();
  hazptr_holder& operator[](size_t i) noexcept;

 private:
  // Raw storage for the M holders, constructed/destroyed in bulk.
  aligned_hazptr_holder raw_[M];
  // Presumably true when the holders were constructed empty or moved
  // away — TODO confirm against hazptr-impl.h.
  bool empty_{false};
};
/**
 * hazptr_local: Optimized for construction and destruction of
 * one or more hazptr_holder-s with local scope.
 *
 * WARNING 1: Do not move from or to individual hazptr_holder-s.
 *
 * WARNING 2: There can only be one hazptr_local active for the same
 * thread at any time. This is not tracked and checked by the
 * implementation because it would negate the performance gains of
 * this class.
 */
template <size_t M = 1>
class hazptr_local {
  static_assert(M > 0, "M must be a positive integer.");

 public:
  hazptr_local();
  hazptr_local(const hazptr_local&) = delete;
  hazptr_local& operator=(const hazptr_local&) = delete;
  hazptr_local(hazptr_local&&) = delete;
  hazptr_local& operator=(hazptr_local&&) = delete;
  ~hazptr_local();
  hazptr_holder& operator[](size_t i) noexcept;

 private:
  // Raw storage for the M holders, constructed/destroyed in bulk.
  aligned_hazptr_holder raw_[M];
  // Presumably set when the fast thread-cache path could not supply all
  // M hazard pointers — TODO confirm against hazptr-impl.h.
  bool slow_path_{false};
};
} // namespace hazptr
} // namespace folly
#include <folly/experimental/hazptr/hazptr-impl.h>
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/hazptr/memory_resource.h>
namespace folly {
namespace hazptr {
namespace {
/* Returns the address of the library-local slot that holds the current
 * default memory_resource. The slot is lazily initialized (on first use)
 * to the new/delete-backed resource. */
memory_resource** default_mr_ptr() {
  /* library-local */ static memory_resource* resource_slot =
      new_delete_resource();
  HAZPTR_DEBUG_PRINT(&resource_slot << " " << resource_slot);
  return &resource_slot;
}
} // namespace
/* Returns the currently installed default memory_resource. */
memory_resource* get_default_resource() {
  HAZPTR_DEBUG_PRINT("");
  memory_resource** slot = default_mr_ptr();
  return *slot;
}
/* Installs mr as the default memory_resource for subsequent callers of
 * get_default_resource(). */
void set_default_resource(memory_resource* mr) {
  HAZPTR_DEBUG_PRINT("");
  memory_resource** slot = default_mr_ptr();
  *slot = mr;
}
/* Returns a process-wide singleton memory_resource backed by global
 * operator new[] / delete[]. Alignment is accepted but ignored. */
memory_resource* new_delete_resource() {
  class new_delete_adaptor : public memory_resource {
   public:
    void* allocate(const size_t bytes, const size_t alignment = max_align_v)
        override {
      (void)alignment;
      char* raw = new char[bytes];
      void* mem = static_cast<void*>(raw);
      HAZPTR_DEBUG_PRINT(this << " " << mem << " " << bytes);
      return mem;
    }
    void deallocate(
        void* mem,
        const size_t bytes,
        const size_t alignment = max_align_v) override {
      (void)alignment;
      (void)bytes;
      HAZPTR_DEBUG_PRINT(mem << " " << bytes);
      delete[] static_cast<char*>(mem);
    }
  };
  static new_delete_adaptor singleton;
  return &singleton;
}
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
////////////////////////////////////////////////////////////////////////////////
/// Disclaimer: This is intended only as a partial stand-in for
/// std::pmr::memory_resource (C++17) as needed for developing a
/// hazptr prototype.
////////////////////////////////////////////////////////////////////////////////
#include <memory>
#include <folly/Portability.h>
#include <folly/lang/Align.h>
namespace folly {
namespace hazptr {
/* Minimal abstract allocator interface; a partial stand-in for
 * std::pmr::memory_resource (see the disclaimer above). */
class memory_resource {
 public:
  virtual ~memory_resource() = default;
  // Returns storage of at least `bytes` bytes, aligned to `alignment`.
  virtual void* allocate(
      const size_t bytes,
      const size_t alignment = max_align_v) = 0;
  // Releases storage previously obtained from allocate() with the same
  // size and alignment.
  virtual void deallocate(
      void* p,
      const size_t bytes,
      const size_t alignment = max_align_v) = 0;
};
/* Returns the current default memory resource (initially the
 * new/delete-backed resource). */
memory_resource* get_default_resource();
/* Replaces the default memory resource. */
void set_default_resource(memory_resource*);
/* Returns a singleton resource backed by operator new[]/delete[]. */
memory_resource* new_delete_resource();
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define HAZPTR_STATS true
#define HAZPTR_SCAN_THRESHOLD 10
#include <folly/experimental/hazptr/hazptr.h>
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/example/LockFreeLIFO.h>
#include <folly/experimental/hazptr/example/MWMRSet.h>
#include <folly/experimental/hazptr/example/SWMRList.h>
#include <folly/experimental/hazptr/example/WideCAS.h>
#include <folly/experimental/hazptr/test/HazptrUse1.h>
#include <folly/experimental/hazptr/test/HazptrUse2.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <condition_variable>
#include <thread>
// Command-line knobs shared by the tests below.
DEFINE_int32(num_threads, 5, "Number of threads");
DEFINE_int64(num_reps, 1, "Number of test reps");
DEFINE_int64(num_ops, 1007, "Number of ops or pairs of ops per rep");
using namespace folly::hazptr;
// Test fixture that logs test-scope boundaries for debug tracing.
class HazptrTest : public testing::Test {
 public:
  HazptrTest() : Test() {
    HAZPTR_DEBUG_PRINT("========== start of test scope");
  }
  ~HazptrTest() override {
    HAZPTR_DEBUG_PRINT("========== end of test scope");
  }
};
// Basic end-to-end exercise of holders, domains, try_protect/reset/swap,
// and retire with the default deleter, on malloc'd placement-new'd nodes.
TEST_F(HazptrTest, Test1) {
  HAZPTR_DEBUG_PRINT("");
  // Allocate four nodes with malloc + placement new (reclaimed via
  // Node1's free-based reclaimer).
  Node1* node0 = (Node1*)malloc(sizeof(Node1));
  node0 = new (node0) Node1;
  HAZPTR_DEBUG_PRINT("=== malloc node0 " << node0 << " " << sizeof(*node0));
  Node1* node1 = (Node1*)malloc(sizeof(Node1));
  node1 = new (node1) Node1;
  HAZPTR_DEBUG_PRINT("=== malloc node1 " << node1 << " " << sizeof(*node1));
  Node1* node2 = (Node1*)malloc(sizeof(Node1));
  node2 = new (node2) Node1;
  HAZPTR_DEBUG_PRINT("=== malloc node2 " << node2 << " " << sizeof(*node2));
  Node1* node3 = (Node1*)malloc(sizeof(Node1));
  node3 = new (node3) Node1;
  HAZPTR_DEBUG_PRINT("=== malloc node3 " << node3 << " " << sizeof(*node3));
  HAZPTR_DEBUG_PRINT("");
  // Shared sources the hazard pointers will protect from.
  std::atomic<Node1*> shared0 = {node0};
  std::atomic<Node1*> shared1 = {node1};
  std::atomic<Node1*> shared2 = {node2};
  std::atomic<Node1*> shared3 = {node3};
  // One default-constructed domain and one using a custom memory resource.
  MyMemoryResource myMr;
  HAZPTR_DEBUG_PRINT("=== myMr " << &myMr);
  hazptr_domain myDomain0;
  HAZPTR_DEBUG_PRINT("=== myDomain0 " << &myDomain0);
  hazptr_domain myDomain1(&myMr);
  HAZPTR_DEBUG_PRINT("=== myDomain1 " << &myDomain1);
  HAZPTR_DEBUG_PRINT("");
  // Holders in the default domain and in the custom domains.
  HAZPTR_DEBUG_PRINT("=== hptr0");
  hazptr_holder hptr0;
  HAZPTR_DEBUG_PRINT("=== hptr1");
  hazptr_holder hptr1(myDomain0);
  HAZPTR_DEBUG_PRINT("=== hptr2");
  hazptr_holder hptr2(myDomain1);
  HAZPTR_DEBUG_PRINT("=== hptr3");
  hazptr_holder hptr3;
  HAZPTR_DEBUG_PRINT("");
  Node1* n0 = shared0.load();
  Node1* n1 = shared1.load();
  Node1* n2 = shared2.load();
  Node1* n3 = shared3.load();
  // try_protect should succeed: the sources have not changed.
  CHECK(hptr0.try_protect(n0, shared0));
  CHECK(hptr1.try_protect(n1, shared1));
  // Exercise both reset overloads and swap.
  hptr1.reset();
  hptr1.reset(nullptr);
  hptr1.reset(n2);
  CHECK(hptr2.try_protect(n3, shared3));
  swap(hptr1, hptr2);
  hptr3.reset();
  HAZPTR_DEBUG_PRINT("");
  // Retire each node to a different domain.
  HAZPTR_DEBUG_PRINT("=== retire n0 " << n0);
  n0->retire();
  HAZPTR_DEBUG_PRINT("=== retire n1 " << n1);
  n1->retire(default_hazptr_domain());
  HAZPTR_DEBUG_PRINT("=== retire n2 " << n2);
  n2->retire(myDomain0);
  HAZPTR_DEBUG_PRINT("=== retire n3 " << n3);
  n3->retire(myDomain1);
}
// Like Test1, but with Node2 and explicit function-pointer reclaim
// functions (delete vs free) passed to retire().
TEST_F(HazptrTest, Test2) {
  // node0 is new'd; node1-3 are malloc + placement new.
  Node2* node0 = new Node2;
  HAZPTR_DEBUG_PRINT("=== new node0 " << node0 << " " << sizeof(*node0));
  Node2* node1 = (Node2*)malloc(sizeof(Node2));
  node1 = new (node1) Node2;
  HAZPTR_DEBUG_PRINT("=== malloc node1 " << node1 << " " << sizeof(*node1));
  Node2* node2 = (Node2*)malloc(sizeof(Node2));
  node2 = new (node2) Node2;
  HAZPTR_DEBUG_PRINT("=== malloc node2 " << node2 << " " << sizeof(*node2));
  Node2* node3 = (Node2*)malloc(sizeof(Node2));
  node3 = new (node3) Node2;
  HAZPTR_DEBUG_PRINT("=== malloc node3 " << node3 << " " << sizeof(*node3));
  HAZPTR_DEBUG_PRINT("");
  std::atomic<Node2*> shared0 = {node0};
  std::atomic<Node2*> shared1 = {node1};
  std::atomic<Node2*> shared2 = {node2};
  std::atomic<Node2*> shared3 = {node3};
  MineMemoryResource mineMr;
  HAZPTR_DEBUG_PRINT("=== mineMr " << &mineMr);
  hazptr_domain mineDomain0;
  HAZPTR_DEBUG_PRINT("=== mineDomain0 " << &mineDomain0);
  hazptr_domain mineDomain1(&mineMr);
  HAZPTR_DEBUG_PRINT("=== mineDomain1 " << &mineDomain1);
  HAZPTR_DEBUG_PRINT("");
  HAZPTR_DEBUG_PRINT("=== hptr0");
  hazptr_holder hptr0;
  HAZPTR_DEBUG_PRINT("=== hptr1");
  hazptr_holder hptr1(mineDomain0);
  HAZPTR_DEBUG_PRINT("=== hptr2");
  hazptr_holder hptr2(mineDomain1);
  HAZPTR_DEBUG_PRINT("=== hptr3");
  hazptr_holder hptr3;
  HAZPTR_DEBUG_PRINT("");
  Node2* n0 = shared0.load();
  Node2* n1 = shared1.load();
  Node2* n2 = shared2.load();
  Node2* n3 = shared3.load();
  CHECK(hptr0.try_protect(n0, shared0));
  CHECK(hptr1.try_protect(n1, shared1));
  hptr1.reset();
  hptr1.reset(n2);
  CHECK(hptr2.try_protect(n3, shared3));
  swap(hptr1, hptr2);
  hptr3.reset();
  HAZPTR_DEBUG_PRINT("");
  // node0 was new'd, so it is reclaimed with delete; the rest with free.
  HAZPTR_DEBUG_PRINT("=== retire n0 " << n0);
  n0->retire(default_hazptr_domain(), &mineReclaimFnDelete);
  HAZPTR_DEBUG_PRINT("=== retire n1 " << n1);
  n1->retire(default_hazptr_domain(), &mineReclaimFnFree);
  HAZPTR_DEBUG_PRINT("=== retire n2 " << n2);
  n2->retire(mineDomain0, &mineReclaimFnFree);
  HAZPTR_DEBUG_PRINT("=== retire n3 " << n3);
  n3->retire(mineDomain1, &mineReclaimFnFree);
}
// Concurrent push/pop stress on the hazptr-based lock-free LIFO.
TEST_F(HazptrTest, LIFO) {
  using T = uint32_t;
  CHECK_GT(FLAGS_num_threads, 0);
  for (int i = 0; i < FLAGS_num_reps; ++i) {
    HAZPTR_DEBUG_PRINT("========== start of rep scope");
    LockFreeLIFO<T> s;
    std::vector<std::thread> threads(FLAGS_num_threads);
    for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
      threads[tid] = std::thread([&s, tid]() {
        // Each thread pushes then pops its strided share of the ops.
        for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
          s.push(j);
          T res;
          while (!s.pop(res)) {
            /* keep trying */
          }
        }
      });
    }
    for (auto& t : threads) {
      t.join();
    }
    HAZPTR_DEBUG_PRINT("========== end of rep scope");
  }
}
// Single-writer multi-reader list set: reader threads scan concurrently
// while the main thread (the single writer) adds and removes elements.
TEST_F(HazptrTest, SWMRLIST) {
  using T = uint64_t;
  CHECK_GT(FLAGS_num_threads, 0);
  for (int i = 0; i < FLAGS_num_reps; ++i) {
    HAZPTR_DEBUG_PRINT("========== start of rep scope");
    SWMRListSet<T> s;
    std::vector<std::thread> threads(FLAGS_num_threads);
    for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
      threads[tid] = std::thread([&s, tid]() {
        for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
          s.contains(j);
        }
      });
    }
    // Writer adds then removes while the readers are scanning.
    for (int j = 0; j < 10; ++j) {
      s.add(j);
    }
    for (int j = 0; j < 10; ++j) {
      s.remove(j);
    }
    for (auto& t : threads) {
      t.join();
    }
    HAZPTR_DEBUG_PRINT("========== end of rep scope");
  }
}
// Multi-writer multi-reader list set: every thread mixes contains/add/
// remove, concurrently with add/remove from the main thread.
TEST_F(HazptrTest, MWMRSet) {
  using T = uint64_t;
  CHECK_GT(FLAGS_num_threads, 0);
  for (int i = 0; i < FLAGS_num_reps; ++i) {
    HAZPTR_DEBUG_PRINT("========== start of rep scope");
    MWMRListSet<T> s;
    std::vector<std::thread> threads(FLAGS_num_threads);
    for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
      threads[tid] = std::thread([&s, tid]() {
        for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
          s.contains(j);
          s.add(j);
          s.remove(j);
        }
      });
    }
    for (int j = 0; j < 10; ++j) {
      s.add(j);
    }
    for (int j = 0; j < 10; ++j) {
      s.remove(j);
    }
    for (auto& t : threads) {
      t.join();
    }
    HAZPTR_DEBUG_PRINT("========== end of rep scope");
  }
}
// Exercises WideCAS: a successful CAS from the empty string, a failing
// CAS on a stale expected value, then two more successful transitions.
TEST_F(HazptrTest, WIDECAS) {
  WideCAS s;
  auto tryCas = [&s](const char* expected, const char* desired) {
    std::string u = expected;
    std::string v = desired;
    return s.cas(u, v);
  };
  CHECK(tryCas("", "11112222"));
  // Expected value no longer matches — must fail.
  CHECK(!tryCas("", "11112222"));
  CHECK(tryCas("11112222", "22223333"));
  CHECK(tryCas("22223333", "333344445555"));
}
// Objects with virtual destructors can be protected and retired: while
// the hazard pointer holds the object, it must remain accessible.
TEST_F(HazptrTest, VirtualTest) {
  struct Thing : public hazptr_obj_base<Thing> {
    virtual ~Thing() {
      HAZPTR_DEBUG_PRINT("this: " << this << " &a: " << &a << " a: " << a);
    }
    int a;
  };
  for (int round = 0; round < 100; ++round) {
    auto* thing = new Thing;
    thing->a = round;
    hazptr_holder holder;
    holder.reset(thing);
    thing->retire();
    // Still protected by holder, so the value must remain readable.
    EXPECT_EQ(thing->a, round);
  }
}
// Builds a 2000-long chain of Things whose destructors retire the next
// link, exercising cascading retirement within the given domain.
void destructionTest(hazptr_domain& domain) {
  struct Thing : public hazptr_obj_base<Thing> {
    Thing* next;
    hazptr_domain* domain;
    int val;
    Thing(int v, Thing* n, hazptr_domain* d) : next(n), domain(d), val(v) {}
    ~Thing() {
      HAZPTR_DEBUG_PRINT(
          "this: " << this << " val: " << val << " next: " << next);
      // Destroying one link retires its successor, cascading down the chain.
      if (next) {
        next->retire(*domain);
      }
    }
  };
  Thing* last{nullptr};
  for (int i = 0; i < 2000; i++) {
    last = new Thing(i, last, &domain);
  }
  last->retire(domain);
}
// Runs destructionTest against a local domain (reclaimed when the
// domain is destroyed) and against the default domain.
TEST_F(HazptrTest, DestructionTest) {
  {
    hazptr_domain myDomain0;
    destructionTest(myDomain0);
  }
  destructionTest(default_hazptr_domain());
}
// hazptr_holder move semantics: protection must survive move
// construction and assignment, and self-move must be a no-op.
TEST_F(HazptrTest, Move) {
  struct Foo : hazptr_obj_base<Foo> {
    int a;
  };
  for (int round = 0; round < 100; ++round) {
    Foo* obj = new Foo;
    obj->a = round;
    hazptr_holder h0;
    // Protect, then retire; obj must stay alive while protected.
    h0.reset(obj);
    obj->retire();
    // Move construction transfers protection.
    hazptr_holder h1(std::move(h0));
    // Self-move must be a no-op and keep the protection.
    hazptr_holder* alias = &h1;
    CHECK_EQ(alias, &h1);
    h1 = std::move(*alias);
    // Move assignment into an empty holder transfers protection.
    hazptr_holder h2(nullptr);
    h2 = std::move(h1);
    // The object is still accessible through the protected pointer.
    CHECK_EQ(obj->a, round);
    // Drop protection; obj may now be reclaimed.
    h2.reset();
  }
}
// Exercises hazptr_array: bulk protection, move assignment between
// arrays, and arrays larger than the hazptr thread cache.
TEST_F(HazptrTest, Array) {
  struct Foo : hazptr_obj_base<Foo> {
    int a;
  };
  for (int i = 0; i < 100; ++i) {
    Foo* x = new Foo;
    x->a = i;
    hazptr_array<10> hptr;
    // Protect object
    hptr[9].reset(x);
    // Empty array
    hazptr_array<10> h(nullptr);
    // Move assignment; h now owns the protecting holders
    h = std::move(hptr);
    // Retire object
    x->retire();
    // Unprotect object - h[9] is nonempty
    h[9].reset();
  }
  {
    // Abnormal case: array larger than the hazptr thread cache.
    hazptr_array<HAZPTR_TC_SIZE + 1> h;
    hazptr_array<HAZPTR_TC_SIZE + 1> h2(std::move(h));
  }
}
// Forces the hazptr_array destructor down the path where the thread
// cache is already full and holders must be destroyed individually.
TEST_F(HazptrTest, ArrayDtorWithoutSpaceInTCache) {
  struct Foo : hazptr_obj_base<Foo> {
    int a;
  };
  {
    // Fill the thread cache
    hazptr_array<HAZPTR_TC_SIZE> w;
  }
  {
    // Empty array x
    hazptr_array<HAZPTR_TC_SIZE> x(nullptr);
    {
      // y ctor gets elements from the thread cache filled by w dtor.
      hazptr_array<HAZPTR_TC_SIZE> y;
      // z ctor gets elements from the default domain.
      hazptr_array<HAZPTR_TC_SIZE> z;
      // Elements of y are moved to x.
      x = std::move(y);
      // z dtor fills the thread cache.
    }
    // x dtor finds the thread cache full. It has to call
    // ~hazptr_holder() for each of its elements, which were
    // previously taken from the thread cache by y ctor.
  }
}
// Exercises hazptr_local, including a size exceeding the thread cache.
TEST_F(HazptrTest, Local) {
  struct Foo : hazptr_obj_base<Foo> {
    int a;
  };
  for (int i = 0; i < 100; ++i) {
    Foo* x = new Foo;
    x->a = i;
    hazptr_local<10> hptr;
    // Protect object
    hptr[9].reset(x);
    // Retire object
    x->retire();
    // Unprotect object - hptr[9] is nonempty
    hptr[9].reset();
  }
  {
    // Abnormal case: local larger than the hazptr thread cache.
    hazptr_local<HAZPTR_TC_SIZE + 1> h;
  }
}
/* Test ref counting */
// Global lifetime counters used by the refcount tests below.
std::atomic<int> constructed;
std::atomic<int> destroyed;
// Linked node with manual reference counting. The destructor walks the
// chain releasing references and deleting nodes whose count hits zero;
// marked_ prevents a deleted node's destructor from re-walking the chain.
struct Foo : hazptr_obj_base_refcounted<Foo> {
  int val_;
  bool marked_;
  Foo* next_;
  Foo(int v, Foo* n) : val_(v), marked_(false), next_(n) {
    HAZPTR_DEBUG_PRINT("");
    constructed.fetch_add(1);
  }
  ~Foo() {
    HAZPTR_DEBUG_PRINT("");
    destroyed.fetch_add(1);
    // A marked node is being deleted by a predecessor's walk below;
    // do not walk again.
    if (marked_) {
      return;
    }
    // Release the successor's reference; if it drops to zero, delete the
    // rest of the chain iteratively rather than recursively.
    auto next = next_;
    while (next) {
      if (!next->release_ref()) {
        return;
      }
      auto p = next;
      next = p->next_;
      p->marked_ = true;
      delete p;
    }
  }
};
// Trivial retirable object used to push retired-list thresholds and
// force reclamation passes.
struct Dummy : hazptr_obj_base<Dummy> {};
// Single-threaded refcount test: builds a chain of Foos, protects the
// head, retires the rest, and verifies nothing is destroyed until both
// the protection and the last reference are released.
TEST_F(HazptrTest, basic_refcount) {
  constructed.store(0);
  destroyed.store(0);
  Foo* p = nullptr;
  int num = 20;
  // Alternate between the plain and "safe" acquire paths.
  for (int i = 0; i < num; ++i) {
    p = new Foo(i, p);
    if (i & 1) {
      p->acquire_ref_safe();
    } else {
      p->acquire_ref();
    }
  }
  hazptr_holder hptr;
  hptr.reset(p);
  for (auto q = p->next_; q; q = q->next_) {
    q->retire();
  }
  // The chain is still traversable: the head is protected and each link
  // holds a reference on its successor.
  int v = num;
  for (auto q = p; q; q = q->next_) {
    CHECK_GT(v, 0);
    --v;
    CHECK_EQ(q->val_, v);
  }
  CHECK(!p->release_ref());
  CHECK_EQ(constructed.load(), num);
  CHECK_EQ(destroyed.load(), 0);
  p->retire();
  CHECK_EQ(constructed.load(), num);
  CHECK_EQ(destroyed.load(), 0);
  hptr.reset();
  /* retire enough objects to guarantee reclamation of Foo objects */
  for (int i = 0; i < 100; ++i) {
    auto a = new Dummy;
    a->retire();
  }
  CHECK_EQ(constructed.load(), num);
  CHECK_EQ(destroyed.load(), num);
}
// Multi-threaded refcount test: reader threads traverse the chain under
// hazard pointer protection while the main thread unlinks and retires it.
TEST_F(HazptrTest, mt_refcount) {
  constructed.store(0);
  destroyed.store(0);
  std::atomic<bool> ready(false);
  std::atomic<int> setHazptrs(0);
  std::atomic<Foo*> head{nullptr};
  int num = 20;
  int nthr = 10;
  std::vector<std::thread> thr(nthr);
  for (int i = 0; i < nthr; ++i) {
    thr[i] = std::thread([&] {
      // Wait until the chain has been published.
      while (!ready.load()) {
        /* spin */
      }
      hazptr_holder hptr;
      auto p = hptr.get_protected(head);
      ++setHazptrs;
      /* Concurrent with removal */
      int v = num;
      for (auto q = p; q; q = q->next_) {
        CHECK_GT(v, 0);
        --v;
        CHECK_EQ(q->val_, v);
      }
      CHECK_EQ(v, 0);
    });
  }
  // Build and publish the chain; each link holds a ref on its successor.
  Foo* p = nullptr;
  for (int i = 0; i < num; ++i) {
    p = new Foo(i, p);
    p->acquire_ref_safe();
  }
  head.store(p);
  ready.store(true);
  // Wait until every reader holds a hazard pointer to the head.
  while (setHazptrs.load() < nthr) {
    /* spin */
  }
  /* this is concurrent with traversal by reader */
  head.store(nullptr);
  for (auto q = p; q; q = q->next_) {
    q->retire();
  }
  HAZPTR_DEBUG_PRINT("Foo should not be destroyed");
  CHECK_EQ(constructed.load(), num);
  CHECK_EQ(destroyed.load(), 0);
  HAZPTR_DEBUG_PRINT("Foo may be destroyed after releasing the last reference");
  if (p->release_ref()) {
    delete p;
  }
  /* retire enough objects to guarantee reclamation of Foo objects */
  for (int i = 0; i < 100; ++i) {
    auto a = new Dummy;
    a->retire();
  }
  for (int i = 0; i < nthr; ++i) {
    thr[i].join();
  }
  CHECK_EQ(constructed.load(), num);
  CHECK_EQ(destroyed.load(), num);
}
// Exercises the free-function retire API: default deleter, custom
// lambda deleter, and domain-scoped retire reclaimed when the domain
// is destroyed.
TEST_F(HazptrTest, FreeFunctionRetire) {
  auto foo = new int;
  hazptr_retire(foo);
  auto foo2 = new int;
  hazptr_retire(foo2, [](int* obj) { delete obj; });
  bool retired = false;
  {
    hazptr_domain myDomain0;
    // Destructor flags that the object was actually reclaimed.
    struct delret {
      bool* retired_;
      delret(bool* retire) : retired_(retire) {}
      ~delret() {
        *retired_ = true;
      }
    };
    auto foo3 = new delret(&retired);
    myDomain0.retire(foo3);
  }
  // Destroying the domain must reclaim everything retired to it.
  EXPECT_TRUE(retired);
}
// Verifies hazptr_cleanup(): after it returns, everything retired so far
// (from worker threads, the main thread, and after array/local use) must
// have been reclaimed.
TEST_F(HazptrTest, FreeFunctionCleanup) {
  CHECK_GT(FLAGS_num_threads, 0);
  int threadOps = 1007;
  int mainOps = 19;
  constructed.store(0);
  destroyed.store(0);
  std::atomic<int> threadsDone{0};
  std::atomic<bool> mainDone{false};
  std::vector<std::thread> threads(FLAGS_num_threads);
  for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
    threads[tid] = std::thread([&, tid]() {
      for (int j = tid; j < threadOps; j += FLAGS_num_threads) {
        auto p = new Foo(j, nullptr);
        p->retire();
      }
      threadsDone.fetch_add(1);
      // Keep the thread alive (and its private hazptr state in place)
      // until the main thread has run cleanup.
      while (!mainDone.load()) {
        /* spin */;
      }
    });
  }
  { // include the main thread in the test
    for (int i = 0; i < mainOps; ++i) {
      auto p = new Foo(0, nullptr);
      p->retire();
    }
  }
  while (threadsDone.load() < FLAGS_num_threads) {
    /* spin */;
  }
  CHECK_EQ(constructed.load(), threadOps + mainOps);
  // Cleanup must reclaim all retired objects, including those still in
  // other threads' private lists.
  hazptr_cleanup();
  CHECK_EQ(destroyed.load(), threadOps + mainOps);
  mainDone.store(true);
  for (auto& t : threads) {
    t.join();
  }
  { // Cleanup after using array
    constructed.store(0);
    destroyed.store(0);
    { hazptr_array<2> h; }
    {
      hazptr_array<2> h;
      auto p0 = new Foo(0, nullptr);
      auto p1 = new Foo(0, nullptr);
      h[0].reset(p0);
      h[1].reset(p1);
      p0->retire();
      p1->retire();
    }
    CHECK_EQ(constructed.load(), 2);
    hazptr_cleanup();
    CHECK_EQ(destroyed.load(), 2);
  }
  { // Cleanup after using local
    constructed.store(0);
    destroyed.store(0);
    { hazptr_local<2> h; }
    {
      hazptr_local<2> h;
      auto p0 = new Foo(0, nullptr);
      auto p1 = new Foo(0, nullptr);
      h[0].reset(p0);
      h[1].reset(p1);
      p0->retire();
      p1->retire();
    }
    CHECK_EQ(constructed.load(), 2);
    hazptr_cleanup();
    CHECK_EQ(destroyed.load(), 2);
  }
}
// Verifies that hazard pointer machinery remains usable across fork():
// both the parent and the child can create holders, protect, and retire
// objects after the fork.
TEST_F(HazptrTest, ForkTest) {
  struct Obj : hazptr_obj_base<Obj> {
    int a;
  };
  std::mutex m;
  std::condition_variable cv;
  std::condition_variable cv2;
  bool ready = false;
  bool ready2 = false;
  auto mkthread = [&]() {
    hazptr_holder h;
    auto p = new Obj;
    std::atomic<Obj*> ap{p};
    // Protect via the atomic source. (Previously p was passed directly,
    // which protected through an implicitly-constructed temporary atomic
    // and left ap unused.)
    h.get_protected(ap);
    p->retire();
    {
      // Signal that this thread holds a hazard pointer, then park until
      // the post-fork phase releases it.
      std::unique_lock<std::mutex> lk(m);
      ready = true;
      cv.notify_one();
      cv2.wait(lk, [&] { return ready2; });
    }
  };
  std::thread t(mkthread);
  hazptr_holder h;
  auto p = new Obj;
  std::atomic<Obj*> ap{p};
  h.get_protected(ap);
  p->retire();
  {
    // Wait until the helper thread holds its hazard pointer before forking.
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [&] { return ready; });
  }
  auto pid = fork();
  CHECK_GE(pid, 0);
  if (pid) {
    // Parent: release the parked helper thread, then wait for the child.
    {
      std::lock_guard<std::mutex> g(m);
      ready2 = true;
      cv2.notify_one();
    }
    t.join();
    int status;
    wait(&status);
    CHECK_EQ(status, 0);
  } else {
    // Child: the hazptr machinery must still work in the forked process.
    std::thread tchild(mkthread);
    {
      std::lock_guard<std::mutex> g(m);
      ready2 = true;
      cv2.notify_one();
    }
    tchild.join();
    _exit(0); // Do not print gtest results
  }
}
// Copy/move construction and assignment of hazptr_obj_base-derived
// objects must leave each object independently retirable.
TEST_F(HazptrTest, CopyAndMoveTest) {
  struct Obj : hazptr_obj_base<Obj> {
    int a;
  };
  auto retireBoth = [](Obj* first, Obj* second) {
    first->retire();
    second->retire();
  };
  // Copy construction.
  auto p1 = new Obj();
  auto p2 = new Obj(*p1);
  retireBoth(p1, p2);
  // Move construction.
  p1 = new Obj();
  p2 = new Obj(std::move(*p1));
  retireBoth(p1, p2);
  // Copy assignment.
  p1 = new Obj();
  p2 = new Obj();
  *p2 = *p1;
  retireBoth(p1, p2);
  // Move assignment.
  p1 = new Obj();
  p2 = new Obj();
  *p2 = std::move(*p1);
  retireBoth(p1, p2);
}
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
namespace hazptr {
// memory_resource backed by plain malloc/free, with debug logging.
// Alignment is accepted but ignored.
class MyMemoryResource : public memory_resource {
 public:
  void* allocate(const size_t sz, const size_t /* align */) override {
    void* mem = malloc(sz);
    HAZPTR_DEBUG_PRINT(mem << " " << sz);
    return mem;
  }
  void deallocate(void* mem, const size_t sz, const size_t /* align */)
      override {
    HAZPTR_DEBUG_PRINT(mem << " " << sz);
    free(mem);
  }
};
// Reclaim functor for malloc-allocated nodes, with debug logging.
template <typename Node1>
struct MyReclaimerFree {
  inline void operator()(Node1* node) {
    HAZPTR_DEBUG_PRINT(node << " " << sizeof(Node1));
    free(node);
  }
};
// Payload type whose retire() reclaims via the free-based functor above.
class Node1 : public hazptr_obj_base<Node1, MyReclaimerFree<Node1>> {
  char a[100];
};
} // namespace hazptr
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/experimental/hazptr/debug.h>
#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
namespace hazptr {
// malloc/free-backed memory_resource with debug logging.
// Alignment is accepted but ignored.
class MineMemoryResource : public memory_resource {
 public:
  void* allocate(const size_t sz, const size_t /* align */) override {
    void* mem = malloc(sz);
    HAZPTR_DEBUG_PRINT(mem << " " << sz);
    return mem;
  }
  void deallocate(void* mem, const size_t sz, const size_t /* align */)
      override {
    HAZPTR_DEBUG_PRINT(mem << " " << sz);
    free(mem);
  }
};
// Payload type whose reclaim function is chosen at retire() time via a
// plain function pointer (see mineReclaimFnFree/mineReclaimFnDelete).
class Node2 : public hazptr_obj_base<Node2, void (*)(Node2*)> {
  char a[200];
};
// Reclaim function for malloc-allocated Node2 objects.
inline void mineReclaimFnFree(Node2* p) {
  HAZPTR_DEBUG_PRINT(p << " " << sizeof(Node2));
  free(p);
}
// Reclaim function for new-allocated Node2 objects.
inline void mineReclaimFnDelete(Node2* p) {
  HAZPTR_DEBUG_PRINT(p << " " << sizeof(Node2));
  delete p;
}
} // namespace hazptr
} // namespace folly
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment