Commit 40a62d63 authored by Maged Michael, committed by Facebook Github Bot

UnboundedQueue: Make linking next, advancing tail, and advancing head wait-free

Summary:
Avoid spin-waiting for threads that may be descheduled by making steps that used to require waiting for action by a specific thread wait-free. The changed steps are:
- Allocating and linking the next segment
- Advancing tail to the next segment
- Advancing head to the next segment

For SPSC, the consumer still must wait for the producer to advance tail before it advances head. (The helping pattern used in the other cases is sketched below, after the commit metadata.)

Reviewed By: djwatson

Differential Revision: D8124279

fbshipit-source-id: 4dc9b2c1f333a71e7283175e0f9c4ab2eaeb7549
parent 1df0ebca
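
The mechanism behind these wait-free steps is helping: any thread that needs the next segment may allocate one and try to install it with a single CAS, so no thread waits on one specific peer. Below is a minimal standalone sketch of that pattern, with illustrative names and without the hazard-pointer reference counting the real code uses:

    #include <atomic>
    #include <cassert>

    // Toy segment with a CAS-linked next pointer (illustration only).
    struct Seg {
      explicit Seg(long min) : min_ticket(min) {}
      long min_ticket;
      std::atomic<Seg*> next{nullptr};
    };

    // Any thread may call this. Exactly one allocation gets linked;
    // losers of the CAS free their copy and adopt the winner's segment.
    Seg* get_or_link_next(Seg* s, long segment_size) {
      Seg* next = s->next.load(std::memory_order_acquire);
      if (next == nullptr) {
        Seg* candidate = new Seg(s->min_ticket + segment_size);
        Seg* expected = nullptr;
        if (s->next.compare_exchange_strong(
                expected, candidate,
                std::memory_order_release, std::memory_order_acquire)) {
          next = candidate; // this thread's link won the race
        } else {
          delete candidate; // another helper already linked a segment
          next = expected;  // expected was refreshed to the winner
        }
      }
      return next;
    }

    int main() {
      Seg s(0);
      Seg* n1 = get_or_link_next(&s, 256);
      Seg* n2 = get_or_link_next(&s, 256); // second call finds the link
      assert(n1 == n2 && n1->min_ticket == 256);
      delete n1;
      return 0;
    }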
@@ -28,6 +28,8 @@
 #include <folly/lang/Align.h>
 #include <folly/synchronization/Hazptr.h>
 #include <folly/synchronization/SaturatingSemaphore.h>
+#include <folly/synchronization/WaitOptions.h>
+#include <folly/synchronization/detail/Spin.h>
 
 namespace folly {
@@ -143,21 +145,28 @@
 ///   corresponds to the producer ticket.
 /// - Segments are organized as a singly linked list.
-/// - The producer with the first ticket in the current producer
-///   segment is solely responsible for allocating and linking the
-///   next segment.
-/// - The producer with the last ticket in the current producer
-///   segment is solely responsible for advancing the tail pointer to
-///   the next segment.
-/// - Similarly, the consumer with the last ticket in the current
-///   consumer segment is solely responsible for advancing the head
-///   pointer to the next segment. It must ensure that head never
-///   overtakes tail.
+/// - The producer with the first ticket in the current producer
+///   segment has primary responsibility for allocating and linking
+///   the next segment. Other producers and consumers may help do so
+///   when needed if that thread is delayed.
+/// - The producer with the last ticket in the current producer
+///   segment is primarily responsible for advancing the tail pointer
+///   to the next segment. Other producers and consumers may help do
+///   so when needed if that thread is delayed.
+/// - Similarly, the consumer with the last ticket in the current
+///   consumer segment is primarily responsible for advancing the head
+///   pointer to the next segment. Other consumers may help do so when
+///   needed if that thread is delayed.
+/// - The tail pointer must not lag behind the head pointer.
+///   Otherwise, the algorithm cannot be certain about the removal of
+///   segments and would have to incur higher costs to ensure safe
+///   reclamation. Consumers must ensure that head never overtakes
+///   tail.
 ///
 /// Memory Usage:
 /// - An empty queue contains one segment. A nonempty queue contains
 ///   one or two more segments than fit its contents.
 /// - Removed segments are not reclaimed until there are no threads,
-///   producers or consumers, have references to them or their
+///   producers or consumers, with references to them or their
 ///   predecessors. That is, a lagging thread may delay the reclamation
 ///   of a chain of removed segments.
 /// - The template parameter LgAlign can be used to reduce memory usage
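
The ticket arithmetic these comments rely on (minTicket(), SegmentSize) reduces to power-of-two math. A simplified standalone model, assuming SegmentSize = 1 << LgSegmentSize and ignoring the entry striding folly actually applies:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using Ticket = uint64_t;
    constexpr uint64_t LgSegmentSize = 8;
    constexpr uint64_t SegmentSize = uint64_t(1) << LgSegmentSize;

    // Each segment covers SegmentSize consecutive tickets starting at
    // its minTicket, which is always a multiple of SegmentSize.
    constexpr Ticket segmentMinTicket(Ticket t) {
      return t & ~(SegmentSize - 1);
    }

    // Index of ticket t within its segment (the real code additionally
    // strides indices to spread neighbors across cache lines).
    constexpr size_t indexInSegment(Ticket t) {
      return size_t(t & (SegmentSize - 1));
    }

    int main() {
      assert(segmentMinTicket(0) == 0);
      assert(segmentMinTicket(255) == 0);
      assert(segmentMinTicket(256) == 256);
      assert(indexInSegment(257) == 1);
      // Ticket t belongs to segment s iff
      //   s->minTicket() <= t < s->minTicket() + SegmentSize,
      // which is exactly the test t >= s->minTicket() + SegmentSize
      // used in findSegment to decide to move to the next segment.
      return 0;
    }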
@@ -189,9 +198,6 @@
 /// - If the template parameter LgSegmentSize is changed, it should be
 ///   set adequately high to keep the amortized cost of allocation and
 ///   reclamation low.
-/// - Another consideration is that the queue is guaranteed to have
-///   enough space for a number of consumers equal to 2^LgSegmentSize
-///   for local blocking. Excess waiting consumers spin.
 /// - It is recommended to measure performance with different variants
 ///   when applicable, e.g., UMPMC vs UMPSC. Depending on the use
 ///   case, sometimes the variant with the higher sequential overhead
@@ -227,10 +233,12 @@
   struct Consumer {
     Atom<Segment*> head;
     Atom<Ticket> ticket;
+    explicit Consumer(Segment* s) : head(s), ticket(0) {}
   };
   struct Producer {
     Atom<Segment*> tail;
     Atom<Ticket> ticket;
+    explicit Producer(Segment* s) : tail(s), ticket(0) {}
   };
 
   alignas(Align) Consumer c_;
@@ -238,13 +246,8 @@
  public:
   /** constructor */
-  UnboundedQueue() {
-    setProducerTicket(0);
-    setConsumerTicket(0);
-    Segment* s = new Segment(0);
-    setTail(s);
-    setHead(s);
-  }
+  UnboundedQueue()
+      : c_(new Segment(0)), p_(c_.head.load(std::memory_order_relaxed)) {}
 
   /** destructor */
   ~UnboundedQueue() {
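
For context on the API being modified, here is a usage sketch. The UMPMCQueue alias and the enqueue/try_dequeue methods exist in this header, but treat the exact template parameters as illustrative:

    #include <folly/concurrency/UnboundedQueue.h>
    #include <cstdio>
    #include <thread>

    int main() {
      // Unbounded multi-producer multi-consumer queue of ints.
      // MayBlock = false selects spin-only waiting.
      folly::UMPMCQueue<int, /* MayBlock = */ false> q;

      std::thread producer([&] {
        for (int i = 0; i < 1000; ++i) {
          q.enqueue(i); // takes a producer ticket; never blocks on peers
        }
      });
      std::thread consumer([&] {
        for (int received = 0; received < 1000;) {
          int v;
          if (q.try_dequeue(v)) { // non-blocking attempt
            ++received;
          }
        }
      });
      producer.join();
      consumer.join();
      std::printf("done\n");
      return 0;
    }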
@@ -379,7 +382,7 @@
     Entry& e = s->entry(idx);
     e.putItem(std::forward<Arg>(arg));
     if (responsibleForAlloc(t)) {
-      allocNextSegment(s, t + SegmentSize);
+      allocNextSegment(s);
     }
     if (responsibleForAdvance(t)) {
       advanceTail(s);
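
responsibleForAlloc and responsibleForAdvance are defined outside this hunk. A plausible reading, consistent with the class comments (the first ticket of a segment allocates the next one, the last ticket advances tail), sketched with hypothetical stand-ins:

    #include <cassert>
    #include <cstdint>

    using Ticket = uint64_t;
    constexpr Ticket SegmentSize = 256; // 1 << LgSegmentSize

    // First ticket of a segment: its holder allocates/links the next
    // segment (and, after this diff, others may help if it is delayed).
    constexpr bool responsibleForAlloc(Ticket t) {
      return (t & (SegmentSize - 1)) == 0;
    }

    // Last ticket of a segment: its holder advances tail to the next
    // segment (and, after this diff, others may help if it is delayed).
    constexpr bool responsibleForAdvance(Ticket t) {
      return (t & (SegmentSize - 1)) == (SegmentSize - 1);
    }

    int main() {
      assert(responsibleForAlloc(256));   // first ticket of 2nd segment
      assert(responsibleForAdvance(511)); // last ticket of 2nd segment
      assert(!responsibleForAlloc(300) && !responsibleForAdvance(300));
      return 0;
    }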
@@ -460,10 +463,8 @@
     while (true) {
       Ticket t = consumerTicket();
       if (UNLIKELY(t >= (s->minTicket() + SegmentSize))) {
-        s = tryGetNextSegmentUntil(s, deadline);
-        if (s == nullptr) {
-          return folly::Optional<T>(); // timed out
-        }
+        s = getAllocNextSegment(s, t);
+        DCHECK(s);
         continue;
       }
       size_t idx = index(t);
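
The DCHECK(s) holds because getAllocNextSegment (next hunk) always returns a segment, allocating one itself if necessary, so the timed-out path disappears. Its spin budget before helping is proportional to how far the caller's ticket is past the segment boundary; a hypothetical illustration of that arithmetic:

    #include <cassert>
    #include <chrono>

    int main() {
      using namespace std::chrono;
      const long minTicket = 0;
      const long SegmentSize = 256;
      const long t = 300; // this thread's ticket
      // diff == 0 means this thread is primarily responsible and
      // allocates immediately; larger diff means more threads stand
      // between it and the boundary, so it waits longer before helping.
      const long diff = t - (minTicket + SegmentSize);
      assert(diff == 44);
      const auto spin_budget = microseconds(diff); // up to 44 us, then help
      assert(spin_budget.count() == 44);
      return 0;
    }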
@@ -513,78 +514,116 @@
   /** findSegment */
   FOLLY_ALWAYS_INLINE
-  Segment* findSegment(Segment* s, const Ticket t) const noexcept {
+  Segment* findSegment(Segment* s, const Ticket t) noexcept {
     while (UNLIKELY(t >= (s->minTicket() + SegmentSize))) {
-      auto deadline = std::chrono::steady_clock::time_point::max();
-      s = tryGetNextSegmentUntil(s, deadline);
-      DCHECK(s != nullptr);
+      s = getAllocNextSegment(s, t);
+      DCHECK(s);
     }
     return s;
   }
 
-  /** tryGetNextSegmentUntil */
-  template <typename Clock, typename Duration>
-  Segment* tryGetNextSegmentUntil(
-      Segment* s,
-      const std::chrono::time_point<Clock, Duration>& deadline) const noexcept {
-    // The following loop will not spin indefinitely (as long as the
-    // number of concurrently waiting consumers does not exceeds
-    // SegmentSize and the OS scheduler does not pause ready threads
-    // indefinitely). Under such conditions, the algorithm guarantees
-    // that the producer reponsible for advancing the tail pointer to
-    // the next segment has already acquired its ticket.
-    while (tail() == s) {
-      if (deadline < Clock::time_point::max() && deadline > Clock::now()) {
-        return nullptr;
-      }
-      asm_volatile_pause();
-    }
-    Segment* next = s->nextSegment();
-    DCHECK(next != nullptr);
-    return next;
-  }
+  /** getAllocNextSegment */
+  Segment* getAllocNextSegment(Segment* s, Ticket t) noexcept {
+    Segment* next = s->nextSegment();
+    if (!next) {
+      DCHECK_GE(t, s->minTicket() + SegmentSize);
+      auto diff = t - (s->minTicket() + SegmentSize);
+      if (diff > 0) {
+        auto dur = std::chrono::microseconds(diff);
+        auto deadline = std::chrono::steady_clock::now() + dur;
+        WaitOptions opt;
+        opt.spin_max(dur);
+        detail::spin_pause_until(
+            deadline, opt, [s] { return s->nextSegment(); });
+        next = s->nextSegment();
+        if (next) {
+          return next;
+        }
+      }
+      next = allocNextSegment(s);
+    }
+    DCHECK(next);
+    return next;
+  }
 
   /** allocNextSegment */
-  void allocNextSegment(Segment* s, const Ticket t) {
+  Segment* allocNextSegment(Segment* s) {
+    auto t = s->minTicket() + SegmentSize;
     Segment* next = new Segment(t);
-    if (!SPSC) {
-      next->acquire_ref_safe(); // defined in hazptr_obj_base_linked
-    }
-    DCHECK(s->nextSegment() == nullptr);
-    s->setNextSegment(next);
+    next->acquire_ref_safe(); // defined in hazptr_obj_base_linked
+    if (!s->casNextSegment(next)) {
+      delete next;
+      next = s->nextSegment();
+    }
+    DCHECK(next);
+    return next;
   }
 
   /** advanceTail */
   void advanceTail(Segment* s) noexcept {
-    Segment* next = s->nextSegment();
-    if (!SingleProducer) {
-      // The following loop will not spin indefinitely (as long as the
-      // OS scheduler does not pause ready threads indefinitely). The
-      // algorithm guarantees that the producer reponsible for setting
-      // the next pointer has already acquired its ticket.
-      while (next == nullptr) {
-        asm_volatile_pause();
-        next = s->nextSegment();
-      }
-    }
-    DCHECK(next != nullptr);
-    setTail(next);
+    if (SPSC) {
+      Segment* next = s->nextSegment();
+      DCHECK(next);
+      setTail(next);
+    } else {
+      Ticket t = s->minTicket() + SegmentSize;
+      advanceTailToTicket(t);
+    }
+  }
+
+  /** advanceTailToTicket */
+  void advanceTailToTicket(Ticket t) noexcept {
+    Segment* s = tail();
+    while (s->minTicket() < t) {
+      Segment* next = s->nextSegment();
+      if (!next) {
+        next = allocNextSegment(s);
+      }
+      DCHECK(next);
+      casTail(s, next);
+      s = tail();
+    }
   }
 
   /** advanceHead */
   void advanceHead(Segment* s) noexcept {
-    auto deadline = std::chrono::steady_clock::time_point::max();
-    Segment* next = tryGetNextSegmentUntil(s, deadline);
-    DCHECK(next != nullptr);
-    while (head() != s) {
-      // Wait for head to advance to the current segment first before
-      // advancing head to the next segment. Otherwise, a lagging
-      // consumer responsible for advancing head from an earlier
-      // segment may incorrectly set head back.
-      asm_volatile_pause();
-    }
-    setHead(next);
-    reclaimSegment(s);
+    if (SPSC) {
+      while (tail() == s) {
+        /* Wait for producer to advance tail. */
+        asm_volatile_pause();
+      }
+      Segment* next = s->nextSegment();
+      DCHECK(next);
+      setHead(next);
+      reclaimSegment(s);
+    } else {
+      Ticket t = s->minTicket() + SegmentSize;
+      advanceHeadToTicket(t);
+    }
+  }
+
+  /** advanceHeadToTicket */
+  void advanceHeadToTicket(Ticket t) noexcept {
+    /* Tail must not lag behind head. Otherwise, the algorithm cannot
+       be certain about removal of segments. */
+    advanceTailToTicket(t);
+    Segment* s = head();
+    if (SingleConsumer) {
+      DCHECK_EQ(s->minTicket() + SegmentSize, t);
+      Segment* next = s->nextSegment();
+      DCHECK(next);
+      setHead(next);
+      reclaimSegment(s);
+    } else {
+      while (s->minTicket() < t) {
+        Segment* next = s->nextSegment();
+        DCHECK(next);
+        if (casHead(s, next)) {
+          reclaimSegment(s);
+          s = next;
+        }
+      }
+    }
   }
 
   /** reclaimSegment */
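
Each iteration of the new advanceTailToTicket either observes that another thread has already advanced tail or advances it with a CAS; progress never depends on one particular thread, which is what makes the step wait-free. A standalone toy model of that loop (hypothetical names, no hazard pointers or reclamation) that two threads can run concurrently:

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct Seg {
      explicit Seg(long min) : min_ticket(min) {}
      long min_ticket;
      std::atomic<Seg*> next{nullptr};
    };

    constexpr long kSegSize = 256;
    std::atomic<Seg*> g_tail{nullptr};

    Seg* alloc_next(Seg* s) {
      Seg* cand = new Seg(s->min_ticket + kSegSize);
      Seg* expected = nullptr;
      if (!s->next.compare_exchange_strong(expected, cand)) {
        delete cand;     // lost the link race; adopt the winner's segment
        cand = expected;
      }
      return cand;
    }

    // Toy advanceTailToTicket: loop until tail covers ticket t. Every
    // iteration either sees progress by another thread or makes
    // progress itself; it never waits for a specific thread to act.
    void advance_tail_to_ticket(long t) {
      Seg* s = g_tail.load();
      while (s->min_ticket < t) {
        Seg* next = s->next.load();
        if (!next) {
          next = alloc_next(s);
        }
        g_tail.compare_exchange_strong(s, next); // failure refreshes s
        s = g_tail.load();
      }
    }

    int main() {
      g_tail.store(new Seg(0));
      std::thread a([] { advance_tail_to_ticket(10 * kSegSize); });
      std::thread b([] { advance_tail_to_ticket(10 * kSegSize); });
      a.join();
      b.join();
      assert(g_tail.load()->min_ticket >= 10 * kSegSize);
      return 0; // segments intentionally leaked in this toy
    }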
@@ -625,13 +664,27 @@
   }
 
   void setHead(Segment* s) noexcept {
-    c_.head.store(s, std::memory_order_release);
+    DCHECK(SingleConsumer);
+    c_.head.store(s, std::memory_order_relaxed);
   }
 
   void setTail(Segment* s) noexcept {
+    DCHECK(SPSC);
     p_.tail.store(s, std::memory_order_release);
   }
 
+  bool casHead(Segment*& s, Segment* next) noexcept {
+    DCHECK(!SingleConsumer);
+    return c_.head.compare_exchange_strong(
+        s, next, std::memory_order_release, std::memory_order_acquire);
+  }
+
+  void casTail(Segment*& s, Segment* next) noexcept {
+    DCHECK(!SPSC);
+    p_.tail.compare_exchange_strong(
+        s, next, std::memory_order_release, std::memory_order_relaxed);
+  }
+
   FOLLY_ALWAYS_INLINE void setProducerTicket(Ticket t) noexcept {
     p_.ticket.store(t, std::memory_order_release);
   }
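
The failure orderings matter here: casHead refreshes the caller's snapshot s on failure with acquire ordering, which is what lets the retry loop in advanceHeadToTicket proceed without a separate head() load. A minimal illustration of this standard compare_exchange property (plain C++, not folly-specific):

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<int> head{2};
      int snapshot = 1; // stale view of head

      // compare_exchange_strong writes the current value back into
      // `snapshot` when it fails, so a retry loop always proceeds with
      // a fresh view and needs no extra load.
      bool ok = head.compare_exchange_strong(
          snapshot, 3, std::memory_order_release, std::memory_order_acquire);
      assert(!ok && snapshot == 2); // failed, but snapshot was refreshed

      ok = head.compare_exchange_strong(
          snapshot, 3, std::memory_order_release, std::memory_order_acquire);
      assert(ok && head.load() == 3);
      return 0;
    }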
@@ -734,8 +787,10 @@
     return next_.load(std::memory_order_acquire);
   }
 
-  void setNextSegment(Segment* s) noexcept {
-    next_.store(s, std::memory_order_release);
+  bool casNextSegment(Segment* next) noexcept {
+    Segment* expected = nullptr;
+    return next_.compare_exchange_strong(
+        expected, next, std::memory_order_release, std::memory_order_relaxed);
   }
 
   FOLLY_ALWAYS_INLINE Ticket minTicket() const noexcept {
...