Commit 79b78eaa authored by Christopher Dykes, committed by Facebook GitHub Bot

Use std::this_thread::yield rather than sched_yield

Summary: They do the same thing, and std::this_thread::yield() is the portable one.

Reviewed By: yfeldblum

Differential Revision: D4569649

fbshipit-source-id: db0434766f674a7789d6e59335e122b4d2131e06
parent eab7000d
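
For context, a minimal standalone sketch (not folly code; the class name and the 1000-spin threshold are illustrative only) of the spin-then-yield pattern this diff converts from sched_yield() to the portable std::this_thread::yield():

#include <atomic>
#include <thread>

// Hypothetical spinlock showing the pattern touched throughout this diff:
// spin on an atomic flag, and after ~1000 failed attempts yield the CPU so
// the current lock holder can run. std::this_thread::yield() is standard
// C++11, whereas sched_yield() requires <sched.h> and a POSIX platform.
class SpinLockSketch {
 public:
  void lock() {
    int count = 0;
    while (flag_.test_and_set(std::memory_order_acquire)) {
      if (++count > 1000) std::this_thread::yield();
    }
  }
  void unlock() { flag_.clear(std::memory_order_release); }

 private:
  std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
};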
@@ -156,11 +156,11 @@ pthread_rwlock_t Read 728698 24us 101ns 7.28ms 194us
 #undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
 #endif
+#include <algorithm>
 #include <atomic>
 #include <string>
-#include <algorithm>
+#include <thread>
-#include <sched.h>
 #include <glog/logging.h>
 #include <folly/Likely.h>
@@ -194,7 +194,7 @@ class RWSpinLock {
   void lock() {
     int count = 0;
     while (!LIKELY(try_lock())) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -208,7 +208,7 @@ class RWSpinLock {
   void lock_shared() {
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -226,7 +226,7 @@ class RWSpinLock {
   void lock_upgrade() {
     int count = 0;
     while (!try_lock_upgrade()) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -238,7 +238,7 @@ class RWSpinLock {
   void unlock_upgrade_and_lock() {
     int64_t count = 0;
     while (!try_unlock_upgrade_and_lock()) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -601,7 +601,7 @@ class RWTicketSpinLockT {
    * turns.
    */
   void writeLockAggressive() {
-    // sched_yield() is needed here to avoid a pathology if the number
+    // std::this_thread::yield() is needed here to avoid a pathology if the number
     // of threads attempting concurrent writes is >= the number of real
     // cores allocated to this process. This is less likely than the
     // corresponding situation in lock_shared(), but we still want to
@@ -610,7 +610,7 @@ class RWTicketSpinLockT {
     QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
     while (val != load_acquire(&ticket.write)) {
       asm_volatile_pause();
-      if (UNLIKELY(++count > 1000)) sched_yield();
+      if (UNLIKELY(++count > 1000)) std::this_thread::yield();
     }
   }
@@ -623,7 +623,7 @@ class RWTicketSpinLockT {
     // there are a lot of competing readers. The aggressive spinning
     // can help to avoid starving writers.
     //
-    // We don't worry about sched_yield() here because the caller
+    // We don't worry about std::this_thread::yield() here because the caller
     // has already explicitly abandoned fairness.
     while (!try_lock()) {}
   }
@@ -653,13 +653,13 @@ class RWTicketSpinLockT {
   }
   void lock_shared() {
-    // sched_yield() is important here because we can't grab the
+    // std::this_thread::yield() is important here because we can't grab the
     // shared lock if there is a pending writeLockAggressive, so we
     // need to let threads that already have a shared lock complete
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
       asm_volatile_pause();
-      if (UNLIKELY((++count & 1023) == 0)) sched_yield();
+      if (UNLIKELY((++count & 1023) == 0)) std::this_thread::yield();
     }
   }
......
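
The comments above explain why the ticket-based write path still needs a periodic yield: with more spinning writers than cores, the thread whose ticket is next may not get scheduled while the others burn CPU. A minimal standalone ticket-lock sketch of that idea (assumed names, not folly's RWTicketSpinLockT):

#include <atomic>
#include <thread>

// Each writer takes a ticket; the lock is held by whichever thread's ticket
// equals write_. Pure spinning can degrade badly when writers outnumber
// cores, so after ~1000 spins the waiter yields to let the ticket holder run.
class TicketLockSketch {
 public:
  void lock() {
    unsigned my = users_.fetch_add(1, std::memory_order_relaxed);
    int count = 0;
    while (write_.load(std::memory_order_acquire) != my) {
      if (++count > 1000) std::this_thread::yield();
    }
  }
  void unlock() { write_.fetch_add(1, std::memory_order_release); }

 private:
  std::atomic<unsigned> users_{0};  // next ticket to hand out
  std::atomic<unsigned> write_{0};  // ticket currently being served
};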
@@ -564,7 +564,6 @@ AC_CHECK_FUNCS([getdelim \
                 memset \
                 pow \
                 strerror \
-                sched_yield \
                 malloc_size \
                 malloc_usable_size \
                 memrchr \
......
@@ -27,6 +27,7 @@
 #include <condition_variable>
 #include <fcntl.h>
 #include <mutex>
+#include <thread>
 namespace folly {
@@ -231,7 +232,7 @@ getTimeDelta(std::chrono::steady_clock::time_point* prev) {
 void EventBase::waitUntilRunning() {
   while (!isRunning()) {
-    sched_yield();
+    std::this_thread::yield();
   }
 }
......
@@ -16,11 +16,12 @@
 #include <folly/detail/CacheLocality.h>
 #include <sched.h>
 #include <memory>
+#include <thread>
 #include <unordered_map>
 #include <glog/logging.h>
 #include <folly/Benchmark.h>
 using namespace folly::detail;
@@ -167,7 +168,7 @@ static void contentionAtWidth(size_t iters, size_t stripes, size_t work) {
       ready++;
       while (!go.load()) {
-        sched_yield();
+        std::this_thread::yield();
       }
       std::atomic<int> localWork(0);
       for (size_t i = iters; i > 0; --i) {
@@ -187,7 +188,7 @@ static void contentionAtWidth(size_t iters, size_t stripes, size_t work) {
   }
   while (ready < numThreads) {
-    sched_yield();
+    std::this_thread::yield();
   }
   braces.dismiss();
   go = true;
@@ -208,7 +209,7 @@ static void atomicIncrBaseline(size_t iters,
   while (threads.size() < numThreads) {
     threads.push_back(std::thread([&]() {
       while (!go.load()) {
-        sched_yield();
+        std::this_thread::yield();
       }
       std::atomic<size_t> localCounter(0);
       std::atomic<int> localWork(0);
......
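
The benchmark hunks above all use the same start-barrier pattern; a simplified, self-contained sketch of it (the thread count and loop body are placeholders):

#include <atomic>
#include <thread>
#include <vector>

// Simplified version of the benchmark scaffolding above: workers check in via
// "ready", then spin on a shared "go" flag; both loops yield so the other
// side can make progress even when threads outnumber cores.
int main() {
  constexpr int numThreads = 4;      // placeholder thread count
  std::atomic<bool> go{false};
  std::atomic<int> ready{0};
  std::vector<std::thread> threads;
  for (int t = 0; t < numThreads; ++t) {
    threads.emplace_back([&] {
      ready++;
      while (!go.load()) {
        std::this_thread::yield();   // portable replacement for sched_yield()
      }
      // ... per-thread benchmark body would go here ...
    });
  }
  while (ready < numThreads) {
    std::this_thread::yield();       // wait for every worker to check in
  }
  go = true;
  for (auto& th : threads) {
    th.join();
  }
  return 0;
}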
@@ -498,7 +498,7 @@ template <class Mutex> void testConcurrency() {
     // Test lock()
     for (size_t n = 0; n < itersPerThread; ++n) {
       v.contextualLock()->push_back((itersPerThread * threadIdx) + n);
-      sched_yield();
+      std::this_thread::yield();
     }
   };
   runParallel(numThreads, pushNumbers);
......