Commit 79b78eaa authored by Christopher Dykes, committed by Facebook Github Bot

Use std::this_thread::yield rather than sched_yield

Summary: They do the same thing, and std::this_thread::yield() is portable; a standalone sketch of the pattern being changed follows the commit metadata below.

Reviewed By: yfeldblum

Differential Revision: D4569649

fbshipit-source-id: db0434766f674a7789d6e59335e122b4d2131e06
parent eab7000d
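
Context for the change: every call site in this diff is a bounded spin loop that yields the processor once it has spun for a while. sched_yield() comes from POSIX <sched.h>; std::this_thread::yield() comes from standard C++ <thread> and, on Linux with libstdc++, is typically implemented in terms of sched_yield(), so behavior is unchanged. A minimal standalone sketch of that spin-then-yield pattern, using a hypothetical SpinMutex stand-in rather than folly's actual RWSpinLock:

#include <atomic>
#include <thread>  // std::this_thread::yield(): portable; sched_yield() requires POSIX <sched.h>

// Hypothetical stand-in lock illustrating the pattern used throughout this diff.
class SpinMutex {
 public:
  bool try_lock() { return !flag_.test_and_set(std::memory_order_acquire); }
  void unlock() { flag_.clear(std::memory_order_release); }

  void lock() {
    int count = 0;
    while (!try_lock()) {
      // Spin for the fast path, then yield so the lock holder can be
      // scheduled when contending threads outnumber available cores.
      if (++count > 1000) std::this_thread::yield();
    }
  }

 private:
  std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
};

The RWTicketSpinLockT hunks below additionally call asm_volatile_pause() on every iteration and throttle the yield with (++count & 1023) == 0, i.e. one yield per 1024 spins rather than yielding on every iteration past the first 1000.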
@@ -156,11 +156,11 @@ pthread_rwlock_t Read 728698 24us 101ns 7.28ms 194us
 #undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
 #endif

+#include <algorithm>
 #include <atomic>
 #include <string>
-#include <algorithm>
-#include <sched.h>
+#include <thread>

 #include <glog/logging.h>

 #include <folly/Likely.h>
@@ -194,7 +194,7 @@ class RWSpinLock {
   void lock() {
     int count = 0;
     while (!LIKELY(try_lock())) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -208,7 +208,7 @@ class RWSpinLock {
   void lock_shared() {
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -226,7 +226,7 @@ class RWSpinLock {
   void lock_upgrade() {
     int count = 0;
     while (!try_lock_upgrade()) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -238,7 +238,7 @@ class RWSpinLock {
   void unlock_upgrade_and_lock() {
     int64_t count = 0;
     while (!try_unlock_upgrade_and_lock()) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
@@ -601,7 +601,7 @@ class RWTicketSpinLockT {
    * turns.
    */
   void writeLockAggressive() {
-    // sched_yield() is needed here to avoid a pathology if the number
+    // std::this_thread::yield() is needed here to avoid a pathology if the number
     // of threads attempting concurrent writes is >= the number of real
     // cores allocated to this process. This is less likely than the
     // corresponding situation in lock_shared(), but we still want to
@@ -610,7 +610,7 @@ class RWTicketSpinLockT {
     QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
     while (val != load_acquire(&ticket.write)) {
       asm_volatile_pause();
-      if (UNLIKELY(++count > 1000)) sched_yield();
+      if (UNLIKELY(++count > 1000)) std::this_thread::yield();
     }
   }
@@ -623,7 +623,7 @@ class RWTicketSpinLockT {
     // there are a lot of competing readers. The aggressive spinning
     // can help to avoid starving writers.
     //
-    // We don't worry about sched_yield() here because the caller
+    // We don't worry about std::this_thread::yield() here because the caller
     // has already explicitly abandoned fairness.
     while (!try_lock()) {}
   }
@@ -653,13 +653,13 @@ class RWTicketSpinLockT {
   }

   void lock_shared() {
-    // sched_yield() is important here because we can't grab the
+    // std::this_thread::yield() is important here because we can't grab the
     // shared lock if there is a pending writeLockAggressive, so we
     // need to let threads that already have a shared lock complete
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
       asm_volatile_pause();
-      if (UNLIKELY((++count & 1023) == 0)) sched_yield();
+      if (UNLIKELY((++count & 1023) == 0)) std::this_thread::yield();
     }
   }
@@ -564,7 +564,6 @@ AC_CHECK_FUNCS([getdelim \
                 memset \
                 pow \
                 strerror \
-                sched_yield \
                 malloc_size \
                 malloc_usable_size \
                 memrchr \
@@ -27,6 +27,7 @@
 #include <condition_variable>
 #include <fcntl.h>
 #include <mutex>
+#include <thread>

 namespace folly {
@@ -231,7 +232,7 @@ getTimeDelta(std::chrono::steady_clock::time_point* prev) {
 void EventBase::waitUntilRunning() {
   while (!isRunning()) {
-    sched_yield();
+    std::this_thread::yield();
   }
 }
@@ -16,11 +16,12 @@
 #include <folly/detail/CacheLocality.h>

-#include <sched.h>
 #include <memory>
 #include <thread>
 #include <unordered_map>

 #include <glog/logging.h>

 #include <folly/Benchmark.h>

 using namespace folly::detail;
@@ -167,7 +168,7 @@ static void contentionAtWidth(size_t iters, size_t stripes, size_t work) {
       ready++;
       while (!go.load()) {
-        sched_yield();
+        std::this_thread::yield();
       }
       std::atomic<int> localWork(0);
       for (size_t i = iters; i > 0; --i) {
@@ -187,7 +188,7 @@ static void contentionAtWidth(size_t iters, size_t stripes, size_t work) {
   }
   while (ready < numThreads) {
-    sched_yield();
+    std::this_thread::yield();
   }
   braces.dismiss();
   go = true;
@@ -208,7 +209,7 @@ static void atomicIncrBaseline(size_t iters,
   while (threads.size() < numThreads) {
     threads.push_back(std::thread([&]() {
       while (!go.load()) {
-        sched_yield();
+        std::this_thread::yield();
       }
       std::atomic<size_t> localCounter(0);
       std::atomic<int> localWork(0);
@@ -498,7 +498,7 @@ template <class Mutex> void testConcurrency() {
     // Test lock()
     for (size_t n = 0; n < itersPerThread; ++n) {
       v.contextualLock()->push_back((itersPerThread * threadIdx) + n);
-      sched_yield();
+      std::this_thread::yield();
     }
   };
   runParallel(numThreads, pushNumbers);