Commit a917f122 authored by Lewis Baker's avatar Lewis Baker Committed by Facebook GitHub Bot

Declare folly::SpinLock methods as noexcept

Summary:
Declare folly::SpinLock constructor and methods as noexcept so that types that have a compiler-generated default-constructor (like folly::Synchronized) get a noexcept default constructor.

Also declare methods on folly::MicroSpinLock and folly::detail::Sleeper as noexcept.

Reviewed By: yfeldblum

Differential Revision: D10518727

fbshipit-source-id: 8582fd8b3893ad25ef736bd7a63465872ebe67af
parent 0d49d8d4
......@@ -43,16 +43,16 @@ namespace folly {
class SpinLock {
public:
FOLLY_ALWAYS_INLINE SpinLock() {
FOLLY_ALWAYS_INLINE SpinLock() noexcept {
lock_.init();
}
FOLLY_ALWAYS_INLINE void lock() const {
FOLLY_ALWAYS_INLINE void lock() const noexcept {
lock_.lock();
}
FOLLY_ALWAYS_INLINE void unlock() const {
FOLLY_ALWAYS_INLINE void unlock() const noexcept {
lock_.unlock();
}
FOLLY_ALWAYS_INLINE bool try_lock() const {
FOLLY_ALWAYS_INLINE bool try_lock() const noexcept {
return lock_.try_lock();
}
......@@ -63,7 +63,9 @@ class SpinLock {
template <typename LOCK>
class SpinLockGuardImpl : private boost::noncopyable {
public:
FOLLY_ALWAYS_INLINE explicit SpinLockGuardImpl(LOCK& lock) : lock_(lock) {
FOLLY_ALWAYS_INLINE explicit SpinLockGuardImpl(LOCK& lock) noexcept(
noexcept(lock.lock()))
: lock_(lock) {
lock_.lock();
}
FOLLY_ALWAYS_INLINE ~SpinLockGuardImpl() {
......
......@@ -69,15 +69,15 @@ struct MicroSpinLock {
// Initialize this MSL. It is unnecessary to call this if you
// zero-initialize the MicroSpinLock.
void init() {
void init() noexcept {
payload()->store(FREE);
}
bool try_lock() {
bool try_lock() noexcept {
return cas(FREE, LOCKED);
}
void lock() {
void lock() noexcept {
detail::Sleeper sleeper;
while (!try_lock()) {
do {
......@@ -87,17 +87,17 @@ struct MicroSpinLock {
assert(payload()->load() == LOCKED);
}
void unlock() {
void unlock() noexcept {
assert(payload()->load() == LOCKED);
payload()->store(FREE, std::memory_order_release);
}
private:
std::atomic<uint8_t>* payload() {
std::atomic<uint8_t>* payload() noexcept {
return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
}
bool cas(uint8_t compare, uint8_t newVal) {
bool cas(uint8_t compare, uint8_t newVal) noexcept {
return std::atomic_compare_exchange_strong_explicit(
payload(),
&compare,
......@@ -123,15 +123,15 @@ static_assert(
template <class T, size_t N>
struct alignas(max_align_v) SpinLockArray {
T& operator[](size_t i) {
T& operator[](size_t i) noexcept {
return data_[i].lock;
}
const T& operator[](size_t i) const {
const T& operator[](size_t i) const noexcept {
return data_[i].lock;
}
constexpr size_t size() const {
constexpr size_t size() const noexcept {
return N;
}
......
......@@ -42,9 +42,9 @@ class Sleeper {
uint32_t spinCount;
public:
Sleeper() : spinCount(0) {}
Sleeper() noexcept : spinCount(0) {}
static void sleep() {
static void sleep() noexcept {
/*
* Always sleep 0.5ms, assuming this will make the kernel put
* us down for whatever its minimum timer resolution is (in
......@@ -54,7 +54,7 @@ class Sleeper {
nanosleep(&ts, nullptr);
}
void wait() {
void wait() noexcept {
if (spinCount < kMaxActiveSpin) {
++spinCount;
asm_volatile_pause();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment