Commit 6808ac5a authored by Pranjal Raihan's avatar Pranjal Raihan Committed by Facebook GitHub Bot

Refactor MicroLock and remove its slots

Summary:
`folly::MicroLock` is actually 4 locks in one. Turns out that it's not very useful considering there is not a single use in fbcode. Instead `MicroLock` should allow a nicer API for exposing the 6 unused bits as user data.

By limiting the lock to one slot (least significant 2 bits), we can make the assumption that the 6 contiguous most significant bits are user data. This means that all the masking logic can go away (see `slotMask` and `LockGuardWithDataSlots`).

Now `lockAndLoad`, `unlockAndStore`, and `LockGuardWithData` "just work" without needing to worry about user data interfering with the slots used for locking.

Reviewed By: davidtgoldblatt

Differential Revision: D25909068

fbshipit-source-id: 3f050fa7393b40cff949ed6e30b691d7f40d0edf
parent 3c764a2d
...@@ -24,22 +24,22 @@ namespace folly { ...@@ -24,22 +24,22 @@ namespace folly {
uint8_t MicroLockCore::lockSlowPath( uint8_t MicroLockCore::lockSlowPath(
uint32_t oldWord, uint32_t oldWord,
detail::Futex<>* wordPtr, detail::Futex<>* wordPtr,
uint32_t slotHeldBit, uint32_t heldBit,
unsigned maxSpins, unsigned maxSpins,
unsigned maxYields) { unsigned maxYields) noexcept {
uint32_t newWord; uint32_t newWord;
unsigned spins = 0; unsigned spins = 0;
uint32_t slotWaitBit = slotHeldBit << 1; uint32_t waitBit = heldBit << 1;
uint32_t needWaitBit = 0; uint32_t needWaitBit = 0;
retry: retry:
if ((oldWord & slotHeldBit) != 0) { if ((oldWord & heldBit) != 0) {
++spins; ++spins;
if (spins > maxSpins + maxYields) { if (spins > maxSpins + maxYields) {
// Somebody appears to have the lock. Block waiting for the // Somebody appears to have the lock. Block waiting for the
// holder to unlock the lock. We set heldbit(slot) so that the // holder to unlock the lock. We set heldbit(slot) so that the
// lock holder knows to FUTEX_WAKE us. // lock holder knows to FUTEX_WAKE us.
newWord = oldWord | slotWaitBit; newWord = oldWord | waitBit;
if (newWord != oldWord) { if (newWord != oldWord) {
if (!wordPtr->compare_exchange_weak( if (!wordPtr->compare_exchange_weak(
oldWord, oldWord,
...@@ -49,8 +49,8 @@ retry: ...@@ -49,8 +49,8 @@ retry:
goto retry; goto retry;
} }
} }
detail::futexWait(wordPtr, newWord, slotHeldBit); detail::futexWait(wordPtr, newWord, heldBit);
needWaitBit = slotWaitBit; needWaitBit = waitBit;
} else if (spins > maxSpins) { } else if (spins > maxSpins) {
// sched_yield(), but more portable // sched_yield(), but more portable
std::this_thread::yield(); std::this_thread::yield();
...@@ -61,7 +61,7 @@ retry: ...@@ -61,7 +61,7 @@ retry:
goto retry; goto retry;
} }
newWord = oldWord | slotHeldBit | needWaitBit; newWord = oldWord | heldBit | needWaitBit;
if (!wordPtr->compare_exchange_weak( if (!wordPtr->compare_exchange_weak(
oldWord, oldWord,
newWord, newWord,
...@@ -69,6 +69,6 @@ retry: ...@@ -69,6 +69,6 @@ retry:
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
goto retry; goto retry;
} }
return byteFromWord(newWord); return decodeDataFromWord(newWord);
} }
} // namespace folly } // namespace folly
...@@ -28,10 +28,8 @@ ...@@ -28,10 +28,8 @@
namespace folly { namespace folly {
/** /**
* Tiny exclusive lock that packs four lock slots into a single * Tiny exclusive lock that uses 2 bits. It is stored as 1 byte and
* byte. Each slot is an independent real, sleeping lock. The default * has APIs for using the remaining 6 bits for storing user data.
* lock and unlock functions operate on slot zero, which modifies only
* the low two bits of the host byte.
* *
* You should zero-initialize the bits of a MicroLock that you intend * You should zero-initialize the bits of a MicroLock that you intend
* to use. * to use.
...@@ -42,16 +40,15 @@ namespace folly { ...@@ -42,16 +40,15 @@ namespace folly {
* *
* You are free to put a MicroLock in a union with some other object. * You are free to put a MicroLock in a union with some other object.
* If, for example, you want to use the bottom two bits of a pointer * If, for example, you want to use the bottom two bits of a pointer
* as a lock, you can put a MicroLock in a union with the pointer and * as a lock, you can put a MicroLock in a union with the pointer,
* limit yourself to MicroLock slot zero, which will use the two * which will use the two least-significant bits in the bottom byte.
* least-significant bits in the bottom byte.
* *
* (Note that such a union is safe only because MicroLock is based on * (Note that such a union is safe only because MicroLock is based on
* a character type, and even under a strict interpretation of C++'s * a character type, and even under a strict interpretation of C++'s
* aliasing rules, character types may alias anything.) * aliasing rules, character types may alias anything.)
* *
* Unused slots in the lock can be used to store user data via * Unused bits in the lock can be used to store user data via
* lockAndLoad() and unlockAndStore(), or LockGuardWithDataSlots. * lockAndLoad() and unlockAndStore(), or LockGuardWithData.
* *
* MicroLock uses a dirty trick: it actually operates on the full * MicroLock uses a dirty trick: it actually operates on the full
* 32-bit, four-byte-aligned bit of memory into which it is embedded. * 32-bit, four-byte-aligned bit of memory into which it is embedded.
...@@ -98,201 +95,222 @@ namespace folly { ...@@ -98,201 +95,222 @@ namespace folly {
class MicroLockCore { class MicroLockCore {
protected: protected:
uint8_t lock_; uint8_t lock_;
inline detail::Futex<>* word() const; // Well, halfword on 64-bit systems /**
inline uint32_t baseShift(unsigned slot) const; * Arithmetic shift required to get to the byte from the word.
inline uint32_t heldBit(unsigned slot) const; */
inline uint32_t waitBit(unsigned slot) const; unsigned baseShift() const noexcept;
/**
* Mask for bit indicating that the flag is held.
*/
unsigned heldBit() const noexcept;
/**
* Mask for bit indicating that there is a waiter that should be woken up.
*/
unsigned waitBit() const noexcept;
static uint8_t lockSlowPath( static uint8_t lockSlowPath(
uint32_t oldWord, uint32_t oldWord,
detail::Futex<>* wordPtr, detail::Futex<>* wordPtr,
uint32_t slotHeldBit, uint32_t heldBit,
unsigned maxSpins, unsigned maxSpins,
unsigned maxYields); unsigned maxYields) noexcept;
/**
* The word (halfword on 64-bit systems) that this lock atomically operates
* on. Although the atomic operations access 4 bytes, only the byte used by
* the lock will be modified.
*/
detail::Futex<>* word() const noexcept;
/**
* Extract the lock byte from word() value.
*/
static constexpr uint8_t byteFromWord(uint32_t word) noexcept {
return kIsLittleEndian ? static_cast<uint8_t>(word & 0xff)
: static_cast<uint8_t>((word >> 24) & 0xff);
}
static constexpr unsigned kNumLockBits = 2;
static constexpr uint8_t kLockBits =
static_cast<uint8_t>((1 << kNumLockBits) - 1);
static constexpr uint8_t kDataBits = static_cast<uint8_t>(~kLockBits);
/**
* Decodes the value stored in the unused bits of the lock.
*/
static constexpr uint8_t decodeDataFromByte(uint8_t lockByte) noexcept {
return static_cast<uint8_t>(lockByte >> kNumLockBits);
}
/**
* Encodes the value for the unused bits of the lock.
*/
static constexpr uint8_t encodeDataToByte(uint8_t data) noexcept {
return static_cast<uint8_t>(data << kNumLockBits);
}
FOLLY_ALWAYS_INLINE constexpr static uint8_t byteFromWord(uint32_t word); static constexpr uint8_t decodeDataFromWord(uint32_t word) noexcept {
return decodeDataFromByte(byteFromWord(word));
}
static constexpr uint32_t encodeDataToWord(
uint32_t word,
unsigned shiftToByte,
uint8_t value) noexcept {
const uint32_t preservedBits = word & ~(kDataBits << shiftToByte);
const uint32_t newBits = encodeDataToByte(value) << shiftToByte;
return preservedBits | newBits;
}
template <typename Func> template <typename Func>
FOLLY_DISABLE_ADDRESS_SANITIZER inline void unlockAndStoreWithModifier( FOLLY_DISABLE_ADDRESS_SANITIZER void unlockAndStoreWithModifier(
unsigned slot, Func modifier) noexcept;
Func modifier);
public: public:
static constexpr unsigned kBitsPerSlot = 2; /**
* Loads the data stored in the unused bits of the lock atomically.
template <unsigned Slot> */
static constexpr uint8_t slotMask() { FOLLY_DISABLE_ADDRESS_SANITIZER uint8_t
static_assert( load(std::memory_order order = std::memory_order_seq_cst) const noexcept {
Slot < CHAR_BIT / kBitsPerSlot, "slot is out of range of uint8_t"); return decodeDataFromWord(word()->load(order));
return 0b11 << (Slot * kBitsPerSlot);
} }
/** /**
* Loads the state of this lock atomically. This is useful for introspecting * Stores the data in the unused bits of the lock atomically. Since 2 bits are
* any user data that may be placed in unused slots. * used by the lock, the most significant 2 bits of the provided value will be
* ignored.
*/ */
FOLLY_DISABLE_ADDRESS_SANITIZER inline uint8_t load( FOLLY_DISABLE_ADDRESS_SANITIZER void store(
std::memory_order order = std::memory_order_seq_cst) const { uint8_t value,
return byteFromWord(word()->load(order)); std::memory_order order = std::memory_order_seq_cst) noexcept;
}
inline void unlock(unsigned slot);
/** /**
* Unlocks the selected slot and stores the bits of the provided value in the * Unlocks the lock and stores the bits of the provided value into the data
* other slots. The two bits of the selected slot will be automatically masked * bits. Since 2 bits are used by the lock, the most significant 2 bits of the
* out from the provided value. * provided value will be ignored.
*
* For example, the following usage unlocks slot1 preserves the state of
* slot3. This indicates that slot1 and slot3 are used for locking while slot0
* and slot2 are used as data:
*
* lock.unlockAndStore(1, ~MicroLock::slotMask<3>(), value);
*/ */
inline void unlockAndStore(unsigned slot, uint8_t dataMask, uint8_t value); void unlockAndStore(uint8_t value) noexcept;
inline void unlock() { unlock(0); } void unlock() noexcept;
// Initializes all the slots. /**
inline void init() { lock_ = 0; } * Initializes the lock state and sets the data bits to 0.
*/
void init() noexcept { lock_ = 0; }
}; };
inline detail::Futex<>* MicroLockCore::word() const { inline detail::Futex<>* MicroLockCore::word() const noexcept {
uintptr_t lockptr = (uintptr_t)&lock_; uintptr_t lockptr = (uintptr_t)&lock_;
lockptr &= ~(sizeof(uint32_t) - 1); lockptr &= ~(sizeof(uint32_t) - 1);
return (detail::Futex<>*)lockptr; return (detail::Futex<>*)lockptr;
} }
inline unsigned MicroLockCore::baseShift(unsigned slot) const { inline unsigned MicroLockCore::baseShift() const noexcept {
assert(slot < CHAR_BIT / kBitsPerSlot);
unsigned offset_bytes = (unsigned)((uintptr_t)&lock_ - (uintptr_t)word()); unsigned offset_bytes = (unsigned)((uintptr_t)&lock_ - (uintptr_t)word());
return static_cast<unsigned>( return static_cast<unsigned>(
kIsLittleEndian ? offset_bytes * CHAR_BIT + slot * kBitsPerSlot kIsLittleEndian ? CHAR_BIT * offset_bytes
: CHAR_BIT * (sizeof(uint32_t) - offset_bytes - 1) + : CHAR_BIT * (sizeof(uint32_t) - offset_bytes - 1));
slot * kBitsPerSlot);
} }
inline uint32_t MicroLockCore::heldBit(unsigned slot) const { inline unsigned MicroLockCore::heldBit() const noexcept {
return 1U << (baseShift(slot) + 0); return 1U << (baseShift() + 0);
} }
inline uint32_t MicroLockCore::waitBit(unsigned slot) const { inline unsigned MicroLockCore::waitBit() const noexcept {
return 1U << (baseShift(slot) + 1); return 1U << (baseShift() + 1);
} }
constexpr inline uint8_t MicroLockCore::byteFromWord(uint32_t word) { inline void MicroLockCore::store(
return kIsLittleEndian ? static_cast<uint8_t>(word & 0xff) uint8_t value,
: static_cast<uint8_t>((word >> 24) & 0xff); std::memory_order order) noexcept {
detail::Futex<>* wordPtr = word();
const auto shiftToByte = baseShift();
auto oldWord = wordPtr->load(std::memory_order_relaxed);
while (true) {
auto newWord = encodeDataToWord(oldWord, shiftToByte, value);
if (wordPtr->compare_exchange_weak(
oldWord, newWord, order, std::memory_order_relaxed)) {
break;
}
}
} }
template <typename Func> template <typename Func>
void MicroLockCore::unlockAndStoreWithModifier(unsigned slot, Func modifier) { void MicroLockCore::unlockAndStoreWithModifier(Func modifier) noexcept {
detail::Futex<>* wordPtr = word(); detail::Futex<>* wordPtr = word();
uint32_t oldWord; uint32_t oldWord;
uint32_t newWord; uint32_t newWord;
oldWord = wordPtr->load(std::memory_order_relaxed); oldWord = wordPtr->load(std::memory_order_relaxed);
do { do {
assert(oldWord & heldBit(slot)); assert(oldWord & heldBit());
newWord = modifier(oldWord) & ~(heldBit(slot) | waitBit(slot)); newWord = modifier(oldWord) & ~(heldBit() | waitBit());
} while (!wordPtr->compare_exchange_weak( } while (!wordPtr->compare_exchange_weak(
oldWord, newWord, std::memory_order_release, std::memory_order_relaxed)); oldWord, newWord, std::memory_order_release, std::memory_order_relaxed));
if (oldWord & waitBit(slot)) { if (oldWord & waitBit()) {
detail::futexWake(wordPtr, 1, heldBit(slot)); detail::futexWake(wordPtr, 1, heldBit());
} }
} }
inline void inline void MicroLockCore::unlockAndStore(uint8_t value) noexcept {
MicroLockCore::unlockAndStore(unsigned slot, uint8_t dataMask, uint8_t value) {
unlockAndStoreWithModifier( unlockAndStoreWithModifier(
slot, [dataMask, value, shiftToByte = baseShift(0)](uint32_t oldWord) { [value, shiftToByte = baseShift()](uint32_t oldWord) {
const uint32_t preservedBits = oldWord & ~(dataMask << shiftToByte); return encodeDataToWord(oldWord, shiftToByte, value);
const uint32_t newBits = (value & dataMask) << shiftToByte;
return preservedBits | newBits;
}); });
} }
inline void MicroLockCore::unlock(unsigned slot) { inline void MicroLockCore::unlock() noexcept {
unlockAndStoreWithModifier(slot, [](uint32_t oldWord) { return oldWord; }); unlockAndStoreWithModifier([](uint32_t oldWord) { return oldWord; });
} }
template <unsigned MaxSpins = 1000, unsigned MaxYields = 0> template <unsigned MaxSpins = 1000, unsigned MaxYields = 0>
class MicroLockBase : public MicroLockCore { class MicroLockBase : public MicroLockCore {
public: public:
/** /**
* Locks the selected slot and returns the entire byte that represents the * Locks the lock and returns the data stored in the unused bits of the lock.
* state of this lock. This is useful when you want to use some of the slots * This is useful when you want to use the unused bits of the lock to store
* to store data, in which case reading and locking should be done in one * data, in which case reading and locking should be done in one atomic
* atomic operation. * operation.
*/ */
FOLLY_DISABLE_ADDRESS_SANITIZER inline uint8_t lockAndLoad(unsigned slot); FOLLY_DISABLE_ADDRESS_SANITIZER uint8_t lockAndLoad() noexcept;
inline void lock(unsigned slot) { lockAndLoad(slot); } void lock() noexcept { lockAndLoad(); }
inline void lock() { lock(0); } FOLLY_DISABLE_ADDRESS_SANITIZER bool try_lock() noexcept;
FOLLY_DISABLE_ADDRESS_SANITIZER inline bool try_lock(unsigned slot);
inline bool try_lock() { return try_lock(0); }
/** /**
* A lock guard which allows reading and writing to the unused slots as data. * A lock guard which allows reading and writing to the unused bits of the
* The template parameters are used to select the slot indices which represent * lock as data.
* data slots. The bits representing all other slots will be masked out when
* storing the user data.
*
* Example:
*
* LockGuardWithDataSlots<1, 2> guard(lock, 0);
* guard.loadedValue(); // bits of slot1 and slot2 (lock_ & 0b00111100)
* guard.storeValue(0b10101010); // stored as 0bxx1010xx (x means unchanged)
*/ */
template <unsigned... Slots> struct LockGuardWithData {
struct LockGuardWithDataSlots { explicit LockGuardWithData(MicroLockBase<MaxSpins, MaxYields>& lock)
explicit LockGuardWithDataSlots( : lock_(lock) {
MicroLockBase<MaxSpins, MaxYields>& lock, loadedValue_ = lock_.lockAndLoad();
unsigned slot = 0)
: lock_(lock), slot_(slot) {
loadedValue_ = lock_.lockAndLoad(slot_) & dataMask();
} }
~LockGuardWithDataSlots() noexcept { ~LockGuardWithData() noexcept {
if (storedValue_) { if (storedValue_) {
lock_.unlockAndStore(slot_, dataMask(), *storedValue_); lock_.unlockAndStore(*storedValue_);
} else { } else {
lock_.unlock(slot_); lock_.unlock();
} }
} }
/** /**
* The stored data with the non-data slot bits masked out (which will be 0). * The stored data bits at the time of locking.
*/ */
uint8_t loadedValue() const { return loadedValue_; } uint8_t loadedValue() const noexcept { return loadedValue_; }
/** /**
* The value that will be stored back into data lock slots when it is * The value that will be stored back into data bits when it is unlocked.
* unlocked. The non-data slot bits in the provided value will be ignored.
*/ */
void storeValue(uint8_t value) { storedValue_ = value; } void storeValue(uint8_t value) noexcept { storedValue_ = value; }
private: private:
// C++17 fold expressions would be handy here
FOLLY_ALWAYS_INLINE constexpr static uint8_t bitOr(uint8_t value) {
return value;
}
template <typename... Others>
FOLLY_ALWAYS_INLINE constexpr static uint8_t bitOr(
uint8_t value,
Others... others) {
return value | bitOr(others...);
}
constexpr static uint8_t dataMask() { return bitOr(slotMask<Slots>()...); }
MicroLockBase<MaxSpins, MaxYields>& lock_; MicroLockBase<MaxSpins, MaxYields>& lock_;
unsigned slot_;
uint8_t loadedValue_; uint8_t loadedValue_;
folly::Optional<uint8_t> storedValue_; folly::Optional<uint8_t> storedValue_;
}; };
}; };
template <unsigned MaxSpins, unsigned MaxYields> template <unsigned MaxSpins, unsigned MaxYields>
bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) { bool MicroLockBase<MaxSpins, MaxYields>::try_lock() noexcept {
// N.B. You might think that try_lock is just the fast path of lock, // N.B. You might think that try_lock is just the fast path of lock,
// but you'd be wrong. Keep in mind that other parts of our host // but you'd be wrong. Keep in mind that other parts of our host
// word might be changing while we take the lock! We're not allowed // word might be changing while we take the lock! We're not allowed
...@@ -306,12 +324,12 @@ bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) { ...@@ -306,12 +324,12 @@ bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) {
detail::Futex<>* wordPtr = word(); detail::Futex<>* wordPtr = word();
uint32_t oldWord = wordPtr->load(std::memory_order_relaxed); uint32_t oldWord = wordPtr->load(std::memory_order_relaxed);
do { do {
if (oldWord & heldBit(slot)) { if (oldWord & heldBit()) {
return false; return false;
} }
} while (!wordPtr->compare_exchange_weak( } while (!wordPtr->compare_exchange_weak(
oldWord, oldWord,
oldWord | heldBit(slot), oldWord | heldBit(),
std::memory_order_acquire, std::memory_order_acquire,
std::memory_order_relaxed)); std::memory_order_relaxed));
...@@ -319,27 +337,26 @@ bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) { ...@@ -319,27 +337,26 @@ bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) {
} }
template <unsigned MaxSpins, unsigned MaxYields> template <unsigned MaxSpins, unsigned MaxYields>
uint8_t MicroLockBase<MaxSpins, MaxYields>::lockAndLoad(unsigned slot) { uint8_t MicroLockBase<MaxSpins, MaxYields>::lockAndLoad() noexcept {
static_assert(MaxSpins + MaxYields < (unsigned)-1, "overflow"); static_assert(MaxSpins + MaxYields < (unsigned)-1, "overflow");
detail::Futex<>* wordPtr = word(); detail::Futex<>* wordPtr = word();
uint32_t oldWord; uint32_t oldWord;
oldWord = wordPtr->load(std::memory_order_relaxed); oldWord = wordPtr->load(std::memory_order_relaxed);
if ((oldWord & heldBit(slot)) == 0 && if ((oldWord & heldBit()) == 0 &&
wordPtr->compare_exchange_weak( wordPtr->compare_exchange_weak(
oldWord, oldWord,
oldWord | heldBit(slot), oldWord | heldBit(),
std::memory_order_acquire, std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
// Fast uncontended case: memory_order_acquire above is our barrier // Fast uncontended case: memory_order_acquire above is our barrier
return byteFromWord(oldWord | heldBit(slot)); return decodeDataFromWord(oldWord | heldBit());
} else { } else {
// lockSlowPath doesn't have any slot-dependent computation; it // lockSlowPath doesn't call waitBit(); it just shifts the input bit. Make
// just shifts the input bit. Make sure its shifting produces the // sure its shifting produces the same result a call to waitBit would.
// same result a call to waitBit for our slot would. assert(heldBit() << 1 == waitBit());
assert(heldBit(slot) << 1 == waitBit(slot));
// lockSlowPath emits its own memory barrier // lockSlowPath emits its own memory barrier
return lockSlowPath(oldWord, wordPtr, heldBit(slot), MaxSpins, MaxYields); return lockSlowPath(oldWord, wordPtr, heldBit(), MaxSpins, MaxYields);
} }
} }
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#pragma once #pragma once
#include <atomic> #include <atomic>
#include <limits>
#include <utility> #include <utility>
#include <folly/Likely.h> #include <folly/Likely.h>
...@@ -40,7 +39,7 @@ class basic_once_flag; ...@@ -40,7 +39,7 @@ class basic_once_flag;
// //
// An alternative flag template that can be used with call_once that uses only // An alternative flag template that can be used with call_once that uses only
// 1 byte. Internally, compact_once_flag uses folly::MicroLock and its data // 1 byte. Internally, compact_once_flag uses folly::MicroLock and its data
// slots. // storage API.
class compact_once_flag; class compact_once_flag;
// call_once // call_once
...@@ -165,12 +164,12 @@ class compact_once_flag { ...@@ -165,12 +164,12 @@ class compact_once_flag {
template <typename F, typename... Args> template <typename F, typename... Args>
FOLLY_NOINLINE void call_once_slow(F&& f, Args&&... args) { FOLLY_NOINLINE void call_once_slow(F&& f, Args&&... args) {
folly::MicroLock::LockGuardWithDataSlots<1> guard(mutex_, 0); folly::MicroLock::LockGuardWithData guard(mutex_);
if (guard.loadedValue() != 0) { if (guard.loadedValue() != 0) {
return; return;
} }
invoke(std::forward<F>(f), std::forward<Args>(args)...); invoke(std::forward<F>(f), std::forward<Args>(args)...);
guard.storeValue(std::numeric_limits<uint8_t>::max()); guard.storeValue(1);
} }
template <typename OnceFlag, typename F, typename... Args> template <typename OnceFlag, typename F, typename... Args>
...@@ -178,24 +177,20 @@ class compact_once_flag { ...@@ -178,24 +177,20 @@ class compact_once_flag {
template <typename F, typename... Args> template <typename F, typename... Args>
FOLLY_NOINLINE bool try_call_once_slow(F&& f, Args&&... args) noexcept { FOLLY_NOINLINE bool try_call_once_slow(F&& f, Args&&... args) noexcept {
folly::MicroLock::LockGuardWithDataSlots<1> guard(mutex_); folly::MicroLock::LockGuardWithData guard(mutex_);
if (guard.loadedValue() != 0) { if (guard.loadedValue() != 0) {
return true; return true;
} }
const auto pass = static_cast<bool>( const auto pass = static_cast<bool>(
invoke(std::forward<F>(f), std::forward<Args>(args)...)); invoke(std::forward<F>(f), std::forward<Args>(args)...));
guard.storeValue(pass ? std::numeric_limits<uint8_t>::max() : 0); guard.storeValue(pass ? 1 : 0);
return pass; return pass;
} }
FOLLY_ALWAYS_INLINE bool test_once() const noexcept { FOLLY_ALWAYS_INLINE bool test_once() const noexcept {
const uint8_t storedValue = mutex_.load(std::memory_order_acquire) & return mutex_.load(std::memory_order_acquire) != 0;
folly::MicroLock::slotMask<1>();
return (storedValue != 0);
} }
// We're only using slot0 of the MicroLock as a mutex, slot1 stores a bool
// indicating whether this flag has been already called.
folly::MicroLock mutex_; folly::MicroLock mutex_;
}; };
......
...@@ -249,14 +249,12 @@ struct SimpleBarrier { ...@@ -249,14 +249,12 @@ struct SimpleBarrier {
} // namespace } // namespace
TEST(SmallLocks, MicroLock) { TEST(SmallLocks, MicroLock) {
volatile uint64_t counters[4] = {0, 0, 0, 0}; volatile uint64_t counter = 0;
std::vector<std::thread> threads; std::vector<std::thread> threads;
static const unsigned nrThreads = 20; static const unsigned nrThreads = 20;
static const unsigned iterPerThread = 10000; static const unsigned iterPerThread = 10000;
SimpleBarrier startBarrier; SimpleBarrier startBarrier;
assert(iterPerThread % 4 == 0);
// Embed the lock in a larger structure to ensure that we do not // Embed the lock in a larger structure to ensure that we do not
// affect bits outside the ones MicroLock is defined to affect. // affect bits outside the ones MicroLock is defined to affect.
struct { struct {
...@@ -292,16 +290,15 @@ TEST(SmallLocks, MicroLock) { ...@@ -292,16 +290,15 @@ TEST(SmallLocks, MicroLock) {
threads.emplace_back([&] { threads.emplace_back([&] {
startBarrier.wait(); startBarrier.wait();
for (unsigned iter = 0; iter < iterPerThread; ++iter) { for (unsigned iter = 0; iter < iterPerThread; ++iter) {
unsigned slotNo = iter % 4; x.alock.lock();
x.alock.lock(slotNo); counter += 1;
counters[slotNo] += 1;
// The occasional sleep makes it more likely that we'll // The occasional sleep makes it more likely that we'll
// exercise the futex-wait path inside MicroLock. // exercise the futex-wait path inside MicroLock.
if (iter % 1000 == 0) { if (iter % 1000 == 0) {
struct timespec ts = {0, 10000}; struct timespec ts = {0, 10000};
(void)nanosleep(&ts, nullptr); (void)nanosleep(&ts, nullptr);
} }
x.alock.unlock(slotNo); x.alock.unlock();
} }
}); });
} }
...@@ -317,9 +314,7 @@ TEST(SmallLocks, MicroLock) { ...@@ -317,9 +314,7 @@ TEST(SmallLocks, MicroLock) {
EXPECT_EQ(x.a, 'a'); EXPECT_EQ(x.a, 'a');
EXPECT_EQ(x.b, (uint8_t)(origB + iterPerThread / 2)); EXPECT_EQ(x.b, (uint8_t)(origB + iterPerThread / 2));
EXPECT_EQ(x.d, (uint8_t)(origD + iterPerThread / 2)); EXPECT_EQ(x.d, (uint8_t)(origD + iterPerThread / 2));
for (unsigned i = 0; i < 4; ++i) { EXPECT_EQ(counter, ((uint64_t)nrThreads * iterPerThread));
EXPECT_EQ(counters[i], ((uint64_t)nrThreads * iterPerThread) / 4);
}
} }
TEST(SmallLocks, MicroLockTryLock) { TEST(SmallLocks, MicroLockTryLock) {
...@@ -330,50 +325,37 @@ TEST(SmallLocks, MicroLockTryLock) { ...@@ -330,50 +325,37 @@ TEST(SmallLocks, MicroLockTryLock) {
lock.unlock(); lock.unlock();
} }
TEST(SmallLocks, MicroLockSlotsAsData) { TEST(SmallLocks, MicroLockWithData) {
MicroLock lock; MicroLock lock;
lock.init(); lock.init();
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0); EXPECT_EQ(lock.load(std::memory_order_relaxed), 0);
lock.lock(0); EXPECT_EQ(lock.lockAndLoad(), 0);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b00000001); lock.unlockAndStore(42);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 42);
EXPECT_EQ(lock.lockAndLoad(1), 0b00000101);
// Mask out only the slot that is being unlocked EXPECT_EQ(lock.lockAndLoad(), 42);
lock.unlockAndStore( lock.unlock();
1, ~MicroLock::slotMask<0>(), std::numeric_limits<uint8_t>::max()); EXPECT_EQ(lock.load(std::memory_order_relaxed), 42);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11110001);
lock.init();
lock.lock(0);
{
MicroLock::LockGuardWithDataSlots<2, 3> guard(lock, 1);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b00000101);
EXPECT_EQ(guard.loadedValue(), 0);
guard.storeValue(std::numeric_limits<uint8_t>::max());
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b00000101);
}
// Only slots 2 and 3 should be set
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11110001);
{
MicroLock::LockGuardWithDataSlots<2> guard(lock, 1);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11110101);
EXPECT_EQ(guard.loadedValue(), 0b00110000);
guard.storeValue(0);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11110101);
}
// Only slot 2 should be unset
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11000001);
lock.store(12, std::memory_order_relaxed);
{ {
MicroLock::LockGuardWithDataSlots<3> guard(lock, 1); MicroLock::LockGuardWithData guard{lock};
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11000101); EXPECT_EQ(guard.loadedValue(), 12);
EXPECT_EQ(guard.loadedValue(), 0b11000000); EXPECT_EQ(lock.load(std::memory_order_relaxed), 12);
guard.storeValue(24);
// Should not have immediate effect
EXPECT_EQ(guard.loadedValue(), 12);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 12);
} }
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b11000001); EXPECT_EQ(lock.load(std::memory_order_relaxed), 24);
// Do not modify unless something is stored
// Drop the two most significant bits
lock.lock();
lock.unlockAndStore(0b10011001);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b00011001);
lock.store(0b11101110, std::memory_order_relaxed);
EXPECT_EQ(lock.load(std::memory_order_relaxed), 0b00101110);
} }
namespace { namespace {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment