Commit 8091d719 authored by Yedidya Feldblum, committed by Facebook Github Bot

Kill FOLLY_ALIGNED etc

Summary:
[Folly] Kill `FOLLY_ALIGNED` etc.

`alignas` is standardized as of C++11. Let us just use that.

Replace:
* `FOLLY_ALIGNED` with `alignas`
* `FOLLY_ALIGNED_MAX` with `alignas(folly::max_align_v)`
* `FOLLY_ALIGN_TO_AVOID_FALSE_SHARING` with `alignas(folly::hardware_destructive_interference_size)`

Because `alignas` is more restrictive than attributes about where it may be placed, in some cases the specifier also has to be moved as part of the replacement.
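
For illustration, a minimal sketch of the three replacements named above; the `Blob`/`Tickets` types and the two stand-in constants are hypothetical, not taken from this diff:

  #include <atomic>
  #include <cstddef>
  #include <cstdint>

  // Stand-ins for the folly constants referenced in the summary (illustrative
  // values only; real code would use folly::max_align_v and
  // folly::hardware_destructive_interference_size).
  constexpr std::size_t kMaxAlign = alignof(std::max_align_t);
  constexpr std::size_t kFalseSharingRange = 128;

  // FOLLY_ALIGNED(16)  ->  alignas(16)
  alignas(16) constexpr uint16_t shiftTable[4] = {1, 10, 100, 1000};

  // FOLLY_ALIGNED_MAX  ->  alignas(folly::max_align_v)
  struct alignas(kMaxAlign) Blob {
    unsigned char bytes[64];
  };

  // FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
  //     ->  alignas(folly::hardware_destructive_interference_size)
  // Note the move: the old macro expanded to a compiler attribute, which may
  // follow the type ("size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;"),
  // but alignas has to come at the front of the declaration (or right after
  // struct/class), which is why several declarations below are also reflowed.
  struct Tickets {
    alignas(kFalseSharingRange) std::atomic<uint64_t> push{0};
    alignas(kFalseSharingRange) std::atomic<uint64_t> pop{0};
  };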

Reviewed By: Orvid

Differential Revision: D6555167

fbshipit-source-id: 4b05b570bace3f8c0fe810b6dd58781dd45757f4
parent 1166fe0e
@@ -86,7 +86,7 @@ template <> const char *const MaxString<__uint128_t>::value =
 // still not overflow uint16_t.
 constexpr int32_t OOR = 10000;
-FOLLY_ALIGNED(16) constexpr uint16_t shift1[] = {
+alignas(16) constexpr uint16_t shift1[] = {
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
@@ -115,7 +115,7 @@ FOLLY_ALIGNED(16) constexpr uint16_t shift1[] = {
 OOR, OOR, OOR, OOR, OOR, OOR // 250
 };
-FOLLY_ALIGNED(16) constexpr uint16_t shift10[] = {
+alignas(16) constexpr uint16_t shift10[] = {
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
@@ -144,7 +144,7 @@ FOLLY_ALIGNED(16) constexpr uint16_t shift10[] = {
 OOR, OOR, OOR, OOR, OOR, OOR // 250
 };
-FOLLY_ALIGNED(16) constexpr uint16_t shift100[] = {
+alignas(16) constexpr uint16_t shift100[] = {
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
@@ -173,7 +173,7 @@ FOLLY_ALIGNED(16) constexpr uint16_t shift100[] = {
 OOR, OOR, OOR, OOR, OOR, OOR // 250
 };
-FOLLY_ALIGNED(16) constexpr uint16_t shift1000[] = {
+alignas(16) constexpr uint16_t shift1000[] = {
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
 OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
......
@@ -303,7 +303,7 @@ inline uint32_t digits10(uint64_t v) {
 // 10^i, defined for i 0 through 19.
 // This is 20 * 8 == 160 bytes, which fits neatly into 5 cache lines
 // (assuming a cache line size of 64).
-static const uint64_t powersOf10[20] FOLLY_ALIGNED(64) = {
+alignas(64) static const uint64_t powersOf10[20] = {
 1,
 10,
 100,
......
@@ -351,7 +351,7 @@ struct IndexedMemPool : boost::noncopyable {
 }
 };
-struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING LocalList {
+struct alignas(hardware_destructive_interference_size) LocalList {
 AtomicStruct<TaggedPtr,Atom> head;
 LocalList() : head(TaggedPtr{}) {}
@@ -377,7 +377,7 @@ struct IndexedMemPool : boost::noncopyable {
 /// raw storage, only 1..min(size_,actualCapacity_) (inclusive) are
 /// actually constructed. Note that slots_[0] is not constructed or used
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Slot* slots_;
+alignas(hardware_destructive_interference_size) Slot* slots_;
 /// use AccessSpreader to find your list. We use stripes instead of
 /// thread-local to avoid the need to grow or shrink on thread start
@@ -386,7 +386,8 @@ struct IndexedMemPool : boost::noncopyable {
 /// this is the head of a list of node chained by globalNext, that are
 /// themselves each the head of a list chained by localNext
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING AtomicStruct<TaggedPtr,Atom> globalHead_;
+alignas(hardware_destructive_interference_size)
+AtomicStruct<TaggedPtr, Atom> globalHead_;
 ///////////// private methods
......
@@ -981,7 +981,7 @@ class MPMCQueueBase<Derived<T, Atom, Dynamic>> : boost::noncopyable {
 };
 /// The maximum number of items in the queue at once
-size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;
+alignas(hardware_destructive_interference_size) size_t capacity_;
 /// Anonymous union for use when Dynamic = false and true, respectively
 union {
@@ -1014,18 +1014,19 @@ class MPMCQueueBase<Derived<T, Atom, Dynamic>> : boost::noncopyable {
 Atom<size_t> dcapacity_;
 /// Enqueuers get tickets from here
-Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushTicket_;
+alignas(hardware_destructive_interference_size) Atom<uint64_t> pushTicket_;
 /// Dequeuers get tickets from here
-Atom<uint64_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popTicket_;
+alignas(hardware_destructive_interference_size) Atom<uint64_t> popTicket_;
 /// This is how many times we will spin before using FUTEX_WAIT when
 /// the queue is full on enqueue, adaptively computed by occasionally
 /// spinning for longer and smoothing with an exponential moving average
-Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;
+alignas(
+hardware_destructive_interference_size) Atom<uint32_t> pushSpinCutoff_;
 /// The adaptive spin cutoff when the queue is empty on dequeue
-Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;
+alignas(hardware_destructive_interference_size) Atom<uint32_t> popSpinCutoff_;
 /// Alignment doesn't prevent false sharing at the end of the struct,
 /// so fill out the last cache line
......
@@ -34,14 +34,6 @@ constexpr bool kHasUnalignedAccess = false;
 // compiler specific attribute translation
 // msvc should come first, so if clang is in msvc mode it gets the right defines
-#if defined(__clang__) || defined(__GNUC__)
-# define FOLLY_ALIGNED(size) __attribute__((__aligned__(size)))
-#elif defined(_MSC_VER)
-# define FOLLY_ALIGNED(size) __declspec(align(size))
-#else
-# error Cannot define FOLLY_ALIGNED on this platform
-#endif
 // NOTE: this will only do checking in msvc with versions that support /analyze
 #if _MSC_VER
 # ifdef _USE_ATTRIBUTES_FOR_SAL
......
@@ -177,8 +177,10 @@ struct ProducerConsumerQueue {
 const uint32_t size_;
 T* const records_;
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<unsigned int> readIndex_;
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<unsigned int> writeIndex_;
+alignas(hardware_destructive_interference_size)
+std::atomic<unsigned int> readIndex_;
+alignas(hardware_destructive_interference_size)
+std::atomic<unsigned int> writeIndex_;
 char pad1_[hardware_destructive_interference_size - sizeof(writeIndex_)];
 };
......
@@ -738,9 +738,8 @@ class SharedMutexImpl {
 typedef Atom<uintptr_t> DeferredReaderSlot;
 private:
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING static DeferredReaderSlot deferredReaders
-[kMaxDeferredReaders *
-kDeferredSeparationFactor];
+alignas(hardware_destructive_interference_size) static DeferredReaderSlot
+deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor];
 // Performs an exclusive lock, waiting for state_ & waitMask to be
 // zero first
@@ -1350,11 +1349,11 @@ template <
 typename Tag_,
 template <typename> class Atom,
 bool BlockImmediately>
-typename SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
-DeferredReaderSlot
-SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
-deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor] =
-{};
+alignas(hardware_destructive_interference_size)
+typename SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
+DeferredReaderSlot
+SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
+deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor] = {};
 template <
 bool ReaderPriority,
......
@@ -221,7 +221,7 @@ class ParameterizedDynamicTokenBucket {
 return true;
 }
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<double> zeroTime_;
+alignas(hardware_destructive_interference_size) std::atomic<double> zeroTime_;
 };
 /**
......
@@ -118,12 +118,6 @@ struct CacheLocality {
 static CacheLocality uniform(size_t numCpus);
 };
-// TODO replace with alignas(hardware_destructive_interference_size)
-/// An attribute that will cause a variable or field to be aligned so that
-/// it doesn't have false sharing with anything at a smaller memory address.
-#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
 /// Knows how to derive a function pointer to the VDSO implementation of
 /// getcpu(2), if available
 struct Getcpu {
......
@@ -648,8 +648,7 @@ class UnboundedQueue {
 Atom<Segment*> next_;
 const Ticket min_;
 bool marked_; // used for iterative deletion
-FOLLY_ALIGNED(Align)
-Entry b_[SegmentSize];
+alignas(Align) Entry b_[SegmentSize];
 public:
 explicit Segment(const Ticket t)
......
@@ -197,7 +197,7 @@ template <
 typename Allocator = std::allocator<uint8_t>,
 template <typename> class Atom = std::atomic,
 class Mutex = std::mutex>
-class FOLLY_ALIGNED(64) ConcurrentHashMapSegment {
+class alignas(64) ConcurrentHashMapSegment {
 enum class InsertType {
 DOES_NOT_EXIST, // insert/emplace operations. If key exists, return false.
 MUST_EXIST, // assign operations. If key does not exist, return false.
......
@@ -73,7 +73,8 @@ class IOThreadPoolExecutor : public ThreadPoolExecutor, public IOExecutor {
 folly::EventBaseManager* getEventBaseManager();
 private:
-struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING IOThread : public Thread {
+struct alignas(hardware_destructive_interference_size) IOThread
+: public Thread {
 IOThread(IOThreadPoolExecutor* pool)
 : Thread(pool), shouldRun(true), pendingTasks(0) {}
 std::atomic<bool> shouldRun;
......
@@ -129,7 +129,8 @@ class ThreadPoolExecutor : public virtual folly::Executor {
 struct TaskStatsCallbackRegistry;
-struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Thread : public ThreadHandle {
+struct alignas(hardware_destructive_interference_size) Thread
+: public ThreadHandle {
 explicit Thread(ThreadPoolExecutor* pool)
 : id(nextId++),
 handle(),
......
@@ -112,8 +112,8 @@ class FlatCombining {
 public:
 /// Combining request record.
 class Rec {
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
+alignas(hardware_destructive_interference_size)
 folly::SaturatingSemaphore<false, Atom> valid_;
 folly::SaturatingSemaphore<false, Atom> done_;
 folly::SaturatingSemaphore<false, Atom> disconnected_;
 size_t index_;
@@ -421,23 +421,20 @@ class FlatCombining {
 const uint64_t kDefaultNumRecs = 64;
 const uint64_t kIdleThreshold = 10;
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
-Mutex m_;
+alignas(hardware_destructive_interference_size) Mutex m_;
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
+alignas(hardware_destructive_interference_size)
 folly::SaturatingSemaphore<true, Atom> pending_;
 Atom<bool> shutdown_{false};
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
-uint32_t numRecs_;
+alignas(hardware_destructive_interference_size) uint32_t numRecs_;
 uint32_t maxOps_;
 Atom<size_t> recs_;
 bool dedicated_;
 std::thread combiner_;
 Pool recsPool_;
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
-uint64_t uncombined_ = 0;
+alignas(hardware_destructive_interference_size) uint64_t uncombined_ = 0;
 uint64_t combined_ = 0;
 uint64_t passes_ = 0;
 uint64_t sessions_ = 0;
......
@@ -25,8 +25,7 @@
 namespace folly {
-struct Line {
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
+struct alignas(hardware_destructive_interference_size) Line {
 uint64_t val_;
 };
......
@@ -265,12 +265,11 @@ inline bool hazptr_obj_base_refcounted<T, D>::release_ref() {
 * hazptr_rec
 */
-class hazptr_rec {
+class alignas(hardware_destructive_interference_size) hazptr_rec {
 friend class hazptr_domain;
 friend class hazptr_holder;
 friend struct hazptr_tc_entry;
-FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
 std::atomic<const void*> hazptr_{nullptr};
 hazptr_rec* next_{nullptr};
 std::atomic<bool> active_{false};
......