Commit eecfd465 authored by Orvid King, committed by Facebook GitHub Bot

Spellcheck

Summary: Spellcheck everything in the root and docs directory.

Reviewed By: yfeldblum

Differential Revision: D31228715

fbshipit-source-id: 3c5ace5d8879fbca17ebacf4fb6edee5e28a1b42
parent bc254c2a
@@ -199,7 +199,7 @@ class AtomicHashArray {
  * allowed to be different from the type of keys actually stored (KeyT).
  *
  * This enables use cases where materializing the key is costly and usually
- * redudant, e.g., canonicalizing/interning a set of strings and being able
+ * redundant, e.g., canonicalizing/interning a set of strings and being able
  * to look up by StringPiece. To use this feature, LookupHashFcn must take
  * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
  * and second parameter, respectively.
...
@@ -275,7 +275,7 @@ class AtomicHashMap {
  * allowed to be different from the type of keys actually stored (KeyT).
  *
  * This enables use cases where materializing the key is costly and usually
- * redudant, e.g., canonicalizing/interning a set of strings and being able
+ * redundant, e.g., canonicalizing/interning a set of strings and being able
  * to look up by StringPiece. To use this feature, LookupHashFcn must take
  * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
  * and second parameter, respectively.
...
@@ -395,7 +395,7 @@ struct AtomicUnorderedInsertMap {
  size_t mmapRequested_;
  size_t numSlots_;
- /// tricky, see keyToSlodIdx
+ /// tricky, see keyToSlotIdx
  size_t slotMask_;
  Allocator allocator_;
...
@@ -164,7 +164,7 @@ bool CancellationState::requestCancellation() noexcept {
  // This was the last item in the queue when we dequeued it.
  // No more items should be added to the queue after we have
  // marked the state as cancelled, only removed from the queue.
- // Avoid acquring/releasing the lock in this case.
+ // Avoid acquiring/releasing the lock in this case.
  return false;
  }
...
@@ -189,7 +189,7 @@ struct system_clock_spec {};
  //
  // Detects and reexports per-clock traits.
  //
- // Specializable for clocks for which trait detection fails..
+ // Specializeable for clocks for which trait detection fails..
  template <typename Clock>
  struct clock_traits {
  private:
...
@@ -144,7 +144,7 @@ template <
  int MAX_HEIGHT = 24>
  class ConcurrentSkipList {
  // MAX_HEIGHT needs to be at least 2 to suppress compiler
- // warnings/errors (Werror=uninitialized tiggered due to preds_[1]
+ // warnings/errors (Werror=uninitialized triggered due to preds_[1]
  // being treated as a scalar in the compiler).
  static_assert(
  MAX_HEIGHT >= 2 && MAX_HEIGHT < 64,
@@ -268,7 +268,7 @@ class ConcurrentSkipList {
  }
  // lock all the necessary nodes for changing (adding or removing) the list.
- // returns true if all the lock acquried successfully and the related nodes
+ // returns true if all the lock acquired successfully and the related nodes
  // are all validate (not in certain pending states), false otherwise.
  bool lockNodesForChange(
  int nodeHeight,
@@ -303,7 +303,7 @@ class ConcurrentSkipList {
  // It could be either the newly added data, or the existed data in the
  // list with the same key.
  // pair.second stores whether the data is added successfully:
- // 0 means not added, otherwise reutrns the new size.
+ // 0 means not added, otherwise returns the new size.
  template <typename U>
  std::pair<NodeType*, size_t> addOrGetData(U&& data) {
  NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT];
@@ -448,7 +448,7 @@ class ConcurrentSkipList {
  // Find node by first stepping down then stepping right. Based on benchmark
  // results, this is slightly faster than findNodeRightDown for better
- // localality on the skipping pointers.
+ // locality on the skipping pointers.
  std::pair<NodeType*, int> findNodeDownRight(const value_type& data) const {
  NodeType* pred = head_.load(std::memory_order_acquire);
  int ht = pred->height();
@@ -561,7 +561,7 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
  }
  // Unsafe initializer: the caller assumes the responsibility to keep
- // skip_list valid during the whole life cycle of the Acessor.
+ // skip_list valid during the whole life cycle of the Accessor.
  explicit Accessor(ConcurrentSkipList* skip_list) : sl_(skip_list) {
  DCHECK(sl_ != nullptr);
  sl_->recycler_.addRef();
...
@@ -64,7 +64,7 @@ namespace folly {
  //
  // Implementation/Overhead Notes:
  //
- // By design, adding ConstructorCallback() to an object shoud be very light
+ // By design, adding ConstructorCallback() to an object should be very light
  // weight. From a memory context, this adds 1 byte of memory to the parent
  // class. From a CPU/performance perspective, the constructor does a load of an
  // atomic int and the cost of the actual callbacks themselves. So if this
...
@@ -792,7 +792,7 @@ typename std::enable_if<
  toAppendDelimStrImpl(const Delimiter& delim, const T& v, const Ts&... vs) {
  // we are really careful here, calling toAppend with just one element does
  // not try to estimate space needed (as we already did that). If we call
- // toAppend(v, delim, ....) we would do unnecesary size calculation
+ // toAppend(v, delim, ....) we would do unnecessary size calculation
  toAppend(v, detail::getLastElement(vs...));
  toAppend(delim, detail::getLastElement(vs...));
  toAppendDelimStrImpl(delim, vs...);
@@ -841,7 +841,7 @@ void toAppend(const pid_t a, Tgt* res) {
  #endif
  /**
- * Special version of the call that preallocates exaclty as much memory
+ * Special version of the call that preallocates exactly as much memory
  * as need for arguments to be stored in target. This means we are
  * not doing exponential growth when we append. If you are using it
  * in a loop you are aiming at your foot with a big perf-destroying
@@ -1482,7 +1482,7 @@ inline
  /**
  * tryTo/to that take the strings by pointer so the caller gets information
  * about how much of the string was consumed by the conversion. These do not
- * check for trailing whitepsace.
+ * check for trailing whitespace.
  */
  template <class Tgt>
  Expected<Tgt, detail::ParseToError<Tgt>> tryTo(StringPiece* src) {
...
@@ -32,7 +32,7 @@ extern bool const demangle_build_has_cxxabi;
  extern bool const demangle_build_has_liberty;
  /**
- * Return the demangled (prettyfied) version of a C++ type.
+ * Return the demangled (prettified) version of a C++ type.
  *
  * This function tries to produce a human-readable type, but the type name will
  * be returned unchanged in case of error or if demangling isn't supported on
@@ -48,7 +48,7 @@ inline fbstring demangle(const std::type_info& type) {
  }
  /**
- * Return the demangled (prettyfied) version of a C++ type in a user-provided
+ * Return the demangled (prettified) version of a C++ type in a user-provided
  * buffer.
  *
  * The semantics are the same as for snprintf or strlcpy: bufSize is the size
...
@@ -76,7 +76,7 @@ namespace folly {
  //! convenience and high performance use cases. `make_exception_wrapper` is
  //! templated on derived type, allowing us to rethrow the exception properly for
  //! users that prefer convenience. These explicitly named exception types can
- //! therefore be handled without any peformance penalty. `exception_wrapper` is
+ //! therefore be handled without any performance penalty. `exception_wrapper` is
  //! also flexible enough to accept any type. If a caught exception is not of an
  //! explicitly named type, then `std::exception_ptr` is used to preserve the
  //! exception state. For performance sensitive applications, the accessor
@@ -528,7 +528,7 @@ class exception_wrapper final {
  //! \endcode
  //! In the above example, any exception _not_ derived from `std::exception`
  //! will be propagated. To specify a catch-all clause, pass a lambda that
- //! takes a C-style elipses, as in:
+ //! takes a C-style ellipses, as in:
  //! \code
  //! ew.handle(/*...* /, [](...) { /* handle unknown exception */ } )
  //! \endcode
...
@@ -462,7 +462,7 @@ class fbvector {
  // uninitialized_copy
  // it is possible to add an optimization for the case where
- // It = move(T*) and IsRelocatable<T> and Is0Initiailizable<T>
+ // It = move(T*) and IsRelocatable<T> and Is0Initializeable<T>
  // wrappers
  template <typename It>
...
@@ -85,8 +85,8 @@ int dupNoInt(int fd) {
  return int(wrapNoInt(dup, fd));
  }
- int dup2NoInt(int oldfd, int newfd) {
- return int(wrapNoInt(dup2, oldfd, newfd));
+ int dup2NoInt(int oldFd, int newFd) {
+ return int(wrapNoInt(dup2, oldFd, newFd));
  }
  int fdatasyncNoInt(int fd) {
...
@@ -43,7 +43,7 @@ int openNoInt(const char* name, int flags, mode_t mode = 0666);
  int closeNoInt(int fd);
  int closeNoInt(NetworkSocket fd);
  int dupNoInt(int fd);
- int dup2NoInt(int oldfd, int newfd);
+ int dup2NoInt(int oldFd, int newFd);
  int fsyncNoInt(int fd);
  int fdatasyncNoInt(int fd);
  int ftruncateNoInt(int fd, off_t len);
...
@@ -91,7 +91,7 @@ constexpr poly_table<Deg> make_poly_table() {
  uint64_t table[8][256][poly_size(Deg)] = {};
  // table[i][q] is Q(X) * X^(k+8*i) mod P(X),
  // where k is the number of bits in the fingerprint (and deg(P)) and
- // Q(X) = q7*X^7 + q6*X^6 + ... + q1*X + q0 is a degree-7 polyonomial
+ // Q(X) = q7*X^7 + q6*X^6 + ... + q1*X + q0 is a degree-7 polynomial
  // whose coefficients are the bits of q.
  for (uint16_t x = 0; x < 256; x++) {
  FingerprintPolynomial<Deg> t;
...
@@ -153,7 +153,7 @@ class Fingerprint {
  constexpr static int size() { return detail::poly_size(BITS); }
  /**
- * Write the computed fingeprint to an array of size() uint64_t's.
+ * Write the computed fingerprint to an array of size() uint64_t's.
  * For Fingerprint<64>, size()==1; we write 64 bits in out[0]
  * For Fingerprint<96>, size()==2; we write 64 bits in out[0] and
  * the most significant 32 bits of out[1]
...
@@ -61,7 +61,7 @@ class FormatterTag {};
  */
  /* BaseFormatter class.
- * Overridable behaviours:
+ * Overridable behaviors:
  * You may override the actual formatting of positional parameters in
  * `doFormatArg`. The Formatter class provides the default implementation.
  *
...
@@ -403,7 +403,7 @@ uint8_t IPAddressV6::getMulticastScope() const {
  }
  IPAddressV6 IPAddressV6::getSolicitedNodeAddress() const {
- // Solicted node addresses must be constructed from unicast (or anycast)
+ // Solicited node addresses must be constructed from unicast (or anycast)
  // addresses
  DCHECK(!isMulticast());
...
@@ -65,7 +65,7 @@ class Indestructible final {
  /**
  * Constructor accepting a single argument by forwarding reference, this
- * allows using list initialzation without the overhead of things like
+ * allows using list initialization without the overhead of things like
  * in_place, etc and also works with std::initializer_list constructors
  * which can't be deduced, the default parameter helps there.
  *
...
@@ -563,7 +563,7 @@ class MPMCQueue<T, Atom, true>
  this->dstate_.store((ticket << kSeqlockBits) + (2 * (index + 1)));
  return true;
  } else { // failed to acquire seqlock
- // Someone acaquired the seqlock. Go back to the caller and get
+ // Someone acquired the seqlock. Go back to the caller and get
  // up-to-date info.
  return true;
  }
@@ -999,7 +999,7 @@ class MPMCQueueBase<Derived<T, Atom, Dynamic>> {
  Atom<int> dstride_;
  };
- /// The following two memebers are used by dynamic MPMCQueue.
+ /// The following two members are used by dynamic MPMCQueue.
  /// Ideally they should be in MPMCQueue<T,Atom,true>, but we get
  /// better cache locality if they are in the same cache line as
  /// dslots_ and dstride_.
...
@@ -264,7 +264,7 @@ Iterator<typename Container::iterator> end(Container& c) {
  *
  * Converts a sequence of Node into a sequence of its underlying elements
  * (with enough functionality to make it useful, although it's not fully
- * compatible with the STL containre requiremenets, see below).
+ * compatible with the STL container requirements, see below).
  *
  * Provides iterators (of the same category as those of the underlying
  * container), size(), front(), back(), push_back(), pop_back(), and const /
...
@@ -297,7 +297,7 @@ template <std::size_t N, class I, typename... As>
  * \tparam T The (unqualified) type to which to cast the `Poly` object.
  * \tparam Poly The type of the `Poly` object.
  * \param that The `Poly` object to be cast.
- * \return A reference to the `T` object stored in or refered to by `that`.
+ * \return A reference to the `T` object stored in or referred to by `that`.
  * \throw BadPolyAccess if `that` is empty.
  * \throw BadPolyCast if `that` does not store or refer to an object of type
  * `T`.
@@ -545,7 +545,7 @@ struct PolyVal : PolyImpl<I> {
  ////////////////////////////////////////////////////////////////////////////////
  /**
  * The implementation of `Poly` for when the interface type is
- * reference-quelified, like `Poly<SemuRegular &>`.
+ * reference-qualified, like `Poly<SemiRegular &>`.
  */
  template <class I>
  struct PolyRef : private PolyImpl<I> {
@@ -706,7 +706,7 @@ using PolyValOrRef = If<std::is_reference<I>::value, PolyRef<I>, PolyVal<I>>;
  * \li A *mapping* from a concrete type to a set of member function bindings.
  *
  * Below is a (heavily commented) example of a simple implementation of a
- * `std::function`-like polymorphic wrapper. Its interface has only a simgle
+ * `std::function`-like polymorphic wrapper. Its interface has only a single
  * member function: `operator()`
  *
  * // An interface for a callable object of a particular signature, Fun
...
@@ -905,7 +905,7 @@ class Range {
  }
  /**
- * Replaces all occurences of 'source' with 'dest'. Returns number
+ * Replaces all occurrences of 'source' with 'dest'. Returns number
  * of replacements made. Source and dest have to have the same
  * length. Throws if the lengths are different. If 'source' is a
  * pattern that is overlapping with itself, we perform sequential
@@ -942,7 +942,7 @@ class Range {
  /**
  * Splits this `Range` `[b, e)` in the position `i` dictated by the next
- * occurence of `delimiter`.
+ * occurrence of `delimiter`.
  *
  * Returns a new `Range` `[b, i)` and adjusts this range to start right after
  * the delimiter's position. This range will be empty if the delimiter is not
...
@@ -1329,7 +1329,7 @@ class SharedMutexImpl : std::conditional_t<
  assert(state < state + kIncrHasS);
  }
- // It is straightfoward to make a token-less lock_shared() and
+ // It is straightforward to make a token-less lock_shared() and
  // unlock_shared() either by making the token-less version always use
  // INLINE_SHARED mode or by removing the token version. Supporting
  // deferred operation for both types is trickier than it appears, because
...
@@ -50,12 +50,12 @@ static void singleton_hs_init_weak(int* argc, char** argv[])
  SingletonVault::Type SingletonVault::defaultVaultType() {
  #if FOLLY_SINGLETON_HAVE_DLSYM
  bool isPython = dlsym(RTLD_DEFAULT, "Py_Main");
- bool isHaskel =
+ bool isHaskell =
  detail::singleton_hs_init_weak || dlsym(RTLD_DEFAULT, "hs_init");
  bool isJVM = dlsym(RTLD_DEFAULT, "JNI_GetCreatedJavaVMs");
  bool isD = dlsym(RTLD_DEFAULT, "_d_run_main");
- return isPython || isHaskel || isJVM || isD ? Type::Relaxed : Type::Strict;
+ return isPython || isHaskell || isJVM || isD ? Type::Relaxed : Type::Strict;
  #else
  return Type::Relaxed;
  #endif
...
@@ -187,7 +187,7 @@ class SingletonVault;
  namespace detail {
  // A TypeDescriptor is the unique handle for a given singleton. It is
- // a combinaiton of the type and of the optional name, and is used as
+ // a combination of the type and of the optional name, and is used as
  // a key in unordered_maps.
  class TypeDescriptor {
  public:
...
@@ -233,7 +233,7 @@ const char* SocketAddress::getFamilyNameFrom(
  void SocketAddress::setFromPath(StringPiece path) {
  // Before we touch storage_, check to see if the length is too big.
  // Note that "storage_.un.addr->sun_path" may not be safe to evaluate here,
- // but sizeof() just uses its type, and does't evaluate it.
+ // but sizeof() just uses its type, and doesn't evaluate it.
  if (path.size() > sizeof(storage_.un.addr->sun_path)) {
  throw std::invalid_argument(
  "socket path too large to fit into sockaddr_un");
@@ -434,7 +434,7 @@ void SocketAddress::setPort(uint16_t port) {
  void SocketAddress::convertToIPv4() {
  if (!tryConvertToIPv4()) {
  throw std::invalid_argument(
- "convertToIPv4() called on an addresse that is "
+ "convertToIPv4() called on an address that is "
  "not an IPv4-mapped address");
  }
  }
...
@@ -556,7 +556,7 @@ void humanify(const String1& input, String2& output) {
  // hexlify doubles a string's size; backslashify can potentially
  // explode it by 4x. Now, the printable range of the ascii
  // "spectrum" is around 95 out of 256 values, so a "random" binary
- // string should be around 60% unprintable. We use a 50% hueristic
+ // string should be around 60% unprintable. We use a 50% heuristic
  // here, so if a string is 60% unprintable, then we just use hex
  // output. Otherwise we backslash.
  //
...
@@ -507,7 +507,7 @@ class Subprocess {
  #endif
  };
- // Non-copiable, but movable
+ // Non-copyable, but movable
  Subprocess(const Subprocess&) = delete;
  Subprocess& operator=(const Subprocess&) = delete;
  Subprocess(Subprocess&&) = default;
@@ -907,7 +907,7 @@ class Subprocess {
  /**
  * The child's pipes are logically separate from the process metadata
  * (they may even be kept alive by the child's descendants). This call
- * lets you manage the pipes' lifetime separetely from the lifetime of the
+ * lets you manage the pipes' lifetime separately from the lifetime of the
  * child process.
  *
  * After this call, the Subprocess instance will have no knowledge of
...
@@ -158,7 +158,7 @@ inline in_place_index_tag<I> in_place_index(in_place_index_tag<I> = {}) {
  * class Something {
  * public:
  * explicit Something(int);
- * Something(std::intiializer_list<int>);
+ * Something(std::initializer_list<int>);
  *
  * operator int();
  * };
...
@@ -116,7 +116,7 @@ numbers should be compared against some baseline.
  To support baseline-driven measurements, `folly/Benchmark.h` defines
  `BENCHMARK_RELATIVE`, which works much like `BENCHMARK`, except it
- considers the most recent lexically-ocurring `BENCHMARK` a baseline,
+ considers the most recent lexically-occurring `BENCHMARK` a baseline,
  and fills the "relative" column. Say, for example, we want to use
  front insertion for a vector as a baseline and see how back insertion
  compares with it:
...
@@ -35,7 +35,7 @@ folly::dynamic;` was used):
  map["something"] = 12;
  map["another_something"] = map["something"] * 2;
- // Dynamic objects may be intialized this way
+ // Dynamic objects may be initialized this way
  dynamic map2 = dynamic::object("something", 12)("another_something", 24);
  ```
@@ -139,7 +139,7 @@ here's what it looks like:
  assert(parsed["key2"][0] == false);
  assert(parsed["key2"][1] == nullptr);
- // Building the same document programatically.
+ // Building the same document programmatically.
  dynamic sonOfAJ = dynamic::object
  ("key", 12)
  ("key2", dynamic::array(false, nullptr, true, "yay"));
...
@@ -64,7 +64,7 @@ folly::Function<int(int) const> uf4 = Foo();
  folly::Function<int(int, int) const> uf5 = Foo();
  // uf5() returns 5
  ```
- If `cfoo` is a const-reference to a `Foo` object, `cfoo(int)` returns 4. If `foo` is a non-const reference to a `Foo` object, `foo(int)` returns 3. Normal const-to-non-const conversion behaviour applies: if you call `foo(int, int)` it will return 5: a non-const reference will invoke the const method if no non-const method is defined. Which leads to the following behaviour:
+ If `cfoo` is a const-reference to a `Foo` object, `cfoo(int)` returns 4. If `foo` is a non-const reference to a `Foo` object, `foo(int)` returns 3. Normal const-to-non-const conversion behavior applies: if you call `foo(int, int)` it will return 5: a non-const reference will invoke the const method if no non-const method is defined. Which leads to the following behavior:
  ``` Cpp
  folly::Function<int(int, int)> uf5nc = Foo();
  // uf5nc() returns 5
...
@@ -175,7 +175,7 @@ Future<Unit> fut3 = std::move(fut2)
  That example is a little contrived but the idea is that you can transform a result from one type to another, potentially in a chain, and unhandled errors propagate. Of course, the intermediate variables are optional.
- Using `.thenValue` or `.thenTry` to add callbacks is idiomatic. It brings all the code into one place, which avoids callback hell. `.thenValue` appends a continuation that takes `T&&` for some `Future<T>` and an error bypasses the callback and is passed to the next, `thenTry` takes a callback taking `folly::Try<T>` which encapsulates both value and exception. `thenError(tag_t<ExceptionType>{},...` will bypass a value and only run if there is an exception, the `ExceptionType` template parameter to the tag type allows filtering by exception type; `tag_t<ExceptionType>{}` is optional and if no tag is passed passed the function will be parameterised with a `folly::exception_wrapper`. For C++17 there is a global inline variable and `folly::tag<ExceptionType>` may be passed directly without explicit construction.
+ Using `.thenValue` or `.thenTry` to add callbacks is idiomatic. It brings all the code into one place, which avoids callback hell. `.thenValue` appends a continuation that takes `T&&` for some `Future<T>` and an error bypasses the callback and is passed to the next, `thenTry` takes a callback taking `folly::Try<T>` which encapsulates both value and exception. `thenError(tag_t<ExceptionType>{},...` will bypass a value and only run if there is an exception, the `ExceptionType` template parameter to the tag type allows filtering by exception type; `tag_t<ExceptionType>{}` is optional and if no tag is passed passed the function will be parameterized with a `folly::exception_wrapper`. For C++17 there is a global inline variable and `folly::tag<ExceptionType>` may be passed directly without explicit construction.
...
@@ -62,7 +62,7 @@ highest-order unused 16 bits in a pointer as discriminator. So
  #### [`dynamic.h`](Dynamic.md)
  Dynamically-typed object, created with JSON objects in mind. `DynamicConverter.h` is
- a utility for effeciently converting from a `dynamic` to a more concrete structure when
+ a utility for efficiently converting from a `dynamic` to a more concrete structure when
  the scheme is known (e.g. json -> `map<int,int>`).
  #### `EvictingCacheMap.h`
...
@@ -9,7 +9,7 @@ Because there is no memory overhead, an arbitrarily large number of locks can be
  used to minimize lock contention with no memory penalty. Additionally,
  excellent cache performance is obtained by storing the lock inline with the
  pointer (no additional cache miss or false sharing). Finally, because it uses a
- simple spinlock mechanism, the cost of aqcuiring an uncontended lock is minimal.
+ simple spinlock mechanism, the cost of acquiring an uncontended lock is minimal.
  ### Usage
  ***
...
@@ -498,7 +498,7 @@ problem is called reader starvation.
  One solution is to use a shared mutex type with read priority, such as
  `folly::SharedMutexReadPriority`. That can introduce less blocking under
- contention to the other threads attemping to acquire a shared lock to do the
+ contention to the other threads attempting to acquire a shared lock to do the
  first check. However, that may backfire and cause threads which are attempting
  to acquire a unique lock (for the second check) to stall, waiting for a moment
  in time when there are no shared locks held on the mutex, a moment in time that
@@ -520,7 +520,7 @@ checks rather than blocking or being blocked by them.
  The example would then look like:
  ``` Cpp
- struct MyObect {
+ struct MyObject {
  bool isUpdateRequired() const;
  void doUpdate();
  };
...
@@ -1309,7 +1309,7 @@ inline std::string dynamic_view::move_string_or(Stringish&& val) {
  //////////////////////////////////////////////////////////////////////
- // Secialization of FormatValue so dynamic objects can be formatted
+ // Specialization of FormatValue so dynamic objects can be formatted
  template <>
  class FormatValue<dynamic> {
  public:
...
@@ -753,7 +753,7 @@ size_t firstEscapableInWord(T s, const serialization_opts& opts) {
  };
  auto isChar = [&](uint8_t c) {
- // A byte is == c iff it is 0 if xored with c.
+ // A byte is == c iff it is 0 if xor'd with c.
  return isLess(s ^ (kOnes * c), 1);
  };
...
@@ -439,7 +439,7 @@ struct small_vector_base {
  /*
  * Now inherit from them all. This is done in such a convoluted
- * way to make sure we get the empty base optimizaton on all these
+ * way to make sure we get the empty base optimization on all these
  * types to keep sizeof(small_vector<>) minimal.
  */
  typedef boost::totally_ordered1<
...
@@ -29,7 +29,7 @@ using monotonic_clock = std::chrono::steady_clock;
  /**
  * Calculates the duration of time intervals. Prefer this over directly using
- * monotonic clocks. It is very lightweight and provides convenient facilitles
+ * monotonic clocks. It is very lightweight and provides convenient facilities
  * to avoid common pitfalls.
  *
  * There are two type aliases that should be preferred over instantiating this
...