Commit 27494a20 authored by Jordan DeLong

Pull from FB rev 63ce89e2f2301e6bba44a111cc7d4218022156f6

parent 59de307e
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_ARENA_H_
#error This file may only be included from Arena.h
#endif
// Implementation of Arena.h functions
namespace folly {
// Allocate a Block with at least `size` usable bytes.  When allowSlack is
// set, round the request up to the allocator's preferred ("good") size so
// the extra bytes can be handed out to later small allocations.
// Returns the block and the number of usable bytes after its header.
template <class Alloc>
std::pair<typename Arena<Alloc>::Block*, size_t>
Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) {
  size_t totalSize = sizeof(Block) + size;
  if (allowSlack) {
    totalSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, totalSize);
  }
  void* memory = alloc.allocate(totalSize);
  assert(isAligned(memory));
  Block* block = new (memory) Block();
  return std::make_pair(block, totalSize - sizeof(Block));
}
// Destroy the block header, then return the raw memory to the allocator.
// The order matters: `this` is invalid once alloc.deallocate() returns.
template <class Alloc>
void Arena<Alloc>::Block::deallocate(Alloc& alloc) {
this->~Block();
alloc.deallocate(this);
}
// Slow path of allocate(): the current block cannot satisfy `size` bytes.
// `size` has already been rounded up to maxAlign by the caller.
template <class Alloc>
void* Arena<Alloc>::allocateSlow(size_t size) {
  std::pair<Block*, size_t> allocated;
  char* result;
  if (size > minBlockSize()) {
    // Oversized request: give it a dedicated block at the *back* of the
    // list so it never serves later small allocations, and leave ptr_/end_
    // pointing into whatever normal block (or none) they already reference.
    allocated = Block::allocate(alloc(), size, false);
    result = allocated.first->start();
    blocks_.push_back(*allocated.first);
  } else {
    // Normal request: open a fresh standard-size block, carve `size` bytes
    // off its front, and make the remainder the new bump-allocation region.
    allocated = Block::allocate(alloc(), minBlockSize(), true);
    result = allocated.first->start();
    blocks_.push_front(*allocated.first);
    ptr_ = result + size;
    end_ = result + allocated.second;
  }
  assert(allocated.second >= size);
  return result;
}
// Steal every block owned by `other`; afterwards `other` owns nothing but
// stays usable (its next allocation simply opens a new block).
template <class Alloc>
void Arena<Alloc>::merge(Arena<Alloc>&& other) {
  // Constant time thanks to cache_last<true> on the intrusive slist.
  blocks_.splice_after(blocks_.before_begin(), other.blocks_);
  other.blocks_.clear();
  other.ptr_ = nullptr;
  other.end_ = nullptr;
}
// Free every block, and with them every allocation ever made from this
// arena.
template <class Alloc>
Arena<Alloc>::~Arena() {
  Alloc& allocator = alloc();
  while (!blocks_.empty()) {
    // pop_front_and_dispose unlinks the block before invoking the disposer,
    // so releasing the block's memory inside the callback is safe.
    blocks_.pop_front_and_dispose([&allocator] (Block* b) {
      b->deallocate(allocator);
    });
  }
}
} // namespace folly
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_ARENA_H_
#define FOLLY_ARENA_H_
#include <cassert>
#include <utility>
#include <limits>
#include <boost/intrusive/slist.hpp>
#include "folly/Likely.h"
#include "folly/Malloc.h"
namespace folly {
/**
* Simple arena: allocate memory which gets freed when the arena gets
* destroyed.
*
* The arena itself allocates memory using a custom allocator which provides
* the following interface (same as required by StlAllocator in StlAllocator.h)
*
* void* allocate(size_t size);
* Allocate a block of size bytes, properly aligned to the maximum
* alignment required on your system; throw std::bad_alloc if the
* allocation can't be satisfied.
*
* void deallocate(void* ptr);
* Deallocate a previously allocated block.
*
* You may also specialize ArenaAllocatorTraits for your allocator type to
* provide:
*
* size_t goodSize(const Allocator& alloc, size_t size) const;
* Return a size (>= the provided size) that is considered "good" for your
* allocator (for example, if your allocator allocates memory in 4MB
* chunks, size should be rounded up to 4MB). The provided value is
* guaranteed to be rounded up to a multiple of the maximum alignment
* required on your system; the returned value must be also.
*
* An implementation that uses malloc() / free() is defined below, see
* SysAlloc / SysArena.
*/
template <class Alloc> struct ArenaAllocatorTraits;
template <class Alloc>
class Arena {
 public:
  explicit Arena(const Alloc& alloc,
                 size_t minBlockSize = kDefaultMinBlockSize)
    : allocAndSize_(alloc, minBlockSize),
      ptr_(nullptr),
      end_(nullptr) {
  }

  ~Arena();

  // Allocate `size` bytes, aligned to maxAlign, from the arena.  The memory
  // is reclaimed only when the arena itself is destroyed.
  void* allocate(size_t size) {
    size = roundUp(size);

    // end_ - ptr_ is a signed ptrdiff_t; comparing it to the unsigned
    // `size` without a cast would implicitly convert it.  The cast is safe
    // because end_ >= ptr_ always holds (both null, or both into a block).
    if (LIKELY(static_cast<size_t>(end_ - ptr_) >= size)) {
      // Fast path: there's enough room in the current block
      char* r = ptr_;
      ptr_ += size;
      assert(isAligned(r));
      return r;
    }

    // Not enough room in the current block
    void* r = allocateSlow(size);
    assert(isAligned(r));
    return r;
  }

  // No-op: individual deallocation is unsupported by design; everything is
  // freed en masse in ~Arena().
  void deallocate(void* p) {
    // Deallocate? Never!
  }

  // Transfer ownership of all memory allocated from "other" to "this".
  void merge(Arena&& other);

 private:
  // not copyable
  Arena(const Arena&) = delete;
  Arena& operator=(const Arena&) = delete;

  // movable
  Arena(Arena&&) = default;
  Arena& operator=(Arena&&) = default;

  struct Block;
  typedef boost::intrusive::slist_member_hook<
    boost::intrusive::tag<Arena>> BlockLink;

  // A Block is a small header immediately followed by its usable storage,
  // which begins at start() (just past the header).
  struct Block {
    BlockLink link;

    // Allocate a block with at least size bytes of storage.
    // If allowSlack is true, allocate more than size bytes if convenient
    // (via ArenaAllocatorTraits::goodSize()) as we'll try to pack small
    // allocations in this block.
    static std::pair<Block*, size_t> allocate(
        Alloc& alloc, size_t size, bool allowSlack);
    void deallocate(Alloc& alloc);

    char* start() {
      return reinterpret_cast<char*>(this + 1);
    }

   private:
    // Blocks are only created/destroyed via allocate()/deallocate().
    Block() { }
    ~Block() { }
  } __attribute__((aligned));
  // This should be alignas(std::max_align_t) but neither alignas nor
  // max_align_t are supported by gcc 4.6.2.

 public:
  static constexpr size_t kDefaultMinBlockSize = 4096 - sizeof(Block);

 private:
  static constexpr size_t maxAlign = alignof(Block);
  static constexpr bool isAligned(uintptr_t address) {
    return (address & (maxAlign - 1)) == 0;
  }
  static bool isAligned(void* p) {
    return isAligned(reinterpret_cast<uintptr_t>(p));
  }

  // Round up size so it's properly aligned
  static constexpr size_t roundUp(size_t size) {
    return (size + maxAlign - 1) & ~(maxAlign - 1);
  }

  // cache_last<true> makes the list keep a pointer to the last element, so we
  // have push_back() and constant time splice_after()
  typedef boost::intrusive::slist<
    Block,
    boost::intrusive::member_hook<Block, BlockLink, &Block::link>,
    boost::intrusive::constant_time_size<false>,
    boost::intrusive::cache_last<true>> BlockList;

  void* allocateSlow(size_t size);

  // Empty member optimization: package Alloc with a non-empty member
  // in case Alloc is empty (as it is in the case of SysAlloc).
  struct AllocAndSize : public Alloc {
    explicit AllocAndSize(const Alloc& a, size_t s)
      : Alloc(a), minBlockSize(s) {
    }

    size_t minBlockSize;
  };

  size_t minBlockSize() const {
    return allocAndSize_.minBlockSize;
  }
  Alloc& alloc() { return allocAndSize_; }
  const Alloc& alloc() const { return allocAndSize_; }

  AllocAndSize allocAndSize_;
  BlockList blocks_;
  char* ptr_;  // next free byte in the current block (or nullptr)
  char* end_;  // one past the last usable byte of the current block
};
/**
* By default, don't pad the given size.
*/
template <class Alloc>
struct ArenaAllocatorTraits {
  // Identity: an arbitrary allocator gets no padding by default.
  static size_t goodSize(const Alloc& alloc, size_t size) { return size; }
};
/**
* Arena-compatible allocator that calls malloc() and free(); see
* goodMallocSize() in Malloc.h for goodSize().
*/
class SysAlloc {
 public:
  // Allocate `size` bytes via malloc(); throws std::bad_alloc on failure.
  void* allocate(size_t size) {
    void* mem = malloc(size);
    if (!mem) {
      throw std::bad_alloc();
    }
    return mem;
  }

  // Release memory previously obtained from allocate().
  void deallocate(void* p) {
    free(p);
  }
};
// Round requests up to the size malloc() would actually hand back (see
// goodMallocSize() in folly/Malloc.h), so the arena can use the slack.
template <>
struct ArenaAllocatorTraits<SysAlloc> {
static size_t goodSize(const SysAlloc& alloc, size_t size) {
return goodMallocSize(size);
}
};
/**
* Arena that uses the system allocator (malloc / free)
*/
// Convenience alias: an Arena backed by the system allocator.
class SysArena : public Arena<SysAlloc> {
public:
explicit SysArena(size_t minBlockSize = kDefaultMinBlockSize)
: Arena<SysAlloc>(SysAlloc(), minBlockSize) {
}
};
} // namespace folly
#include "folly/Arena-inl.h"
#endif /* FOLLY_ARENA_H_ */
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @author: Xin Liu <xliux@fb.com>
#ifndef FOLLY_CONCURRENTSKIPLIST_INL_H_
#define FOLLY_CONCURRENTSKIPLIST_INL_H_
#include <algorithm>
#include <climits>
#include <cmath>
#include <boost/random.hpp>
#include <glog/logging.h>
#include "folly/SmallLocks.h"
#include "folly/ThreadLocal.h"
namespace folly { namespace detail {
template<typename ValT, typename NodeT> class csl_iterator;
// A node in the concurrent skip list.  Nodes are over-allocated: the
// `height_` forward pointers live in the trailing flexible array skip_[],
// so nodes must be built and torn down via create()/destroy(), never
// new/delete.
template<typename T>
class SkipListNode : boost::noncopyable {
// Bit flags stored in flags_.
enum {
IS_HEAD_NODE = 1,               // sentinel head; data_ is never constructed
MARKED_FOR_REMOVAL = (1 << 1),  // logically deleted, awaiting unlink
FULLY_LINKED = (1 << 2),        // linked at all its layers
};
public:
typedef T value_type;
// Malloc a node with `height` forward pointers and copy-construct `data`
// into it (unless it is the head sentinel, which carries no value).
static SkipListNode* create(int height,
const value_type& data, bool isHead = false) {
DCHECK(height >= 1 && height < 64) << height;
// Header plus one forward pointer per layer.
size_t size = sizeof(SkipListNode) + height * sizeof(SkipListNode*);
// NOTE(review): malloc() result is unchecked; placement-new on nullptr
// is undefined behavior if the allocation fails -- confirm whether a
// null check (e.g. throwing std::bad_alloc) is wanted here.
auto* node = static_cast<SkipListNode*>(malloc(size));
new (node) SkipListNode(height);
node->spinLock_.init();
node->setFlags(0);
if (isHead) {
node->setIsHeadNode();
} else {
new (&(node->data_)) value_type(data);
}
return node;
}
// Inverse of create(): destroy the value (head nodes have none), run the
// node destructor, then release the raw memory.
static void destroy(SkipListNode* node) {
if (!node->isHeadNode()) {
node->data_.~value_type();
}
node->~SkipListNode();
free(node);
}
// Copy flags, value and forward pointers from a shorter node into this
// (taller) replacement node.
// assuming lock acquired
SkipListNode* promoteFrom(const SkipListNode* node) {
DCHECK(node != nullptr && height_ > node->height_);
setFlags(node->getFlags());
if (!isHeadNode()) {
new (&(data_)) value_type(node->data());
}
for (int i = 0; i < node->height_; ++i) {
setSkip(i, node->skip(i));
}
return this;
}
// Successor at the given layer (layer 0 is the full linked list).
inline SkipListNode* skip(int layer) const {
DCHECK_LT(layer, height_);
return skip_[layer].load(std::memory_order_consume);
}
// next valid node as in the linked list
SkipListNode* next() {
SkipListNode* node;
// Skip over nodes that are logically deleted but not yet unlinked.
for (node = skip(0);
(node != nullptr && node->markedForRemoval());
node = node->skip(0)) {}
return node;
}
// Publish `next` as the successor at layer h; the release store pairs
// with the consume load in skip().
void setSkip(uint8_t h, SkipListNode* next) {
DCHECK_LT(h, height_);
skip_[h].store(next, std::memory_order_release);
}
value_type& data() { return data_; }
const value_type& data() const { return data_; }
int maxLayer() const { return height_ - 1; }
int height() const { return height_; }
// RAII guard over the per-node spin lock.
std::unique_lock<MicroSpinLock> acquireGuard() {
return std::unique_lock<MicroSpinLock>(spinLock_);
}
bool fullyLinked() const { return getFlags() & FULLY_LINKED; }
bool markedForRemoval() const { return getFlags() & MARKED_FOR_REMOVAL; }
bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }
void setIsHeadNode() {
setFlags(getFlags() | IS_HEAD_NODE);
}
void setFullyLinked() {
setFlags(getFlags() | FULLY_LINKED);
}
void setMarkedForRemoval() {
setFlags(getFlags() | MARKED_FOR_REMOVAL);
}
private:
// Private: only destroy() may delete nodes.  Destroys the
// placement-constructed atomics in the trailing array.
~SkipListNode() {
for (uint8_t i = 0; i < height_; ++i) {
skip_[i].~atomic();
}
}
// Private: only create() may build nodes.  Placement-constructs the
// `height` atomic forward pointers that follow the object in memory.
explicit SkipListNode(uint8_t height) : height_(height) {
for (uint8_t i = 0; i < height_; ++i) {
new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
}
}
uint16_t getFlags() const {
return flags_.load(std::memory_order_consume);
}
void setFlags(uint16_t flags) {
flags_.store(flags, std::memory_order_release);
}
// TODO(xliu): on x86_64, it's possible to squeeze these into
// skip_[0] to maybe save 8 bytes depending on the data alignments.
// NOTE: currently this is x86_64 only anyway, due to the
// MicroSpinLock.
std::atomic<uint16_t> flags_;
const uint8_t height_;
MicroSpinLock spinLock_;
value_type data_;
// Flexible array of per-layer forward pointers; must be the last member.
std::atomic<SkipListNode*> skip_[0];
};
// Draws random node heights for the skip list (skip probability 1/e) and
// precomputes, per height, the list size at which that height is expected.
class SkipListRandomHeight {
enum { kMaxHeight = 64 };
public:
// make it a singleton.
static SkipListRandomHeight *instance() {
static SkipListRandomHeight instance_;
return &instance_;
}
// Random height in [1, maxHeight], drawn from the cumulative distribution
// precomputed in lookupTable_.
int getHeight(int maxHeight) const {
DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!";
double p = randomProb();
for (int i = 0; i < maxHeight; ++i) {
if (p < lookupTable_[i]) {
return i + 1;
}
}
return maxHeight;
}
// Expected number of elements a list of the given height accommodates
// before it should grow another layer.
size_t getSizeLimit(int height) const {
DCHECK_LT(height, kMaxHeight);
return sizeLimitTable_[height];
}
private:
SkipListRandomHeight() { initLookupTable(); }
// Fill lookupTable_ with cumulative height probabilities and
// sizeLimitTable_ with e^height, saturated at SIZE_MAX.
void initLookupTable() {
// set skip prob = 1/E
static const double kProbInv = exp(1);
static const double kProb = 1.0 / kProbInv;
static const size_t kMaxSizeLimit = std::numeric_limits<size_t>::max();
double sizeLimit = 1;
double p = lookupTable_[0] = (1 - kProb);
sizeLimitTable_[0] = 1;
for (int i = 1; i < kMaxHeight - 1; ++i) {
p *= kProb;
sizeLimit *= kProbInv;
lookupTable_[i] = lookupTable_[i - 1] + p;
sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit ?
kMaxSizeLimit :
static_cast<size_t>(sizeLimit);
}
// Last slot is a catch-all so getHeight() always terminates.
lookupTable_[kMaxHeight - 1] = 1;
sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
}
// Uniform random double in [0, 1); one RNG per thread to avoid locking.
static double randomProb() {
static ThreadLocal<boost::lagged_fibonacci2281> rng_;
return (*rng_)();
}
double lookupTable_[kMaxHeight];    // cumulative P(height <= i + 1)
size_t sizeLimitTable_[kMaxHeight];
};
}}
#endif // FOLLY_CONCURRENTSKIPLIST_INL_H_
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define FOLLY_CONV_INTERNAL
#include "folly/Conv.h"
namespace folly {
namespace detail {
// Lookup tables for fast two-digit integer-to-ASCII conversion:
// for i in 0..99, digit1[i] is the tens digit and digit2[i] the ones digit.
extern const char digit1[101] =
"00000000001111111111222222222233333333334444444444"
"55555555556666666666777777777788888888889999999999";
extern const char digit2[101] =
"01234567890123456789012345678901234567890123456789"
"01234567890123456789012345678901234567890123456789";
// Decimal spelling of each unsigned type's maximum value; the width of
// unsigned long is selected at preprocessing time via __SIZEOF_LONG__.
template <> const char *const MaxString<bool>::value = "true";
template <> const char *const MaxString<uint8_t>::value = "255";
template <> const char *const MaxString<uint16_t>::value = "65535";
template <> const char *const MaxString<uint32_t>::value = "4294967295";
#if __SIZEOF_LONG__ == 4
template <> const char *const MaxString<unsigned long>::value =
"4294967295";
#else
template <> const char *const MaxString<unsigned long>::value =
"18446744073709551615";
#endif
static_assert(sizeof(unsigned long) >= 4,
"Wrong value for MaxString<unsigned long>::value,"
" please update.");
template <> const char *const MaxString<unsigned long long>::value =
"18446744073709551615";
static_assert(sizeof(unsigned long long) >= 8,
"Wrong value for MaxString<unsigned long long>::value"
", please update.");
/**
 * Case-insensitively match the keyword `value` (which must already be
 * lowercase) against the start of [*b, *b + len).  On a full match, advance
 * *b past the keyword and return true; otherwise leave *b untouched and
 * return false.
 */
inline bool bool_str_cmp(const char** b, size_t len, const char* value) {
  // Can't use strncasecmp, since we want to ensure that the full value
  // matches (a mere prefix of the keyword must not be accepted).
  const char* p = *b;
  const char* e = *b + len;
  const char* v = value;
  while (*v != '\0') {
    // Cast to unsigned char: passing a negative plain char to tolower()
    // is undefined behavior.
    if (p == e || tolower(static_cast<unsigned char>(*p)) != *v) {
      return false;
    }
    ++p;
    ++v;
  }

  *b = p;
  return true;
}
// Parse a boolean from *src, skipping leading whitespace.  Accepted forms:
// the integers 0/1, yes/no, true/false, on/off, and a single y/n (any
// case).  On success advances *src past the consumed characters and
// returns the value; invalid input is reported via FOLLY_RANGE_CHECK.
bool str_to_bool(StringPiece* src) {
auto b = src->begin(), e = src->end();
for (;; ++b) {
FOLLY_RANGE_CHECK(b < e,
"No non-whitespace characters found in input string");
// NOTE(review): isspace(*b) is handed a plain char; a negative char
// would be UB -- consider casting to unsigned char.
if (!isspace(*b)) break;
}
bool result;
size_t len = e - b;
switch (*b) {
case '0':
case '1': {
// Attempt to parse the value as an integer
StringPiece tmp(*src);
uint8_t value = to<uint8_t>(&tmp);
// Only accept 0 or 1
FOLLY_RANGE_CHECK(value <= 1,
"Integer overflow when parsing bool: must be 0 or 1");
b = tmp.begin();
result = (value == 1);
break;
}
case 'y':
case 'Y':
result = true;
if (!bool_str_cmp(&b, len, "yes")) {
++b; // accept the single 'y' character
}
break;
case 'n':
case 'N':
result = false;
if (!bool_str_cmp(&b, len, "no")) {
++b; // accept a single 'n'
}
break;
case 't':
case 'T':
result = true;
if (!bool_str_cmp(&b, len, "true")) {
++b; // accept a single 't'
}
break;
case 'f':
case 'F':
result = false;
if (!bool_str_cmp(&b, len, "false")) {
++b; // accept a single 'f'
}
break;
case 'o':
case 'O':
// No single-letter form here: a lone 'o' is ambiguous (on vs off).
if (bool_str_cmp(&b, len, "on")) {
result = true;
} else if (bool_str_cmp(&b, len, "off")) {
result = false;
} else {
FOLLY_RANGE_CHECK(false, "Invalid value for bool");
}
break;
default:
FOLLY_RANGE_CHECK(false, "Invalid value for bool");
}
// Leave the unconsumed remainder in *src.
src->assign(b, e);
return result;
}
} // namespace detail
} // namespace folly
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Discriminated pointer: Type-safe pointer to one of several types.
*
* Similar to boost::variant, but has no space overhead over a raw pointer, as
* it relies on the fact that (on x86_64) there are 16 unused bits in a
* pointer.
*
* @author Tudor Bosman (tudorb@fb.com)
*/
#ifndef FOLLY_DISCRIMINATEDPTR_H_
#define FOLLY_DISCRIMINATEDPTR_H_
#include <limits>
#include <stdexcept>
#include <glog/logging.h>
#include "folly/Likely.h"
#include "folly/detail/DiscriminatedPtrDetail.h"
#ifndef __x86_64__
# error "DiscriminatedPtr is x64-specific code."
#endif
namespace folly {
/**
* Discriminated pointer.
*
* Given a list of types, a DiscriminatedPtr<Types...> may point to an object
* of one of the given types, or may be empty. DiscriminatedPtr is type-safe:
* you may only get a pointer to the type that you put in, otherwise get
* throws an exception (and get_nothrow returns nullptr)
*
* This pointer does not do any kind of lifetime management -- it's not a
* "smart" pointer. You are responsible for deallocating any memory used
* to hold pointees, if necessary.
*/
template <typename... Types>
class DiscriminatedPtr {
// <, not <=, as our indexes are 1-based (0 means "empty")
static_assert(sizeof...(Types) < std::numeric_limits<uint16_t>::max(),
"too many types");
public:
/**
* Create an empty DiscriminatedPtr.
*/
DiscriminatedPtr() : data_(0) {
}
/**
* Create a DiscriminatedPtr that points to an object of type T.
* Fails at compile time if T is not a valid type (listed in Types)
*/
template <typename T>
explicit DiscriminatedPtr(T* ptr) {
set(ptr, typeIndex<T>());
}
/**
* Set this DiscriminatedPtr to point to an object of type T.
* Fails at compile time if T is not a valid type (listed in Types)
*/
template <typename T>
void set(T* ptr) {
set(ptr, typeIndex<T>());
}
/**
* Get a pointer to the object that this DiscriminatedPtr points to, if it is
* of type T. Fails at compile time if T is not a valid type (listed in
* Types), and returns nullptr if this DiscriminatedPtr is empty or points to
* an object of a different type.
*/
template <typename T>
T* get_nothrow() noexcept {
void* p = LIKELY(hasType<T>()) ? ptr() : nullptr;
return static_cast<T*>(p);
}
template <typename T>
const T* get_nothrow() const noexcept {
const void* p = LIKELY(hasType<T>()) ? ptr() : nullptr;
return static_cast<const T*>(p);
}
/**
* Get a pointer to the object that this DiscriminatedPtr points to, if it is
* of type T. Fails at compile time if T is not a valid type (listed in
* Types), and throws std::invalid_argument if this DiscriminatedPtr is empty
* or points to an object of a different type.
*/
template <typename T>
T* get() {
if (UNLIKELY(!hasType<T>())) {
throw std::invalid_argument("Invalid type");
}
return static_cast<T*>(ptr());
}
template <typename T>
const T* get() const {
if (UNLIKELY(!hasType<T>())) {
throw std::invalid_argument("Invalid type");
}
return static_cast<const T*>(ptr());
}
/**
* Return true iff this DiscriminatedPtr is empty.
*/
bool empty() const {
return index() == 0;
}
/**
* Return true iff the object pointed by this DiscriminatedPtr has type T,
* false otherwise. Fails at compile time if T is not a valid type (listed
* in Types...)
*/
template <typename T>
bool hasType() const {
return index() == typeIndex<T>();
}
/**
* Clear this DiscriminatedPtr, making it empty.
*/
void clear() {
data_ = 0;
}
/**
* Assignment operator from a pointer of type T.
*/
template <typename T>
DiscriminatedPtr& operator=(T* ptr) {
set(ptr);
return *this;
}
/**
* Apply a visitor to this object, calling the appropriate overload for
* the type currently stored in DiscriminatedPtr. Throws invalid_argument
* if the DiscriminatedPtr is empty.
*
* The visitor must meet the following requirements:
*
* - The visitor must allow invocation as a function by overloading
* operator(), unambiguously accepting all values of type T* (or const T*)
* for all T in Types...
* - All operations of the function object on T* (or const T*) must
* return the same type (or a static_assert will fire).
*/
template <typename V>
typename dptr_detail::VisitorResult<V, Types...>::type apply(V&& visitor) {
size_t n = index();
if (n == 0) throw std::invalid_argument("Empty DiscriminatedPtr");
return dptr_detail::ApplyVisitor<V, Types...>()(
n, std::forward<V>(visitor), ptr());
}
template <typename V>
typename dptr_detail::ConstVisitorResult<V, Types...>::type apply(V&& visitor)
const {
size_t n = index();
if (n == 0) throw std::invalid_argument("Empty DiscriminatedPtr");
return dptr_detail::ApplyConstVisitor<V, Types...>()(
n, std::forward<V>(visitor), ptr());
}
private:
/**
* Get the 1-based type index of T in Types.
*/
template <typename T>
size_t typeIndex() const {
return dptr_detail::GetTypeIndex<T, Types...>::value;
}
// Type tag: the top 16 bits of data_ (0 = empty).
uint16_t index() const { return data_ >> 48; }
// Pointer payload: the low 48 bits of data_.
void* ptr() const {
return reinterpret_cast<void*>(data_ & ((1ULL << 48) - 1));
}
// Pack pointer and type tag.  The CHECK enforces the x86_64 assumption
// (documented below) that pointers have their top 16 bits clear.
void set(void* p, uint16_t v) {
uintptr_t ip = reinterpret_cast<uintptr_t>(p);
CHECK(!(ip >> 48));
ip |= static_cast<uintptr_t>(v) << 48;
data_ = ip;
}
/**
* We store a pointer in the least significant 48 bits of data_, and a type
* index (0 = empty, or 1-based index in Types) in the most significant 16
* bits. We rely on the fact that pointers have their most significant 16
* bits clear on x86_64.
*/
uintptr_t data_;
};
} // namespace folly
#endif /* FOLLY_DISCRIMINATEDPTR_H_ */
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_BASE_FOREACH_H_
#define FOLLY_BASE_FOREACH_H_
/*
* Interim macros (until we have C++0x range-based for) that simplify
* writing loops of the form
*
* for (Container<data>::iterator i = c.begin(); i != c.end(); ++i) statement
*
* Just replace the above with:
*
* FOR_EACH (i, c) statement
*
* and everything is taken care of.
*
* The implementation is a bit convoluted to make sure the container is
* only evaluated once (however, keep in mind that c.end() is evaluated
* at every pass through the loop). To ensure the container is not
* evaluated multiple times, the macro defines one do-nothing if
* statement to inject the Boolean variable FOR_EACH_state1, and then a
* for statement that is executed only once, which defines the variable
* FOR_EACH_state2 holding a reference to the container being
* iterated. The workhorse is the last loop, which uses the just defined
* reference FOR_EACH_state2.
*
* The state variables are nested so they don't interfere; you can use
* FOR_EACH multiple times in the same scope, either at the same level or
* nested.
*
* In optimized builds g++ eliminates the extra gymnastics entirely and
* generates code 100% identical to the handwritten loop.
*
* This will not work with temporary containers. Consider BOOST_FOREACH
* if you need that.
*/
#include <boost/type_traits/remove_cv.hpp>
namespace folly { namespace detail {
/*
* Simple template for obtaining the unqualified type given a generic
* type T. For example, if T is const int,
* typeof(remove_cv_from_expression(T())) yields int. Due to a bug in
* g++, you need to actually use
* typeof(remove_cv_from_expression(T())) instead of typename
* boost::remove_cv<T>::type. Note that the function
* remove_cv_from_expression is never defined - use it only inside
* typeof.
*/
template <class T> typename boost::remove_cv<T>::type
remove_cv_from_expression(T value);
}}
/*
* Use a "reference reference" (auto&&) to take advantage of reference
* collapsing rules, if available. In this case, FOR_EACH* will work with
* temporary containers.
*/
#define FB_AUTO_RR(x, y) auto&& x = y
/*
* The first AUTO should be replaced by decltype((c)) &
* FOR_EACH_state2, but bugs in gcc prevent that from functioning
* properly. The second pair of parens in decltype is actually
* required, see
* cpp-next.com/archive/2011/04/appearing-and-disappearing-consts-in-c/
*/
// The dummy `if` injects FOR_EACH_state1 without opening a visible scope;
// the middle `for` runs exactly once and binds FOR_EACH_state2 to the
// container, so the container expression is evaluated only once.
#define FOR_EACH(i, c) \
if (bool FOR_EACH_state1 = false) {} else \
for (auto & FOR_EACH_state2 = (c); \
!FOR_EACH_state1; FOR_EACH_state1 = true) \
for (auto i = FOR_EACH_state2.begin(); \
i != FOR_EACH_state2.end(); ++i)
/*
* Similar to FOR_EACH, but iterates the container backwards by
* using rbegin() and rend().
*/
// Same single-evaluation trick as FOR_EACH, using rbegin()/rend() to walk
// the container backwards.
#define FOR_EACH_R(i, c) \
if (bool FOR_EACH_R_state1 = false) {} else \
for (auto & FOR_EACH_R_state2 = (c); \
!FOR_EACH_R_state1; FOR_EACH_R_state1 = true) \
for (auto i = FOR_EACH_R_state2.rbegin(); \
i != FOR_EACH_R_state2.rend(); ++i)
/*
* Similar to FOR_EACH but also allows client to specify a 'count' variable
* to track the current iteration in the loop (starting at zero).
* Similar to python's enumerate() function. For example:
* string commaSeparatedValues = "VALUES: ";
* FOR_EACH_ENUMERATE(ii, value, columns) { // don't want comma at the end!
* commaSeparatedValues += (ii == 0) ? *value : string(",") + *value;
* }
*/
// As FOR_EACH, plus two dummy `if`s: the first declares the private
// counter, the second exposes it to the loop body as the read-only
// reference `count`.  Both conditions are zero, so the else branches
// always run; the counter is bumped together with the iterator.
#define FOR_EACH_ENUMERATE(count, i, c) \
if (bool FOR_EACH_state1 = false) {} else \
for (auto & FOR_EACH_state2 = (c); \
!FOR_EACH_state1; FOR_EACH_state1 = true) \
if (size_t FOR_EACH_privateCount = 0) {} else \
if (const size_t& count = FOR_EACH_privateCount) {} else \
for (auto i = FOR_EACH_state2.begin(); \
i != FOR_EACH_state2.end(); ++FOR_EACH_privateCount, ++i)
/**
* Similar to FOR_EACH, but gives the user the key and value for each entry in
* the container, instead of just the iterator to the entry. For example:
* map<string, string> testMap;
* FOR_EACH_KV(key, value, testMap) {
* cout << key << " " << value;
* }
*/
// FOR_EACH_state1 acts as a small state machine: it is 0 while k and v are
// being bound, each one-pass binding loop bumps it once (so a full body
// pass leaves it at 2), and the outer loop's iteration clause resets 2 to
// 0 and advances; any other value (a `break` from the body) jumps the
// iterator to end() and terminates the loop.
#define FOR_EACH_KV(k, v, c) \
if (unsigned int FOR_EACH_state1 = 0) {} else \
for (FB_AUTO_RR(FOR_EACH_state2, (c)); \
!FOR_EACH_state1; FOR_EACH_state1 = 1) \
for (auto FOR_EACH_state3 = FOR_EACH_state2.begin(); \
FOR_EACH_state3 != FOR_EACH_state2.end(); \
FOR_EACH_state1 == 2 \
? ((FOR_EACH_state1 = 0), ++FOR_EACH_state3) \
: (FOR_EACH_state3 = FOR_EACH_state2.end())) \
for (auto &k = FOR_EACH_state3->first; \
!FOR_EACH_state1; ++FOR_EACH_state1) \
for (auto &v = FOR_EACH_state3->second; \
!FOR_EACH_state1; ++FOR_EACH_state1)
namespace folly { namespace detail {
// Boost 1.48 lacks has_less, we emulate a subset of it here.
template <typename T, typename U>
class HasLess {
struct BiggerThanChar { char unused[2]; };
template <typename C, typename D> static char test(decltype(C() < D())*);
template <typename, typename> static BiggerThanChar test(...);
public:
enum { value = sizeof(test<T, U>(0)) == 1 };
};
/**
* notThereYet helps the FOR_EACH_RANGE macro by opportunistically
* using "<" instead of "!=" whenever available when checking for loop
* termination. This makes e.g. examples such as FOR_EACH_RANGE (i,
* 10, 5) execute zero iterations instead of looping virtually
* forever. At the same time, some iterator types define "!=" but not
* "<". The notThereYet function will dispatch differently for those.
*
* Below is the correct implementation of notThereYet. It is disabled
* because of a bug in Boost 1.46: The filesystem::path::iterator
* defines operator< (via boost::iterator_facade), but that in turn
* uses distance_to which is undefined for that particular
* iterator. So HasLess (defined above) identifies
* boost::filesystem::path as properly comparable with <, but in fact
* attempting to do so will yield a compile-time error.
*
* The else branch (active) contains a conservative
* implementation.
*/
#if 0
template <class T, class U>
typename std::enable_if<HasLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
return iter < end;
}
template <class T, class U>
typename std::enable_if<!HasLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
return iter != end;
}
#else
// Conservative dispatch (see the comment above): operator< is used only
// for arithmetic/arithmetic and pointer/pointer pairs, where it is always
// well-defined; every other pair falls back to operator!=.
template <class T, class U>
struct NotThereYetUsesLess
  : std::integral_constant<bool,
      (std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
      (std::is_pointer<T>::value && std::is_pointer<U>::value)> {};

template <class T, class U>
typename std::enable_if<NotThereYetUsesLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
  return iter < end;
}

template <class T, class U>
typename std::enable_if<!NotThereYetUsesLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
  return iter != end;
}
#endif
/**
* downTo is similar to notThereYet, but in reverse - it helps the
* FOR_EACH_RANGE_R macro.
*/
template <class T, class U>
typename std::enable_if<HasLess<U, T>::value, bool>::type
downTo(T& iter, const U& begin) {
  // Post-decrement on purpose: compare the old value, hand the loop body
  // the newly decremented one.
  return begin < iter--;
}

template <class T, class U>
typename std::enable_if<!HasLess<U, T>::value, bool>::type
downTo(T& iter, const U& begin) {
  // No operator< available: stop exactly at begin, decrementing first so
  // the body sees each value from end-1 down to begin.
  if (iter == begin) {
    return false;
  }
  --iter;
  return true;
}
} }
/*
* Iteration with given limits. end is assumed to be reachable from
* begin. end is evaluated every pass through the loop.
*
* NOTE: The type of the loop variable should be the common type of "begin"
* and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
* to be "long". This is done by getting the type of (true ? begin : end)
*/
// (true ? (begin) : (end)) gives `i` the common type of both limits;
// notThereYet() prefers `<` where safe, so a reversed range (begin > end)
// runs zero iterations instead of looping practically forever.
#define FOR_EACH_RANGE(i, begin, end) \
for (auto i = (true ? (begin) : (end)); \
::folly::detail::notThereYet(i, (end)); \
++i)
/*
* Iteration with given limits. begin is assumed to be reachable from
* end by successive decrements. begin is evaluated every pass through
* the loop.
*
* NOTE: The type of the loop variable should be the common type of "begin"
* and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
* to be "long". This is done by getting the type of (false ? begin : end)
*/
// Reverse counterpart: start at `end` and let downTo() decrement toward
// `begin`; (false ? (begin) : (end)) again selects the common type.
#define FOR_EACH_RANGE_R(i, begin, end) \
for (auto i = (false ? (begin) : (end)); ::folly::detail::downTo(i, (begin));)
#endif
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "folly/Format.h"
namespace folly {
namespace detail {
// Character-classification lookup tables used by FormatArg::initSlow()
// below. They are indexed with static_cast<unsigned char>, so they must
// cover all 256 byte values; entries are presumably Align::INVALID /
// Sign::INVALID for characters that are not align/sign specifiers.
// Defined elsewhere (extern declarations only).
extern const FormatArg::Align formatAlignTable[];
extern const FormatArg::Sign formatSignTable[];
}  // namespace detail
using namespace folly::detail;
/**
 * Slow-path parse of fullArgString into key_ and the format-spec
 * fields (fill, align, sign, basePrefix, width, thousandsSeparator,
 * precision, presentation). The spec grammar, in order, is:
 *   key ':' [[fill]align] [sign] ['#'] ['0'] [width] [','] ['.'precision]
 *   [presentation]
 * Every "if (++p == end) return;" accepts the string ending after that
 * component. Reaching the final error() means characters were left over.
 */
void FormatArg::initSlow() {
  auto b = fullArgString.begin();
  auto end = fullArgString.end();
  // Key is everything before the first ':'; if there is no ':', the
  // whole string is the key and there is no format spec.
  auto p = static_cast<const char*>(memchr(b, ':', end - b));
  if (!p) {
    key_ = StringPiece(b, end);
    return;
  }
  key_ = StringPiece(b, p);
  // memchr guarantees *p == ':' here; the check documents intent.
  if (*p == ':') {
    // parse format spec
    if (++p == end) return;
    // fill/align, or just align
    Align a;
    if (p + 1 != end &&
        (a = formatAlignTable[static_cast<unsigned char>(p[1])]) !=
        Align::INVALID) {
      // Two-character form: *p is the fill character, p[1] the align char.
      fill = *p;
      align = a;
      p += 2;
      if (p == end) return;
    } else if ((a = formatAlignTable[static_cast<unsigned char>(*p)]) !=
               Align::INVALID) {
      // Align character alone; fill keeps its default.
      align = a;
      if (++p == end) return;
    }
    // Optional sign specifier (whatever formatSignTable accepts).
    Sign s;
    unsigned char uSign = static_cast<unsigned char>(*p);
    if ((s = formatSignTable[uSign]) != Sign::INVALID) {
      sign = s;
      if (++p == end) return;
    }
    // '#': request a base prefix (alternate form) on output.
    if (*p == '#') {
      basePrefix = true;
      if (++p == end) return;
    }
    // Leading '0': zero-pad after the sign; mutually exclusive with an
    // explicit alignment parsed above.
    if (*p == '0') {
      enforce(align == Align::DEFAULT, "alignment specified twice");
      fill = '0';
      align = Align::PAD_AFTER_SIGN;
      if (++p == end) return;
    }
    // Width: a run of decimal digits.
    if (*p >= '0' && *p <= '9') {
      auto b = p;  // shadows the outer b: start of the digit run
      do {
        ++p;
      } while (p != end && *p >= '0' && *p <= '9');
      width = to<int>(StringPiece(b, p));
      if (p == end) return;
    }
    // ',': request thousands separators.
    if (*p == ',') {
      thousandsSeparator = true;
      if (++p == end) return;
    }
    // '.' then digits: precision. NOTE(review): a '.' with no digits
    // hands an empty piece to to<int>() — presumably that reports a
    // conversion error; confirm that is the intended diagnostic.
    if (*p == '.') {
      auto b = ++p;  // shadows again: start of the precision digits
      while (p != end && *p >= '0' && *p <= '9') {
        ++p;
      }
      precision = to<int>(StringPiece(b, p));
      if (p == end) return;
    }
    // A single remaining character is the presentation/conversion type.
    presentation = *p;
    if (++p == end) return;
  }
  error("extra characters in format string");
}
/**
 * Check the parsed format spec against the kind of conversion being
 * performed. Each violated constraint is reported through enforce()
 * with a user-facing message; a spec that passes is compatible with
 * the given type.
 */
void FormatArg::validate(Type type) const {
  // An explicit index/key is never allowed once validate() is reached.
  enforce(keyEmpty(), "index not allowed");
  if (type == Type::INTEGER) {
    // Integers take no precision; everything else is fair game.
    enforce(precision == kDefaultPrecision,
            "precision not allowed on integers");
  } else if (type == Type::FLOAT) {
    // Floats accept precision but not the integer-only modifiers.
    enforce(!basePrefix,
            "base prefix ('#') specifier only allowed on integers");
    enforce(!thousandsSeparator,
            "thousands separator (',') only allowed on integers");
  } else if (type == Type::OTHER) {
    // Non-numeric conversions reject every numeric-only modifier.
    enforce(align != Align::PAD_AFTER_SIGN,
            "'='alignment only allowed on numbers");
    enforce(sign == Sign::DEFAULT,
            "sign specifier only allowed on numbers");
    enforce(!basePrefix,
            "base prefix ('#') specifier only allowed on integers");
    enforce(!thousandsSeparator,
            "thousands separator (',') only allowed on integers");
  }
}
} // namespace folly
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "folly/GroupVarint.h"
namespace folly {
// Byte-selection masks: entry i keeps the low (i + 1) bytes of a
// 32-bit value. Presumably indexed by (encoded length - 1) by the
// decoder — confirm against GroupVarint.h.
const uint32_t GroupVarint32::kMask[] = {
  0xff, 0xffff, 0xffffff, 0xffffffff
};
// 64-bit analogue: entry i keeps the low (i + 1) bytes (1..8) of a
// 64-bit value. Presumably indexed by (encoded length - 1) by the
// decoder — confirm against GroupVarint.h.
const uint64_t GroupVarint64::kMask[] = {
  0xff, 0xffff, 0xffffff, 0xffffffff,
  0xffffffffffULL, 0xffffffffffffULL, 0xffffffffffffffULL,
  0xffffffffffffffffULL
};
} // namespace folly
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compiler hints to indicate the fast path of an "if" branch: whether
* the if condition is likely to be true or false.
*
* @author Tudor Bosman (tudorb@fb.com)
*/
#ifndef FOLLY_BASE_LIKELY_H_
#define FOLLY_BASE_LIKELY_H_
#undef LIKELY
#undef UNLIKELY

#if defined(__GNUC__) && __GNUC__ >= 4
// __builtin_expect's first argument is a long, and the hint means "this
// expression is expected to equal the second argument". Normalize the
// condition to 0/1 with !!(...): without it, LIKELY(ptr) or LIKELY(n)
// for any n not in {0, 1} would hint that the value equals the literal
// 1 — which is (almost) never true, silently inverting the hint.
#define LIKELY(x)   (__builtin_expect(!!(x), 1))
#define UNLIKELY(x) (__builtin_expect(!!(x), 0))
#else
// Non-GCC compilers: the hint degrades to a no-op.
#define LIKELY(x)   (x)
#define UNLIKELY(x) (x)
#endif
#endif /* FOLLY_BASE_LIKELY_H_ */
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_PORTABILITY_H_
#define FOLLY_PORTABILITY_H_
#include "folly-config.h"
// If <sched.h> is available, paper over a missing pthread_yield() by
// mapping it to POSIX sched_yield(), which also yields the calling
// thread. NOTE(review): this assumes the FOLLY_HAVE_* macros are
// defined-or-undefined (autoconf AC_DEFINE style) rather than always
// defined as 0/1 — #ifdef would treat a value of 0 as "have"; confirm
// against folly-config.h.
#ifdef FOLLY_HAVE_SCHED_H
#include <sched.h>
#ifndef FOLLY_HAVE_PTHREAD_YIELD
#define pthread_yield sched_yield
#endif
#endif
#endif // FOLLY_PORTABILITY_H_
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_BASE_RANDOM_H_
#define FOLLY_BASE_RANDOM_H_
#include <stdint.h>
namespace folly {
/*
 * Return a good seed for a random number generator.
 * Declaration only — the definition lives in the corresponding .cpp
 * file, so seed quality and thread-safety must be checked there.
 */
uint32_t randomNumberSeed();
}  // namespace folly
#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment