Commit 928ed23a authored by Nathan Bronson, committed by Facebook Github Bot

clang-format folly/io subdir

Summary: Automated reformat of folly/io subdir in preparation for other changes

Reviewed By: yfeldblum

Differential Revision: D8559473

fbshipit-source-id: c94d9c05ee77e25b6a61ee7a47b472ccea1f18f3
parent 38589135
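For context: a pass like this is normally produced by running clang-format in place over every C++ source file in the subdirectory, with no manual edits on top. The exact invocation is not recorded in the commit message; the sketch below is hypothetical, assuming clang-format is on PATH and that folly's .clang-format config lives at the repository root:

    # Hypothetical reproduction of the automated reformat (the actual tooling is not recorded here).
    # -style=file tells clang-format to use the nearest .clang-format found in a parent directory.
    find folly/io -name '*.cpp' -o -name '*.h' | xargs clang-format -i -style=file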
@@ -41,8 +41,8 @@ void Appender::vprintf(const char* fmt, va_list ap) {
};
// First try writing into our available data space.
int ret = vsnprintf(reinterpret_cast<char*>(writableData()), length(),
fmt, ap);
int ret =
vsnprintf(reinterpret_cast<char*>(writableData()), length(), fmt, ap);
if (ret < 0) {
throw std::runtime_error("error formatting printf() data");
}
@@ -58,15 +58,16 @@ void Appender::vprintf(const char* fmt, va_list ap) {
// There wasn't enough room for the data.
// Allocate more room, and then retry.
ensure(len + 1);
ret = vsnprintf(reinterpret_cast<char*>(writableData()), length(),
fmt, apCopy);
ret =
vsnprintf(reinterpret_cast<char*>(writableData()), length(), fmt, apCopy);
if (ret < 0) {
throw std::runtime_error("error formatting printf() data");
}
len = size_t(ret);
if (len >= length()) {
// This shouldn't ever happen.
throw std::runtime_error("unexpectedly out of buffer space on second "
throw std::runtime_error(
"unexpectedly out of buffer space on second "
"vsnprintf() attmept");
}
append(len);
@@ -55,7 +55,9 @@ namespace detail {
template <class Derived, class BufType>
class CursorBase {
// Make all the templated classes friends for copy constructor.
template <class D, typename B> friend class CursorBase;
template <class D, typename B>
friend class CursorBase;
public:
explicit CursorBase(BufType* buf) : crtBuf_(buf), buffer_(buf) {
if (crtBuf_) {
@@ -166,7 +168,8 @@ class CursorBase {
// We are at the end of a buffer, but it isn't the last buffer.
// We might still be at the end if the remaining buffers in the chain are
// empty.
const IOBuf* buf = crtBuf_->next();;
const IOBuf* buf = crtBuf_->next();
;
while (buf != buffer_) {
if (buf->length() > 0) {
return false;
@@ -516,7 +519,7 @@ class CursorBase {
* Return the distance between two cursors.
*/
size_t operator-(const CursorBase& other) const {
BufType *otherBuf = other.crtBuf_;
BufType* otherBuf = other.crtBuf_;
size_t len = 0;
if (otherBuf != crtBuf_) {
@@ -572,7 +575,7 @@ class CursorBase {
(uint64_t)(crtEnd_ - crtBegin_) == crtBuf_->length());
}
~CursorBase() { }
~CursorBase() {}
BufType* head() {
return buffer_;
@@ -629,7 +632,7 @@ class CursorBase {
}
void readFixedStringSlow(std::string* str, size_t len) {
for (size_t available; (available = length()) < len; ) {
for (size_t available; (available = length()) < len;) {
str->append(reinterpret_cast<const char*>(data()), available);
if (UNLIKELY(!tryAdvanceBuffer())) {
throw_exception<std::out_of_range>("string underflow");
@@ -650,7 +653,7 @@ class CursorBase {
}
uint8_t* p = reinterpret_cast<uint8_t*>(buf);
size_t copied = 0;
for (size_t available; (available = length()) < len; ) {
for (size_t available; (available = length()) < len;) {
memcpy(p, data(), available);
copied += available;
if (UNLIKELY(!tryAdvanceBuffer())) {
@@ -673,7 +676,7 @@ class CursorBase {
size_t skipAtMostSlow(size_t len) {
size_t skipped = 0;
for (size_t available; (available = length()) < len; ) {
for (size_t available; (available = length()) < len;) {
skipped += available;
if (UNLIKELY(!tryAdvanceBuffer())) {
return skipped;
@@ -710,8 +713,7 @@ class CursorBase {
}
}
void advanceDone() {
}
void advanceDone() {}
};
} // namespace detail
@@ -732,8 +734,7 @@ template <class Derived>
class Writable {
public:
template <class T>
typename std::enable_if<std::is_arithmetic<T>::value>::type
write(T value) {
typename std::enable_if<std::is_arithmetic<T>::value>::type write(T value) {
const uint8_t* u8 = reinterpret_cast<const uint8_t*>(&value);
Derived* d = static_cast<Derived*>(this);
d->push(u8, sizeof(T));
@@ -782,7 +783,7 @@ class Writable {
size_t pushAtMost(Cursor cursor, size_t len) {
size_t written = 0;
for(;;) {
for (;;) {
auto currentBuffer = cursor.peekBytes();
const uint8_t* crtData = currentBuffer.data();
size_t available = currentBuffer.size();
@@ -808,20 +809,16 @@ class Writable {
} // namespace detail
enum class CursorAccess {
PRIVATE,
UNSHARE
};
enum class CursorAccess { PRIVATE, UNSHARE };
template <CursorAccess access>
class RWCursor
: public detail::CursorBase<RWCursor<access>, IOBuf>,
class RWCursor : public detail::CursorBase<RWCursor<access>, IOBuf>,
public detail::Writable<RWCursor<access>> {
friend class detail::CursorBase<RWCursor<access>, IOBuf>;
public:
explicit RWCursor(IOBuf* buf)
: detail::CursorBase<RWCursor<access>, IOBuf>(buf),
maybeShared_(true) {}
: detail::CursorBase<RWCursor<access>, IOBuf>(buf), maybeShared_(true) {}
template <class OtherDerived, class OtherBuf>
explicit RWCursor(const detail::CursorBase<OtherDerived, OtherBuf>& cursor)
@@ -972,10 +969,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor;
class Appender : public detail::Writable<Appender> {
public:
Appender(IOBuf* buf, uint64_t growth)
: buffer_(buf),
crtBuf_(buf->prev()),
growth_(growth) {
}
: buffer_(buf), crtBuf_(buf->prev()), growth_(growth) {}
uint8_t* writableData() {
return crtBuf_->writableTail();
@@ -60,7 +60,9 @@ enum : uint64_t {
};
// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(bool freeOnError, void* buf,
void takeOwnershipError(
bool freeOnError,
void* buf,
folly::IOBuf::FreeFunction freeFn,
void* userData) {
if (!freeOnError) {
@@ -108,25 +110,21 @@ struct IOBuf::HeapStorage {
struct IOBuf::HeapFullStorage {
// Make sure jemalloc allocates from the 64-byte class. Putting this here
// because HeapStorage is private so it can't be at namespace level.
static_assert(sizeof(HeapStorage) <= 64,
"IOBuf may not grow over 56 bytes!");
static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!");
HeapStorage hs;
SharedInfo shared;
folly::max_align_t align;
};
IOBuf::SharedInfo::SharedInfo()
: freeFn(nullptr),
userData(nullptr) {
IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr) {
// Use relaxed memory ordering here. Since we are creating a new SharedInfo,
// no other threads should be referring to it yet.
refcount.store(1, std::memory_order_relaxed);
}
IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
: freeFn(fn),
userData(arg) {
: freeFn(fn), userData(arg) {
// Use relaxed memory ordering here. Since we are creating a new SharedInfo,
// no other threads should be referring to it yet.
refcount.store(1, std::memory_order_relaxed);
@@ -144,7 +142,9 @@ void* IOBuf::operator new(size_t size) {
return &(storage->buf);
}
void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }
void* IOBuf::operator new(size_t /* size */, void* ptr) {
return ptr;
}
void IOBuf::operator delete(void* ptr) {
auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
@@ -202,7 +202,8 @@ IOBuf::IOBuf(CreateOp, uint64_t capacity)
data_ = buf_;
}
IOBuf::IOBuf(CopyBufferOp /* op */,
IOBuf::IOBuf(
CopyBufferOp /* op */,
const void* buf,
uint64_t size,
uint64_t headroom,
@@ -216,10 +217,12 @@ IOBuf::IOBuf(CopyBufferOp /* op */,
}
}
IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
uint64_t headroom, uint64_t minTailroom)
: IOBuf(op, br.data(), br.size(), headroom, minTailroom) {
}
IOBuf::IOBuf(
CopyBufferOp op,
ByteRange br,
uint64_t headroom,
uint64_t minTailroom)
: IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}
unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
// For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
@@ -251,8 +254,12 @@ unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
size_t actualCapacity = size_t(storageEnd - bufAddr);
unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
bufAddr, actualCapacity, bufAddr, 0));
InternalConstructor(),
packFlagsAndSharedInfo(0, &storage->shared),
bufAddr,
actualCapacity,
bufAddr,
0));
return ret;
}
@@ -261,9 +268,10 @@ unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
}
unique_ptr<IOBuf> IOBuf::createChain(
size_t totalCapacity, uint64_t maxBufCapacity) {
unique_ptr<IOBuf> out = create(
std::min(totalCapacity, size_t(maxBufCapacity)));
size_t totalCapacity,
uint64_t maxBufCapacity) {
unique_ptr<IOBuf> out =
create(std::min(totalCapacity, size_t(maxBufCapacity)));
size_t allocatedCapacity = out->capacity();
while (allocatedCapacity < totalCapacity) {
@@ -276,8 +284,13 @@ unique_ptr<IOBuf> IOBuf::createChain(
return out;
}
IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
FreeFunction freeFn, void* userData,
IOBuf::IOBuf(
TakeOwnershipOp,
void* buf,
uint64_t capacity,
uint64_t length,
FreeFunction freeFn,
void* userData,
bool freeOnError)
: next_(this),
prev_(this),
@@ -285,7 +298,8 @@ IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
buf_(static_cast<uint8_t*>(buf)),
length_(length),
capacity_(capacity),
flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
flagsAndSharedInfo_(
packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
try {
setSharedInfo(new SharedInfo(freeFn, userData));
} catch (...) {
@@ -294,7 +308,9 @@ IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
}
}
unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
unique_ptr<IOBuf> IOBuf::takeOwnership(
void* buf,
uint64_t capacity,
uint64_t length,
FreeFunction freeFn,
void* userData,
@@ -318,17 +334,18 @@ unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
}
IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
: IOBuf(InternalConstructor(), 0,
: IOBuf(
InternalConstructor(),
0,
// We cast away the const-ness of the buffer here.
// This is okay since IOBuf users must use unshare() to create a copy
// of this buffer before writing to the buffer.
static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {
}
static_cast<uint8_t*>(const_cast<void*>(buf)),
capacity,
static_cast<uint8_t*>(const_cast<void*>(buf)),
capacity) {}
IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
: IOBuf(op, br.data(), br.size()) {
}
IOBuf::IOBuf(WrapBufferOp op, ByteRange br) : IOBuf(op, br.data(), br.size()) {}
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
@@ -338,8 +355,7 @@ IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}
IOBuf::IOBuf() noexcept {
}
IOBuf::IOBuf() noexcept {}
IOBuf::IOBuf(IOBuf&& other) noexcept
: data_(other.data_),
@@ -375,7 +391,8 @@ IOBuf::IOBuf(const IOBuf& other) {
*this = other.cloneAsValue();
}
IOBuf::IOBuf(InternalConstructor,
IOBuf::IOBuf(
InternalConstructor,
uintptr_t flagsAndSharedInfo,
uint8_t* buf,
uint64_t capacity,
@@ -675,7 +692,8 @@ void IOBuf::coalesceSlow(size_t maxLength) {
break;
}
if (end == this) {
throw std::overflow_error("attempted to coalesce more data than "
throw std::overflow_error(
"attempted to coalesce more data than "
"available");
}
}
@@ -685,7 +703,8 @@ void IOBuf::coalesceSlow(size_t maxLength) {
DCHECK_GE(length_, maxLength);
}
void IOBuf::coalesceAndReallocate(size_t newHeadroom,
void IOBuf::coalesceAndReallocate(
size_t newHeadroom,
size_t newLength,
IOBuf* end,
size_t newTailroom) {
@@ -744,8 +763,7 @@ void IOBuf::decrementRefcount() {
}
// Decrement the refcount
uint32_t newcnt = info->refcount.fetch_sub(
1, std::memory_order_acq_rel);
uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel);
// Note that fetch_sub() returns the value before we decremented.
// If it is 1, we were the only remaining user; if it is greater there are
// still other users.
@@ -894,7 +912,8 @@ void IOBuf::freeExtBuffer() {
}
}
void IOBuf::allocExtBuffer(uint64_t minCapacity,
void IOBuf::allocExtBuffer(
uint64_t minCapacity,
uint8_t** bufReturn,
SharedInfo** infoReturn,
uint64_t* capacityReturn) {
@@ -923,13 +942,15 @@ size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
return goodMallocSize(minSize);
}
void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
void IOBuf::initExtBuffer(
uint8_t* buf,
size_t mallocSize,
SharedInfo** infoReturn,
uint64_t* capacityReturn) {
// Find the SharedInfo storage at the end of the buffer
// and construct the SharedInfo.
uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
SharedInfo* sharedInfo = new(infoStart) SharedInfo;
SharedInfo* sharedInfo = new (infoStart) SharedInfo;
*capacityReturn = uint64_t(infoStart - buf);
*infoReturn = sharedInfo;
@@ -951,8 +972,10 @@ fbstring IOBuf::moveToFbString() {
// Ensure NUL terminated
*writableTail() = 0;
fbstring str(reinterpret_cast<char*>(writableData()),
length(), capacity(),
fbstring str(
reinterpret_cast<char*>(writableData()),
length(),
capacity(),
AcquireMallocatedString());
if (flags() & kFlagFreeSharedInfo) {
@@ -35,8 +35,7 @@ const size_t MAX_PACK_COPY = 4096;
/**
* Convenience function to append chain src to chain dst.
*/
void
appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
void appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
if (dst == nullptr) {
dst = std::move(src);
} else {
@@ -109,8 +108,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
return *this;
}
std::pair<void*, uint64_t>
IOBufQueue::headroom() {
std::pair<void*, uint64_t> IOBufQueue::headroom() {
// Note, headroom is independent from the tail, so we don't need to flush the
// cache.
if (head_) {
@@ -120,8 +118,7 @@ IOBufQueue::headroom() {
}
}
void
IOBufQueue::markPrepended(uint64_t n) {
void IOBufQueue::markPrepended(uint64_t n) {
if (n == 0) {
return;
}
@@ -132,8 +129,7 @@ IOBufQueue::markPrepended(uint64_t n) {
chainLength_ += n;
}
void
IOBufQueue::prepend(const void* buf, uint64_t n) {
void IOBufQueue::prepend(const void* buf, uint64_t n) {
// We're not touching the tail, so we don't need to flush the cache.
auto hroom = head_->headroom();
if (!head_ || hroom < n) {
@@ -144,8 +140,7 @@ IOBufQueue::prepend(const void* buf, uint64_t n) {
chainLength_ += n;
}
void
IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
void IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
if (!buf) {
return;
}
@@ -156,8 +151,7 @@ IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
appendToChain(head_, std::move(buf), pack);
}
void
IOBufQueue::append(IOBufQueue& other, bool pack) {
void IOBufQueue::append(IOBufQueue& other, bool pack) {
if (!other.head_) {
return;
}
@@ -175,16 +169,16 @@ IOBufQueue::append(IOBufQueue& other, bool pack) {
other.chainLength_ = 0;
}
void
IOBufQueue::append(const void* buf, size_t len) {
void IOBufQueue::append(const void* buf, size_t len) {
auto guard = updateGuard();
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
if ((head_ == nullptr) || head_->prev()->isSharedOne() ||
(head_->prev()->tailroom() == 0)) {
appendToChain(head_,
IOBuf::create(std::max(MIN_ALLOC_SIZE,
std::min(len, MAX_ALLOC_SIZE))),
appendToChain(
head_,
IOBuf::create(
std::max(MIN_ALLOC_SIZE, std::min(len, MAX_ALLOC_SIZE))),
false);
}
IOBuf* last = head_->prev();
@@ -197,8 +191,7 @@ IOBufQueue::append(const void* buf, size_t len) {
}
}
void
IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
size_t n = std::min(len, size_t(blockSize));
@@ -208,8 +201,9 @@ IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
}
}
pair<void*,uint64_t>
IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize,
pair<void*, uint64_t> IOBufQueue::preallocateSlow(
uint64_t min,
uint64_t newAllocationSize,
uint64_t max) {
// Avoid grabbing update guard, since we're manually setting the cache ptrs.
flushCache();
@@ -291,15 +291,14 @@ class IOBufQueue {
* releasing the buffers), if possible. If pack is false, we leave
* the chain topology unchanged.
*/
void append(std::unique_ptr<folly::IOBuf>&& buf,
bool pack=false);
void append(std::unique_ptr<folly::IOBuf>&& buf, bool pack = false);
/**
* Add a queue to the end of this queue. The queue takes ownership of
* all buffers from the other queue.
*/
void append(IOBufQueue& other, bool pack=false);
void append(IOBufQueue&& other, bool pack=false) {
void append(IOBufQueue& other, bool pack = false);
void append(IOBufQueue&& other, bool pack = false) {
append(other, pack); // call lvalue reference overload, above
}
@@ -329,8 +328,10 @@ class IOBufQueue {
* Every buffer except for the last will wrap exactly blockSize bytes.
* Importantly, this method may be used to wrap buffers larger than 4GB.
*/
void wrapBuffer(const void* buf, size_t len,
uint64_t blockSize=(1U << 31)); // default block size: 2GB
void wrapBuffer(
const void* buf,
size_t len,
uint64_t blockSize = (1U << 31)); // default block size: 2GB
/**
* Obtain a writable block of contiguous bytes at the end of this
@@ -352,8 +353,9 @@ class IOBufQueue {
* callback, tell the application how much of the buffer they've
* filled with data.
*/
std::pair<void*,uint64_t> preallocate(
uint64_t min, uint64_t newAllocationSize,
std::pair<void*, uint64_t> preallocate(
uint64_t min,
uint64_t newAllocationSize,
uint64_t max = std::numeric_limits<uint64_t>::max()) {
dcheckCacheIntegrity();
@@ -30,11 +30,16 @@ class RecordIOReader::Iterator : public boost::iterator_facade<
boost::forward_traversal_tag> {
friend class boost::iterator_core_access;
friend class RecordIOReader;
private:
Iterator(ByteRange range, uint32_t fileId, off_t pos);
reference dereference() const { return recordAndPos_; }
bool equal(const Iterator& other) const { return range_ == other.range_; }
reference dereference() const {
return recordAndPos_;
}
bool equal(const Iterator& other) const {
return range_ == other.range_;
}
void increment() {
size_t skip = recordio_helpers::headerSize() + recordAndPos_.first.size();
recordAndPos_.second += off_t(skip);
@@ -49,10 +54,18 @@ class RecordIOReader::Iterator : public boost::iterator_facade<
std::pair<ByteRange, off_t> recordAndPos_;
};
inline auto RecordIOReader::cbegin() const -> Iterator { return seek(0); }
inline auto RecordIOReader::begin() const -> Iterator { return cbegin(); }
inline auto RecordIOReader::cend() const -> Iterator { return seek(off_t(-1)); }
inline auto RecordIOReader::end() const -> Iterator { return cend(); }
inline auto RecordIOReader::cbegin() const -> Iterator {
return seek(0);
}
inline auto RecordIOReader::begin() const -> Iterator {
return cbegin();
}
inline auto RecordIOReader::cend() const -> Iterator {
return seek(off_t(-1));
}
inline auto RecordIOReader::end() const -> Iterator {
return cend();
}
inline auto RecordIOReader::seek(off_t pos) const -> Iterator {
return Iterator(map_.range(), fileId_, pos);
}
@@ -79,12 +92,15 @@ struct Header {
} FOLLY_PACK_ATTR;
FOLLY_PACK_POP
static_assert(offsetof(Header, headerHash) + sizeof(Header::headerHash) ==
sizeof(Header), "invalid header layout");
static_assert(
offsetof(Header, headerHash) + sizeof(Header::headerHash) == sizeof(Header),
"invalid header layout");
} // namespace recordio_detail
constexpr size_t headerSize() { return sizeof(recordio_detail::Header); }
constexpr size_t headerSize() {
return sizeof(recordio_detail::Header);
}
inline RecordInfo findRecord(ByteRange range, uint32_t fileId) {
return findRecord(range, range, fileId);
@@ -70,14 +70,10 @@ void RecordIOWriter::write(std::unique_ptr<IOBuf> buf) {
}
RecordIOReader::RecordIOReader(File file, uint32_t fileId)
: map_(std::move(file)),
fileId_(fileId) {
}
: map_(std::move(file)), fileId_(fileId) {}
RecordIOReader::Iterator::Iterator(ByteRange range, uint32_t fileId, off_t pos)
: range_(range),
fileId_(fileId),
recordAndPos_(ByteRange(), 0) {
: range_(range), fileId_(fileId), recordAndPos_(ByteRange(), 0) {
if (size_t(pos) >= range_.size()) {
// Note that this branch can execute if pos is negative as well.
recordAndPos_.second = off_t(-1);
@@ -113,8 +109,8 @@ namespace {
constexpr uint32_t kHashSeed = 0xdeadbeef; // for mcurtiss
uint32_t headerHash(const Header& header) {
return hash::SpookyHashV2::Hash32(&header, offsetof(Header, headerHash),
kHashSeed);
return hash::SpookyHashV2::Hash32(
&header, offsetof(Header, headerHash), kHashSeed);
}
std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) {
@@ -177,10 +173,8 @@ RecordInfo validateRecord(ByteRange range, uint32_t fileId) {
}
const Header* header = reinterpret_cast<const Header*>(range.begin());
range.advance(sizeof(Header));
if (header->magic != Header::kMagic ||
header->version != 0 ||
header->hashFunction != 0 ||
header->flags != 0 ||
if (header->magic != Header::kMagic || header->version != 0 ||
header->hashFunction != 0 || header->flags != 0 ||
(fileId != 0 && header->fileId != fileId) ||
header->dataLength > range.size()) {
return {0, {}};
@@ -195,19 +189,18 @@ RecordInfo validateRecord(ByteRange range, uint32_t fileId) {
return {header->fileId, range};
}
RecordInfo findRecord(ByteRange searchRange,
ByteRange wholeRange,
uint32_t fileId) {
RecordInfo
findRecord(ByteRange searchRange, ByteRange wholeRange, uint32_t fileId) {
static const uint32_t magic = Header::kMagic;
static const ByteRange magicRange(reinterpret_cast<const uint8_t*>(&magic),
sizeof(magic));
static const ByteRange magicRange(
reinterpret_cast<const uint8_t*>(&magic), sizeof(magic));
DCHECK_GE(searchRange.begin(), wholeRange.begin());
DCHECK_LE(searchRange.end(), wholeRange.end());
const uint8_t* start = searchRange.begin();
const uint8_t* end = std::min(searchRange.end(),
wholeRange.end() - sizeof(Header));
const uint8_t* end =
std::min(searchRange.end(), wholeRange.end() - sizeof(Header));
// end-1: the last place where a Header could start
while (start < end) {
auto p = ByteRange(start, end + sizeof(magic)).find(magicRange);
@@ -69,7 +69,9 @@ class RecordIOWriter {
* Return the position in the file where the next byte will be written.
* Conservative, as stuff can be written at any time from another thread.
*/
off_t filePos() const { return filePos_; }
off_t filePos() const {
return filePos_;
}
private:
File file_;
@@ -157,9 +159,8 @@ struct RecordInfo {
uint32_t fileId;
ByteRange record;
};
RecordInfo findRecord(ByteRange searchRange,
ByteRange wholeRange,
uint32_t fileId);
RecordInfo
findRecord(ByteRange searchRange, ByteRange wholeRange, uint32_t fileId);
/**
* Search for the first valid record in range.
@@ -70,7 +70,7 @@ class ShutdownSocketSet : private boost::noncopyable {
* read() and write() operations to the socket will fail. During normal
* operation, just call ::shutdown() on the socket.
*/
void shutdown(int fd, bool abortive=false);
void shutdown(int fd, bool abortive = false);
/**
* Immediate shutdown of all connections. This is a hard-hitting hammer;
@@ -92,7 +92,7 @@ class ShutdownSocketSet : private boost::noncopyable {
*
* This is async-signal-safe and ignores errors.
*/
void shutdownAll(bool abortive=false);
void shutdownAll(bool abortive = false);
private:
void doShutdown(int fd, bool abortive);
@@ -38,6 +38,7 @@ namespace folly {
template <class T>
class TypedIOBuf {
static_assert(std::is_standard_layout<T>::value, "must be standard layout");
public:
typedef T value_type;
typedef value_type& reference;
@@ -46,7 +47,7 @@ class TypedIOBuf {
typedef value_type* iterator;
typedef const value_type* const_iterator;
explicit TypedIOBuf(IOBuf* buf) : buf_(buf) { }
explicit TypedIOBuf(IOBuf* buf) : buf_(buf) {}
IOBuf* ioBuf() {
return buf_;
@@ -73,7 +74,9 @@ class TypedIOBuf {
uint32_t length() const {
return sdiv(buf_->length());
}
uint32_t size() const { return length(); }
uint32_t size() const {
return length();
}
uint32_t headroom() const {
return sdiv(buf_->headroom());
@@ -117,14 +120,28 @@ class TypedIOBuf {
void reserve(uint32_t minHeadroom, uint32_t minTailroom) {
buf_->reserve(smul(minHeadroom), smul(minTailroom));
}
void reserve(uint32_t minTailroom) { reserve(0, minTailroom); }
void reserve(uint32_t minTailroom) {
reserve(0, minTailroom);
}
const T* cbegin() const { return data(); }
const T* cend() const { return tail(); }
const T* begin() const { return cbegin(); }
const T* end() const { return cend(); }
T* begin() { return writableData(); }
T* end() { return writableTail(); }
const T* cbegin() const {
return data();
}
const T* cend() const {
return tail();
}
const T* begin() const {
return cbegin();
}
const T* end() const {
return cend();
}
T* begin() {
return writableData();
}
T* end() {
return writableTail();
}
const T& front() const {
assert(!empty());
@@ -163,7 +180,9 @@ class TypedIOBuf {
void push(const T& data) {
push(&data, &data + 1);
}
void push_back(const T& data) { push(data); }
void push_back(const T& data) {
push(data);
}
/**
* Append multiple elements in a sequence; will call distance().
@@ -18,10 +18,10 @@
#include <folly/FileUtil.h>
#include <folly/io/async/AsyncSocketException.h>
using std::string;
using std::unique_ptr;
using folly::IOBuf;
using folly::IOBufQueue;
using std::string;
using std::unique_ptr;
namespace folly {
@@ -30,8 +30,8 @@ AsyncPipeReader::~AsyncPipeReader() {
}
void AsyncPipeReader::failRead(const AsyncSocketException& ex) {
VLOG(5) << "AsyncPipeReader(this=" << this << ", fd=" << fd_ <<
"): failed while reading: " << ex.what();
VLOG(5) << "AsyncPipeReader(this=" << this << ", fd=" << fd_
<< "): failed while reading: " << ex.what();
DCHECK(readCallback_ != nullptr);
AsyncReader::ReadCallback* callback = readCallback_;
@@ -124,8 +124,8 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept {
// No more data to read right now.
return;
} else if (bytesRead < 0) {
AsyncSocketException ex(AsyncSocketException::INVALID_STATE,
"read failed", errno);
AsyncSocketException ex(
AsyncSocketException::INVALID_STATE, "read failed", errno);
failRead(ex);
return;
} else {
@@ -142,13 +142,13 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept {
}
}
void AsyncPipeWriter::write(unique_ptr<folly::IOBuf> buf,
void AsyncPipeWriter::write(
unique_ptr<folly::IOBuf> buf,
AsyncWriter::WriteCallback* callback) {
if (closed()) {
if (callback) {
AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
"attempt to write to closed pipe");
AsyncSocketException ex(
AsyncSocketException::NOT_OPEN, "attempt to write to closed pipe");
callback->writeErr(0, ex);
}
return;
@@ -167,7 +167,8 @@ void AsyncPipeWriter::write(unique_ptr<folly::IOBuf> buf,
}
}
void AsyncPipeWriter::writeChain(folly::AsyncWriter::WriteCallback* callback,
void AsyncPipeWriter::writeChain(
folly::AsyncWriter::WriteCallback* callback,
std::unique_ptr<folly::IOBuf>&& buf,
WriteFlags) {
write(std::move(buf), callback);
@@ -186,8 +187,8 @@ void AsyncPipeWriter::closeOnEmpty() {
void AsyncPipeWriter::closeNow() {
VLOG(5) << "close now";
if (!queue_.empty()) {
failAllWrites(AsyncSocketException(AsyncSocketException::NOT_OPEN,
"closed with pending writes"));
failAllWrites(AsyncSocketException(
AsyncSocketException::NOT_OPEN, "closed with pending writes"));
}
if (fd_ >= 0) {
unregisterHandler();
@@ -213,7 +214,6 @@ void AsyncPipeWriter::failAllWrites(const AsyncSocketException& ex) {
}
}
void AsyncPipeWriter::handlerReady(uint16_t events) noexcept {
CHECK(events & EventHandler::WRITE);
@@ -225,7 +225,7 @@ void AsyncPipeWriter::handleWrite() {
assert(!queue_.empty());
do {
auto& front = queue_.front();
folly::IOBufQueue &curQueue = front.first;
folly::IOBufQueue& curQueue = front.first;
DCHECK(!curQueue.empty());
// someday, support writev. The logic for partial writes is a bit complex
const IOBuf* head = curQueue.front();
@@ -238,8 +238,8 @@ void AsyncPipeWriter::handleWrite() {
registerHandler(EventHandler::WRITE);
return;
} else {
failAllWrites(AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
"write failed", errno));
failAllWrites(AsyncSocketException(
AsyncSocketException::INTERNAL_ERROR, "write failed", errno));
closeNow();
return;
}
@@ -35,8 +35,9 @@ class AsyncPipeReader : public EventHandler,
public AsyncReader,
public DelayedDestruction {
public:
typedef std::unique_ptr<AsyncPipeReader,
folly::DelayedDestruction::Destructor> UniquePtr;
typedef std::
unique_ptr<AsyncPipeReader, folly::DelayedDestruction::Destructor>
UniquePtr;
template <typename... Args>
static UniquePtr newReader(Args&&... args) {
@@ -44,8 +45,7 @@ class AsyncPipeReader : public EventHandler,
}
AsyncPipeReader(folly::EventBase* eventBase, int pipeFd)
: EventHandler(eventBase, pipeFd),
fd_(pipeFd) {}
: EventHandler(eventBase, pipeFd), fd_(pipeFd) {}
/**
* Set the read callback and automatically install/uninstall the handler
@@ -96,8 +96,9 @@ class AsyncPipeWriter : public EventHandler,
public AsyncWriter,
public DelayedDestruction {
public:
typedef std::unique_ptr<AsyncPipeWriter,
folly::DelayedDestruction::Destructor> UniquePtr;
typedef std::
unique_ptr<AsyncPipeWriter, folly::DelayedDestruction::Destructor>
UniquePtr;
template <typename... Args>
static UniquePtr newWriter(Args&&... args) {
@@ -105,14 +106,14 @@ class AsyncPipeWriter : public EventHandler,
}
AsyncPipeWriter(folly::EventBase* eventBase, int pipeFd)
: EventHandler(eventBase, pipeFd),
fd_(pipeFd) {}
: EventHandler(eventBase, pipeFd), fd_(pipeFd) {}
/**
* Asynchronously write the given iobuf to this pipe, and invoke the callback
* on success/error.
*/
void write(std::unique_ptr<folly::IOBuf> iob,
void write(
std::unique_ptr<folly::IOBuf> iob,
AsyncWriter::WriteCallback* wcb = nullptr);
/**
@@ -148,19 +149,22 @@ class AsyncPipeWriter : public EventHandler,
}
// AsyncWriter methods
void write(folly::AsyncWriter::WriteCallback* callback,
void write(
folly::AsyncWriter::WriteCallback* callback,
const void* buf,
size_t bytes,
WriteFlags flags = WriteFlags::NONE) override {
writeChain(callback, IOBuf::wrapBuffer(buf, bytes), flags);
}
void writev(folly::AsyncWriter::WriteCallback*,
void writev(
folly::AsyncWriter::WriteCallback*,
const iovec*,
size_t,
WriteFlags = WriteFlags::NONE) override {
throw std::runtime_error("writev is not supported. Please use writeChain.");
}
void writeChain(folly::AsyncWriter::WriteCallback* callback,
void writeChain(
folly::AsyncWriter::WriteCallback* callback,
std::unique_ptr<folly::IOBuf>&& buf,
WriteFlags flags = WriteFlags::NONE) override;
@@ -26,8 +26,7 @@ using std::string;
namespace folly {
AsyncSignalHandler::AsyncSignalHandler(EventBase* eventBase)
: eventBase_(eventBase) {
}
: eventBase_(eventBase) {}
AsyncSignalHandler::~AsyncSignalHandler() {
// Unregister any outstanding events
@@ -55,24 +54,21 @@ void AsyncSignalHandler::registerSignalHandler(int signum) {
signalEvents_.insert(make_pair(signum, event()));
if (!ret.second) {
// This signal has already been registered
throw std::runtime_error(folly::to<string>(
"handler already registered for signal ",
signum));
throw std::runtime_error(
folly::to<string>("handler already registered for signal ", signum));
}
struct event* ev = &(ret.first->second);
try {
signal_set(ev, signum, libeventCallback, this);
if (event_base_set(eventBase_->getLibeventBase(), ev) != 0 ) {
if (event_base_set(eventBase_->getLibeventBase(), ev) != 0) {
throw std::runtime_error(folly::to<string>(
"error initializing event handler for signal ",
signum));
"error initializing event handler for signal ", signum));
}
if (event_add(ev, nullptr) != 0) {
throw std::runtime_error(folly::to<string>(
"error adding event handler for signal ",
signum));
throw std::runtime_error(
folly::to<string>("error adding event handler for signal ", signum));
}
} catch (...) {
signalEvents_.erase(ret.first);
@@ -85,14 +81,16 @@ void AsyncSignalHandler::unregisterSignalHandler(int signum) {
if (it == signalEvents_.end()) {
throw std::runtime_error(folly::to<string>(
"unable to unregister handler for signal ",
signum, ": signal not registered"));
signum,
": signal not registered"));
}
event_del(&it->second);
signalEvents_.erase(it);
}
void AsyncSignalHandler::libeventCallback(libevent_fd_t signum,
void AsyncSignalHandler::libeventCallback(
libevent_fd_t signum,
short /* events */,
void* arg) {
AsyncSignalHandler* handler = static_cast<AsyncSignalHandler*>(arg);
@@ -105,8 +105,8 @@ class AsyncSignalHandler {
typedef std::map<int, struct event> SignalEventMap;
// Forbidden copy constructor and assignment operator
AsyncSignalHandler(AsyncSignalHandler const &);
AsyncSignalHandler& operator=(AsyncSignalHandler const &);
AsyncSignalHandler(AsyncSignalHandler const&);
AsyncSignalHandler& operator=(AsyncSignalHandler const&);
static void libeventCallback(libevent_fd_t signum, short events, void* arg);
@@ -26,32 +26,27 @@ namespace folly {
AsyncTimeout::AsyncTimeout(TimeoutManager* timeoutManager)
: timeoutManager_(timeoutManager) {
folly_event_set(
&event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
event_.ev_base = nullptr;
timeoutManager_->attachTimeoutManager(
this,
TimeoutManager::InternalEnum::NORMAL);
this, TimeoutManager::InternalEnum::NORMAL);
}
AsyncTimeout::AsyncTimeout(EventBase* eventBase)
: timeoutManager_(eventBase) {
AsyncTimeout::AsyncTimeout(EventBase* eventBase) : timeoutManager_(eventBase) {
folly_event_set(
&event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
event_.ev_base = nullptr;
if (eventBase) {
timeoutManager_->attachTimeoutManager(
this,
TimeoutManager::InternalEnum::NORMAL);
this, TimeoutManager::InternalEnum::NORMAL);
}
}
AsyncTimeout::AsyncTimeout(TimeoutManager* timeoutManager,
AsyncTimeout::AsyncTimeout(
TimeoutManager* timeoutManager,
InternalEnum internal)
: timeoutManager_(timeoutManager) {
folly_event_set(
&event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
event_.ev_base = nullptr;
@@ -60,14 +55,13 @@ AsyncTimeout::AsyncTimeout(TimeoutManager* timeoutManager,
AsyncTimeout::AsyncTimeout(EventBase* eventBase, InternalEnum internal)
: timeoutManager_(eventBase) {
folly_event_set(
&event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
event_.ev_base = nullptr;
timeoutManager_->attachTimeoutManager(this, internal);
}
AsyncTimeout::AsyncTimeout(): timeoutManager_(nullptr) {
AsyncTimeout::AsyncTimeout() : timeoutManager_(nullptr) {
folly_event_set(
&event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
event_.ev_base = nullptr;
@@ -121,9 +121,11 @@ class AsyncTimeout : private boost::noncopyable {
* internal event. TimeoutManager::loop() will return when there are no more
* non-internal events remaining.
*/
void attachTimeoutManager(TimeoutManager* timeoutManager,
void attachTimeoutManager(
TimeoutManager* timeoutManager,
InternalEnum internal = InternalEnum::NORMAL);
void attachEventBase(EventBase* eventBase,
void attachEventBase(
EventBase* eventBase,
InternalEnum internal = InternalEnum::NORMAL);
/**
@@ -175,9 +177,8 @@ class AsyncTimeout : private boost::noncopyable {
*/
template <typename TCallback>
static std::unique_ptr<AsyncTimeout> make(
TimeoutManager &manager,
TCallback &&callback
);
TimeoutManager& manager,
TCallback&& callback);
/**
* Convenience function that wraps a function object as
@@ -211,9 +212,8 @@ class AsyncTimeout : private boost::noncopyable {
template <typename TCallback>
static std::unique_ptr<AsyncTimeout> schedule(
TimeoutManager::timeout_type timeout,
TimeoutManager &manager,
TCallback &&callback
);
TimeoutManager& manager,
TCallback&& callback);
private:
static void libeventCallback(libevent_fd_t fd, short events, void* arg);
@@ -239,20 +239,15 @@ namespace detail {
* @author: Marcelo Juchem <marcelo@fb.com>
*/
template <typename TCallback>
struct async_timeout_wrapper:
public AsyncTimeout
{
struct async_timeout_wrapper : public AsyncTimeout {
template <typename UCallback>
async_timeout_wrapper(TimeoutManager *manager, UCallback &&callback):
AsyncTimeout(manager),
callback_(std::forward<UCallback>(callback))
{}
async_timeout_wrapper(TimeoutManager* manager, UCallback&& callback)
: AsyncTimeout(manager), callback_(std::forward<UCallback>(callback)) {}
void timeoutExpired() noexcept override {
static_assert(
noexcept(std::declval<TCallback>()()),
"callback must be declared noexcept, e.g.: `[]() noexcept {}`"
);
"callback must be declared noexcept, e.g.: `[]() noexcept {}`");
callback_();
}
@@ -264,23 +259,18 @@ struct async_timeout_wrapper:
template <typename TCallback>
std::unique_ptr<AsyncTimeout> AsyncTimeout::make(
TimeoutManager &manager,
TCallback &&callback
) {
TimeoutManager& manager,
TCallback&& callback) {
return std::unique_ptr<AsyncTimeout>(
new detail::async_timeout_wrapper<typename std::decay<TCallback>::type>(
std::addressof(manager),
std::forward<TCallback>(callback)
)
);
std::addressof(manager), std::forward<TCallback>(callback)));
}
template <typename TCallback>
std::unique_ptr<AsyncTimeout> AsyncTimeout::schedule(
TimeoutManager::timeout_type timeout,
TimeoutManager &manager,
TCallback &&callback
) {
TimeoutManager& manager,
TCallback&& callback) {
auto wrapper = AsyncTimeout::make(manager, std::forward<TCallback>(callback));
wrapper->scheduleTimeout(timeout);
return wrapper;
@@ -33,7 +33,7 @@ constexpr bool kOpenSslModeMoveBufferOwnership =
#else
false
#endif
;
;
namespace folly {
@@ -120,7 +120,6 @@ inline bool isSet(WriteFlags a, WriteFlags b) {
return (a & b) == b;
}
/**
* AsyncTransport defines an asynchronous API for streaming I/O.
*
@@ -382,7 +381,9 @@ class AsyncTransport : public DelayedDestruction, public AsyncSocketBase {
/**
* Get the certificate used to authenticate the peer.
*/
virtual ssl::X509UniquePtr getPeerCert() const { return nullptr; }
virtual ssl::X509UniquePtr getPeerCert() const {
return nullptr;
}
/**
* The local certificate used for this connection. May be null
@@ -459,7 +460,9 @@ class AsyncTransport : public DelayedDestruction, public AsyncSocketBase {
* False if the transport does not have replay protection, but will in the
* future.
*/
virtual bool isReplaySafe() const { return true; }
virtual bool isReplaySafe() const {
return true;
}
/**
* Set the ReplaySafeCallback on this transport.
@@ -580,8 +583,8 @@ class AsyncReader {
* @param readBuf The unique pointer of read buffer.
*/
virtual void readBufferAvailable(std::unique_ptr<IOBuf> /*readBuf*/)
noexcept {}
virtual void readBufferAvailable(
std::unique_ptr<IOBuf> /*readBuf*/) noexcept {}
/**
* readEOF() will be invoked when the transport is closed.
@@ -636,16 +639,24 @@ class AsyncWriter {
* @param bytesWritten The number of bytes that were successfull
* @param ex An exception describing the error that occurred.
*/
virtual void writeErr(size_t bytesWritten,
virtual void writeErr(
size_t bytesWritten,
const AsyncSocketException& ex) noexcept = 0;
};
// Write methods that aren't part of AsyncTransport
virtual void write(WriteCallback* callback, const void* buf, size_t bytes,
virtual void write(
WriteCallback* callback,
const void* buf,
size_t bytes,
WriteFlags flags = WriteFlags::NONE) = 0;
virtual void writev(WriteCallback* callback, const iovec* vec, size_t count,
virtual void writev(
WriteCallback* callback,
const iovec* vec,
size_t count,
WriteFlags flags = WriteFlags::NONE) = 0;
virtual void writeChain(WriteCallback* callback,
virtual void writeChain(
WriteCallback* callback,
std::unique_ptr<IOBuf>&& buf,
WriteFlags flags = WriteFlags::NONE) = 0;
@@ -35,8 +35,8 @@ namespace folly {
* more than 1 packet will not work because they will end up with
* different event base to process.
*/
class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
, public AsyncSocketBase {
class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback,
public AsyncSocketBase {
public:
class Callback {
public:
@@ -84,10 +84,7 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
* is dropped and you get `truncated = true` in onDataAvailable callback
*/
explicit AsyncUDPServerSocket(EventBase* evb, size_t sz = 1500)
: evb_(evb),
packetSize_(sz),
nextListener_(0) {
}
: evb_(evb), packetSize_(sz), nextListener_(0) {}
~AsyncUDPServerSocket() override {
if (socket_) {
@@ -126,12 +123,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
void listen() {
CHECK(socket_) << "Need to bind before listening";
for (auto& listener: listeners_) {
for (auto& listener : listeners_) {
auto callback = listener.second;
listener.first->runInEventBaseThread([callback] () mutable {
callback->onListenStarted();
});
listener.first->runInEventBaseThread(
[callback]() mutable { callback->onListenStarted(); });
}
socket_->resumeRead(this);
@@ -207,13 +203,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
// Schedule it in the listener's eventbase
// XXX: Speed this up
auto f = [
socket,
auto f = [socket,
client,
callback,
data = std::move(data),
truncated
]() mutable {
truncated]() mutable {
callback->onDataAvailable(socket, client, std::move(data), truncated);
};
@@ -229,12 +223,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
}
void onReadClosed() noexcept override {
for (auto& listener: listeners_) {
for (auto& listener : listeners_) {
auto callback = listener.second;
listener.first->runInEventBaseThread([callback] () mutable {
callback->onListenStopped();
});
listener.first->runInEventBaseThread(
[callback]() mutable { callback->onListenStopped(); });
}
}
@@ -26,8 +26,8 @@ namespace folly {
template <class T>
class DecoratedAsyncTransportWrapper : public folly::AsyncTransportWrapper {
public:
explicit DecoratedAsyncTransportWrapper(typename T::UniquePtr transport):
transport_(std::move(transport)) {}
explicit DecoratedAsyncTransportWrapper(typename T::UniquePtr transport)
: transport_(std::move(transport)) {}
const AsyncTransportWrapper* getWrappedTransport() const override {
return transport_.get();
@@ -96,9 +96,7 @@ class DelayedDestruction : public DelayedDestructionBase {
*/
~DelayedDestruction() override = default;
DelayedDestruction()
: destroyPending_(false) {
}
DelayedDestruction() : destroyPending_(false) {}
private:
/**
@@ -61,30 +61,25 @@ class DelayedDestructionBase : private boost::noncopyable {
*/
class DestructorGuard {
public:
explicit DestructorGuard(DelayedDestructionBase* dd = nullptr) :
dd_(dd) {
explicit DestructorGuard(DelayedDestructionBase* dd = nullptr) : dd_(dd) {
if (dd_ != nullptr) {
++dd_->guardCount_;
assert(dd_->guardCount_ > 0); // check for wrapping
}
}
DestructorGuard(const DestructorGuard& dg) :
DestructorGuard(dg.dd_) {
}
DestructorGuard(const DestructorGuard& dg) : DestructorGuard(dg.dd_) {}
DestructorGuard(DestructorGuard&& dg) noexcept :
dd_(dg.dd_) {
DestructorGuard(DestructorGuard&& dg) noexcept : dd_(dg.dd_) {
dg.dd_ = nullptr;
}
DestructorGuard& operator =(DestructorGuard dg) noexcept {
DestructorGuard& operator=(DestructorGuard dg) noexcept {
std::swap(dd_, dg.dd_);
return *this;
}
DestructorGuard& operator =(DelayedDestructionBase* dd) {
DestructorGuard& operator=(DelayedDestructionBase* dd) {
*this = DestructorGuard(dd);
return *this;
}
@@ -120,6 +115,7 @@ class DelayedDestructionBase : private boost::noncopyable {
class IntrusivePtr : private DestructorGuard {
template <typename CopyAliasType>
friend class IntrusivePtr;
public:
template <typename... Args>
static IntrusivePtr<AliasType> make(Args&&... args) {
@@ -130,51 +126,47 @@ class DelayedDestructionBase : private boost::noncopyable {
IntrusivePtr(const IntrusivePtr&) = default;
IntrusivePtr(IntrusivePtr&&) noexcept = default;
template <typename CopyAliasType, typename =
typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value
>::type>
IntrusivePtr(const IntrusivePtr<CopyAliasType>& copy) :
DestructorGuard(copy) {
}
template <
typename CopyAliasType,
typename = typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
IntrusivePtr(const IntrusivePtr<CopyAliasType>& copy)
: DestructorGuard(copy) {}
template <typename CopyAliasType, typename =
typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value
>::type>
IntrusivePtr(IntrusivePtr<CopyAliasType>&& copy) :
DestructorGuard(std::move(copy)) {
}
template <
typename CopyAliasType,
typename = typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
IntrusivePtr(IntrusivePtr<CopyAliasType>&& copy)
: DestructorGuard(std::move(copy)) {}
explicit IntrusivePtr(AliasType* dd) :
DestructorGuard(dd) {
}
explicit IntrusivePtr(AliasType* dd) : DestructorGuard(dd) {}
// Copying from a unique_ptr is safe because if the upcast to
// DelayedDestructionBase works, then the instance is already using
// intrusive ref-counting.
template <typename CopyAliasType, typename Deleter, typename =
typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value
>::type>
explicit IntrusivePtr(const std::unique_ptr<CopyAliasType, Deleter>& copy) :
DestructorGuard(copy.get()) {
}
IntrusivePtr& operator =(const IntrusivePtr&) = default;
IntrusivePtr& operator =(IntrusivePtr&&) noexcept = default;
template <typename CopyAliasType, typename =
typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value
>::type>
IntrusivePtr& operator =(IntrusivePtr<CopyAliasType> copy) noexcept {
DestructorGuard::operator =(copy);
template <
typename CopyAliasType,
typename Deleter,
typename = typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
explicit IntrusivePtr(const std::unique_ptr<CopyAliasType, Deleter>& copy)
: DestructorGuard(copy.get()) {}
IntrusivePtr& operator=(const IntrusivePtr&) = default;
IntrusivePtr& operator=(IntrusivePtr&&) noexcept = default;
template <
typename CopyAliasType,
typename = typename std::enable_if<
std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
IntrusivePtr& operator=(IntrusivePtr<CopyAliasType> copy) noexcept {
DestructorGuard::operator=(copy);
return *this;
}
IntrusivePtr& operator =(AliasType* dd) {
DestructorGuard::operator =(dd);
IntrusivePtr& operator=(AliasType* dd) {
DestructorGuard::operator=(dd);
return *this;
}
@@ -183,14 +175,14 @@ class DelayedDestructionBase : private boost::noncopyable {
}
AliasType* get() const {
return static_cast<AliasType *>(DestructorGuard::get());
return static_cast<AliasType*>(DestructorGuard::get());
}
AliasType& operator *() const {
AliasType& operator*() const {
return *get();
}
AliasType* operator ->() const {
AliasType* operator->() const {
return get();
}
@@ -200,8 +192,7 @@ class DelayedDestructionBase : private boost::noncopyable {
};
protected:
DelayedDestructionBase()
: guardCount_(0) {}
DelayedDestructionBase() : guardCount_(0) {}
/**
* Get the number of DestructorGuards currently protecting this object.
@@ -237,12 +228,12 @@ class DelayedDestructionBase : private boost::noncopyable {
uint32_t guardCount_;
};
inline bool operator ==(
inline bool operator==(
const DelayedDestructionBase::DestructorGuard& left,
const DelayedDestructionBase::DestructorGuard& right) {
return left.get() == right.get();
}
inline bool operator !=(
inline bool operator!=(
const DelayedDestructionBase::DestructorGuard& left,
const DelayedDestructionBase::DestructorGuard& right) {
return left.get() != right.get();
@@ -269,13 +260,13 @@ inline bool operator!=(
}
template <typename LeftAliasType, typename RightAliasType>
inline bool operator ==(
inline bool operator==(
const DelayedDestructionBase::IntrusivePtr<LeftAliasType>& left,
const DelayedDestructionBase::IntrusivePtr<RightAliasType>& right) {
return left.get() == right.get();
}
template <typename LeftAliasType, typename RightAliasType>
inline bool operator !=(
inline bool operator!=(
const DelayedDestructionBase::IntrusivePtr<LeftAliasType>& left,
const DelayedDestructionBase::IntrusivePtr<RightAliasType>& right) {
return left.get() != right.get();
@@ -73,21 +73,23 @@ static std::mutex libevent_mutex_;
*/
EventBase::EventBase(bool enableTimeMeasurement)
: runOnceCallbacks_(nullptr)
, stop_(false)
, loopThread_()
, queue_(nullptr)
, fnRunner_(nullptr)
, maxLatency_(0)
, avgLoopTime_(std::chrono::seconds(2))
, maxLatencyLoopTime_(avgLoopTime_)
, enableTimeMeasurement_(enableTimeMeasurement)
, nextLoopCnt_(uint64_t(-40)) // Early wrap-around so bugs will manifest soon
, latestLoopCnt_(nextLoopCnt_)
, startWork_()
, observer_(nullptr)
, observerSampleCount_(0)
, executionObserver_(nullptr) {
: runOnceCallbacks_(nullptr),
stop_(false),
loopThread_(),
queue_(nullptr),
fnRunner_(nullptr),
maxLatency_(0),
avgLoopTime_(std::chrono::seconds(2)),
maxLatencyLoopTime_(avgLoopTime_),
enableTimeMeasurement_(enableTimeMeasurement),
nextLoopCnt_(
uint64_t(-40)) // Early wrap-around so bugs will manifest soon
,
latestLoopCnt_(nextLoopCnt_),
startWork_(),
observer_(nullptr),
observerSampleCount_(0),
executionObserver_(nullptr) {
struct event ev;
{
std::lock_guard<std::mutex> lock(libevent_mutex_);
@@ -117,22 +119,24 @@ EventBase::EventBase(bool enableTimeMeasurement)
// takes ownership of the event_base
EventBase::EventBase(event_base* evb, bool enableTimeMeasurement)
: runOnceCallbacks_(nullptr)
, stop_(false)
, loopThread_()
, evb_(evb)
, queue_(nullptr)
, fnRunner_(nullptr)
, maxLatency_(0)
, avgLoopTime_(std::chrono::seconds(2))
, maxLatencyLoopTime_(avgLoopTime_)
, enableTimeMeasurement_(enableTimeMeasurement)
, nextLoopCnt_(uint64_t(-40)) // Early wrap-around so bugs will manifest soon
, latestLoopCnt_(nextLoopCnt_)
, startWork_()
, observer_(nullptr)
, observerSampleCount_(0)
, executionObserver_(nullptr) {
: runOnceCallbacks_(nullptr),
stop_(false),
loopThread_(),
evb_(evb),
queue_(nullptr),
fnRunner_(nullptr),
maxLatency_(0),
avgLoopTime_(std::chrono::seconds(2)),
maxLatencyLoopTime_(avgLoopTime_),
enableTimeMeasurement_(enableTimeMeasurement),
nextLoopCnt_(
uint64_t(-40)) // Early wrap-around so bugs will manifest soon
,
latestLoopCnt_(nextLoopCnt_),
startWork_(),
observer_(nullptr),
observerSampleCount_(0),
executionObserver_(nullptr) {
if (UNLIKELY(evb_ == nullptr)) {
LOG(ERROR) << "EventBase(): Pass nullptr as event base.";
throw std::invalid_argument("EventBase(): event base cannot be nullptr");
@@ -233,8 +237,8 @@ void EventBase::resetLoadAvg(double value) {
maxLatencyLoopTime_.reset(value);
}
static std::chrono::milliseconds
getTimeDelta(std::chrono::steady_clock::time_point* prev) {
static std::chrono::milliseconds getTimeDelta(
std::chrono::steady_clock::time_point* prev) {
auto result = std::chrono::steady_clock::now() - *prev;
*prev = std::chrono::steady_clock::now();
@@ -314,7 +318,7 @@ bool EventBase::loopBody(int flags, bool ignoreKeepAlive) {
LoopCallbackList callbacks;
callbacks.swap(runBeforeLoopCallbacks_);
while(!callbacks.empty()) {
while (!callbacks.empty()) {
auto* item = &callbacks.front();
callbacks.pop_front();
item->runLoopCallback();
@@ -388,8 +392,8 @@ bool EventBase::loopBody(int flags, bool ignoreKeepAlive) {
}
if (enableTimeMeasurement_) {
VLOG(11) << "EventBase " << this << " loop time: " <<
getTimeDelta(&prev).count();
VLOG(11) << "EventBase " << this
<< " loop time: " << getTimeDelta(&prev).count();
}
if (once) {
@@ -473,8 +477,8 @@ void EventBase::bumpHandlingTime() {
return;
}
VLOG(11) << "EventBase " << this << " " << __PRETTY_FUNCTION__ <<
" (loop) latest " << latestLoopCnt_ << " next " << nextLoopCnt_;
VLOG(11) << "EventBase " << this << " " << __PRETTY_FUNCTION__
<< " (loop) latest " << latestLoopCnt_ << " next " << nextLoopCnt_;
if (nothingHandledYet()) {
latestLoopCnt_ = nextLoopCnt_;
// set the time
@@ -686,9 +690,7 @@ bool EventBase::nothingHandledYet() const noexcept {
return (nextLoopCnt_ != latestLoopCnt_);
}
void EventBase::attachTimeoutManager(AsyncTimeout* obj,
InternalEnum internal) {
void EventBase::attachTimeoutManager(AsyncTimeout* obj, InternalEnum internal) {
struct event* ev = obj->getEvent();
assert(ev->ev_base == nullptr);
@@ -705,7 +707,8 @@ void EventBase::detachTimeoutManager(AsyncTimeout* obj) {
ev->ev_base = nullptr;
}
bool EventBase::scheduleTimeout(AsyncTimeout* obj,
bool EventBase::scheduleTimeout(
AsyncTimeout* obj,
TimeoutManager::timeout_type timeout) {
dcheckIsInEventBaseThread();
// Set up the timeval and add the event
@@ -738,8 +741,7 @@ void EventBase::setName(const std::string& name) {
name_ = name;
if (isRunning()) {
setThreadName(loopThread_.load(std::memory_order_relaxed),
name_);
setThreadName(loopThread_.load(std::memory_order_relaxed), name_);
}
}
@@ -755,8 +757,12 @@ void EventBase::scheduleAt(Func&& fn, TimePoint const& timeout) {
std::chrono::duration_cast<std::chrono::milliseconds>(duration));
}
const char* EventBase::getLibeventVersion() { return event_get_version(); }
const char* EventBase::getLibeventMethod() { return event_get_method(); }
const char* EventBase::getLibeventVersion() {
return event_get_version();
}
const char* EventBase::getLibeventMethod() {
return event_get_method();
}
VirtualEventBase& EventBase::getVirtualEventBase() {
folly::call_once(virtualEventBaseInitFlag_, [&] {
@@ -73,8 +73,7 @@ class EventBaseObserver {
virtual uint32_t getSampleRate() const = 0;
virtual void loopSample(
int64_t busyTime, int64_t idleTime) = 0;
virtual void loopSample(int64_t busyTime, int64_t idleTime) = 0;
};
// Helper class that sets and retrieves the EventBase associated with a given
@@ -163,9 +162,9 @@ class EventBase : private boost::noncopyable,
}
private:
typedef boost::intrusive::list<
LoopCallback,
boost::intrusive::constant_time_size<false> > List;
typedef boost::intrusive::
list<LoopCallback, boost::intrusive::constant_time_size<false>>
List;
// EventBase needs access to LoopCallbackList (and therefore to hook_)
friend class EventBase;
@@ -257,8 +256,8 @@ class EventBase : private boost::noncopyable,
/**
* Same as loop(), but doesn't wait for all keep-alive tokens to be released.
*/
[[deprecated("This should only be used in legacy unit tests")]]
bool loopIgnoreKeepAlive();
[[deprecated("This should only be used in legacy unit tests")]] bool
loopIgnoreKeepAlive();
/**
* Wait for some events to become active, run them, then return.
@@ -535,7 +534,9 @@ class EventBase : private boost::noncopyable,
// Avoid using these functions if possible. These functions are not
// guaranteed to always be present if we ever provide alternative EventBase
// implementations that do not use libevent internally.
event_base* getLibeventBase() const { return evb_; }
event_base* getLibeventBase() const {
return evb_;
}
static const char* getLibeventVersion();
static const char* getLibeventMethod();
@@ -780,7 +781,8 @@ class EventBase : private boost::noncopyable,
// see EventBaseLocal
friend class detail::EventBaseLocalBase;
template <typename T> friend class EventBaseLocal;
template <typename T>
friend class EventBaseLocal;
std::unordered_map<uint64_t, std::shared_ptr<void>> localStorage_;
std::unordered_set<detail::EventBaseLocalBaseBase*> localStorageToDtor_;
@@ -19,12 +19,13 @@
#include <atomic>
#include <thread>
namespace folly { namespace detail {
namespace folly {
namespace detail {
EventBaseLocalBase::~EventBaseLocalBase() {
auto locked = eventBases_.rlock();
for (auto* evb : *locked) {
evb->runInEventBaseThread([ this, evb, key = key_ ] {
evb->runInEventBaseThread([this, evb, key = key_] {
evb->localStorage_.erase(key);
evb->localStorageToDtor_.erase(this);
});
@@ -55,8 +56,7 @@ void EventBaseLocalBase::onEventBaseDestruction(EventBase& evb) {
void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) {
evb.dcheckIsInEventBaseThread();
auto alreadyExists =
evb.localStorage_.find(key_) != evb.localStorage_.end();
auto alreadyExists = evb.localStorage_.find(key_) != evb.localStorage_.end();
evb.localStorage_.emplace(key_, std::move(ptr));
@@ -73,7 +73,7 @@ class EventBaseLocalBase : public EventBaseLocalBaseBase, boost::noncopyable {
template <typename T>
class EventBaseLocal : public detail::EventBaseLocalBase {
public:
EventBaseLocal(): EventBaseLocalBase() {}
EventBaseLocal() : EventBaseLocalBase() {}
T* get(EventBase& evb) {
return static_cast<T*>(getVoid(evb));
......
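A short usage sketch for `EventBaseLocal<T>`: `get()` appears in the hunk above, while `emplace()` is assumed from the same class and, like `setVoid`, must run on the owning EventBase's thread:

```cpp
#include <folly/io/async/EventBase.h>
#include <folly/io/async/EventBaseLocal.h>

folly::EventBaseLocal<int> requestCount; // one independent slot per EventBase

// Must be called from evb's thread, matching the dcheck in setVoid above.
void bump(folly::EventBase& evb) {
  if (int* count = requestCount.get(evb)) {
    ++*count;
  } else {
    requestCount.emplace(evb, 1); // emplace() assumed from the same class
  }
}
```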
......@@ -34,18 +34,17 @@ EventBaseManager* EventBaseManager::get() {
} else {
return new_mgr;
}
}
/*
* EventBaseManager methods
*/
void EventBaseManager::setEventBase(EventBase *eventBase,
bool takeOwnership) {
EventBaseInfo *info = localStore_.get();
void EventBaseManager::setEventBase(EventBase* eventBase, bool takeOwnership) {
EventBaseInfo* info = localStore_.get();
if (info != nullptr) {
throw std::runtime_error("EventBaseManager: cannot set a new EventBase "
throw std::runtime_error(
"EventBaseManager: cannot set a new EventBase "
"for this thread when one already exists");
}
......@@ -55,7 +54,7 @@ void EventBaseManager::setEventBase(EventBase *eventBase,
}
void EventBaseManager::clearEventBase() {
EventBaseInfo *info = localStore_.get();
EventBaseInfo* info = localStore_.get();
if (info != nullptr) {
this->untrackEventBase(info->eventBase);
this->localStore_.reset(nullptr);
......@@ -63,10 +62,10 @@ void EventBaseManager::clearEventBase() {
}
// XXX should this really be "const"?
EventBase * EventBaseManager::getEventBase() const {
EventBase* EventBaseManager::getEventBase() const {
// have one?
auto *info = localStore_.get();
if (! info) {
auto* info = localStore_.get();
if (!info) {
info = new EventBaseInfo();
localStore_.reset(info);
......@@ -81,7 +80,7 @@ EventBase * EventBaseManager::getEventBase() const {
// Simply removing the const causes trouble all over fbcode;
// lots of services build a const EventBaseManager and errors
// abound when we make this non-const.
(const_cast<EventBaseManager *>(this))->trackEventBase(info->eventBase);
(const_cast<EventBaseManager*>(this))->trackEventBase(info->eventBase);
}
return info->eventBase;
......
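The lazy-creation behavior above is easiest to see from the caller's side; a minimal sketch:

```cpp
#include <folly/io/async/EventBaseManager.h>

void runSoonOnThisThread() {
  // getEventBase() lazily creates, owns, and tracks an EventBase for the
  // calling thread, per the hunk above.
  folly::EventBase* evb = folly::EventBaseManager::get()->getEventBase();
  evb->runInEventBaseThread([] {
    // deferred work runs on this thread's event loop
  });
}
```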
......@@ -37,15 +37,12 @@ class EventBaseManager {
public:
// XXX Constructing an EventBaseManager directly is DEPRECATED and not
// encouraged. You should instead use the global singleton if possible.
EventBaseManager() {
}
EventBaseManager() {}
~EventBaseManager() {
}
~EventBaseManager() {}
explicit EventBaseManager(
const std::shared_ptr<EventBaseObserver>& observer
) : observer_(observer) {}
explicit EventBaseManager(const std::shared_ptr<EventBaseObserver>& observer)
: observer_(observer) {}
/**
* Get the global EventBaseManager for this program. Ideally all users
......@@ -87,7 +84,7 @@ class EventBaseManager {
* EventBase, to make sure the EventBaseManager points to the correct
* EventBase that is actually running in this thread.
*/
void setEventBase(EventBase *eventBase, bool takeOwnership);
void setEventBase(EventBase* eventBase, bool takeOwnership);
/**
* Clear the EventBase for this thread.
......@@ -107,22 +104,17 @@ class EventBaseManager {
// grab the mutex for the caller
std::lock_guard<std::mutex> g(*&eventBaseSetMutex_);
// give them only a const set to work with
const std::set<EventBase *>& constSet = eventBaseSet_;
const std::set<EventBase*>& constSet = eventBaseSet_;
runnable(constSet);
}
private:
struct EventBaseInfo {
EventBaseInfo(EventBase *evb, bool owned)
: eventBase(evb),
owned_(owned) {}
EventBaseInfo(EventBase* evb, bool owned) : eventBase(evb), owned_(owned) {}
EventBaseInfo()
: eventBase(new EventBase)
, owned_(true) {}
EventBaseInfo() : eventBase(new EventBase), owned_(true) {}
EventBase *eventBase;
EventBase* eventBase;
bool owned_;
~EventBaseInfo() {
if (owned_) {
......@@ -132,15 +124,15 @@ class EventBaseManager {
};
// Forbidden copy constructor and assignment operator
EventBaseManager(EventBaseManager const &);
EventBaseManager& operator=(EventBaseManager const &);
EventBaseManager(EventBaseManager const&);
EventBaseManager& operator=(EventBaseManager const&);
void trackEventBase(EventBase *evb) {
void trackEventBase(EventBase* evb) {
std::lock_guard<std::mutex> g(*&eventBaseSetMutex_);
eventBaseSet_.insert(evb);
}
void untrackEventBase(EventBase *evb) {
void untrackEventBase(EventBase* evb) {
std::lock_guard<std::mutex> g(*&eventBaseSetMutex_);
eventBaseSet_.erase(evb);
}
......@@ -150,7 +142,7 @@ class EventBaseManager {
// set of "active" EventBase instances
// (also see the mutex "eventBaseSetMutex_" below
// which governs access to this).
mutable std::set<EventBase *> eventBaseSet_;
mutable std::set<EventBase*> eventBaseSet_;
// a mutex to use as a guard for the above set
std::mutex eventBaseSetMutex_;
......
......@@ -31,7 +31,7 @@
#if defined(__GLIBC__) && !defined(__APPLE__)
#if __GLIBC_PREREQ(2, 9)
# define FOLLY_GLIBC_2_9
#define FOLLY_GLIBC_2_9
#endif
#endif
......@@ -59,19 +59,18 @@
#endif
#endif
enum
{
enum {
EFD_SEMAPHORE = 1,
#define EFD_SEMAPHORE EFD_SEMAPHORE
EFD_CLOEXEC = 02000000,
#define EFD_CLOEXEC EFD_CLOEXEC
EFD_NONBLOCK = 04000
#define EFD_NONBLOCK EFD_NONBLOCK
};
};
// http://www.kernel.org/doc/man-pages/online/pages/man2/eventfd.2.html
// Use the eventfd2 system call, as in glibc 2.9+
// (requires kernel 2.6.30+)
#define eventfd(initval, flags) syscall(__NR_eventfd2,(initval),(flags))
#define eventfd(initval, flags) syscall(__NR_eventfd2, (initval), (flags))
#endif /* !(defined(__GLIBC__) && __GLIBC_PREREQ(2, 9)) */
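Independent of the formatting change, typical use of the wrapped call looks like this; on glibc 2.9+ the same function comes from `<sys/eventfd.h>`, so the shim only matters on older systems:

```cpp
#include <cstdint>

#include <sys/eventfd.h> // glibc 2.9+; older systems rely on the shim above
#include <unistd.h>

int main() {
  int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (fd < 0) {
    return 1;
  }
  uint64_t one = 1;
  ::write(fd, &one, sizeof(one)); // increment the 64-bit counter
  uint64_t value = 0;
  ::read(fd, &value, sizeof(value)); // drains the counter; value == 1
  ::close(fd);
  return 0;
}
```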
......@@ -22,24 +22,26 @@
namespace folly {
# if LIBEVENT_VERSION_NUMBER <= 0x02010101
# define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_##name
# else
# define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_evcallback.evcb_##name
# endif
# define FOLLY_LIBEVENT_DEF_ACCESSORS(name) \
inline auto event_ref_##name(struct event* ev) -> \
decltype(std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) \
{ return std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); } \
inline auto event_ref_##name(struct event const* ev) -> \
decltype(std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) \
{ return std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); } \
#if LIBEVENT_VERSION_NUMBER <= 0x02010101
#define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_##name
#else
#define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_evcallback.evcb_##name
#endif
#define FOLLY_LIBEVENT_DEF_ACCESSORS(name) \
inline auto event_ref_##name(struct event* ev) \
->decltype(std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) { \
return std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); \
} \
inline auto event_ref_##name(struct event const* ev) \
->decltype(std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) { \
return std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); \
} \
//
FOLLY_LIBEVENT_DEF_ACCESSORS(flags)
# undef FOLLY_LIBEVENT_COMPAT_PLUCK
# undef FOLLY_LIBEVENT_DEF_ACCESSORS
#undef FOLLY_LIBEVENT_COMPAT_PLUCK
#undef FOLLY_LIBEVENT_DEF_ACCESSORS
/**
* low-level libevent utility functions
......@@ -49,8 +51,8 @@ class EventUtil {
static bool isEventRegistered(const struct event* ev) {
// If any of these flags are set, the event is registered.
enum {
EVLIST_REGISTERED = (EVLIST_INSERTED | EVLIST_ACTIVE |
EVLIST_TIMEOUT | EVLIST_SIGNAL)
EVLIST_REGISTERED =
(EVLIST_INSERTED | EVLIST_ACTIVE | EVLIST_TIMEOUT | EVLIST_SIGNAL)
};
return (event_ref_flags(ev) & EVLIST_REGISTERED);
}
......
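A small sketch exercising `isEventRegistered`; the libevent-1.x-style `event_set` initializer is an assumption about the available compat API:

```cpp
#include <event.h> // libevent

#include <folly/io/async/EventUtil.h>

int main() {
  struct event ev;
  // Initialize but do not register; no EVLIST_* flag is set yet.
  event_set(&ev, /*fd=*/-1, /*events=*/0, /*callback=*/nullptr, /*arg=*/nullptr);
  bool registered = folly::EventUtil::isEventRegistered(&ev);
  return registered ? 1 : 0; // expect 0: not registered until event_add()
}
```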
......@@ -50,7 +50,8 @@ HHWheelTimer::Callback::~Callback() {
}
}
void HHWheelTimer::Callback::setScheduled(HHWheelTimer* wheel,
void HHWheelTimer::Callback::setScheduled(
HHWheelTimer* wheel,
std::chrono::milliseconds timeout) {
assert(wheel_ == nullptr);
assert(expiration_ == decltype(expiration_){});
......@@ -108,7 +109,8 @@ HHWheelTimer::~HHWheelTimer() {
cancelAll();
}
void HHWheelTimer::scheduleTimeoutImpl(Callback* callback,
void HHWheelTimer::scheduleTimeoutImpl(
Callback* callback,
std::chrono::milliseconds timeout) {
auto nextTick = calcNextTick();
int64_t due = timeToWheelTicks(timeout) + nextTick;
......@@ -140,7 +142,8 @@ void HHWheelTimer::scheduleTimeoutImpl(Callback* callback,
list->push_back(*callback);
}
void HHWheelTimer::scheduleTimeout(Callback* callback,
void HHWheelTimer::scheduleTimeout(
Callback* callback,
std::chrono::milliseconds timeout) {
// Cancel the callback if it happens to be scheduled already.
callback->cancelTimeout();
......
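To ground the `scheduleTimeout` signatures above, a minimal sketch; `EventBase::timer()` returning the per-loop HHWheelTimer is assumed, as it is not part of this diff:

```cpp
#include <chrono>

#include <folly/io/async/EventBase.h>
#include <folly/io/async/HHWheelTimer.h>

class PrintOnce : public folly::HHWheelTimer::Callback {
 public:
  void timeoutExpired() noexcept override {
    // runs on the EventBase thread once the wheel tick containing the
    // deadline is processed
  }
};

int main() {
  folly::EventBase evb;
  PrintOnce cb;
  evb.timer().scheduleTimeout(&cb, std::chrono::milliseconds(100)); // assumed
  evb.loop(); // returns once no timeouts or events remain
  return 0;
}
```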
......@@ -133,17 +133,16 @@ class HHWheelTimer : private folly::AsyncTimeout,
expiration_ - now);
}
void setScheduled(HHWheelTimer* wheel,
std::chrono::milliseconds);
void setScheduled(HHWheelTimer* wheel, std::chrono::milliseconds);
void cancelTimeoutImpl();
HHWheelTimer* wheel_{nullptr};
std::chrono::steady_clock::time_point expiration_{};
int bucket_{-1};
typedef boost::intrusive::list<
Callback,
boost::intrusive::constant_time_size<false> > List;
typedef boost::intrusive::
list<Callback, boost::intrusive::constant_time_size<false>>
List;
std::shared_ptr<RequestContext> context_;
......@@ -209,9 +208,9 @@ class HHWheelTimer : private folly::AsyncTimeout,
* If the callback is already scheduled, this cancels the existing timeout
* before scheduling the new timeout.
*/
void scheduleTimeout(Callback* callback,
std::chrono::milliseconds timeout);
void scheduleTimeoutImpl(Callback* callback,
void scheduleTimeout(Callback* callback, std::chrono::milliseconds timeout);
void scheduleTimeoutImpl(
Callback* callback,
std::chrono::milliseconds timeout);
/**
......@@ -273,8 +272,8 @@ class HHWheelTimer : private folly::AsyncTimeout,
private:
// Forbidden copy constructor and assignment operator
HHWheelTimer(HHWheelTimer const &) = delete;
HHWheelTimer& operator=(HHWheelTimer const &) = delete;
HHWheelTimer(HHWheelTimer const&) = delete;
HHWheelTimer& operator=(HHWheelTimer const&) = delete;
// Methods inherited from AsyncTimeout
void timeoutExpired() noexcept override;
......
......@@ -35,10 +35,7 @@ class TimeoutManager {
typedef std::chrono::milliseconds timeout_type;
using Func = folly::Function<void()>;
enum class InternalEnum {
INTERNAL,
NORMAL
};
enum class InternalEnum { INTERNAL, NORMAL };
TimeoutManager();
......@@ -47,15 +44,15 @@ class TimeoutManager {
/**
* Attaches/detaches TimeoutManager to AsyncTimeout
*/
virtual void attachTimeoutManager(AsyncTimeout* obj,
virtual void attachTimeoutManager(
AsyncTimeout* obj,
InternalEnum internal) = 0;
virtual void detachTimeoutManager(AsyncTimeout* obj) = 0;
/**
* Schedules AsyncTimeout to fire after `timeout` milliseconds
*/
virtual bool scheduleTimeout(AsyncTimeout* obj,
timeout_type timeout) = 0;
virtual bool scheduleTimeout(AsyncTimeout* obj, timeout_type timeout) = 0;
/**
* Cancels the AsyncTimeout, if scheduled
......
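Since EventBase implements this TimeoutManager interface, the usual way to reach `scheduleTimeout` is through an AsyncTimeout subclass; a hedged sketch:

```cpp
#include <chrono>

#include <folly/io/async/AsyncTimeout.h>
#include <folly/io/async/EventBase.h>

// EventBase implements the TimeoutManager interface shown above.
class Ping : public folly::AsyncTimeout {
 public:
  explicit Ping(folly::EventBase* evb) : folly::AsyncTimeout(evb) {}
  void timeoutExpired() noexcept override {
    // fires once, roughly 10ms after scheduling
  }
};

int main() {
  folly::EventBase evb;
  Ping ping(&evb);
  ping.scheduleTimeout(std::chrono::milliseconds(10));
  evb.loop();
  return 0;
}
```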
......@@ -25,8 +25,8 @@ namespace folly {
* Helper class that redirects write() and writev() calls to writeChain().
*/
template <class T>
class WriteChainAsyncTransportWrapper :
public DecoratedAsyncTransportWrapper<T> {
class WriteChainAsyncTransportWrapper
: public DecoratedAsyncTransportWrapper<T> {
public:
using DecoratedAsyncTransportWrapper<T>::DecoratedAsyncTransportWrapper;
......
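The redirection the comment describes amounts to copying the flat buffer into an IOBuf chain; a conceptual stand-in (not folly's actual implementation):

```cpp
#include <cstddef>
#include <memory>

#include <folly/io/IOBuf.h>

// Conceptual stand-in for the wrapper's write(): copy the flat buffer into
// an IOBuf chain so only the chain-based writeChain() path is exercised.
std::unique_ptr<folly::IOBuf> toChain(const void* buf, size_t bytes) {
  return folly::IOBuf::copyBuffer(buf, bytes);
}

int main() {
  const char data[] = "hello";
  auto chain = toChain(data, sizeof(data) - 1);
  return chain->computeChainDataLength() == 5 ? 0 : 1;
}
```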
......@@ -71,7 +71,8 @@ bool OpenSSLUtils::getTLSClientRandom(
return false;
}
bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx,
bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(
X509_STORE_CTX* ctx,
sockaddr_storage* addrStorage,
socklen_t* addrLen) {
// Grab the ssl idx and then the ssl object so that we can get the peer
......@@ -93,7 +94,8 @@ bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx,
return true;
}
bool OpenSSLUtils::validatePeerCertNames(X509* cert,
bool OpenSSLUtils::validatePeerCertNames(
X509* cert,
const sockaddr* addr,
socklen_t /* addrLen */) {
// Try to extract the names within the SAN extension from the certificate
......
......@@ -62,9 +62,8 @@ class OpenSSLUtils {
*/
// TODO(agartrell): Add support for things like common name when
// necessary.
static bool validatePeerCertNames(X509* cert,
const sockaddr* addr,
socklen_t addrLen);
static bool
validatePeerCertNames(X509* cert, const sockaddr* addr, socklen_t addrLen);
/**
* Get the peer socket address from an X509_STORE_CTX*. Unlike the
......@@ -76,7 +75,8 @@ class OpenSSLUtils {
* @param addrLen out param for length of address
* @return true on success, false on failure
*/
static bool getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx,
static bool getPeerAddressFromX509StoreCtx(
X509_STORE_CTX* ctx,
sockaddr_storage* addrStorage,
socklen_t* addrLen);
......
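A hypothetical OpenSSL verify callback wiring the two helpers together; the header path and the `folly::ssl` namespace are assumptions, as the diff does not show them:

```cpp
#include <sys/socket.h>

#include <openssl/ssl.h>
#include <openssl/x509.h>

#include <folly/io/async/ssl/OpenSSLUtils.h> // assumed header path

// Hypothetical verify callback: reject the handshake when the presented
// certificate's names do not cover the peer's address.
int verifyPeer(int preverifyOk, X509_STORE_CTX* ctx) {
  if (preverifyOk != 1) {
    return preverifyOk;
  }
  sockaddr_storage addrStorage;
  socklen_t addrLen = 0;
  if (!folly::ssl::OpenSSLUtils::getPeerAddressFromX509StoreCtx(
          ctx, &addrStorage, &addrLen)) {
    return 0;
  }
  X509* cert = X509_STORE_CTX_get_current_cert(ctx);
  return folly::ssl::OpenSSLUtils::validatePeerCertNames(
             cert, reinterpret_cast<const sockaddr*>(&addrStorage), addrLen)
      ? 1
      : 0;
}
```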