Commit 928ed23a authored by Nathan Bronson, committed by Facebook Github Bot

clang-format folly/io subdir

Summary: Automated reformat of folly/io subdir in preparation for other changes

Reviewed By: yfeldblum

Differential Revision: D8559473

fbshipit-source-id: c94d9c05ee77e25b6a61ee7a47b472ccea1f18f3
parent 38589135
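The change is described as an automated reformat, so it carries no behavioral edits. As a rough illustration only (the exact command or script used is not recorded in this commit), an in-place pass over one subdirectory could be driven as below, assuming a C++17 toolchain and that clang-format is on PATH and picks up the repository's own .clang-format configuration:

#include <cstdlib>
#include <filesystem>
#include <string>

// Hypothetical driver for an in-place clang-format pass over folly/io.
// Illustrative only; the real reformat may have used a different tool.
int main() {
  namespace fs = std::filesystem;
  for (const auto& entry : fs::recursive_directory_iterator("folly/io")) {
    const auto ext = entry.path().extension();
    if (entry.is_regular_file() && (ext == ".h" || ext == ".cpp")) {
      // clang-format -i rewrites the file using the nearest .clang-format.
      const std::string cmd = "clang-format -i " + entry.path().string();
      std::system(cmd.c_str());
    }
  }
  return 0;
}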
@@ -41,8 +41,8 @@ void Appender::vprintf(const char* fmt, va_list ap) {
   };
   // First try writing into our available data space.
-  int ret = vsnprintf(reinterpret_cast<char*>(writableData()), length(),
-                      fmt, ap);
+  int ret =
+      vsnprintf(reinterpret_cast<char*>(writableData()), length(), fmt, ap);
   if (ret < 0) {
     throw std::runtime_error("error formatting printf() data");
   }
@@ -58,15 +58,16 @@ void Appender::vprintf(const char* fmt, va_list ap) {
   // There wasn't enough room for the data.
   // Allocate more room, and then retry.
   ensure(len + 1);
-  ret = vsnprintf(reinterpret_cast<char*>(writableData()), length(),
-                  fmt, apCopy);
+  ret =
+      vsnprintf(reinterpret_cast<char*>(writableData()), length(), fmt, apCopy);
   if (ret < 0) {
     throw std::runtime_error("error formatting printf() data");
   }
   len = size_t(ret);
   if (len >= length()) {
     // This shouldn't ever happen.
-    throw std::runtime_error("unexpectedly out of buffer space on second "
-                             "vsnprintf() attmept");
+    throw std::runtime_error(
+        "unexpectedly out of buffer space on second "
+        "vsnprintf() attmept");
   }
   append(len);
...
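For context, the code being reformatted above is the usual two-pass vsnprintf pattern: format into the currently available space, and if the output was truncated, grow the buffer and retry with a copy of the va_list (the first pass consumes the original list). A self-contained sketch of that pattern, independent of folly's Appender, might look like:

#include <cstdarg>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

// Generic try-then-retry vsnprintf; not folly code, just the same pattern
// the Appender::vprintf hunk above reformats.
std::string vformat(const char* fmt, va_list ap) {
  va_list apCopy;
  va_copy(apCopy, ap); // keep a copy: the first vsnprintf consumes `ap`

  std::vector<char> buf(64);
  int ret = vsnprintf(buf.data(), buf.size(), fmt, ap);
  if (ret < 0) {
    va_end(apCopy);
    throw std::runtime_error("error formatting printf() data");
  }
  if (size_t(ret) >= buf.size()) {
    // Truncated: grow to ret + 1 (room for the NUL) and retry with the copy.
    buf.resize(size_t(ret) + 1);
    ret = vsnprintf(buf.data(), buf.size(), fmt, apCopy);
  }
  va_end(apCopy);
  return std::string(buf.data(), ret < 0 ? 0 : size_t(ret));
}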
@@ -55,7 +55,9 @@ namespace detail {
 template <class Derived, class BufType>
 class CursorBase {
   // Make all the templated classes friends for copy constructor.
-  template <class D, typename B> friend class CursorBase;
+  template <class D, typename B>
+  friend class CursorBase;
+
  public:
   explicit CursorBase(BufType* buf) : crtBuf_(buf), buffer_(buf) {
     if (crtBuf_) {
@@ -166,7 +168,8 @@ class CursorBase {
     // We are at the end of a buffer, but it isn't the last buffer.
     // We might still be at the end if the remaining buffers in the chain are
     // empty.
-    const IOBuf* buf = crtBuf_->next();;
+    const IOBuf* buf = crtBuf_->next();
+    ;
     while (buf != buffer_) {
       if (buf->length() > 0) {
         return false;
@@ -516,7 +519,7 @@ class CursorBase {
    * Return the distance between two cursors.
    */
   size_t operator-(const CursorBase& other) const {
-    BufType *otherBuf = other.crtBuf_;
+    BufType* otherBuf = other.crtBuf_;
     size_t len = 0;
     if (otherBuf != crtBuf_) {
@@ -572,7 +575,7 @@ class CursorBase {
         (uint64_t)(crtEnd_ - crtBegin_) == crtBuf_->length());
   }
-  ~CursorBase() { }
+  ~CursorBase() {}
   BufType* head() {
     return buffer_;
@@ -629,7 +632,7 @@ class CursorBase {
   }
   void readFixedStringSlow(std::string* str, size_t len) {
-    for (size_t available; (available = length()) < len; ) {
+    for (size_t available; (available = length()) < len;) {
       str->append(reinterpret_cast<const char*>(data()), available);
       if (UNLIKELY(!tryAdvanceBuffer())) {
         throw_exception<std::out_of_range>("string underflow");
@@ -650,7 +653,7 @@ class CursorBase {
     }
     uint8_t* p = reinterpret_cast<uint8_t*>(buf);
     size_t copied = 0;
-    for (size_t available; (available = length()) < len; ) {
+    for (size_t available; (available = length()) < len;) {
       memcpy(p, data(), available);
       copied += available;
       if (UNLIKELY(!tryAdvanceBuffer())) {
@@ -673,7 +676,7 @@ class CursorBase {
   size_t skipAtMostSlow(size_t len) {
     size_t skipped = 0;
-    for (size_t available; (available = length()) < len; ) {
+    for (size_t available; (available = length()) < len;) {
       skipped += available;
       if (UNLIKELY(!tryAdvanceBuffer())) {
         return skipped;
@@ -710,8 +713,7 @@ class CursorBase {
     }
   }
-  void advanceDone() {
-  }
+  void advanceDone() {}
 };
 } // namespace detail
@@ -732,8 +734,7 @@ template <class Derived>
 class Writable {
  public:
   template <class T>
-  typename std::enable_if<std::is_arithmetic<T>::value>::type
-  write(T value) {
+  typename std::enable_if<std::is_arithmetic<T>::value>::type write(T value) {
     const uint8_t* u8 = reinterpret_cast<const uint8_t*>(&value);
     Derived* d = static_cast<Derived*>(this);
     d->push(u8, sizeof(T));
@@ -782,7 +783,7 @@ class Writable {
   size_t pushAtMost(Cursor cursor, size_t len) {
     size_t written = 0;
-    for(;;) {
+    for (;;) {
       auto currentBuffer = cursor.peekBytes();
       const uint8_t* crtData = currentBuffer.data();
       size_t available = currentBuffer.size();
@@ -808,20 +809,16 @@
 } // namespace detail
-enum class CursorAccess {
-  PRIVATE,
-  UNSHARE
-};
+enum class CursorAccess { PRIVATE, UNSHARE };
 template <CursorAccess access>
-class RWCursor
-    : public detail::CursorBase<RWCursor<access>, IOBuf>,
-      public detail::Writable<RWCursor<access>> {
+class RWCursor : public detail::CursorBase<RWCursor<access>, IOBuf>,
+                 public detail::Writable<RWCursor<access>> {
   friend class detail::CursorBase<RWCursor<access>, IOBuf>;
  public:
   explicit RWCursor(IOBuf* buf)
-      : detail::CursorBase<RWCursor<access>, IOBuf>(buf),
-        maybeShared_(true) {}
+      : detail::CursorBase<RWCursor<access>, IOBuf>(buf), maybeShared_(true) {}
   template <class OtherDerived, class OtherBuf>
   explicit RWCursor(const detail::CursorBase<OtherDerived, OtherBuf>& cursor)
@@ -972,10 +969,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor;
 class Appender : public detail::Writable<Appender> {
  public:
   Appender(IOBuf* buf, uint64_t growth)
-      : buffer_(buf),
-        crtBuf_(buf->prev()),
-        growth_(growth) {
-  }
+      : buffer_(buf), crtBuf_(buf->prev()), growth_(growth) {}
   uint8_t* writableData() {
     return crtBuf_->writableTail();
...
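The Appender and Cursor classes reformatted above are folly's write/read views over an IOBuf chain: an Appender grows the tail of the chain as data is pushed, and a Cursor walks the chain while reading. A brief illustrative sketch (the capacities and values are arbitrary, not taken from this commit):

#include <folly/io/Cursor.h>
#include <folly/io/IOBuf.h>
#include <cstdint>
#include <string>

// Illustrative round trip through Appender and Cursor; sizes are arbitrary.
void cursorRoundTrip() {
  auto buf = folly::IOBuf::create(64); // empty buffer, 64 bytes of capacity

  folly::io::Appender appender(buf.get(), /*growth=*/64);
  appender.write<uint32_t>(42); // Writable<Derived>::write(T), host order
  appender.push(reinterpret_cast<const uint8_t*>("hi"), 2);

  folly::io::Cursor cursor(buf.get());
  uint32_t value = cursor.read<uint32_t>(); // 42
  std::string tag = cursor.readFixedString(2); // "hi"
  (void)value;
  (void)tag;
}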
@@ -60,7 +60,9 @@ enum : uint64_t {
 };
 // Helper function for IOBuf::takeOwnership()
-void takeOwnershipError(bool freeOnError, void* buf,
+void takeOwnershipError(
+    bool freeOnError,
+    void* buf,
     folly::IOBuf::FreeFunction freeFn,
     void* userData) {
   if (!freeOnError) {
@@ -108,25 +110,21 @@ struct IOBuf::HeapStorage {
 struct IOBuf::HeapFullStorage {
   // Make sure jemalloc allocates from the 64-byte class. Putting this here
   // because HeapStorage is private so it can't be at namespace level.
-  static_assert(sizeof(HeapStorage) <= 64,
-                "IOBuf may not grow over 56 bytes!");
+  static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!");
   HeapStorage hs;
   SharedInfo shared;
   folly::max_align_t align;
 };
-IOBuf::SharedInfo::SharedInfo()
-    : freeFn(nullptr),
-      userData(nullptr) {
+IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr) {
   // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
   // no other threads should be referring to it yet.
   refcount.store(1, std::memory_order_relaxed);
 }
 IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
-    : freeFn(fn),
-      userData(arg) {
+    : freeFn(fn), userData(arg) {
   // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
   // no other threads should be referring to it yet.
   refcount.store(1, std::memory_order_relaxed);
@@ -144,7 +142,9 @@ void* IOBuf::operator new(size_t size) {
   return &(storage->buf);
 }
-void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }
+void* IOBuf::operator new(size_t /* size */, void* ptr) {
+  return ptr;
+}
 void IOBuf::operator delete(void* ptr) {
   auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
@@ -202,7 +202,8 @@ IOBuf::IOBuf(CreateOp, uint64_t capacity)
   data_ = buf_;
 }
-IOBuf::IOBuf(CopyBufferOp /* op */,
+IOBuf::IOBuf(
+    CopyBufferOp /* op */,
     const void* buf,
     uint64_t size,
     uint64_t headroom,
@@ -216,10 +217,12 @@ IOBuf::IOBuf(CopyBufferOp /* op */,
   }
 }
-IOBuf::IOBuf(CopyBufferOp op, ByteRange br,
-             uint64_t headroom, uint64_t minTailroom)
-    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {
-}
+IOBuf::IOBuf(
+    CopyBufferOp op,
+    ByteRange br,
+    uint64_t headroom,
+    uint64_t minTailroom)
+    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}
 unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
   // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
@@ -251,8 +254,12 @@ unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
   uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
   size_t actualCapacity = size_t(storageEnd - bufAddr);
   unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
-      InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
-      bufAddr, actualCapacity, bufAddr, 0));
+      InternalConstructor(),
+      packFlagsAndSharedInfo(0, &storage->shared),
+      bufAddr,
+      actualCapacity,
+      bufAddr,
+      0));
   return ret;
 }
@@ -261,9 +268,10 @@ unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
 }
 unique_ptr<IOBuf> IOBuf::createChain(
-    size_t totalCapacity, uint64_t maxBufCapacity) {
-  unique_ptr<IOBuf> out = create(
-      std::min(totalCapacity, size_t(maxBufCapacity)));
+    size_t totalCapacity,
+    uint64_t maxBufCapacity) {
+  unique_ptr<IOBuf> out =
+      create(std::min(totalCapacity, size_t(maxBufCapacity)));
   size_t allocatedCapacity = out->capacity();
   while (allocatedCapacity < totalCapacity) {
@@ -276,8 +284,13 @@ unique_ptr<IOBuf> IOBuf::createChain(
   return out;
 }
-IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
-             FreeFunction freeFn, void* userData,
+IOBuf::IOBuf(
+    TakeOwnershipOp,
+    void* buf,
+    uint64_t capacity,
+    uint64_t length,
+    FreeFunction freeFn,
+    void* userData,
     bool freeOnError)
     : next_(this),
       prev_(this),
@@ -285,7 +298,8 @@ IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
       buf_(static_cast<uint8_t*>(buf)),
       length_(length),
       capacity_(capacity),
-      flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
+      flagsAndSharedInfo_(
+          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
   try {
     setSharedInfo(new SharedInfo(freeFn, userData));
   } catch (...) {
@@ -294,7 +308,9 @@ IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
   }
 }
-unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
+unique_ptr<IOBuf> IOBuf::takeOwnership(
+    void* buf,
+    uint64_t capacity,
     uint64_t length,
     FreeFunction freeFn,
     void* userData,
@@ -318,17 +334,18 @@ unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
 }
 IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
-    : IOBuf(InternalConstructor(), 0,
+    : IOBuf(
+          InternalConstructor(),
+          0,
           // We cast away the const-ness of the buffer here.
           // This is okay since IOBuf users must use unshare() to create a copy
          // of this buffer before writing to the buffer.
-          static_cast<uint8_t*>(const_cast<void*>(buf)), capacity,
-          static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {
-}
+          static_cast<uint8_t*>(const_cast<void*>(buf)),
+          capacity,
+          static_cast<uint8_t*>(const_cast<void*>(buf)),
+          capacity) {}
-IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
-    : IOBuf(op, br.data(), br.size()) {
-}
+IOBuf::IOBuf(WrapBufferOp op, ByteRange br) : IOBuf(op, br.data(), br.size()) {}
 unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
   return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
@@ -338,8 +355,7 @@ IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
   return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
 }
-IOBuf::IOBuf() noexcept {
-}
+IOBuf::IOBuf() noexcept {}
 IOBuf::IOBuf(IOBuf&& other) noexcept
     : data_(other.data_),
@@ -375,7 +391,8 @@ IOBuf::IOBuf(const IOBuf& other) {
   *this = other.cloneAsValue();
 }
-IOBuf::IOBuf(InternalConstructor,
+IOBuf::IOBuf(
+    InternalConstructor,
     uintptr_t flagsAndSharedInfo,
     uint8_t* buf,
     uint64_t capacity,
@@ -675,7 +692,8 @@ void IOBuf::coalesceSlow(size_t maxLength) {
       break;
     }
     if (end == this) {
-      throw std::overflow_error("attempted to coalesce more data than "
-                                "available");
+      throw std::overflow_error(
+          "attempted to coalesce more data than "
+          "available");
     }
   }
@@ -685,7 +703,8 @@ void IOBuf::coalesceSlow(size_t maxLength) {
   DCHECK_GE(length_, maxLength);
 }
-void IOBuf::coalesceAndReallocate(size_t newHeadroom,
+void IOBuf::coalesceAndReallocate(
+    size_t newHeadroom,
     size_t newLength,
     IOBuf* end,
     size_t newTailroom) {
@@ -744,8 +763,7 @@ void IOBuf::decrementRefcount() {
   }
   // Decrement the refcount
-  uint32_t newcnt = info->refcount.fetch_sub(
-      1, std::memory_order_acq_rel);
+  uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel);
   // Note that fetch_sub() returns the value before we decremented.
   // If it is 1, we were the only remaining user; if it is greater there are
   // still other users.
@@ -894,7 +912,8 @@ void IOBuf::freeExtBuffer() {
   }
 }
-void IOBuf::allocExtBuffer(uint64_t minCapacity,
+void IOBuf::allocExtBuffer(
+    uint64_t minCapacity,
     uint8_t** bufReturn,
     SharedInfo** infoReturn,
     uint64_t* capacityReturn) {
@@ -923,13 +942,15 @@ size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
   return goodMallocSize(minSize);
 }
-void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
+void IOBuf::initExtBuffer(
+    uint8_t* buf,
+    size_t mallocSize,
     SharedInfo** infoReturn,
     uint64_t* capacityReturn) {
   // Find the SharedInfo storage at the end of the buffer
   // and construct the SharedInfo.
   uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
-  SharedInfo* sharedInfo = new(infoStart) SharedInfo;
+  SharedInfo* sharedInfo = new (infoStart) SharedInfo;
   *capacityReturn = uint64_t(infoStart - buf);
   *infoReturn = sharedInfo;
@@ -951,8 +972,10 @@ fbstring IOBuf::moveToFbString() {
   // Ensure NUL terminated
   *writableTail() = 0;
-  fbstring str(reinterpret_cast<char*>(writableData()),
-               length(), capacity(),
+  fbstring str(
+      reinterpret_cast<char*>(writableData()),
+      length(),
+      capacity(),
       AcquireMallocatedString());
   if (flags() & kFlagFreeSharedInfo) {
...
(One collapsed file diff is not shown here.)
@@ -35,8 +35,7 @@ const size_t MAX_PACK_COPY = 4096;
 /**
  * Convenience function to append chain src to chain dst.
  */
-void
-appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
+void appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
   if (dst == nullptr) {
     dst = std::move(src);
   } else {
@@ -109,8 +108,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
   return *this;
 }
-std::pair<void*, uint64_t>
-IOBufQueue::headroom() {
+std::pair<void*, uint64_t> IOBufQueue::headroom() {
   // Note, headroom is independent from the tail, so we don't need to flush the
   // cache.
   if (head_) {
@@ -120,8 +118,7 @@ IOBufQueue::headroom() {
   }
 }
-void
-IOBufQueue::markPrepended(uint64_t n) {
+void IOBufQueue::markPrepended(uint64_t n) {
   if (n == 0) {
     return;
   }
@@ -132,8 +129,7 @@ IOBufQueue::markPrepended(uint64_t n) {
   chainLength_ += n;
 }
-void
-IOBufQueue::prepend(const void* buf, uint64_t n) {
+void IOBufQueue::prepend(const void* buf, uint64_t n) {
   // We're not touching the tail, so we don't need to flush the cache.
   auto hroom = head_->headroom();
   if (!head_ || hroom < n) {
@@ -144,8 +140,7 @@ IOBufQueue::prepend(const void* buf, uint64_t n) {
   chainLength_ += n;
 }
-void
-IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
+void IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
   if (!buf) {
     return;
   }
@@ -156,8 +151,7 @@ IOBufQueue::append(unique_ptr<IOBuf>&& buf, bool pack) {
   appendToChain(head_, std::move(buf), pack);
 }
-void
-IOBufQueue::append(IOBufQueue& other, bool pack) {
+void IOBufQueue::append(IOBufQueue& other, bool pack) {
   if (!other.head_) {
     return;
   }
@@ -175,16 +169,16 @@ IOBufQueue::append(IOBufQueue& other, bool pack) {
   other.chainLength_ = 0;
 }
-void
-IOBufQueue::append(const void* buf, size_t len) {
+void IOBufQueue::append(const void* buf, size_t len) {
   auto guard = updateGuard();
   auto src = static_cast<const uint8_t*>(buf);
   while (len != 0) {
     if ((head_ == nullptr) || head_->prev()->isSharedOne() ||
         (head_->prev()->tailroom() == 0)) {
-      appendToChain(head_,
-                    IOBuf::create(std::max(MIN_ALLOC_SIZE,
-                                           std::min(len, MAX_ALLOC_SIZE))),
+      appendToChain(
+          head_,
+          IOBuf::create(
+              std::max(MIN_ALLOC_SIZE, std::min(len, MAX_ALLOC_SIZE))),
           false);
     }
     IOBuf* last = head_->prev();
@@ -197,8 +191,7 @@ IOBufQueue::append(const void* buf, size_t len) {
   }
 }
-void
-IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
+void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
   auto src = static_cast<const uint8_t*>(buf);
   while (len != 0) {
     size_t n = std::min(len, size_t(blockSize));
@@ -208,8 +201,9 @@ IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
   }
 }
-pair<void*,uint64_t>
-IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize,
+pair<void*, uint64_t> IOBufQueue::preallocateSlow(
+    uint64_t min,
+    uint64_t newAllocationSize,
     uint64_t max) {
   // Avoid grabbing update guard, since we're manually setting the cache ptrs.
   flushCache();
...
@@ -291,15 +291,14 @@ class IOBufQueue {
    * releasing the buffers), if possible. If pack is false, we leave
    * the chain topology unchanged.
    */
-  void append(std::unique_ptr<folly::IOBuf>&& buf,
-              bool pack=false);
+  void append(std::unique_ptr<folly::IOBuf>&& buf, bool pack = false);
   /**
    * Add a queue to the end of this queue. The queue takes ownership of
    * all buffers from the other queue.
    */
-  void append(IOBufQueue& other, bool pack=false);
-  void append(IOBufQueue&& other, bool pack=false) {
+  void append(IOBufQueue& other, bool pack = false);
+  void append(IOBufQueue&& other, bool pack = false) {
     append(other, pack); // call lvalue reference overload, above
   }
@@ -329,8 +328,10 @@ class IOBufQueue {
    * Every buffer except for the last will wrap exactly blockSize bytes.
    * Importantly, this method may be used to wrap buffers larger than 4GB.
    */
-  void wrapBuffer(const void* buf, size_t len,
-                  uint64_t blockSize=(1U << 31)); // default block size: 2GB
+  void wrapBuffer(
+      const void* buf,
+      size_t len,
+      uint64_t blockSize = (1U << 31)); // default block size: 2GB
   /**
    * Obtain a writable block of contiguous bytes at the end of this
@@ -352,8 +353,9 @@ class IOBufQueue {
    * callback, tell the application how much of the buffer they've
    * filled with data.
    */
-  std::pair<void*,uint64_t> preallocate(
-      uint64_t min, uint64_t newAllocationSize,
+  std::pair<void*, uint64_t> preallocate(
+      uint64_t min,
+      uint64_t newAllocationSize,
       uint64_t max = std::numeric_limits<uint64_t>::max()) {
     dcheckCacheIntegrity();
...
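The preallocate()/postallocate() pair whose declaration is reformatted above is how callers obtain a writable block at the tail of the queue and then commit only the bytes they actually produced. A hedged usage sketch (the sizes are arbitrary):

#include <folly/io/IOBufQueue.h>
#include <algorithm>
#include <cstring>

// Illustrative preallocate/postallocate round trip; sizes are arbitrary.
void fillQueue(folly::IOBufQueue& queue) {
  // Ask for at least 16 writable bytes; allocate 4096 at a time when growing.
  auto block = queue.preallocate(16, 4096);
  const char msg[] = "hello";
  const size_t n = std::min<uint64_t>(sizeof(msg) - 1, block.second);
  std::memcpy(block.first, msg, n);
  queue.postallocate(n); // commit only the bytes actually written
}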
@@ -30,11 +30,16 @@ class RecordIOReader::Iterator : public boost::iterator_facade<
                                      boost::forward_traversal_tag> {
   friend class boost::iterator_core_access;
   friend class RecordIOReader;
  private:
   Iterator(ByteRange range, uint32_t fileId, off_t pos);
-  reference dereference() const { return recordAndPos_; }
-  bool equal(const Iterator& other) const { return range_ == other.range_; }
+  reference dereference() const {
+    return recordAndPos_;
+  }
+  bool equal(const Iterator& other) const {
+    return range_ == other.range_;
+  }
   void increment() {
     size_t skip = recordio_helpers::headerSize() + recordAndPos_.first.size();
     recordAndPos_.second += off_t(skip);
@@ -49,10 +54,18 @@ class RecordIOReader::Iterator : public boost::iterator_facade<
   std::pair<ByteRange, off_t> recordAndPos_;
 };
-inline auto RecordIOReader::cbegin() const -> Iterator { return seek(0); }
-inline auto RecordIOReader::begin() const -> Iterator { return cbegin(); }
-inline auto RecordIOReader::cend() const -> Iterator { return seek(off_t(-1)); }
-inline auto RecordIOReader::end() const -> Iterator { return cend(); }
+inline auto RecordIOReader::cbegin() const -> Iterator {
+  return seek(0);
+}
+inline auto RecordIOReader::begin() const -> Iterator {
+  return cbegin();
+}
+inline auto RecordIOReader::cend() const -> Iterator {
+  return seek(off_t(-1));
+}
+inline auto RecordIOReader::end() const -> Iterator {
+  return cend();
+}
 inline auto RecordIOReader::seek(off_t pos) const -> Iterator {
   return Iterator(map_.range(), fileId_, pos);
 }
@@ -79,12 +92,15 @@ struct Header {
 } FOLLY_PACK_ATTR;
 FOLLY_PACK_POP
-static_assert(offsetof(Header, headerHash) + sizeof(Header::headerHash) ==
-              sizeof(Header), "invalid header layout");
+static_assert(
+    offsetof(Header, headerHash) + sizeof(Header::headerHash) == sizeof(Header),
+    "invalid header layout");
 } // namespace recordio_detail
-constexpr size_t headerSize() { return sizeof(recordio_detail::Header); }
+constexpr size_t headerSize() {
+  return sizeof(recordio_detail::Header);
+}
 inline RecordInfo findRecord(ByteRange range, uint32_t fileId) {
   return findRecord(range, range, fileId);
...
@@ -70,14 +70,10 @@ void RecordIOWriter::write(std::unique_ptr<IOBuf> buf) {
 }
 RecordIOReader::RecordIOReader(File file, uint32_t fileId)
-    : map_(std::move(file)),
-      fileId_(fileId) {
-}
+    : map_(std::move(file)), fileId_(fileId) {}
 RecordIOReader::Iterator::Iterator(ByteRange range, uint32_t fileId, off_t pos)
-    : range_(range),
-      fileId_(fileId),
-      recordAndPos_(ByteRange(), 0) {
+    : range_(range), fileId_(fileId), recordAndPos_(ByteRange(), 0) {
   if (size_t(pos) >= range_.size()) {
     // Note that this branch can execute if pos is negative as well.
     recordAndPos_.second = off_t(-1);
@@ -113,8 +109,8 @@ namespace {
 constexpr uint32_t kHashSeed = 0xdeadbeef; // for mcurtiss
 uint32_t headerHash(const Header& header) {
-  return hash::SpookyHashV2::Hash32(&header, offsetof(Header, headerHash),
-                                    kHashSeed);
+  return hash::SpookyHashV2::Hash32(
+      &header, offsetof(Header, headerHash), kHashSeed);
 }
 std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) {
@@ -177,10 +173,8 @@ RecordInfo validateRecord(ByteRange range, uint32_t fileId) {
   }
   const Header* header = reinterpret_cast<const Header*>(range.begin());
   range.advance(sizeof(Header));
-  if (header->magic != Header::kMagic ||
-      header->version != 0 ||
-      header->hashFunction != 0 ||
-      header->flags != 0 ||
+  if (header->magic != Header::kMagic || header->version != 0 ||
+      header->hashFunction != 0 || header->flags != 0 ||
       (fileId != 0 && header->fileId != fileId) ||
       header->dataLength > range.size()) {
     return {0, {}};
@@ -195,19 +189,18 @@ RecordInfo validateRecord(ByteRange range, uint32_t fileId) {
   return {header->fileId, range};
 }
-RecordInfo findRecord(ByteRange searchRange,
-                      ByteRange wholeRange,
-                      uint32_t fileId) {
+RecordInfo
+findRecord(ByteRange searchRange, ByteRange wholeRange, uint32_t fileId) {
   static const uint32_t magic = Header::kMagic;
-  static const ByteRange magicRange(reinterpret_cast<const uint8_t*>(&magic),
-                                    sizeof(magic));
+  static const ByteRange magicRange(
+      reinterpret_cast<const uint8_t*>(&magic), sizeof(magic));
   DCHECK_GE(searchRange.begin(), wholeRange.begin());
   DCHECK_LE(searchRange.end(), wholeRange.end());
   const uint8_t* start = searchRange.begin();
-  const uint8_t* end = std::min(searchRange.end(),
-                                wholeRange.end() - sizeof(Header));
+  const uint8_t* end =
+      std::min(searchRange.end(), wholeRange.end() - sizeof(Header));
   // end-1: the last place where a Header could start
   while (start < end) {
     auto p = ByteRange(start, end + sizeof(magic)).find(magicRange);
...
@@ -69,7 +69,9 @@ class RecordIOWriter {
    * Return the position in the file where the next byte will be written.
    * Conservative, as stuff can be written at any time from another thread.
    */
-  off_t filePos() const { return filePos_; }
+  off_t filePos() const {
+    return filePos_;
+  }
  private:
   File file_;
@@ -157,9 +159,8 @@ struct RecordInfo {
   uint32_t fileId;
   ByteRange record;
 };
-RecordInfo findRecord(ByteRange searchRange,
-                      ByteRange wholeRange,
-                      uint32_t fileId);
+RecordInfo
+findRecord(ByteRange searchRange, ByteRange wholeRange, uint32_t fileId);
 /**
  * Search for the first valid record in range.
...
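For context, the RecordIO classes touched above implement a simple append-only record log: every record carries a fixed header (magic, version, fileId, data length, header and data hashes), and a reader re-synchronizes after corruption by scanning for the next valid header, which is what findRecord() does. A hedged usage sketch (the path, open flags, and payloads are illustrative only):

#include <folly/File.h>
#include <folly/io/IOBuf.h>
#include <folly/io/RecordIO.h>
#include <fcntl.h>

// Illustrative RecordIO round trip; the path and payloads are made up.
void writeAndScan() {
  {
    folly::RecordIOWriter writer(
        folly::File("/tmp/example.rec", O_WRONLY | O_CREAT | O_TRUNC));
    writer.write(folly::IOBuf::copyBuffer("first record"));
    writer.write(folly::IOBuf::copyBuffer("second record"));
  }
  folly::RecordIOReader reader(folly::File("/tmp/example.rec"));
  for (const auto& entry : reader) {
    // entry.first is the payload (ByteRange), entry.second its file offset.
    folly::ByteRange payload = entry.first;
    (void)payload;
  }
}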
@@ -70,7 +70,7 @@ class ShutdownSocketSet : private boost::noncopyable {
    * read() and write() operations to the socket will fail. During normal
    * operation, just call ::shutdown() on the socket.
    */
-  void shutdown(int fd, bool abortive=false);
+  void shutdown(int fd, bool abortive = false);
   /**
    * Immediate shutdown of all connections. This is a hard-hitting hammer;
@@ -92,7 +92,7 @@ class ShutdownSocketSet : private boost::noncopyable {
    *
    * This is async-signal-safe and ignores errors.
    */
-  void shutdownAll(bool abortive=false);
+  void shutdownAll(bool abortive = false);
  private:
   void doShutdown(int fd, bool abortive);
...
@@ -38,6 +38,7 @@ namespace folly {
 template <class T>
 class TypedIOBuf {
   static_assert(std::is_standard_layout<T>::value, "must be standard layout");
+
  public:
   typedef T value_type;
   typedef value_type& reference;
@@ -46,7 +47,7 @@ class TypedIOBuf {
   typedef value_type* iterator;
   typedef const value_type* const_iterator;
-  explicit TypedIOBuf(IOBuf* buf) : buf_(buf) { }
+  explicit TypedIOBuf(IOBuf* buf) : buf_(buf) {}
   IOBuf* ioBuf() {
     return buf_;
@@ -73,7 +74,9 @@ class TypedIOBuf {
   uint32_t length() const {
     return sdiv(buf_->length());
   }
-  uint32_t size() const { return length(); }
+  uint32_t size() const {
+    return length();
+  }
   uint32_t headroom() const {
     return sdiv(buf_->headroom());
@@ -117,14 +120,28 @@ class TypedIOBuf {
   void reserve(uint32_t minHeadroom, uint32_t minTailroom) {
     buf_->reserve(smul(minHeadroom), smul(minTailroom));
   }
-  void reserve(uint32_t minTailroom) { reserve(0, minTailroom); }
+  void reserve(uint32_t minTailroom) {
+    reserve(0, minTailroom);
+  }
-  const T* cbegin() const { return data(); }
-  const T* cend() const { return tail(); }
-  const T* begin() const { return cbegin(); }
-  const T* end() const { return cend(); }
-  T* begin() { return writableData(); }
-  T* end() { return writableTail(); }
+  const T* cbegin() const {
+    return data();
+  }
+  const T* cend() const {
+    return tail();
+  }
+  const T* begin() const {
+    return cbegin();
+  }
+  const T* end() const {
+    return cend();
+  }
+  T* begin() {
+    return writableData();
+  }
+  T* end() {
+    return writableTail();
+  }
   const T& front() const {
     assert(!empty());
@@ -163,7 +180,9 @@ class TypedIOBuf {
   void push(const T& data) {
     push(&data, &data + 1);
   }
-  void push_back(const T& data) { push(data); }
+  void push_back(const T& data) {
+    push(data);
+  }
   /**
    * Append multiple elements in a sequence; will call distance().
...
@@ -18,10 +18,10 @@
 #include <folly/FileUtil.h>
 #include <folly/io/async/AsyncSocketException.h>
-using std::string;
-using std::unique_ptr;
 using folly::IOBuf;
 using folly::IOBufQueue;
+using std::string;
+using std::unique_ptr;
 namespace folly {
@@ -30,8 +30,8 @@ AsyncPipeReader::~AsyncPipeReader() {
 }
 void AsyncPipeReader::failRead(const AsyncSocketException& ex) {
-  VLOG(5) << "AsyncPipeReader(this=" << this << ", fd=" << fd_ <<
-    "): failed while reading: " << ex.what();
+  VLOG(5) << "AsyncPipeReader(this=" << this << ", fd=" << fd_
+          << "): failed while reading: " << ex.what();
   DCHECK(readCallback_ != nullptr);
   AsyncReader::ReadCallback* callback = readCallback_;
@@ -124,8 +124,8 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept {
       // No more data to read right now.
       return;
     } else if (bytesRead < 0) {
-      AsyncSocketException ex(AsyncSocketException::INVALID_STATE,
-                              "read failed", errno);
+      AsyncSocketException ex(
+          AsyncSocketException::INVALID_STATE, "read failed", errno);
       failRead(ex);
       return;
     } else {
@@ -142,13 +142,13 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept {
   }
 }
-void AsyncPipeWriter::write(unique_ptr<folly::IOBuf> buf,
+void AsyncPipeWriter::write(
+    unique_ptr<folly::IOBuf> buf,
     AsyncWriter::WriteCallback* callback) {
   if (closed()) {
     if (callback) {
-      AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
-                              "attempt to write to closed pipe");
+      AsyncSocketException ex(
+          AsyncSocketException::NOT_OPEN, "attempt to write to closed pipe");
       callback->writeErr(0, ex);
     }
     return;
@@ -167,7 +168,8 @@ void AsyncPipeWriter::write(unique_ptr<folly::IOBuf> buf,
   }
 }
-void AsyncPipeWriter::writeChain(folly::AsyncWriter::WriteCallback* callback,
+void AsyncPipeWriter::writeChain(
+    folly::AsyncWriter::WriteCallback* callback,
     std::unique_ptr<folly::IOBuf>&& buf,
     WriteFlags) {
   write(std::move(buf), callback);
@@ -186,8 +187,8 @@ void AsyncPipeWriter::closeOnEmpty() {
 void AsyncPipeWriter::closeNow() {
   VLOG(5) << "close now";
   if (!queue_.empty()) {
-    failAllWrites(AsyncSocketException(AsyncSocketException::NOT_OPEN,
-                                       "closed with pending writes"));
+    failAllWrites(AsyncSocketException(
+        AsyncSocketException::NOT_OPEN, "closed with pending writes"));
   }
   if (fd_ >= 0) {
     unregisterHandler();
@@ -213,7 +214,6 @@ void AsyncPipeWriter::failAllWrites(const AsyncSocketException& ex) {
   }
 }
-
 void AsyncPipeWriter::handlerReady(uint16_t events) noexcept {
   CHECK(events & EventHandler::WRITE);
@@ -225,7 +225,7 @@ void AsyncPipeWriter::handleWrite() {
   assert(!queue_.empty());
   do {
     auto& front = queue_.front();
-    folly::IOBufQueue &curQueue = front.first;
+    folly::IOBufQueue& curQueue = front.first;
     DCHECK(!curQueue.empty());
     // someday, support writev. The logic for partial writes is a bit complex
     const IOBuf* head = curQueue.front();
@@ -238,8 +238,8 @@ void AsyncPipeWriter::handleWrite() {
       registerHandler(EventHandler::WRITE);
       return;
    } else {
-      failAllWrites(AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
-                                         "write failed", errno));
+      failAllWrites(AsyncSocketException(
+          AsyncSocketException::INTERNAL_ERROR, "write failed", errno));
       closeNow();
       return;
     }
...
@@ -35,8 +35,9 @@ class AsyncPipeReader : public EventHandler,
                         public AsyncReader,
                         public DelayedDestruction {
  public:
-  typedef std::unique_ptr<AsyncPipeReader,
-                          folly::DelayedDestruction::Destructor> UniquePtr;
+  typedef std::
+      unique_ptr<AsyncPipeReader, folly::DelayedDestruction::Destructor>
+          UniquePtr;
   template <typename... Args>
   static UniquePtr newReader(Args&&... args) {
@@ -44,8 +45,7 @@ class AsyncPipeReader : public EventHandler,
   }
   AsyncPipeReader(folly::EventBase* eventBase, int pipeFd)
-      : EventHandler(eventBase, pipeFd),
-        fd_(pipeFd) {}
+      : EventHandler(eventBase, pipeFd), fd_(pipeFd) {}
   /**
    * Set the read callback and automatically install/uninstall the handler
@@ -96,8 +96,9 @@ class AsyncPipeWriter : public EventHandler,
                         public AsyncWriter,
                         public DelayedDestruction {
  public:
-  typedef std::unique_ptr<AsyncPipeWriter,
-                          folly::DelayedDestruction::Destructor> UniquePtr;
+  typedef std::
+      unique_ptr<AsyncPipeWriter, folly::DelayedDestruction::Destructor>
+          UniquePtr;
   template <typename... Args>
   static UniquePtr newWriter(Args&&... args) {
@@ -105,14 +106,14 @@ class AsyncPipeWriter : public EventHandler,
   }
   AsyncPipeWriter(folly::EventBase* eventBase, int pipeFd)
-      : EventHandler(eventBase, pipeFd),
-        fd_(pipeFd) {}
+      : EventHandler(eventBase, pipeFd), fd_(pipeFd) {}
   /**
    * Asynchronously write the given iobuf to this pipe, and invoke the callback
    * on success/error.
    */
-  void write(std::unique_ptr<folly::IOBuf> iob,
+  void write(
+      std::unique_ptr<folly::IOBuf> iob,
       AsyncWriter::WriteCallback* wcb = nullptr);
   /**
@@ -148,19 +149,22 @@ class AsyncPipeWriter : public EventHandler,
   }
   // AsyncWriter methods
-  void write(folly::AsyncWriter::WriteCallback* callback,
+  void write(
+      folly::AsyncWriter::WriteCallback* callback,
       const void* buf,
       size_t bytes,
       WriteFlags flags = WriteFlags::NONE) override {
     writeChain(callback, IOBuf::wrapBuffer(buf, bytes), flags);
   }
-  void writev(folly::AsyncWriter::WriteCallback*,
+  void writev(
+      folly::AsyncWriter::WriteCallback*,
       const iovec*,
      size_t,
       WriteFlags = WriteFlags::NONE) override {
     throw std::runtime_error("writev is not supported. Please use writeChain.");
   }
-  void writeChain(folly::AsyncWriter::WriteCallback* callback,
+  void writeChain(
+      folly::AsyncWriter::WriteCallback* callback,
      std::unique_ptr<folly::IOBuf>&& buf,
      WriteFlags flags = WriteFlags::NONE) override;
...
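The write() overloads reformatted above queue an IOBuf chain (or a raw buffer wrapped into one) for asynchronous writing on an EventBase; a null WriteCallback is allowed, as the default argument shows. A hedged sketch of driving a writer over one end of a pipe (error handling trimmed):

#include <folly/io/IOBuf.h>
#include <folly/io/async/AsyncPipe.h>
#include <folly/io/async/EventBase.h>
#include <unistd.h>

// Illustrative AsyncPipeWriter usage; fd handling is simplified and no
// WriteCallback is installed (the wcb = nullptr default is used).
void writeToPipe() {
  int fds[2];
  if (::pipe(fds) != 0) {
    return;
  }
  folly::EventBase evb;
  auto writer = folly::AsyncPipeWriter::newWriter(&evb, fds[1]);
  writer->write(folly::IOBuf::copyBuffer("hello pipe"));
  writer->closeOnEmpty(); // close the write end once the queue drains
  evb.loop();
  ::close(fds[0]);
}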
(Four collapsed file diffs are not shown here.)
@@ -26,8 +26,7 @@ using std::string;
 namespace folly {
 AsyncSignalHandler::AsyncSignalHandler(EventBase* eventBase)
-    : eventBase_(eventBase) {
-}
+    : eventBase_(eventBase) {}
 AsyncSignalHandler::~AsyncSignalHandler() {
   // Unregister any outstanding events
@@ -55,24 +54,21 @@ void AsyncSignalHandler::registerSignalHandler(int signum) {
       signalEvents_.insert(make_pair(signum, event()));
   if (!ret.second) {
     // This signal has already been registered
-    throw std::runtime_error(folly::to<string>(
-        "handler already registered for signal ",
-        signum));
+    throw std::runtime_error(
+        folly::to<string>("handler already registered for signal ", signum));
   }
   struct event* ev = &(ret.first->second);
   try {
     signal_set(ev, signum, libeventCallback, this);
-    if (event_base_set(eventBase_->getLibeventBase(), ev) != 0 ) {
+    if (event_base_set(eventBase_->getLibeventBase(), ev) != 0) {
       throw std::runtime_error(folly::to<string>(
-          "error initializing event handler for signal ",
-          signum));
+          "error initializing event handler for signal ", signum));
     }
     if (event_add(ev, nullptr) != 0) {
-      throw std::runtime_error(folly::to<string>(
-          "error adding event handler for signal ",
-          signum));
+      throw std::runtime_error(
+          folly::to<string>("error adding event handler for signal ", signum));
     }
   } catch (...) {
     signalEvents_.erase(ret.first);
@@ -85,14 +81,16 @@ void AsyncSignalHandler::unregisterSignalHandler(int signum) {
   if (it == signalEvents_.end()) {
     throw std::runtime_error(folly::to<string>(
         "unable to unregister handler for signal ",
-        signum, ": signal not registered"));
+        signum,
+        ": signal not registered"));
   }
   event_del(&it->second);
   signalEvents_.erase(it);
 }
-void AsyncSignalHandler::libeventCallback(libevent_fd_t signum,
+void AsyncSignalHandler::libeventCallback(
+    libevent_fd_t signum,
     short /* events */,
     void* arg) {
   AsyncSignalHandler* handler = static_cast<AsyncSignalHandler*>(arg);
...
@@ -105,8 +105,8 @@ class AsyncSignalHandler {
   typedef std::map<int, struct event> SignalEventMap;
   // Forbidden copy constructor and assignment operator
-  AsyncSignalHandler(AsyncSignalHandler const &);
-  AsyncSignalHandler& operator=(AsyncSignalHandler const &);
+  AsyncSignalHandler(AsyncSignalHandler const&);
+  AsyncSignalHandler& operator=(AsyncSignalHandler const&);
   static void libeventCallback(libevent_fd_t signum, short events, void* arg);
...
(Two collapsed file diffs are not shown here.)
@@ -26,32 +26,27 @@ namespace folly {
 AsyncTimeout::AsyncTimeout(TimeoutManager* timeoutManager)
     : timeoutManager_(timeoutManager) {
   folly_event_set(
       &event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
   event_.ev_base = nullptr;
   timeoutManager_->attachTimeoutManager(
-      this,
-      TimeoutManager::InternalEnum::NORMAL);
+      this, TimeoutManager::InternalEnum::NORMAL);
 }
-AsyncTimeout::AsyncTimeout(EventBase* eventBase)
-    : timeoutManager_(eventBase) {
+AsyncTimeout::AsyncTimeout(EventBase* eventBase) : timeoutManager_(eventBase) {
   folly_event_set(
       &event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
   event_.ev_base = nullptr;
   if (eventBase) {
     timeoutManager_->attachTimeoutManager(
-        this,
-        TimeoutManager::InternalEnum::NORMAL);
+        this, TimeoutManager::InternalEnum::NORMAL);
   }
 }
-AsyncTimeout::AsyncTimeout(TimeoutManager* timeoutManager,
+AsyncTimeout::AsyncTimeout(
+    TimeoutManager* timeoutManager,
     InternalEnum internal)
     : timeoutManager_(timeoutManager) {
   folly_event_set(
       &event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
   event_.ev_base = nullptr;
@@ -60,14 +55,13 @@ AsyncTimeout::AsyncTimeout(TimeoutManager* timeoutManager,
 AsyncTimeout::AsyncTimeout(EventBase* eventBase, InternalEnum internal)
     : timeoutManager_(eventBase) {
   folly_event_set(
       &event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
   event_.ev_base = nullptr;
   timeoutManager_->attachTimeoutManager(this, internal);
 }
-AsyncTimeout::AsyncTimeout(): timeoutManager_(nullptr) {
+AsyncTimeout::AsyncTimeout() : timeoutManager_(nullptr) {
   folly_event_set(
       &event_, -1, EV_TIMEOUT, &AsyncTimeout::libeventCallback, this);
   event_.ev_base = nullptr;
...
...@@ -121,9 +121,11 @@ class AsyncTimeout : private boost::noncopyable { ...@@ -121,9 +121,11 @@ class AsyncTimeout : private boost::noncopyable {
* internal event. TimeoutManager::loop() will return when there are no more * internal event. TimeoutManager::loop() will return when there are no more
* non-internal events remaining. * non-internal events remaining.
*/ */
void attachTimeoutManager(TimeoutManager* timeoutManager, void attachTimeoutManager(
TimeoutManager* timeoutManager,
InternalEnum internal = InternalEnum::NORMAL); InternalEnum internal = InternalEnum::NORMAL);
void attachEventBase(EventBase* eventBase, void attachEventBase(
EventBase* eventBase,
InternalEnum internal = InternalEnum::NORMAL); InternalEnum internal = InternalEnum::NORMAL);
/** /**
...@@ -175,9 +177,8 @@ class AsyncTimeout : private boost::noncopyable { ...@@ -175,9 +177,8 @@ class AsyncTimeout : private boost::noncopyable {
*/ */
template <typename TCallback> template <typename TCallback>
static std::unique_ptr<AsyncTimeout> make( static std::unique_ptr<AsyncTimeout> make(
TimeoutManager &manager, TimeoutManager& manager,
TCallback &&callback TCallback&& callback);
);
/** /**
* Convenience function that wraps a function object as * Convenience function that wraps a function object as
...@@ -211,9 +212,8 @@ class AsyncTimeout : private boost::noncopyable { ...@@ -211,9 +212,8 @@ class AsyncTimeout : private boost::noncopyable {
template <typename TCallback> template <typename TCallback>
static std::unique_ptr<AsyncTimeout> schedule( static std::unique_ptr<AsyncTimeout> schedule(
TimeoutManager::timeout_type timeout, TimeoutManager::timeout_type timeout,
TimeoutManager &manager, TimeoutManager& manager,
TCallback &&callback TCallback&& callback);
);
private: private:
static void libeventCallback(libevent_fd_t fd, short events, void* arg); static void libeventCallback(libevent_fd_t fd, short events, void* arg);
...@@ -239,20 +239,15 @@ namespace detail { ...@@ -239,20 +239,15 @@ namespace detail {
* @author: Marcelo Juchem <marcelo@fb.com> * @author: Marcelo Juchem <marcelo@fb.com>
*/ */
template <typename TCallback> template <typename TCallback>
struct async_timeout_wrapper: struct async_timeout_wrapper : public AsyncTimeout {
public AsyncTimeout
{
template <typename UCallback> template <typename UCallback>
async_timeout_wrapper(TimeoutManager *manager, UCallback &&callback): async_timeout_wrapper(TimeoutManager* manager, UCallback&& callback)
AsyncTimeout(manager), : AsyncTimeout(manager), callback_(std::forward<UCallback>(callback)) {}
callback_(std::forward<UCallback>(callback))
{}
void timeoutExpired() noexcept override { void timeoutExpired() noexcept override {
static_assert( static_assert(
noexcept(std::declval<TCallback>()()), noexcept(std::declval<TCallback>()()),
"callback must be declared noexcept, e.g.: `[]() noexcept {}`" "callback must be declared noexcept, e.g.: `[]() noexcept {}`");
);
callback_(); callback_();
} }
...@@ -264,23 +259,18 @@ struct async_timeout_wrapper: ...@@ -264,23 +259,18 @@ struct async_timeout_wrapper:
template <typename TCallback> template <typename TCallback>
std::unique_ptr<AsyncTimeout> AsyncTimeout::make( std::unique_ptr<AsyncTimeout> AsyncTimeout::make(
TimeoutManager &manager, TimeoutManager& manager,
TCallback &&callback TCallback&& callback) {
) {
return std::unique_ptr<AsyncTimeout>( return std::unique_ptr<AsyncTimeout>(
new detail::async_timeout_wrapper<typename std::decay<TCallback>::type>( new detail::async_timeout_wrapper<typename std::decay<TCallback>::type>(
std::addressof(manager), std::addressof(manager), std::forward<TCallback>(callback)));
std::forward<TCallback>(callback)
)
);
} }
template <typename TCallback> template <typename TCallback>
std::unique_ptr<AsyncTimeout> AsyncTimeout::schedule( std::unique_ptr<AsyncTimeout> AsyncTimeout::schedule(
TimeoutManager::timeout_type timeout, TimeoutManager::timeout_type timeout,
TimeoutManager &manager, TimeoutManager& manager,
TCallback &&callback TCallback&& callback) {
) {
auto wrapper = AsyncTimeout::make(manager, std::forward<TCallback>(callback)); auto wrapper = AsyncTimeout::make(manager, std::forward<TCallback>(callback));
wrapper->scheduleTimeout(timeout); wrapper->scheduleTimeout(timeout);
return wrapper; return wrapper;
......
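The `make()`/`schedule()` helpers above wrap a function object in a `detail::async_timeout_wrapper`, whose `static_assert` requires the callback to be `noexcept`. A minimal usage sketch, assuming an `EventBase` (which implements `TimeoutManager`) is available:

```cpp
#include <chrono>

#include <folly/io/async/AsyncTimeout.h>
#include <folly/io/async/EventBase.h>

void scheduleExample(folly::EventBase& evb) {
  // The callback must be declared noexcept, otherwise the static_assert in
  // detail::async_timeout_wrapper rejects it at compile time.
  auto timeout = folly::AsyncTimeout::schedule(
      std::chrono::milliseconds(100), evb, []() noexcept {
        // handle the timeout here
      });

  // Destroying the returned unique_ptr cancels the timeout if it has not
  // fired yet, so keep it alive for as long as the timeout should stand.
  evb.loop();
}
```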
...@@ -33,7 +33,7 @@ constexpr bool kOpenSslModeMoveBufferOwnership = ...@@ -33,7 +33,7 @@ constexpr bool kOpenSslModeMoveBufferOwnership =
#else #else
false false
#endif #endif
; ;
namespace folly { namespace folly {
...@@ -120,7 +120,6 @@ inline bool isSet(WriteFlags a, WriteFlags b) { ...@@ -120,7 +120,6 @@ inline bool isSet(WriteFlags a, WriteFlags b) {
return (a & b) == b; return (a & b) == b;
} }
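A tiny illustration of the `isSet()` helper: it tests that all bits of the second argument are present in the first, not just any of them. The flag names `CORK` and `EOR` are assumed members of `folly::WriteFlags` for the sake of the example:

```cpp
#include <folly/io/async/AsyncTransport.h>

void writeFlagsExample() {
  using folly::WriteFlags;
  WriteFlags f = WriteFlags::CORK | WriteFlags::EOR;

  bool hasCork = folly::isSet(f, WriteFlags::CORK);                   // true
  bool hasBoth = folly::isSet(f, WriteFlags::CORK | WriteFlags::EOR); // true
  bool allOfF = folly::isSet(WriteFlags::EOR, f); // false: CORK bit missing
  (void)hasCork;
  (void)hasBoth;
  (void)allOfF;
}
```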
/** /**
* AsyncTransport defines an asynchronous API for streaming I/O. * AsyncTransport defines an asynchronous API for streaming I/O.
* *
...@@ -382,7 +381,9 @@ class AsyncTransport : public DelayedDestruction, public AsyncSocketBase { ...@@ -382,7 +381,9 @@ class AsyncTransport : public DelayedDestruction, public AsyncSocketBase {
/** /**
* Get the certificate used to authenticate the peer. * Get the certificate used to authenticate the peer.
*/ */
virtual ssl::X509UniquePtr getPeerCert() const { return nullptr; } virtual ssl::X509UniquePtr getPeerCert() const {
return nullptr;
}
/** /**
* The local certificate used for this connection. May be null * The local certificate used for this connection. May be null
...@@ -459,7 +460,9 @@ class AsyncTransport : public DelayedDestruction, public AsyncSocketBase { ...@@ -459,7 +460,9 @@ class AsyncTransport : public DelayedDestruction, public AsyncSocketBase {
* False if the transport does not have replay protection, but will in the * False if the transport does not have replay protection, but will in the
* future. * future.
*/ */
virtual bool isReplaySafe() const { return true; } virtual bool isReplaySafe() const {
return true;
}
/** /**
* Set the ReplaySafeCallback on this transport. * Set the ReplaySafeCallback on this transport.
...@@ -580,8 +583,8 @@ class AsyncReader { ...@@ -580,8 +583,8 @@ class AsyncReader {
* @param readBuf The unique pointer of read buffer. * @param readBuf The unique pointer of read buffer.
*/ */
virtual void readBufferAvailable(std::unique_ptr<IOBuf> /*readBuf*/) virtual void readBufferAvailable(
noexcept {} std::unique_ptr<IOBuf> /*readBuf*/) noexcept {}
/** /**
* readEOF() will be invoked when the transport is closed. * readEOF() will be invoked when the transport is closed.
...@@ -636,16 +639,24 @@ class AsyncWriter { ...@@ -636,16 +639,24 @@ class AsyncWriter {
* @param bytesWritten The number of bytes that were successfully written * @param bytesWritten The number of bytes that were successfully written
* @param ex An exception describing the error that occurred. * @param ex An exception describing the error that occurred.
*/ */
virtual void writeErr(size_t bytesWritten, virtual void writeErr(
size_t bytesWritten,
const AsyncSocketException& ex) noexcept = 0; const AsyncSocketException& ex) noexcept = 0;
}; };
// Write methods that aren't part of AsyncTransport // Write methods that aren't part of AsyncTransport
virtual void write(WriteCallback* callback, const void* buf, size_t bytes, virtual void write(
WriteCallback* callback,
const void* buf,
size_t bytes,
WriteFlags flags = WriteFlags::NONE) = 0; WriteFlags flags = WriteFlags::NONE) = 0;
virtual void writev(WriteCallback* callback, const iovec* vec, size_t count, virtual void writev(
WriteCallback* callback,
const iovec* vec,
size_t count,
WriteFlags flags = WriteFlags::NONE) = 0; WriteFlags flags = WriteFlags::NONE) = 0;
virtual void writeChain(WriteCallback* callback, virtual void writeChain(
WriteCallback* callback,
std::unique_ptr<IOBuf>&& buf, std::unique_ptr<IOBuf>&& buf,
WriteFlags flags = WriteFlags::NONE) = 0; WriteFlags flags = WriteFlags::NONE) = 0;
......
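A hedged sketch of the `AsyncWriter` interface reformatted above: a minimal `WriteCallback` plus a `writeChain()` call. `writeSuccess()` is assumed to be the success counterpart of `writeErr()` (it is not visible in this hunk), and `transport` stands for any concrete implementation such as an `AsyncSocket`:

```cpp
#include <folly/io/IOBuf.h>
#include <folly/io/async/AsyncTransport.h>

class LoggingWriteCallback : public folly::AsyncWriter::WriteCallback {
 public:
  void writeSuccess() noexcept override {
    // every queued byte was handed off successfully
  }

  void writeErr(
      size_t bytesWritten,
      const folly::AsyncSocketException& ex) noexcept override {
    // bytesWritten bytes went out before the error described by `ex`
    (void)bytesWritten;
    (void)ex;
  }
};

void sendGreeting(folly::AsyncWriter* transport, LoggingWriteCallback* cb) {
  auto buf = folly::IOBuf::copyBuffer("hello\r\n");
  // writeChain() takes ownership of the IOBuf chain.
  transport->writeChain(cb, std::move(buf), folly::WriteFlags::NONE);
}
```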
...@@ -35,8 +35,8 @@ namespace folly { ...@@ -35,8 +35,8 @@ namespace folly {
* more than 1 packet will not work because they will end up with * more than 1 packet will not work because they will end up with
* different event base to process. * different event base to process.
*/ */
class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback,
, public AsyncSocketBase { public AsyncSocketBase {
public: public:
class Callback { class Callback {
public: public:
...@@ -84,10 +84,7 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback ...@@ -84,10 +84,7 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
* is dropped and you get `truncated = true` in onDataAvailable callback * is dropped and you get `truncated = true` in onDataAvailable callback
*/ */
explicit AsyncUDPServerSocket(EventBase* evb, size_t sz = 1500) explicit AsyncUDPServerSocket(EventBase* evb, size_t sz = 1500)
: evb_(evb), : evb_(evb), packetSize_(sz), nextListener_(0) {}
packetSize_(sz),
nextListener_(0) {
}
~AsyncUDPServerSocket() override { ~AsyncUDPServerSocket() override {
if (socket_) { if (socket_) {
...@@ -126,12 +123,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback ...@@ -126,12 +123,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
void listen() { void listen() {
CHECK(socket_) << "Need to bind before listening"; CHECK(socket_) << "Need to bind before listening";
for (auto& listener: listeners_) { for (auto& listener : listeners_) {
auto callback = listener.second; auto callback = listener.second;
listener.first->runInEventBaseThread([callback] () mutable { listener.first->runInEventBaseThread(
callback->onListenStarted(); [callback]() mutable { callback->onListenStarted(); });
});
} }
socket_->resumeRead(this); socket_->resumeRead(this);
...@@ -207,13 +203,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback ...@@ -207,13 +203,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
// Schedule it in the listener's eventbase // Schedule it in the listener's eventbase
// XXX: Speed this up // XXX: Speed this up
auto f = [ auto f = [socket,
socket,
client, client,
callback, callback,
data = std::move(data), data = std::move(data),
truncated truncated]() mutable {
]() mutable {
callback->onDataAvailable(socket, client, std::move(data), truncated); callback->onDataAvailable(socket, client, std::move(data), truncated);
}; };
...@@ -229,12 +223,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback ...@@ -229,12 +223,11 @@ class AsyncUDPServerSocket : private AsyncUDPSocket::ReadCallback
} }
void onReadClosed() noexcept override { void onReadClosed() noexcept override {
for (auto& listener: listeners_) { for (auto& listener : listeners_) {
auto callback = listener.second; auto callback = listener.second;
listener.first->runInEventBaseThread([callback] () mutable { listener.first->runInEventBaseThread(
callback->onListenStopped(); [callback]() mutable { callback->onListenStopped(); });
});
} }
} }
......
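A hedged usage sketch for `AsyncUDPServerSocket`. The `Callback` parameter types are inferred from the dispatch call in the hunk above; `bind()` and `addListener()` are assumed from parts of the class not shown here, and the exact `AsyncUDPSocket::write()` signature is likewise an assumption:

```cpp
#include <memory>

#include <folly/SocketAddress.h>
#include <folly/io/async/AsyncUDPServerSocket.h>
#include <folly/io/async/EventBase.h>

class EchoListener : public folly::AsyncUDPServerSocket::Callback {
 public:
  void onListenStarted() noexcept override {}
  void onListenStopped() noexcept override {}

  void onDataAvailable(
      std::shared_ptr<folly::AsyncUDPSocket> socket,
      const folly::SocketAddress& client,
      std::unique_ptr<folly::IOBuf> data,
      bool truncated) noexcept override {
    if (!truncated) {
      socket->write(client, data); // echo the datagram back to the sender
    }
  }
};

void startUdpEcho(folly::EventBase* evb, EchoListener* listener) {
  auto server = std::make_shared<folly::AsyncUDPServerSocket>(evb);
  server->bind(folly::SocketAddress("127.0.0.1", 0)); // assumed API
  server->addListener(evb, listener);                 // assumed API
  server->listen();
  evb->loop();
}
```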
...@@ -26,8 +26,8 @@ namespace folly { ...@@ -26,8 +26,8 @@ namespace folly {
template <class T> template <class T>
class DecoratedAsyncTransportWrapper : public folly::AsyncTransportWrapper { class DecoratedAsyncTransportWrapper : public folly::AsyncTransportWrapper {
public: public:
explicit DecoratedAsyncTransportWrapper(typename T::UniquePtr transport): explicit DecoratedAsyncTransportWrapper(typename T::UniquePtr transport)
transport_(std::move(transport)) {} : transport_(std::move(transport)) {}
const AsyncTransportWrapper* getWrappedTransport() const override { const AsyncTransportWrapper* getWrappedTransport() const override {
return transport_.get(); return transport_.get();
......
...@@ -96,9 +96,7 @@ class DelayedDestruction : public DelayedDestructionBase { ...@@ -96,9 +96,7 @@ class DelayedDestruction : public DelayedDestructionBase {
*/ */
~DelayedDestruction() override = default; ~DelayedDestruction() override = default;
DelayedDestruction() DelayedDestruction() : destroyPending_(false) {}
: destroyPending_(false) {
}
private: private:
/** /**
......
...@@ -61,30 +61,25 @@ class DelayedDestructionBase : private boost::noncopyable { ...@@ -61,30 +61,25 @@ class DelayedDestructionBase : private boost::noncopyable {
*/ */
class DestructorGuard { class DestructorGuard {
public: public:
explicit DestructorGuard(DelayedDestructionBase* dd = nullptr) : dd_(dd) {
explicit DestructorGuard(DelayedDestructionBase* dd = nullptr) :
dd_(dd) {
if (dd_ != nullptr) { if (dd_ != nullptr) {
++dd_->guardCount_; ++dd_->guardCount_;
assert(dd_->guardCount_ > 0); // check for wrapping assert(dd_->guardCount_ > 0); // check for wrapping
} }
} }
DestructorGuard(const DestructorGuard& dg) : DestructorGuard(const DestructorGuard& dg) : DestructorGuard(dg.dd_) {}
DestructorGuard(dg.dd_) {
}
DestructorGuard(DestructorGuard&& dg) noexcept : DestructorGuard(DestructorGuard&& dg) noexcept : dd_(dg.dd_) {
dd_(dg.dd_) {
dg.dd_ = nullptr; dg.dd_ = nullptr;
} }
DestructorGuard& operator =(DestructorGuard dg) noexcept { DestructorGuard& operator=(DestructorGuard dg) noexcept {
std::swap(dd_, dg.dd_); std::swap(dd_, dg.dd_);
return *this; return *this;
} }
DestructorGuard& operator =(DelayedDestructionBase* dd) { DestructorGuard& operator=(DelayedDestructionBase* dd) {
*this = DestructorGuard(dd); *this = DestructorGuard(dd);
return *this; return *this;
} }
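The `DestructorGuard` reformatting above is easier to read next to the idiom it supports: a guard on the stack keeps a `DelayedDestructionBase`-derived object alive until the guard is released, even if `destroy()` is called in the meantime. A minimal sketch with a hypothetical `MyHandler`:

```cpp
#include <folly/io/async/DelayedDestruction.h>

class MyHandler : public folly::DelayedDestruction {
 public:
  void handleEvent() {
    DestructorGuard guard(this); // hold the object across the callbacks
    maybeSelfDestruct();         // may end up calling destroy() on us
    // `this` is still valid here: deletion is deferred until the last
    // DestructorGuard goes out of scope.
  }

 private:
  ~MyHandler() override = default; // deleted only via destroy()

  void maybeSelfDestruct() {
    destroy(); // deferred while a DestructorGuard is active
  }
};
```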
...@@ -120,6 +115,7 @@ class DelayedDestructionBase : private boost::noncopyable { ...@@ -120,6 +115,7 @@ class DelayedDestructionBase : private boost::noncopyable {
class IntrusivePtr : private DestructorGuard { class IntrusivePtr : private DestructorGuard {
template <typename CopyAliasType> template <typename CopyAliasType>
friend class IntrusivePtr; friend class IntrusivePtr;
public: public:
template <typename... Args> template <typename... Args>
static IntrusivePtr<AliasType> make(Args&&... args) { static IntrusivePtr<AliasType> make(Args&&... args) {
...@@ -130,51 +126,47 @@ class DelayedDestructionBase : private boost::noncopyable { ...@@ -130,51 +126,47 @@ class DelayedDestructionBase : private boost::noncopyable {
IntrusivePtr(const IntrusivePtr&) = default; IntrusivePtr(const IntrusivePtr&) = default;
IntrusivePtr(IntrusivePtr&&) noexcept = default; IntrusivePtr(IntrusivePtr&&) noexcept = default;
template <typename CopyAliasType, typename = template <
typename std::enable_if< typename CopyAliasType,
std::is_convertible<CopyAliasType*, AliasType*>::value typename = typename std::enable_if<
>::type> std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
IntrusivePtr(const IntrusivePtr<CopyAliasType>& copy) : IntrusivePtr(const IntrusivePtr<CopyAliasType>& copy)
DestructorGuard(copy) { : DestructorGuard(copy) {}
}
template <typename CopyAliasType, typename = template <
typename std::enable_if< typename CopyAliasType,
std::is_convertible<CopyAliasType*, AliasType*>::value typename = typename std::enable_if<
>::type> std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
IntrusivePtr(IntrusivePtr<CopyAliasType>&& copy) : IntrusivePtr(IntrusivePtr<CopyAliasType>&& copy)
DestructorGuard(std::move(copy)) { : DestructorGuard(std::move(copy)) {}
}
explicit IntrusivePtr(AliasType* dd) : explicit IntrusivePtr(AliasType* dd) : DestructorGuard(dd) {}
DestructorGuard(dd) {
}
// Copying from a unique_ptr is safe because if the upcast to // Copying from a unique_ptr is safe because if the upcast to
// DelayedDestructionBase works, then the instance is already using // DelayedDestructionBase works, then the instance is already using
// intrusive ref-counting. // intrusive ref-counting.
template <typename CopyAliasType, typename Deleter, typename = template <
typename std::enable_if< typename CopyAliasType,
std::is_convertible<CopyAliasType*, AliasType*>::value typename Deleter,
>::type> typename = typename std::enable_if<
explicit IntrusivePtr(const std::unique_ptr<CopyAliasType, Deleter>& copy) : std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
DestructorGuard(copy.get()) { explicit IntrusivePtr(const std::unique_ptr<CopyAliasType, Deleter>& copy)
} : DestructorGuard(copy.get()) {}
IntrusivePtr& operator =(const IntrusivePtr&) = default; IntrusivePtr& operator=(const IntrusivePtr&) = default;
IntrusivePtr& operator =(IntrusivePtr&&) noexcept = default; IntrusivePtr& operator=(IntrusivePtr&&) noexcept = default;
template <typename CopyAliasType, typename = template <
typename std::enable_if< typename CopyAliasType,
std::is_convertible<CopyAliasType*, AliasType*>::value typename = typename std::enable_if<
>::type> std::is_convertible<CopyAliasType*, AliasType*>::value>::type>
IntrusivePtr& operator =(IntrusivePtr<CopyAliasType> copy) noexcept { IntrusivePtr& operator=(IntrusivePtr<CopyAliasType> copy) noexcept {
DestructorGuard::operator =(copy); DestructorGuard::operator=(copy);
return *this; return *this;
} }
IntrusivePtr& operator =(AliasType* dd) { IntrusivePtr& operator=(AliasType* dd) {
DestructorGuard::operator =(dd); DestructorGuard::operator=(dd);
return *this; return *this;
} }
...@@ -183,14 +175,14 @@ class DelayedDestructionBase : private boost::noncopyable { ...@@ -183,14 +175,14 @@ class DelayedDestructionBase : private boost::noncopyable {
} }
AliasType* get() const { AliasType* get() const {
return static_cast<AliasType *>(DestructorGuard::get()); return static_cast<AliasType*>(DestructorGuard::get());
} }
AliasType& operator *() const { AliasType& operator*() const {
return *get(); return *get();
} }
AliasType* operator ->() const { AliasType* operator->() const {
return get(); return get();
} }
...@@ -200,8 +192,7 @@ class DelayedDestructionBase : private boost::noncopyable { ...@@ -200,8 +192,7 @@ class DelayedDestructionBase : private boost::noncopyable {
}; };
protected: protected:
DelayedDestructionBase() DelayedDestructionBase() : guardCount_(0) {}
: guardCount_(0) {}
/** /**
* Get the number of DestructorGuards currently protecting this object. * Get the number of DestructorGuards currently protecting this object.
...@@ -237,12 +228,12 @@ class DelayedDestructionBase : private boost::noncopyable { ...@@ -237,12 +228,12 @@ class DelayedDestructionBase : private boost::noncopyable {
uint32_t guardCount_; uint32_t guardCount_;
}; };
inline bool operator ==( inline bool operator==(
const DelayedDestructionBase::DestructorGuard& left, const DelayedDestructionBase::DestructorGuard& left,
const DelayedDestructionBase::DestructorGuard& right) { const DelayedDestructionBase::DestructorGuard& right) {
return left.get() == right.get(); return left.get() == right.get();
} }
inline bool operator !=( inline bool operator!=(
const DelayedDestructionBase::DestructorGuard& left, const DelayedDestructionBase::DestructorGuard& left,
const DelayedDestructionBase::DestructorGuard& right) { const DelayedDestructionBase::DestructorGuard& right) {
return left.get() != right.get(); return left.get() != right.get();
...@@ -269,13 +260,13 @@ inline bool operator!=( ...@@ -269,13 +260,13 @@ inline bool operator!=(
} }
template <typename LeftAliasType, typename RightAliasType> template <typename LeftAliasType, typename RightAliasType>
inline bool operator ==( inline bool operator==(
const DelayedDestructionBase::IntrusivePtr<LeftAliasType>& left, const DelayedDestructionBase::IntrusivePtr<LeftAliasType>& left,
const DelayedDestructionBase::IntrusivePtr<RightAliasType>& right) { const DelayedDestructionBase::IntrusivePtr<RightAliasType>& right) {
return left.get() == right.get(); return left.get() == right.get();
} }
template <typename LeftAliasType, typename RightAliasType> template <typename LeftAliasType, typename RightAliasType>
inline bool operator !=( inline bool operator!=(
const DelayedDestructionBase::IntrusivePtr<LeftAliasType>& left, const DelayedDestructionBase::IntrusivePtr<LeftAliasType>& left,
const DelayedDestructionBase::IntrusivePtr<RightAliasType>& right) { const DelayedDestructionBase::IntrusivePtr<RightAliasType>& right) {
return left.get() != right.get(); return left.get() != right.get();
......
...@@ -73,21 +73,23 @@ static std::mutex libevent_mutex_; ...@@ -73,21 +73,23 @@ static std::mutex libevent_mutex_;
*/ */
EventBase::EventBase(bool enableTimeMeasurement) EventBase::EventBase(bool enableTimeMeasurement)
: runOnceCallbacks_(nullptr) : runOnceCallbacks_(nullptr),
, stop_(false) stop_(false),
, loopThread_() loopThread_(),
, queue_(nullptr) queue_(nullptr),
, fnRunner_(nullptr) fnRunner_(nullptr),
, maxLatency_(0) maxLatency_(0),
, avgLoopTime_(std::chrono::seconds(2)) avgLoopTime_(std::chrono::seconds(2)),
, maxLatencyLoopTime_(avgLoopTime_) maxLatencyLoopTime_(avgLoopTime_),
, enableTimeMeasurement_(enableTimeMeasurement) enableTimeMeasurement_(enableTimeMeasurement),
, nextLoopCnt_(uint64_t(-40)) // Early wrap-around so bugs will manifest soon nextLoopCnt_(
, latestLoopCnt_(nextLoopCnt_) uint64_t(-40)) // Early wrap-around so bugs will manifest soon
, startWork_() ,
, observer_(nullptr) latestLoopCnt_(nextLoopCnt_),
, observerSampleCount_(0) startWork_(),
, executionObserver_(nullptr) { observer_(nullptr),
observerSampleCount_(0),
executionObserver_(nullptr) {
struct event ev; struct event ev;
{ {
std::lock_guard<std::mutex> lock(libevent_mutex_); std::lock_guard<std::mutex> lock(libevent_mutex_);
...@@ -117,22 +119,24 @@ EventBase::EventBase(bool enableTimeMeasurement) ...@@ -117,22 +119,24 @@ EventBase::EventBase(bool enableTimeMeasurement)
// takes ownership of the event_base // takes ownership of the event_base
EventBase::EventBase(event_base* evb, bool enableTimeMeasurement) EventBase::EventBase(event_base* evb, bool enableTimeMeasurement)
: runOnceCallbacks_(nullptr) : runOnceCallbacks_(nullptr),
, stop_(false) stop_(false),
, loopThread_() loopThread_(),
, evb_(evb) evb_(evb),
, queue_(nullptr) queue_(nullptr),
, fnRunner_(nullptr) fnRunner_(nullptr),
, maxLatency_(0) maxLatency_(0),
, avgLoopTime_(std::chrono::seconds(2)) avgLoopTime_(std::chrono::seconds(2)),
, maxLatencyLoopTime_(avgLoopTime_) maxLatencyLoopTime_(avgLoopTime_),
, enableTimeMeasurement_(enableTimeMeasurement) enableTimeMeasurement_(enableTimeMeasurement),
, nextLoopCnt_(uint64_t(-40)) // Early wrap-around so bugs will manifest soon nextLoopCnt_(
, latestLoopCnt_(nextLoopCnt_) uint64_t(-40)) // Early wrap-around so bugs will manifest soon
, startWork_() ,
, observer_(nullptr) latestLoopCnt_(nextLoopCnt_),
, observerSampleCount_(0) startWork_(),
, executionObserver_(nullptr) { observer_(nullptr),
observerSampleCount_(0),
executionObserver_(nullptr) {
if (UNLIKELY(evb_ == nullptr)) { if (UNLIKELY(evb_ == nullptr)) {
LOG(ERROR) << "EventBase(): Pass nullptr as event base."; LOG(ERROR) << "EventBase(): Pass nullptr as event base.";
throw std::invalid_argument("EventBase(): event base cannot be nullptr"); throw std::invalid_argument("EventBase(): event base cannot be nullptr");
...@@ -233,8 +237,8 @@ void EventBase::resetLoadAvg(double value) { ...@@ -233,8 +237,8 @@ void EventBase::resetLoadAvg(double value) {
maxLatencyLoopTime_.reset(value); maxLatencyLoopTime_.reset(value);
} }
static std::chrono::milliseconds static std::chrono::milliseconds getTimeDelta(
getTimeDelta(std::chrono::steady_clock::time_point* prev) { std::chrono::steady_clock::time_point* prev) {
auto result = std::chrono::steady_clock::now() - *prev; auto result = std::chrono::steady_clock::now() - *prev;
*prev = std::chrono::steady_clock::now(); *prev = std::chrono::steady_clock::now();
...@@ -314,7 +318,7 @@ bool EventBase::loopBody(int flags, bool ignoreKeepAlive) { ...@@ -314,7 +318,7 @@ bool EventBase::loopBody(int flags, bool ignoreKeepAlive) {
LoopCallbackList callbacks; LoopCallbackList callbacks;
callbacks.swap(runBeforeLoopCallbacks_); callbacks.swap(runBeforeLoopCallbacks_);
while(!callbacks.empty()) { while (!callbacks.empty()) {
auto* item = &callbacks.front(); auto* item = &callbacks.front();
callbacks.pop_front(); callbacks.pop_front();
item->runLoopCallback(); item->runLoopCallback();
...@@ -388,8 +392,8 @@ bool EventBase::loopBody(int flags, bool ignoreKeepAlive) { ...@@ -388,8 +392,8 @@ bool EventBase::loopBody(int flags, bool ignoreKeepAlive) {
} }
if (enableTimeMeasurement_) { if (enableTimeMeasurement_) {
VLOG(11) << "EventBase " << this << " loop time: " << VLOG(11) << "EventBase " << this
getTimeDelta(&prev).count(); << " loop time: " << getTimeDelta(&prev).count();
} }
if (once) { if (once) {
...@@ -473,8 +477,8 @@ void EventBase::bumpHandlingTime() { ...@@ -473,8 +477,8 @@ void EventBase::bumpHandlingTime() {
return; return;
} }
VLOG(11) << "EventBase " << this << " " << __PRETTY_FUNCTION__ << VLOG(11) << "EventBase " << this << " " << __PRETTY_FUNCTION__
" (loop) latest " << latestLoopCnt_ << " next " << nextLoopCnt_; << " (loop) latest " << latestLoopCnt_ << " next " << nextLoopCnt_;
if (nothingHandledYet()) { if (nothingHandledYet()) {
latestLoopCnt_ = nextLoopCnt_; latestLoopCnt_ = nextLoopCnt_;
// set the time // set the time
...@@ -686,9 +690,7 @@ bool EventBase::nothingHandledYet() const noexcept { ...@@ -686,9 +690,7 @@ bool EventBase::nothingHandledYet() const noexcept {
return (nextLoopCnt_ != latestLoopCnt_); return (nextLoopCnt_ != latestLoopCnt_);
} }
void EventBase::attachTimeoutManager(AsyncTimeout* obj, void EventBase::attachTimeoutManager(AsyncTimeout* obj, InternalEnum internal) {
InternalEnum internal) {
struct event* ev = obj->getEvent(); struct event* ev = obj->getEvent();
assert(ev->ev_base == nullptr); assert(ev->ev_base == nullptr);
...@@ -705,7 +707,8 @@ void EventBase::detachTimeoutManager(AsyncTimeout* obj) { ...@@ -705,7 +707,8 @@ void EventBase::detachTimeoutManager(AsyncTimeout* obj) {
ev->ev_base = nullptr; ev->ev_base = nullptr;
} }
bool EventBase::scheduleTimeout(AsyncTimeout* obj, bool EventBase::scheduleTimeout(
AsyncTimeout* obj,
TimeoutManager::timeout_type timeout) { TimeoutManager::timeout_type timeout) {
dcheckIsInEventBaseThread(); dcheckIsInEventBaseThread();
// Set up the timeval and add the event // Set up the timeval and add the event
...@@ -738,8 +741,7 @@ void EventBase::setName(const std::string& name) { ...@@ -738,8 +741,7 @@ void EventBase::setName(const std::string& name) {
name_ = name; name_ = name;
if (isRunning()) { if (isRunning()) {
setThreadName(loopThread_.load(std::memory_order_relaxed), setThreadName(loopThread_.load(std::memory_order_relaxed), name_);
name_);
} }
} }
...@@ -755,8 +757,12 @@ void EventBase::scheduleAt(Func&& fn, TimePoint const& timeout) { ...@@ -755,8 +757,12 @@ void EventBase::scheduleAt(Func&& fn, TimePoint const& timeout) {
std::chrono::duration_cast<std::chrono::milliseconds>(duration)); std::chrono::duration_cast<std::chrono::milliseconds>(duration));
} }
const char* EventBase::getLibeventVersion() { return event_get_version(); } const char* EventBase::getLibeventVersion() {
const char* EventBase::getLibeventMethod() { return event_get_method(); } return event_get_version();
}
const char* EventBase::getLibeventMethod() {
return event_get_method();
}
VirtualEventBase& EventBase::getVirtualEventBase() { VirtualEventBase& EventBase::getVirtualEventBase() {
folly::call_once(virtualEventBaseInitFlag_, [&] { folly::call_once(virtualEventBaseInitFlag_, [&] {
......
...@@ -73,8 +73,7 @@ class EventBaseObserver { ...@@ -73,8 +73,7 @@ class EventBaseObserver {
virtual uint32_t getSampleRate() const = 0; virtual uint32_t getSampleRate() const = 0;
virtual void loopSample( virtual void loopSample(int64_t busyTime, int64_t idleTime) = 0;
int64_t busyTime, int64_t idleTime) = 0;
}; };
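A hedged sketch of an `EventBaseObserver` implementation: every `getSampleRate()`-th loop iteration, `loopSample()` receives the measured busy and idle times. Installing it via `EventBase::setObserver()` is an assumption not shown in this hunk:

```cpp
#include <cstdint>
#include <memory>

#include <folly/io/async/EventBase.h>

class LatencyObserver : public folly::EventBaseObserver {
 public:
  uint32_t getSampleRate() const override {
    return 10; // sample one out of every ten loop iterations
  }

  void loopSample(int64_t busyTime, int64_t idleTime) override {
    // e.g. export busyTime / idleTime to a stats counter
    (void)busyTime;
    (void)idleTime;
  }
};

void installObserver(folly::EventBase& evb) {
  evb.setObserver(std::make_shared<LatencyObserver>()); // assumed API
}
```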
// Helper class that sets and retrieves the EventBase associated with a given // Helper class that sets and retrieves the EventBase associated with a given
...@@ -163,9 +162,9 @@ class EventBase : private boost::noncopyable, ...@@ -163,9 +162,9 @@ class EventBase : private boost::noncopyable,
} }
private: private:
typedef boost::intrusive::list< typedef boost::intrusive::
LoopCallback, list<LoopCallback, boost::intrusive::constant_time_size<false>>
boost::intrusive::constant_time_size<false> > List; List;
// EventBase needs access to LoopCallbackList (and therefore to hook_) // EventBase needs access to LoopCallbackList (and therefore to hook_)
friend class EventBase; friend class EventBase;
...@@ -257,8 +256,8 @@ class EventBase : private boost::noncopyable, ...@@ -257,8 +256,8 @@ class EventBase : private boost::noncopyable,
/** /**
* Same as loop(), but doesn't wait for all keep-alive tokens to be released. * Same as loop(), but doesn't wait for all keep-alive tokens to be released.
*/ */
[[deprecated("This should only be used in legacy unit tests")]] [[deprecated("This should only be used in legacy unit tests")]] bool
bool loopIgnoreKeepAlive(); loopIgnoreKeepAlive();
/** /**
* Wait for some events to become active, run them, then return. * Wait for some events to become active, run them, then return.
...@@ -535,7 +534,9 @@ class EventBase : private boost::noncopyable, ...@@ -535,7 +534,9 @@ class EventBase : private boost::noncopyable,
// Avoid using these functions if possible. These functions are not // Avoid using these functions if possible. These functions are not
// guaranteed to always be present if we ever provide alternative EventBase // guaranteed to always be present if we ever provide alternative EventBase
// implementations that do not use libevent internally. // implementations that do not use libevent internally.
event_base* getLibeventBase() const { return evb_; } event_base* getLibeventBase() const {
return evb_;
}
static const char* getLibeventVersion(); static const char* getLibeventVersion();
static const char* getLibeventMethod(); static const char* getLibeventMethod();
...@@ -780,7 +781,8 @@ class EventBase : private boost::noncopyable, ...@@ -780,7 +781,8 @@ class EventBase : private boost::noncopyable,
// see EventBaseLocal // see EventBaseLocal
friend class detail::EventBaseLocalBase; friend class detail::EventBaseLocalBase;
template <typename T> friend class EventBaseLocal; template <typename T>
friend class EventBaseLocal;
std::unordered_map<uint64_t, std::shared_ptr<void>> localStorage_; std::unordered_map<uint64_t, std::shared_ptr<void>> localStorage_;
std::unordered_set<detail::EventBaseLocalBaseBase*> localStorageToDtor_; std::unordered_set<detail::EventBaseLocalBaseBase*> localStorageToDtor_;
......
...@@ -19,12 +19,13 @@ ...@@ -19,12 +19,13 @@
#include <atomic> #include <atomic>
#include <thread> #include <thread>
namespace folly { namespace detail { namespace folly {
namespace detail {
EventBaseLocalBase::~EventBaseLocalBase() { EventBaseLocalBase::~EventBaseLocalBase() {
auto locked = eventBases_.rlock(); auto locked = eventBases_.rlock();
for (auto* evb : *locked) { for (auto* evb : *locked) {
evb->runInEventBaseThread([ this, evb, key = key_ ] { evb->runInEventBaseThread([this, evb, key = key_] {
evb->localStorage_.erase(key); evb->localStorage_.erase(key);
evb->localStorageToDtor_.erase(this); evb->localStorageToDtor_.erase(this);
}); });
...@@ -55,8 +56,7 @@ void EventBaseLocalBase::onEventBaseDestruction(EventBase& evb) { ...@@ -55,8 +56,7 @@ void EventBaseLocalBase::onEventBaseDestruction(EventBase& evb) {
void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) { void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) {
evb.dcheckIsInEventBaseThread(); evb.dcheckIsInEventBaseThread();
auto alreadyExists = auto alreadyExists = evb.localStorage_.find(key_) != evb.localStorage_.end();
evb.localStorage_.find(key_) != evb.localStorage_.end();
evb.localStorage_.emplace(key_, std::move(ptr)); evb.localStorage_.emplace(key_, std::move(ptr));
......
...@@ -73,7 +73,7 @@ class EventBaseLocalBase : public EventBaseLocalBaseBase, boost::noncopyable { ...@@ -73,7 +73,7 @@ class EventBaseLocalBase : public EventBaseLocalBaseBase, boost::noncopyable {
template <typename T> template <typename T>
class EventBaseLocal : public detail::EventBaseLocalBase { class EventBaseLocal : public detail::EventBaseLocalBase {
public: public:
EventBaseLocal(): EventBaseLocalBase() {} EventBaseLocal() : EventBaseLocalBase() {}
T* get(EventBase& evb) { T* get(EventBase& evb) {
return static_cast<T*>(getVoid(evb)); return static_cast<T*>(getVoid(evb));
......
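A hedged sketch of `EventBaseLocal<T>`: it keeps one lazily-created `T` per `EventBase`, backed by the per-EventBase `localStorage_` map touched in the hunks above. `getOrCreate()` is assumed from parts of the class not shown here; `get()` appears above:

```cpp
#include <folly/io/async/EventBase.h>
#include <folly/io/async/EventBaseLocal.h>

folly::EventBaseLocal<int> counter;

void bumpCounter(folly::EventBase& evb) {
  // Must run in evb's thread; the storage slot is keyed by this EventBase.
  int& value = counter.getOrCreate(evb, 0); // assumed API
  ++value;
}
```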
...@@ -34,18 +34,17 @@ EventBaseManager* EventBaseManager::get() { ...@@ -34,18 +34,17 @@ EventBaseManager* EventBaseManager::get() {
} else { } else {
return new_mgr; return new_mgr;
} }
} }
/* /*
* EventBaseManager methods * EventBaseManager methods
*/ */
void EventBaseManager::setEventBase(EventBase *eventBase, void EventBaseManager::setEventBase(EventBase* eventBase, bool takeOwnership) {
bool takeOwnership) { EventBaseInfo* info = localStore_.get();
EventBaseInfo *info = localStore_.get();
if (info != nullptr) { if (info != nullptr) {
throw std::runtime_error("EventBaseManager: cannot set a new EventBase " throw std::runtime_error(
"EventBaseManager: cannot set a new EventBase "
"for this thread when one already exists"); "for this thread when one already exists");
} }
...@@ -55,7 +54,7 @@ void EventBaseManager::setEventBase(EventBase *eventBase, ...@@ -55,7 +54,7 @@ void EventBaseManager::setEventBase(EventBase *eventBase,
} }
void EventBaseManager::clearEventBase() { void EventBaseManager::clearEventBase() {
EventBaseInfo *info = localStore_.get(); EventBaseInfo* info = localStore_.get();
if (info != nullptr) { if (info != nullptr) {
this->untrackEventBase(info->eventBase); this->untrackEventBase(info->eventBase);
this->localStore_.reset(nullptr); this->localStore_.reset(nullptr);
...@@ -63,10 +62,10 @@ void EventBaseManager::clearEventBase() { ...@@ -63,10 +62,10 @@ void EventBaseManager::clearEventBase() {
} }
// XXX should this really be "const"? // XXX should this really be "const"?
EventBase * EventBaseManager::getEventBase() const { EventBase* EventBaseManager::getEventBase() const {
// have one? // have one?
auto *info = localStore_.get(); auto* info = localStore_.get();
if (! info) { if (!info) {
info = new EventBaseInfo(); info = new EventBaseInfo();
localStore_.reset(info); localStore_.reset(info);
...@@ -81,7 +80,7 @@ EventBase * EventBaseManager::getEventBase() const { ...@@ -81,7 +80,7 @@ EventBase * EventBaseManager::getEventBase() const {
// Simply removing the const causes trouble all over fbcode; // Simply removing the const causes trouble all over fbcode;
// lots of services build a const EventBaseManager and errors // lots of services build a const EventBaseManager and errors
// abound when we make this non-const. // abound when we make this non-const.
(const_cast<EventBaseManager *>(this))->trackEventBase(info->eventBase); (const_cast<EventBaseManager*>(this))->trackEventBase(info->eventBase);
} }
return info->eventBase; return info->eventBase;
......
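A hedged sketch of the per-thread lookup that `getEventBase()` implements above: the global manager lazily creates (and tracks) an `EventBase` for the calling thread on first use:

```cpp
#include <folly/io/async/EventBase.h>
#include <folly/io/async/EventBaseManager.h>

void runOnThisThreadsEventBase() {
  // Returns this thread's EventBase, creating and tracking it if needed.
  folly::EventBase* evb = folly::EventBaseManager::get()->getEventBase();

  evb->runInEventBaseThread([] {
    // executes while evb is looping in this thread
  });
  evb->loopOnce();
}
```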
...@@ -37,15 +37,12 @@ class EventBaseManager { ...@@ -37,15 +37,12 @@ class EventBaseManager {
public: public:
// XXX Constructing an EventBaseManager directly is DEPRECATED and not // XXX Constructing an EventBaseManager directly is DEPRECATED and not
// encouraged. You should instead use the global singleton if possible. // encouraged. You should instead use the global singleton if possible.
EventBaseManager() { EventBaseManager() {}
}
~EventBaseManager() { ~EventBaseManager() {}
}
explicit EventBaseManager( explicit EventBaseManager(const std::shared_ptr<EventBaseObserver>& observer)
const std::shared_ptr<EventBaseObserver>& observer : observer_(observer) {}
) : observer_(observer) {}
/** /**
* Get the global EventBaseManager for this program. Ideally all users * Get the global EventBaseManager for this program. Ideally all users
...@@ -87,7 +84,7 @@ class EventBaseManager { ...@@ -87,7 +84,7 @@ class EventBaseManager {
* EventBase, to make sure the EventBaseManager points to the correct * EventBase, to make sure the EventBaseManager points to the correct
* EventBase that is actually running in this thread. * EventBase that is actually running in this thread.
*/ */
void setEventBase(EventBase *eventBase, bool takeOwnership); void setEventBase(EventBase* eventBase, bool takeOwnership);
/** /**
* Clear the EventBase for this thread. * Clear the EventBase for this thread.
...@@ -107,22 +104,17 @@ class EventBaseManager { ...@@ -107,22 +104,17 @@ class EventBaseManager {
// grab the mutex for the caller // grab the mutex for the caller
std::lock_guard<std::mutex> g(*&eventBaseSetMutex_); std::lock_guard<std::mutex> g(*&eventBaseSetMutex_);
// give them only a const set to work with // give them only a const set to work with
const std::set<EventBase *>& constSet = eventBaseSet_; const std::set<EventBase*>& constSet = eventBaseSet_;
runnable(constSet); runnable(constSet);
} }
private: private:
struct EventBaseInfo { struct EventBaseInfo {
EventBaseInfo(EventBase *evb, bool owned) EventBaseInfo(EventBase* evb, bool owned) : eventBase(evb), owned_(owned) {}
: eventBase(evb),
owned_(owned) {}
EventBaseInfo() EventBaseInfo() : eventBase(new EventBase), owned_(true) {}
: eventBase(new EventBase)
, owned_(true) {}
EventBase *eventBase; EventBase* eventBase;
bool owned_; bool owned_;
~EventBaseInfo() { ~EventBaseInfo() {
if (owned_) { if (owned_) {
...@@ -132,15 +124,15 @@ class EventBaseManager { ...@@ -132,15 +124,15 @@ class EventBaseManager {
}; };
// Forbidden copy constructor and assignment operator // Forbidden copy constructor and assignment operator
EventBaseManager(EventBaseManager const &); EventBaseManager(EventBaseManager const&);
EventBaseManager& operator=(EventBaseManager const &); EventBaseManager& operator=(EventBaseManager const&);
void trackEventBase(EventBase *evb) { void trackEventBase(EventBase* evb) {
std::lock_guard<std::mutex> g(*&eventBaseSetMutex_); std::lock_guard<std::mutex> g(*&eventBaseSetMutex_);
eventBaseSet_.insert(evb); eventBaseSet_.insert(evb);
} }
void untrackEventBase(EventBase *evb) { void untrackEventBase(EventBase* evb) {
std::lock_guard<std::mutex> g(*&eventBaseSetMutex_); std::lock_guard<std::mutex> g(*&eventBaseSetMutex_);
eventBaseSet_.erase(evb); eventBaseSet_.erase(evb);
} }
...@@ -150,7 +142,7 @@ class EventBaseManager { ...@@ -150,7 +142,7 @@ class EventBaseManager {
// set of "active" EventBase instances // set of "active" EventBase instances
// (also see the mutex "eventBaseSetMutex_" below // (also see the mutex "eventBaseSetMutex_" below
// which governs access to this). // which governs access to this).
mutable std::set<EventBase *> eventBaseSet_; mutable std::set<EventBase*> eventBaseSet_;
// a mutex to use as a guard for the above set // a mutex to use as a guard for the above set
std::mutex eventBaseSetMutex_; std::mutex eventBaseSetMutex_;
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#if defined(__GLIBC__) && !defined(__APPLE__) #if defined(__GLIBC__) && !defined(__APPLE__)
#if __GLIBC_PREREQ(2, 9) #if __GLIBC_PREREQ(2, 9)
# define FOLLY_GLIBC_2_9 #define FOLLY_GLIBC_2_9
#endif #endif
#endif #endif
...@@ -59,19 +59,18 @@ ...@@ -59,19 +59,18 @@
#endif #endif
#endif #endif
enum enum {
{
EFD_SEMAPHORE = 1, EFD_SEMAPHORE = 1,
#define EFD_SEMAPHORE EFD_SEMAPHORE #define EFD_SEMAPHORE EFD_SEMAPHORE
EFD_CLOEXEC = 02000000, EFD_CLOEXEC = 02000000,
#define EFD_CLOEXEC EFD_CLOEXEC #define EFD_CLOEXEC EFD_CLOEXEC
EFD_NONBLOCK = 04000 EFD_NONBLOCK = 04000
#define EFD_NONBLOCK EFD_NONBLOCK #define EFD_NONBLOCK EFD_NONBLOCK
}; };
// http://www.kernel.org/doc/man-pages/online/pages/man2/eventfd.2.html // http://www.kernel.org/doc/man-pages/online/pages/man2/eventfd.2.html
// Use the eventfd2 system call, as in glibc 2.9+ // Use the eventfd2 system call, as in glibc 2.9+
// (requires kernel 2.6.30+) // (requires kernel 2.6.30+)
#define eventfd(initval, flags) syscall(__NR_eventfd2,(initval),(flags)) #define eventfd(initval, flags) syscall(__NR_eventfd2, (initval), (flags))
#endif /* !(defined(__GLIBC__) && __GLIBC_PREREQ(2, 9)) */ #endif /* !(defined(__GLIBC__) && __GLIBC_PREREQ(2, 9)) */
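On pre-2.9 glibc the `eventfd()` macro above forwards straight to the `eventfd2` system call, so callers can be written as if glibc's `eventfd(2)` wrapper existed. A small sketch of such a call site (error handling elided), assuming this wrapper header is included:

```cpp
#include <stdint.h>
#include <unistd.h>

int makeWakeupFd() {
  // Expands to syscall(__NR_eventfd2, 0, EFD_CLOEXEC | EFD_NONBLOCK) when
  // the macro above is in effect; calls glibc's eventfd() otherwise.
  int fd = eventfd(0 /* initval */, EFD_CLOEXEC | EFD_NONBLOCK);
  if (fd >= 0) {
    uint64_t one = 1;
    write(fd, &one, sizeof(one)); // bump the counter to wake up a reader
  }
  return fd;
}
```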
...@@ -22,24 +22,26 @@ ...@@ -22,24 +22,26 @@
namespace folly { namespace folly {
# if LIBEVENT_VERSION_NUMBER <= 0x02010101 #if LIBEVENT_VERSION_NUMBER <= 0x02010101
# define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_##name #define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_##name
# else #else
# define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_evcallback.evcb_##name #define FOLLY_LIBEVENT_COMPAT_PLUCK(name) ev_evcallback.evcb_##name
# endif #endif
# define FOLLY_LIBEVENT_DEF_ACCESSORS(name) \ #define FOLLY_LIBEVENT_DEF_ACCESSORS(name) \
inline auto event_ref_##name(struct event* ev) -> \ inline auto event_ref_##name(struct event* ev) \
decltype(std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) \ ->decltype(std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) { \
{ return std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); } \ return std::ref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); \
inline auto event_ref_##name(struct event const* ev) -> \ } \
decltype(std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) \ inline auto event_ref_##name(struct event const* ev) \
{ return std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); } \ ->decltype(std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name))) { \
return std::cref(ev->FOLLY_LIBEVENT_COMPAT_PLUCK(name)); \
} \
// //
FOLLY_LIBEVENT_DEF_ACCESSORS(flags) FOLLY_LIBEVENT_DEF_ACCESSORS(flags)
# undef FOLLY_LIBEVENT_COMPAT_PLUCK #undef FOLLY_LIBEVENT_COMPAT_PLUCK
# undef FOLLY_LIBEVENT_DEF_ACCESSORS #undef FOLLY_LIBEVENT_DEF_ACCESSORS
/** /**
* low-level libevent utility functions * low-level libevent utility functions
...@@ -49,8 +51,8 @@ class EventUtil { ...@@ -49,8 +51,8 @@ class EventUtil {
static bool isEventRegistered(const struct event* ev) { static bool isEventRegistered(const struct event* ev) {
// If any of these flags are set, the event is registered. // If any of these flags are set, the event is registered.
enum { enum {
EVLIST_REGISTERED = (EVLIST_INSERTED | EVLIST_ACTIVE | EVLIST_REGISTERED =
EVLIST_TIMEOUT | EVLIST_SIGNAL) (EVLIST_INSERTED | EVLIST_ACTIVE | EVLIST_TIMEOUT | EVLIST_SIGNAL)
}; };
return (event_ref_flags(ev) & EVLIST_REGISTERED); return (event_ref_flags(ev) & EVLIST_REGISTERED);
} }
......
...@@ -50,7 +50,8 @@ HHWheelTimer::Callback::~Callback() { ...@@ -50,7 +50,8 @@ HHWheelTimer::Callback::~Callback() {
} }
} }
void HHWheelTimer::Callback::setScheduled(HHWheelTimer* wheel, void HHWheelTimer::Callback::setScheduled(
HHWheelTimer* wheel,
std::chrono::milliseconds timeout) { std::chrono::milliseconds timeout) {
assert(wheel_ == nullptr); assert(wheel_ == nullptr);
assert(expiration_ == decltype(expiration_){}); assert(expiration_ == decltype(expiration_){});
...@@ -108,7 +109,8 @@ HHWheelTimer::~HHWheelTimer() { ...@@ -108,7 +109,8 @@ HHWheelTimer::~HHWheelTimer() {
cancelAll(); cancelAll();
} }
void HHWheelTimer::scheduleTimeoutImpl(Callback* callback, void HHWheelTimer::scheduleTimeoutImpl(
Callback* callback,
std::chrono::milliseconds timeout) { std::chrono::milliseconds timeout) {
auto nextTick = calcNextTick(); auto nextTick = calcNextTick();
int64_t due = timeToWheelTicks(timeout) + nextTick; int64_t due = timeToWheelTicks(timeout) + nextTick;
...@@ -140,7 +142,8 @@ void HHWheelTimer::scheduleTimeoutImpl(Callback* callback, ...@@ -140,7 +142,8 @@ void HHWheelTimer::scheduleTimeoutImpl(Callback* callback,
list->push_back(*callback); list->push_back(*callback);
} }
void HHWheelTimer::scheduleTimeout(Callback* callback, void HHWheelTimer::scheduleTimeout(
Callback* callback,
std::chrono::milliseconds timeout) { std::chrono::milliseconds timeout) {
// Cancel the callback if it happens to be scheduled already. // Cancel the callback if it happens to be scheduled already.
callback->cancelTimeout(); callback->cancelTimeout();
......
...@@ -133,17 +133,16 @@ class HHWheelTimer : private folly::AsyncTimeout, ...@@ -133,17 +133,16 @@ class HHWheelTimer : private folly::AsyncTimeout,
expiration_ - now); expiration_ - now);
} }
void setScheduled(HHWheelTimer* wheel, void setScheduled(HHWheelTimer* wheel, std::chrono::milliseconds);
std::chrono::milliseconds);
void cancelTimeoutImpl(); void cancelTimeoutImpl();
HHWheelTimer* wheel_{nullptr}; HHWheelTimer* wheel_{nullptr};
std::chrono::steady_clock::time_point expiration_{}; std::chrono::steady_clock::time_point expiration_{};
int bucket_{-1}; int bucket_{-1};
typedef boost::intrusive::list< typedef boost::intrusive::
Callback, list<Callback, boost::intrusive::constant_time_size<false>>
boost::intrusive::constant_time_size<false> > List; List;
std::shared_ptr<RequestContext> context_; std::shared_ptr<RequestContext> context_;
...@@ -209,9 +208,9 @@ class HHWheelTimer : private folly::AsyncTimeout, ...@@ -209,9 +208,9 @@ class HHWheelTimer : private folly::AsyncTimeout,
* If the callback is already scheduled, this cancels the existing timeout * If the callback is already scheduled, this cancels the existing timeout
* before scheduling the new timeout. * before scheduling the new timeout.
*/ */
void scheduleTimeout(Callback* callback, void scheduleTimeout(Callback* callback, std::chrono::milliseconds timeout);
std::chrono::milliseconds timeout); void scheduleTimeoutImpl(
void scheduleTimeoutImpl(Callback* callback, Callback* callback,
std::chrono::milliseconds timeout); std::chrono::milliseconds timeout);
/** /**
...@@ -273,8 +272,8 @@ class HHWheelTimer : private folly::AsyncTimeout, ...@@ -273,8 +272,8 @@ class HHWheelTimer : private folly::AsyncTimeout,
private: private:
// Forbidden copy constructor and assignment operator // Forbidden copy constructor and assignment operator
HHWheelTimer(HHWheelTimer const &) = delete; HHWheelTimer(HHWheelTimer const&) = delete;
HHWheelTimer& operator=(HHWheelTimer const &) = delete; HHWheelTimer& operator=(HHWheelTimer const&) = delete;
// Methods inherited from AsyncTimeout // Methods inherited from AsyncTimeout
void timeoutExpired() noexcept override; void timeoutExpired() noexcept override;
......
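A hedged sketch of scheduling through `HHWheelTimer`: a `Callback` subclass overrides `timeoutExpired()` (assumed to be the pure-virtual hook, not visible in this hunk), and `EventBase::timer()` is assumed as the usual way to reach the wheel:

```cpp
#include <chrono>

#include <folly/io/async/EventBase.h>
#include <folly/io/async/HHWheelTimer.h>

class PingTimeout : public folly::HHWheelTimer::Callback {
 public:
  void timeoutExpired() noexcept override {
    // runs on the timer's EventBase thread when the timeout fires
  }
};

void armPing(folly::EventBase& evb) {
  PingTimeout cb; // must stay alive until it fires or is cancelled
  // Re-scheduling the same callback first cancels any pending timeout,
  // as documented above.
  evb.timer().scheduleTimeout(&cb, std::chrono::milliseconds(500));
  evb.loop();
}
```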
...@@ -35,10 +35,7 @@ class TimeoutManager { ...@@ -35,10 +35,7 @@ class TimeoutManager {
typedef std::chrono::milliseconds timeout_type; typedef std::chrono::milliseconds timeout_type;
using Func = folly::Function<void()>; using Func = folly::Function<void()>;
enum class InternalEnum { enum class InternalEnum { INTERNAL, NORMAL };
INTERNAL,
NORMAL
};
TimeoutManager(); TimeoutManager();
...@@ -47,15 +44,15 @@ class TimeoutManager { ...@@ -47,15 +44,15 @@ class TimeoutManager {
/** /**
* Attaches/detaches TimeoutManager to AsyncTimeout * Attaches/detaches TimeoutManager to AsyncTimeout
*/ */
virtual void attachTimeoutManager(AsyncTimeout* obj, virtual void attachTimeoutManager(
AsyncTimeout* obj,
InternalEnum internal) = 0; InternalEnum internal) = 0;
virtual void detachTimeoutManager(AsyncTimeout* obj) = 0; virtual void detachTimeoutManager(AsyncTimeout* obj) = 0;
/** /**
* Schedules AsyncTimeout to fire after `timeout` milliseconds * Schedules AsyncTimeout to fire after `timeout` milliseconds
*/ */
virtual bool scheduleTimeout(AsyncTimeout* obj, virtual bool scheduleTimeout(AsyncTimeout* obj, timeout_type timeout) = 0;
timeout_type timeout) = 0;
/** /**
* Cancels the AsyncTimeout, if scheduled * Cancels the AsyncTimeout, if scheduled
......
...@@ -25,8 +25,8 @@ namespace folly { ...@@ -25,8 +25,8 @@ namespace folly {
* Helper class that redirects write() and writev() calls to writeChain(). * Helper class that redirects write() and writev() calls to writeChain().
*/ */
template <class T> template <class T>
class WriteChainAsyncTransportWrapper : class WriteChainAsyncTransportWrapper
public DecoratedAsyncTransportWrapper<T> { : public DecoratedAsyncTransportWrapper<T> {
public: public:
using DecoratedAsyncTransportWrapper<T>::DecoratedAsyncTransportWrapper; using DecoratedAsyncTransportWrapper<T>::DecoratedAsyncTransportWrapper;
......
...@@ -71,7 +71,8 @@ bool OpenSSLUtils::getTLSClientRandom( ...@@ -71,7 +71,8 @@ bool OpenSSLUtils::getTLSClientRandom(
return false; return false;
} }
bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx, bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(
X509_STORE_CTX* ctx,
sockaddr_storage* addrStorage, sockaddr_storage* addrStorage,
socklen_t* addrLen) { socklen_t* addrLen) {
// Grab the ssl idx and then the ssl object so that we can get the peer // Grab the ssl idx and then the ssl object so that we can get the peer
...@@ -93,7 +94,8 @@ bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx, ...@@ -93,7 +94,8 @@ bool OpenSSLUtils::getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx,
return true; return true;
} }
bool OpenSSLUtils::validatePeerCertNames(X509* cert, bool OpenSSLUtils::validatePeerCertNames(
X509* cert,
const sockaddr* addr, const sockaddr* addr,
socklen_t /* addrLen */) { socklen_t /* addrLen */) {
// Try to extract the names within the SAN extension from the certificate // Try to extract the names within the SAN extension from the certificate
......
...@@ -62,9 +62,8 @@ class OpenSSLUtils { ...@@ -62,9 +62,8 @@ class OpenSSLUtils {
*/ */
// TODO(agartrell): Add support for things like common name when // TODO(agartrell): Add support for things like common name when
// necessary. // necessary.
static bool validatePeerCertNames(X509* cert, static bool
const sockaddr* addr, validatePeerCertNames(X509* cert, const sockaddr* addr, socklen_t addrLen);
socklen_t addrLen);
/** /**
* Get the peer socket address from an X509_STORE_CTX*. Unlike the * Get the peer socket address from an X509_STORE_CTX*. Unlike the
...@@ -76,7 +75,8 @@ class OpenSSLUtils { ...@@ -76,7 +75,8 @@ class OpenSSLUtils {
* @param addrLen out param for length of address * @param addrLen out param for length of address
* @return true on success, false on failure * @return true on success, false on failure
*/ */
static bool getPeerAddressFromX509StoreCtx(X509_STORE_CTX* ctx, static bool getPeerAddressFromX509StoreCtx(
X509_STORE_CTX* ctx,
sockaddr_storage* addrStorage, sockaddr_storage* addrStorage,
socklen_t* addrLen); socklen_t* addrLen);
......
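A hedged sketch of using the two helpers above from an OpenSSL verification callback; the callback wiring itself (`SSL_CTX_set_verify`) is plain OpenSSL and not part of `OpenSSLUtils`:

```cpp
#include <sys/socket.h>

#include <openssl/x509.h>

#include <folly/io/async/ssl/OpenSSLUtils.h>

int verifyCallback(int preverifyOk, X509_STORE_CTX* ctx) {
  if (preverifyOk != 1) {
    return 0; // keep the failure OpenSSL already detected
  }

  sockaddr_storage addrStorage;
  socklen_t addrLen = 0;
  if (!folly::ssl::OpenSSLUtils::getPeerAddressFromX509StoreCtx(
          ctx, &addrStorage, &addrLen)) {
    return 0;
  }

  X509* cert = X509_STORE_CTX_get_current_cert(ctx);
  bool ok = folly::ssl::OpenSSLUtils::validatePeerCertNames(
      cert, reinterpret_cast<const sockaddr*>(&addrStorage), addrLen);
  return ok ? 1 : 0;
}
```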