Commit 81823a9c authored by Tudor Bosman, committed by Dave Watson

Make IOBuf support 64-bit length and capacity

Summary:
Remove type_ (unused), pack flags in least significant bits of sharedInfo_.
sizeof(IOBuf) remains 56 bytes.

Test Plan: folly/io/test with -opt and -dbg; iobuf*_test with asan as well.

Reviewed By: simpkins@fb.com

FB internal diff: D1179993
parent ce4efaad
...@@ -575,7 +575,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor; ...@@ -575,7 +575,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor;
*/ */
class Appender : public detail::Writable<Appender> { class Appender : public detail::Writable<Appender> {
public: public:
Appender(IOBuf* buf, uint32_t growth) Appender(IOBuf* buf, uint64_t growth)
: buffer_(buf), : buffer_(buf),
crtBuf_(buf->prev()), crtBuf_(buf->prev()),
growth_(growth) { growth_(growth) {
...@@ -601,7 +601,7 @@ class Appender : public detail::Writable<Appender> { ...@@ -601,7 +601,7 @@ class Appender : public detail::Writable<Appender> {
* Ensure at least n contiguous bytes available to write. * Ensure at least n contiguous bytes available to write.
* Postcondition: length() >= n. * Postcondition: length() >= n.
*/ */
void ensure(uint32_t n) { void ensure(uint64_t n) {
if (LIKELY(length() >= n)) { if (LIKELY(length() >= n)) {
return; return;
} }
...@@ -653,7 +653,7 @@ class Appender : public detail::Writable<Appender> { ...@@ -653,7 +653,7 @@ class Appender : public detail::Writable<Appender> {
IOBuf* buffer_; IOBuf* buffer_;
IOBuf* crtBuf_; IOBuf* crtBuf_;
uint32_t growth_; uint64_t growth_;
}; };
class QueueAppender : public detail::Writable<QueueAppender> { class QueueAppender : public detail::Writable<QueueAppender> {
...@@ -663,11 +663,11 @@ class QueueAppender : public detail::Writable<QueueAppender> { ...@@ -663,11 +663,11 @@ class QueueAppender : public detail::Writable<QueueAppender> {
* space in the queue, we grow no more than growth bytes at once * space in the queue, we grow no more than growth bytes at once
* (unless you call ensure() with a bigger value yourself). * (unless you call ensure() with a bigger value yourself).
*/ */
QueueAppender(IOBufQueue* queue, uint32_t growth) { QueueAppender(IOBufQueue* queue, uint64_t growth) {
reset(queue, growth); reset(queue, growth);
} }
void reset(IOBufQueue* queue, uint32_t growth) { void reset(IOBufQueue* queue, uint64_t growth) {
queue_ = queue; queue_ = queue;
growth_ = growth; growth_ = growth;
} }
...@@ -682,7 +682,7 @@ class QueueAppender : public detail::Writable<QueueAppender> { ...@@ -682,7 +682,7 @@ class QueueAppender : public detail::Writable<QueueAppender> {
// Ensure at least n contiguous; can go above growth_, throws if // Ensure at least n contiguous; can go above growth_, throws if
// not enough room. // not enough room.
void ensure(uint32_t n) { queue_->preallocate(n, growth_); } void ensure(uint64_t n) { queue_->preallocate(n, growth_); }
template <class T> template <class T>
typename std::enable_if<std::is_integral<T>::value>::type typename std::enable_if<std::is_integral<T>::value>::type
......
This diff is collapsed.
This diff is collapsed.
...@@ -29,7 +29,7 @@ namespace { ...@@ -29,7 +29,7 @@ namespace {
using folly::IOBuf; using folly::IOBuf;
const size_t MIN_ALLOC_SIZE = 2000; const size_t MIN_ALLOC_SIZE = 2000;
const size_t MAX_ALLOC_SIZE = 8000; // Must fit within a uint32_t const size_t MAX_ALLOC_SIZE = 8000;
const size_t MAX_PACK_COPY = 4096; const size_t MAX_PACK_COPY = 4096;
/** /**
...@@ -46,7 +46,7 @@ appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) { ...@@ -46,7 +46,7 @@ appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
// reduce wastage (the tail's tailroom and the head's headroom) when // reduce wastage (the tail's tailroom and the head's headroom) when
// joining two IOBufQueues together. // joining two IOBufQueues together.
size_t copyRemaining = MAX_PACK_COPY; size_t copyRemaining = MAX_PACK_COPY;
uint32_t n; uint64_t n;
while (src && while (src &&
(n = src->length()) < copyRemaining && (n = src->length()) < copyRemaining &&
n < tail->tailroom()) { n < tail->tailroom()) {
...@@ -88,7 +88,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) { ...@@ -88,7 +88,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
return *this; return *this;
} }
std::pair<void*, uint32_t> std::pair<void*, uint64_t>
IOBufQueue::headroom() { IOBufQueue::headroom() {
if (head_) { if (head_) {
return std::make_pair(head_->writableBuffer(), head_->headroom()); return std::make_pair(head_->writableBuffer(), head_->headroom());
...@@ -98,7 +98,7 @@ IOBufQueue::headroom() { ...@@ -98,7 +98,7 @@ IOBufQueue::headroom() {
} }
void void
IOBufQueue::markPrepended(uint32_t n) { IOBufQueue::markPrepended(uint64_t n) {
if (n == 0) { if (n == 0) {
return; return;
} }
...@@ -108,7 +108,7 @@ IOBufQueue::markPrepended(uint32_t n) { ...@@ -108,7 +108,7 @@ IOBufQueue::markPrepended(uint32_t n) {
} }
void void
IOBufQueue::prepend(const void* buf, uint32_t n) { IOBufQueue::prepend(const void* buf, uint64_t n) {
auto p = headroom(); auto p = headroom();
if (n > p.second) { if (n > p.second) {
throw std::overflow_error("Not enough room to prepend"); throw std::overflow_error("Not enough room to prepend");
...@@ -156,7 +156,7 @@ IOBufQueue::append(const void* buf, size_t len) { ...@@ -156,7 +156,7 @@ IOBufQueue::append(const void* buf, size_t len) {
false); false);
} }
IOBuf* last = head_->prev(); IOBuf* last = head_->prev();
uint32_t copyLen = std::min(len, (size_t)last->tailroom()); uint64_t copyLen = std::min(len, (size_t)last->tailroom());
memcpy(last->writableTail(), src, copyLen); memcpy(last->writableTail(), src, copyLen);
src += copyLen; src += copyLen;
last->append(copyLen); last->append(copyLen);
...@@ -166,7 +166,7 @@ IOBufQueue::append(const void* buf, size_t len) { ...@@ -166,7 +166,7 @@ IOBufQueue::append(const void* buf, size_t len) {
} }
void void
IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) { IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
auto src = static_cast<const uint8_t*>(buf); auto src = static_cast<const uint8_t*>(buf);
while (len != 0) { while (len != 0) {
size_t n = std::min(len, size_t(blockSize)); size_t n = std::min(len, size_t(blockSize));
...@@ -176,9 +176,9 @@ IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) { ...@@ -176,9 +176,9 @@ IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) {
} }
} }
pair<void*,uint32_t> pair<void*,uint64_t>
IOBufQueue::preallocateSlow(uint32_t min, uint32_t newAllocationSize, IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize,
uint32_t max) { uint64_t max) {
// Allocate a new buffer of the requested max size. // Allocate a new buffer of the requested max size.
unique_ptr<IOBuf> newBuf(IOBuf::create(std::max(min, newAllocationSize))); unique_ptr<IOBuf> newBuf(IOBuf::create(std::max(min, newAllocationSize)));
appendToChain(head_, std::move(newBuf), false); appendToChain(head_, std::move(newBuf), false);
......
...@@ -54,18 +54,18 @@ class IOBufQueue { ...@@ -54,18 +54,18 @@ class IOBufQueue {
/** /**
* Return a space to prepend bytes and the amount of headroom available. * Return a space to prepend bytes and the amount of headroom available.
*/ */
std::pair<void*, uint32_t> headroom(); std::pair<void*, uint64_t> headroom();
/** /**
* Indicate that n bytes from the headroom have been used. * Indicate that n bytes from the headroom have been used.
*/ */
void markPrepended(uint32_t n); void markPrepended(uint64_t n);
/** /**
* Prepend an existing range; throws std::overflow_error if not enough * Prepend an existing range; throws std::overflow_error if not enough
* room. * room.
*/ */
void prepend(const void* buf, uint32_t n); void prepend(const void* buf, uint64_t n);
/** /**
* Add a buffer or buffer chain to the end of this queue. The * Add a buffer or buffer chain to the end of this queue. The
...@@ -115,7 +115,7 @@ class IOBufQueue { ...@@ -115,7 +115,7 @@ class IOBufQueue {
* Importantly, this method may be used to wrap buffers larger than 4GB. * Importantly, this method may be used to wrap buffers larger than 4GB.
*/ */
void wrapBuffer(const void* buf, size_t len, void wrapBuffer(const void* buf, size_t len,
uint32_t blockSize=(1U << 31)); // default block size: 2GB uint64_t blockSize=(1U << 31)); // default block size: 2GB
/** /**
* Obtain a writable block of contiguous bytes at the end of this * Obtain a writable block of contiguous bytes at the end of this
...@@ -137,9 +137,9 @@ class IOBufQueue { ...@@ -137,9 +137,9 @@ class IOBufQueue {
* callback, tell the application how much of the buffer they've * callback, tell the application how much of the buffer they've
* filled with data. * filled with data.
*/ */
std::pair<void*,uint32_t> preallocate( std::pair<void*,uint64_t> preallocate(
uint32_t min, uint32_t newAllocationSize, uint64_t min, uint64_t newAllocationSize,
uint32_t max = std::numeric_limits<uint32_t>::max()) { uint64_t max = std::numeric_limits<uint64_t>::max()) {
auto buf = tailBuf(); auto buf = tailBuf();
if (LIKELY(buf && buf->tailroom() >= min)) { if (LIKELY(buf && buf->tailroom() >= min)) {
return std::make_pair(buf->writableTail(), return std::make_pair(buf->writableTail(),
...@@ -159,7 +159,7 @@ class IOBufQueue { ...@@ -159,7 +159,7 @@ class IOBufQueue {
* invoke any other non-const methods on this IOBufQueue between * invoke any other non-const methods on this IOBufQueue between
* the call to preallocate and the call to postallocate(). * the call to preallocate and the call to postallocate().
*/ */
void postallocate(uint32_t n) { void postallocate(uint64_t n) {
head_->prev()->append(n); head_->prev()->append(n);
chainLength_ += n; chainLength_ += n;
} }
...@@ -168,7 +168,7 @@ class IOBufQueue { ...@@ -168,7 +168,7 @@ class IOBufQueue {
* Obtain a writable block of n contiguous bytes, allocating more space * Obtain a writable block of n contiguous bytes, allocating more space
* if necessary, and mark it as used. The caller can fill it later. * if necessary, and mark it as used. The caller can fill it later.
*/ */
void* allocate(uint32_t n) { void* allocate(uint64_t n) {
void* p = preallocate(n, n).first; void* p = preallocate(n, n).first;
postallocate(n); postallocate(n);
return p; return p;
...@@ -271,8 +271,8 @@ class IOBufQueue { ...@@ -271,8 +271,8 @@ class IOBufQueue {
IOBuf* buf = head_->prev(); IOBuf* buf = head_->prev();
return LIKELY(!buf->isSharedOne()) ? buf : nullptr; return LIKELY(!buf->isSharedOne()) ? buf : nullptr;
} }
std::pair<void*,uint32_t> preallocateSlow( std::pair<void*,uint64_t> preallocateSlow(
uint32_t min, uint32_t newAllocationSize, uint32_t max); uint64_t min, uint64_t newAllocationSize, uint64_t max);
static const size_t kChainLengthNotCached = (size_t)-1; static const size_t kChainLengthNotCached = (size_t)-1;
/** Not copyable */ /** Not copyable */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment