Commit 60da8ef5 authored by Dan Melnic, committed by Facebook GitHub Bot

Add iouring folly support, refactor the async IO

Summary: Add iouring folly support, refactor the async IO

Reviewed By: kevin-vigor

Differential Revision: D17834511

fbshipit-source-id: e20c876a32730549f305334fd5eed02cccf23638
parent b35bea8f
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Locate the liburing header and library.
find_path(LIBURING_INCLUDE_DIR NAMES liburing.h)
mark_as_advanced(LIBURING_INCLUDE_DIR)

find_library(LIBURING_LIBRARY NAMES uring)
mark_as_advanced(LIBURING_LIBRARY)

# Sets LIBURING_FOUND (and handles QUIET/REQUIRED) based on the two cache
# variables above.
include(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(
  LIBURING
  REQUIRED_VARS LIBURING_LIBRARY LIBURING_INCLUDE_DIR)

# Export the conventional plural result variables for consumers.
if(LIBURING_FOUND)
  set(LIBURING_LIBRARIES ${LIBURING_LIBRARY})
  set(LIBURING_INCLUDE_DIRS ${LIBURING_INCLUDE_DIR})
endif()
......@@ -113,6 +113,10 @@ find_package(LibAIO)
list(APPEND FOLLY_LINK_LIBRARIES ${LIBAIO_LIBRARIES})
list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBAIO_INCLUDE_DIRS})
find_package(LibUring)
list(APPEND FOLLY_LINK_LIBRARIES ${LIBURING_LIBRARIES})
list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBURING_INCLUDE_DIRS})
find_package(Libsodium)
list(APPEND FOLLY_LINK_LIBRARIES ${LIBSODIUM_LIBRARIES})
list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBSODIUM_INCLUDE_DIRS})
......
......@@ -203,6 +203,23 @@ if (NOT ${LIBAIO_FOUND})
${FOLLY_DIR}/experimental/io/AsyncIO.h
)
endif()
# Without liburing, drop the io_uring-backed implementation from the build.
if (NOT ${LIBURING_FOUND})
  list(REMOVE_ITEM files
    ${FOLLY_DIR}/experimental/io/IoUring.cpp
  )
  list(REMOVE_ITEM hfiles
    ${FOLLY_DIR}/experimental/io/IoUring.h
  )
endif()

# AsyncBase is the shared base of the AIO and io_uring backends; drop it when
# neither backend is being built.
if (NOT ${LIBAIO_FOUND} AND NOT ${LIBURING_FOUND})
  list(REMOVE_ITEM files
    ${FOLLY_DIR}/experimental/io/AsyncBase.cpp
  )
  list(REMOVE_ITEM hfiles
    ${FOLLY_DIR}/experimental/io/AsyncBase.h
  )
endif()
if (${LIBSODIUM_FOUND})
string(FIND "${CMAKE_LIBRARY_ARCHITECTURE}" "x86_64" IS_X86_64_ARCH)
if (${IS_X86_64_ARCH} STREQUAL "-1")
......
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/io/AsyncBase.h>
#include <sys/eventfd.h>
#include <cerrno>
#include <ostream>
#include <stdexcept>
#include <string>
#include <boost/intrusive/parent_from_member.hpp>
#include <glog/logging.h>
#include <folly/Exception.h>
#include <folly/Format.h>
#include <folly/Likely.h>
#include <folly/String.h>
#include <folly/portability/Unistd.h>
namespace folly {
// Construct an op in the UNINITIALIZED state. result_ defaults to -EINVAL so
// an op that never ran carries an error-like value.
AsyncBaseOp::AsyncBaseOp(NotificationCallback cb)
    : cb_(std::move(cb)), state_(State::UNINITIALIZED), result_(-EINVAL) {}
// Return the op to its freshly-constructed state so it can be reused.
// Resetting an op that is still PENDING is a fatal error.
void AsyncBaseOp::reset(NotificationCallback cb) {
  CHECK_NE(state_, State::PENDING);
  cb_ = std::move(cb);
  state_ = State::UNINITIALIZED;
  result_ = -EINVAL;
}
// Destroying a PENDING op is a fatal error: the op must remain allocated
// until it is completed or canceled.
AsyncBaseOp::~AsyncBaseOp() {
  CHECK_NE(state_, State::PENDING);
}
// Transition INITIALIZED -> PENDING; called by AsyncBase::submit() once the
// op has been handed to the backend.
void AsyncBaseOp::start() {
  DCHECK_EQ(state_, State::INITIALIZED);
  state_ = State::PENDING;
}
// Transition PENDING -> COMPLETED, record the result, and invoke the
// notification callback (if any) with this op.
void AsyncBaseOp::complete(ssize_t result) {
  DCHECK_EQ(state_, State::PENDING);
  state_ = State::COMPLETED;
  result_ = result;
  if (cb_) {
    cb_(this);
  }
}
// Transition PENDING -> CANCELED. Unlike complete(), the notification
// callback is not invoked.
void AsyncBaseOp::cancel() {
  DCHECK_EQ(state_, State::PENDING);
  state_ = State::CANCELED;
}
// Result of a COMPLETED op: >= 0 on success, -errno on failure (kernel
// conventions). Calling this before completion is a fatal error.
ssize_t AsyncBaseOp::result() const {
  CHECK_EQ(state_, State::COMPLETED);
  return result_;
}
// Transition UNINITIALIZED -> INITIALIZED; called by the subclasses'
// pread/pwrite preparation methods before filling in the request.
void AsyncBaseOp::init() {
  CHECK_EQ(state_, State::UNINITIALIZED);
  state_ = State::INITIALIZED;
}
// Debug helper: resolve a file descriptor to the path it refers to by reading
// the /proc/self/fd/<fd> symlink. Returns an empty string if readlink() fails.
std::string AsyncBaseOp::fd2name(int fd) {
  std::string procPath = folly::to<std::string>("/proc/self/fd/", fd);
  char target[PATH_MAX];
  ssize_t len = readlink(procPath.c_str(), target, PATH_MAX);
  if (len < 0) {
    len = 0; // readlink failed; report an empty name
  }
  procPath.assign(target, len);
  return procPath;
}
// Create a context that can hold up to 'capacity' simultaneously pending ops.
// In POLLABLE mode an eventfd is created so completions can be observed via
// poll/epoll on pollFd().
AsyncBase::AsyncBase(size_t capacity, PollMode pollMode) : capacity_(capacity) {
  CHECK_GT(capacity_, 0);
  completed_.reserve(capacity_);
  if (pollMode == POLLABLE) {
    pollFd_ = eventfd(0, EFD_NONBLOCK);
    checkUnixError(pollFd_, "AsyncBase: eventfd creation failed");
  }
}
// Destroying the context while ops are still pending is a fatal error; the
// eventfd (if any) is closed here.
AsyncBase::~AsyncBase() {
  CHECK_EQ(pending_, 0);
  if (pollFd_ != -1) {
    CHECK_ERR(close(pollFd_));
  }
}
// Decrease the pending-op count by one; the previous value must be >= 1.
void AsyncBase::decrementPending() {
  auto previous = pending_.fetch_sub(1, std::memory_order_acq_rel);
  DCHECK_GE(previous, 1);
}
// Hand an INITIALIZED op to the backend. Throws std::range_error when the
// context is already at capacity, and a std::system_error when the backend
// submission call fails.
void AsyncBase::submit(Op* op) {
  CHECK_EQ(op->state(), Op::State::INITIALIZED);
  initializeContext(); // lazily set up the backend context

  // Optimistically bump the pending count and undo it on any failure path;
  // the count may briefly exceed capacity_, which is tolerated.
  auto previouslyPending = pending_.fetch_add(1, std::memory_order_acq_rel);
  if (previouslyPending >= capacity_) {
    decrementPending();
    throw std::range_error("AsyncBase: too many pending requests");
  }

  const int rc = submitOne(op);
  if (rc < 0) {
    decrementPending();
    throwSystemErrorExplicit(-rc, "AsyncBase: io_submit failed");
  }
  DCHECK_EQ(rc, 1);
  ++submitted_;
  op->start();
}
// Block until at least minRequests ops have completed and return them. Only
// valid on an initialized, non-pollable context.
Range<AsyncBase::Op**> AsyncBase::wait(size_t minRequests) {
  CHECK(isInit());
  CHECK_EQ(pollFd_, -1) << "wait() only allowed on non-pollable object";
  auto pendingNow = pending_.load(std::memory_order_acquire);
  CHECK_LE(minRequests, pendingNow);
  return doWait(WaitType::COMPLETE, minRequests, pendingNow, completed_);
}
// Cancel every currently-pending op and return them.
Range<AsyncBase::Op**> AsyncBase::cancel() {
  CHECK(isInit());
  auto pendingNow = pending_.load(std::memory_order_acquire);
  return doWait(WaitType::CANCEL, pendingNow, pendingNow, canceled_);
}
// Reap completions after pollFd() signalled readable. Only valid on a
// pollable context.
Range<AsyncBase::Op**> AsyncBase::pollCompleted() {
  CHECK(isInit());
  CHECK_NE(pollFd_, -1) << "pollCompleted() only allowed on pollable object";
  // Reading the eventfd yields the completion count and resets it to 0, see
  // http://www.kernel.org/doc/man-pages/online/pages/man2/eventfd.2.html
  uint64_t numEvents;
  ssize_t bytesRead;
  do {
    bytesRead = ::read(pollFd_, &numEvents, sizeof(numEvents));
  } while (bytesRead == -1 && errno == EINTR);
  if (UNLIKELY(bytesRead == -1 && errno == EAGAIN)) {
    return Range<Op**>(); // nothing completed
  }
  checkUnixError(bytesRead, "AsyncBase: read from event fd failed");
  DCHECK_EQ(bytesRead, 8);
  DCHECK_GT(numEvents, 0);
  DCHECK_LE(numEvents, pending_);
  // Don't reap more than numEvents, as we've just reset the counter to 0.
  return doWait(WaitType::COMPLETE, numEvents, numEvents, completed_);
}
// A queue that feeds ops into 'asyncBase' while respecting its capacity.
AsyncBaseQueue::AsyncBaseQueue(AsyncBase* asyncBase) : asyncBase_(asyncBase) {}
// Destroying the queue while the underlying AsyncBase still has pending ops
// is a fatal error.
AsyncBaseQueue::~AsyncBaseQueue() {
  CHECK_EQ(asyncBase_->pending(), 0);
}
// Queue an already-created op by wrapping it in a trivial factory.
void AsyncBaseQueue::submit(AsyncBaseOp* op) {
  submit([op]() { return op; });
}
// Queue a deferred op; the factory is invoked only once the underlying
// AsyncBase has room (see maybeDequeue()).
void AsyncBaseQueue::submit(OpFactory op) {
  // Move rather than copy: OpFactory is a std::function that may own
  // captured state that is expensive (or impossible) to copy cheaply.
  queue_.push_back(std::move(op));
  maybeDequeue();
}
// Completion hook interposed on every submitted op: a finished op frees a
// slot, so try to drain more of the queue.
void AsyncBaseQueue::onCompleted(AsyncBaseOp* /* op */) {
  maybeDequeue();
}
void AsyncBaseQueue::maybeDequeue() {
while (!queue_.empty() && asyncBase_->pending() < asyncBase_->capacity()) {
auto& opFactory = queue_.front();
auto op = opFactory();
queue_.pop_front();
// Interpose our completion callback
auto& nextCb = op->notificationCallback();
op->setNotificationCallback([this, nextCb](AsyncBaseOp* op2) {
this->onCompleted(op2);
if (nextCb) {
nextCb(op2);
}
});
asyncBase_->submit(op);
}
}
// debugging helpers:
namespace {

// Expands to a switch case that returns the enumerator's name as a literal.
#define X(c) \
  case c: \
    return #c

// Map an AsyncBaseOp::State to its enumerator name for stream output.
const char* asyncIoOpStateToString(AsyncBaseOp::State state) {
  switch (state) {
    X(AsyncBaseOp::State::UNINITIALIZED);
    X(AsyncBaseOp::State::INITIALIZED);
    X(AsyncBaseOp::State::PENDING);
    X(AsyncBaseOp::State::COMPLETED);
    X(AsyncBaseOp::State::CANCELED);
  }
  return "<INVALID AsyncBaseOp::State>";
}
#undef X

} // namespace
// Stream an op by delegating to its virtual toStream() implementation.
std::ostream& operator<<(std::ostream& os, const AsyncBaseOp& op) {
  op.toStream(os);
  return os;
}

// Stream a state enum as its enumerator name.
std::ostream& operator<<(std::ostream& os, AsyncBaseOp::State state) {
  return os << asyncIoOpStateToString(state);
}
} // namespace folly
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>
#include <atomic>
#include <cstdint>
#include <deque>
#include <functional>
#include <iosfwd>
#include <mutex>
#include <utility>
#include <vector>
#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/portability/SysUio.h>
namespace folly {
class AsyncIOOp;
class IoUringOp;
/**
* An AsyncBaseOp represents a pending operation. You may set a notification
* callback or you may use this class's methods directly.
*
* The op must remain allocated until it is completed or canceled.
*/
class AsyncBaseOp {
  friend class AsyncBase;

 public:
  using NotificationCallback = std::function<void(AsyncBaseOp*)>;

  explicit AsyncBaseOp(NotificationCallback cb = NotificationCallback());
  AsyncBaseOp(const AsyncBaseOp&) = delete;
  AsyncBaseOp& operator=(const AsyncBaseOp&) = delete;
  virtual ~AsyncBaseOp();

  // Lifecycle: UNINITIALIZED -> INITIALIZED (via a pread/pwrite* call)
  // -> PENDING (via AsyncBase::submit) -> COMPLETED or CANCELED.
  // reset() returns a non-PENDING op to UNINITIALIZED.
  enum class State {
    UNINITIALIZED,
    INITIALIZED,
    PENDING,
    COMPLETED,
    CANCELED,
  };

  /**
   * Initiate a read request.
   */
  virtual void pread(int fd, void* buf, size_t size, off_t start) = 0;
  void pread(int fd, Range<unsigned char*> range, off_t start) {
    pread(fd, range.begin(), range.size(), start);
  }
  virtual void preadv(int fd, const iovec* iov, int iovcnt, off_t start) = 0;

  /**
   * Initiate a write request.
   */
  virtual void pwrite(int fd, const void* buf, size_t size, off_t start) = 0;
  void pwrite(int fd, Range<const unsigned char*> range, off_t start) {
    pwrite(fd, range.begin(), range.size(), start);
  }
  virtual void pwritev(int fd, const iovec* iov, int iovcnt, off_t start) = 0;

  // we support only these subclasses: a concrete op returns itself from the
  // getter matching its type, letting backends downcast without RTTI.
  virtual AsyncIOOp* getAsyncIOOp() = 0;
  virtual IoUringOp* getIoUringOp() = 0;

  // ostream output
  virtual void toStream(std::ostream& os) const = 0;

  /**
   * Return the current operation state.
   */
  State state() const {
    return state_;
  }

  /**
   * user data get/set
   */
  void* getUserData() const {
    return userData_;
  }

  void setUserData(void* userData) {
    userData_ = userData;
  }

  /**
   * Reset the operation for reuse. It is an error to call reset() on
   * an Op that is still pending.
   */
  virtual void reset(NotificationCallback cb = NotificationCallback()) = 0;

  // Replace the callback invoked when this op completes.
  void setNotificationCallback(NotificationCallback cb) {
    cb_ = std::move(cb);
  }

  const NotificationCallback& notificationCallback() const {
    return cb_;
  }

  /**
   * Retrieve the result of this operation. Returns >=0 on success,
   * -errno on failure (that is, using the Linux kernel error reporting
   * conventions). Use checkKernelError (folly/Exception.h) on the result to
   * throw a std::system_error in case of error instead.
   *
   * It is an error to call this if the Op hasn't completed.
   */
  ssize_t result() const;

  // debug helper
  static std::string fd2name(int fd);

 protected:
  // State transitions used by AsyncBase and the concrete subclasses.
  void init();
  void start();
  void complete(ssize_t result);
  void cancel();

  NotificationCallback cb_;
  State state_;
  ssize_t result_;
  void* userData_{nullptr};
};
std::ostream& operator<<(std::ostream& stream, const AsyncBaseOp& op);
std::ostream& operator<<(std::ostream& stream, AsyncBaseOp::State state);
/**
* Generic C++ interface around Linux IO(io_submit, io_uring)
*/
class AsyncBase {
 public:
  using Op = AsyncBaseOp;

  // POLLABLE creates an eventfd exposed via pollFd(); NOT_POLLABLE requires
  // using wait() instead.
  enum PollMode {
    NOT_POLLABLE,
    POLLABLE,
  };

  /**
   * Create an AsyncBase context capable of holding at most 'capacity' pending
   * requests at the same time. As requests complete, others can be scheduled,
   * as long as this limit is not exceeded.
   *
   * If pollMode is POLLABLE, pollFd() will return a file descriptor that
   * can be passed to poll / epoll / select and will become readable when
   * any IOs on this AsyncBase have completed. If you do this, you must use
   * pollCompleted() instead of wait() -- do not read from the pollFd()
   * file descriptor directly.
   *
   * You may use the same AsyncBase object from multiple threads, as long as
   * there is only one concurrent caller of wait() / pollCompleted() / cancel()
   * (perhaps by always calling it from the same thread, or by providing
   * appropriate mutual exclusion). In this case, pending() returns a snapshot
   * of the current number of pending requests.
   */
  explicit AsyncBase(size_t capacity, PollMode pollMode = NOT_POLLABLE);
  AsyncBase(const AsyncBase&) = delete;
  AsyncBase& operator=(const AsyncBase&) = delete;
  virtual ~AsyncBase();

  /**
   * Wait for at least minRequests to complete. Returns the requests that
   * have completed; the returned range is valid until the next call to
   * wait(). minRequests may be 0 to not block.
   */
  Range<Op**> wait(size_t minRequests);

  /**
   * Cancel all pending requests and return them; the returned range is
   * valid until the next call to cancel().
   */
  Range<Op**> cancel();

  /**
   * Return the number of pending requests.
   */
  size_t pending() const {
    return pending_;
  }

  /**
   * Return the maximum number of requests that can be kept outstanding
   * at any one time.
   */
  size_t capacity() const {
    return capacity_;
  }

  /**
   * Return the accumulative number of submitted I/O, since this object
   * has been created.
   */
  size_t totalSubmits() const {
    return submitted_;
  }

  /**
   * If POLLABLE, return a file descriptor that can be passed to poll / epoll
   * and will become readable when any async IO operations have completed.
   * If NOT_POLLABLE, return -1.
   */
  int pollFd() const {
    return pollFd_;
  }

  /**
   * If POLLABLE, call instead of wait after the file descriptor returned
   * by pollFd() became readable. The returned range is valid until the next
   * call to pollCompleted().
   */
  Range<Op**> pollCompleted();

  /**
   * Submit an op for execution.
   */
  void submit(Op* op);

 protected:
  // Helpers for subclasses' doWait() implementations: transition an op
  // through its protected state machine.
  void complete(Op* op, ssize_t result) {
    op->complete(result);
  }

  void cancel(Op* op) {
    op->cancel();
  }

  // True once initializeContext() has set up the backend context.
  bool isInit() const {
    return init_.load(std::memory_order_relaxed);
  }

  void decrementPending();

  // Backend hooks implemented by the concrete subclasses (AIO / io_uring).
  virtual void initializeContext() = 0;
  virtual int submitOne(AsyncBase::Op* op) = 0;

  enum class WaitType { COMPLETE, CANCEL };
  virtual Range<AsyncBase::Op**> doWait(
      WaitType type,
      size_t minRequests,
      size_t maxRequests,
      std::vector<Op*>& result) = 0;

  std::atomic<bool> init_{false};
  std::mutex initMutex_; // guards backend initialization
  std::atomic<size_t> pending_{0}; // submitted but not yet reaped
  std::atomic<size_t> submitted_{0}; // total ops ever submitted
  const size_t capacity_;
  int pollFd_{-1}; // eventfd in POLLABLE mode, -1 otherwise
  std::vector<Op*> completed_;
  std::vector<Op*> canceled_;
};
/**
* Wrapper around AsyncBase that allows you to schedule more requests than
* the AsyncBase's object capacity. Other requests are queued and processed
* in a FIFO order.
*/
class AsyncBaseQueue {
 public:
  /**
   * Create a queue, using the given AsyncBase object.
   * The AsyncBase object may not be used by anything else until the
   * queue is destroyed.
   */
  explicit AsyncBaseQueue(AsyncBase* asyncBase);
  ~AsyncBaseQueue();

  // Number of ops still waiting to be handed to the AsyncBase.
  size_t queued() const {
    return queue_.size();
  }

  /**
   * Submit an op to the AsyncBase queue. The op will be queued until
   * the AsyncBase object has room.
   */
  void submit(AsyncBaseOp* op);

  /**
   * Submit a delayed op to the AsyncBase queue; this allows you to postpone
   * creation of the Op (which may require allocating memory, etc) until
   * the AsyncBase object has room.
   */
  using OpFactory = std::function<AsyncBaseOp*()>;
  void submit(OpFactory op);

 private:
  void onCompleted(AsyncBaseOp* op);
  void maybeDequeue();

  AsyncBase* asyncBase_; // not owned; only stored and called into
  std::deque<OpFactory> queue_;
};
} // namespace folly
......@@ -31,10 +31,60 @@
#include <folly/String.h>
#include <folly/portability/Unistd.h>
// debugging helpers
namespace {

// Expands to a switch case that returns the enumerator's name as a literal.
#define X(c) \
  case c: \
    return #c

// Map a libaio iocb opcode to its enumerator name.
const char* iocbCmdToString(short int cmd_short) {
  io_iocb_cmd cmd = static_cast<io_iocb_cmd>(cmd_short);
  switch (cmd) {
    X(IO_CMD_PREAD);
    X(IO_CMD_PWRITE);
    X(IO_CMD_FSYNC);
    X(IO_CMD_FDSYNC);
    X(IO_CMD_POLL);
    X(IO_CMD_NOOP);
    X(IO_CMD_PREADV);
    X(IO_CMD_PWRITEV);
  };
  return "<INVALID io_iocb_cmd>";
}
#undef X

// Append a human-readable description of a libaio control block to 'os'.
void toStream(std::ostream& os, const iocb& cb) {
  os << folly::format(
      "data={}, key={}, opcode={}, reqprio={}, fd={}, f={}, ",
      cb.data,
      cb.key,
      iocbCmdToString(cb.aio_lio_opcode),
      cb.aio_reqprio,
      cb.aio_fildes,
      folly::AsyncBaseOp::fd2name(cb.aio_fildes));
  switch (cb.aio_lio_opcode) {
    case IO_CMD_PREAD:
    case IO_CMD_PWRITE:
      // Read/write ops additionally carry buffer, offset and length.
      os << folly::format(
          "buf={}, offset={}, nbytes={}, ",
          cb.u.c.buf,
          cb.u.c.offset,
          cb.u.c.nbytes);
      break;
    default:
      os << "[TODO: write debug string for "
         << iocbCmdToString(cb.aio_lio_opcode) << "] ";
      break;
  }
}

} // namespace
namespace folly {
AsyncIOOp::AsyncIOOp(NotificationCallback cb)
: cb_(std::move(cb)), state_(State::UNINITIALIZED), result_(-EINVAL) {
AsyncIOOp::AsyncIOOp(NotificationCallback cb) : AsyncBaseOp(std::move(cb)) {
memset(&iocb_, 0, sizeof(iocb_));
}
......@@ -47,31 +97,6 @@ void AsyncIOOp::reset(NotificationCallback cb) {
}
AsyncIOOp::~AsyncIOOp() {
CHECK_NE(state_, State::PENDING);
}
void AsyncIOOp::start() {
DCHECK_EQ(state_, State::INITIALIZED);
state_ = State::PENDING;
}
void AsyncIOOp::complete(ssize_t result) {
DCHECK_EQ(state_, State::PENDING);
state_ = State::COMPLETED;
result_ = result;
if (cb_) {
cb_(this);
}
}
void AsyncIOOp::cancel() {
DCHECK_EQ(state_, State::PENDING);
state_ = State::CANCELED;
}
ssize_t AsyncIOOp::result() const {
CHECK_EQ(state_, State::COMPLETED);
return result_;
}
void AsyncIOOp::pread(int fd, void* buf, size_t size, off_t start) {
......@@ -79,10 +104,6 @@ void AsyncIOOp::pread(int fd, void* buf, size_t size, off_t start) {
io_prep_pread(&iocb_, fd, buf, size, start);
}
void AsyncIOOp::pread(int fd, Range<unsigned char*> range, off_t start) {
pread(fd, range.begin(), range.size(), start);
}
void AsyncIOOp::preadv(int fd, const iovec* iov, int iovcnt, off_t start) {
init();
io_prep_preadv(&iocb_, fd, iov, iovcnt, start);
......@@ -93,49 +114,49 @@ void AsyncIOOp::pwrite(int fd, const void* buf, size_t size, off_t start) {
io_prep_pwrite(&iocb_, fd, const_cast<void*>(buf), size, start);
}
void AsyncIOOp::pwrite(int fd, Range<const unsigned char*> range, off_t start) {
pwrite(fd, range.begin(), range.size(), start);
}
void AsyncIOOp::pwritev(int fd, const iovec* iov, int iovcnt, off_t start) {
init();
io_prep_pwritev(&iocb_, fd, iov, iovcnt, start);
}
void AsyncIOOp::init() {
CHECK_EQ(state_, State::UNINITIALIZED);
state_ = State::INITIALIZED;
}
void AsyncIOOp::toStream(std::ostream& os) const {
os << "{" << state_ << ", ";
AsyncIO::AsyncIO(size_t capacity, PollMode pollMode) : capacity_(capacity) {
CHECK_GT(capacity_, 0);
completed_.reserve(capacity_);
if (pollMode == POLLABLE) {
pollFd_ = eventfd(0, EFD_NONBLOCK);
checkUnixError(pollFd_, "AsyncIO: eventfd creation failed");
if (state_ != AsyncBaseOp::State::UNINITIALIZED) {
::toStream(os, iocb_);
}
if (state_ == AsyncBaseOp::State::COMPLETED) {
os << "result=" << result_;
if (result_ < 0) {
os << " (" << errnoStr(-result_) << ')';
}
os << ", ";
}
os << "}";
}
std::ostream& operator<<(std::ostream& os, const AsyncIOOp& op) {
op.toStream(os);
return os;
}
AsyncIO::AsyncIO(size_t capacity, PollMode pollMode)
: AsyncBase(capacity, pollMode) {}
AsyncIO::~AsyncIO() {
CHECK_EQ(pending_, 0);
if (ctx_) {
int rc = io_queue_release(ctx_);
CHECK_EQ(rc, 0) << "io_queue_release: " << errnoStr(-rc);
}
if (pollFd_ != -1) {
CHECK_ERR(close(pollFd_));
}
}
void AsyncIO::decrementPending() {
auto p = pending_.fetch_add(-1, std::memory_order_acq_rel);
DCHECK_GE(p, 1);
}
void AsyncIO::initializeContext() {
if (!ctxSet_.load(std::memory_order_acquire)) {
if (!init_.load(std::memory_order_acquire)) {
std::lock_guard<std::mutex> lock(initMutex_);
if (!ctxSet_.load(std::memory_order_relaxed)) {
if (!init_.load(std::memory_order_relaxed)) {
int rc = io_queue_init(capacity_, &ctx_);
// returns negative errno
if (rc == -EAGAIN) {
......@@ -156,78 +177,33 @@ void AsyncIO::initializeContext() {
checkKernelError(rc, "AsyncIO: io_queue_init failed");
DCHECK(ctx_);
ctxSet_.store(true, std::memory_order_release);
init_.store(true, std::memory_order_release);
}
}
}
void AsyncIO::submit(Op* op) {
CHECK_EQ(op->state(), Op::State::INITIALIZED);
initializeContext(); // on demand
int AsyncIO::submitOne(AsyncBase::Op* op) {
// -1 return here will trigger throw if op isn't an AsyncIOOp
AsyncIOOp* aop = op->getAsyncIOOp();
// We can increment past capacity, but we'll clean up after ourselves.
auto p = pending_.fetch_add(1, std::memory_order_acq_rel);
if (p >= capacity_) {
decrementPending();
throw std::range_error("AsyncIO: too many pending requests");
if (!aop) {
return -1;
}
iocb* cb = &op->iocb_;
iocb* cb = &aop->iocb_;
cb->data = nullptr; // unused
if (pollFd_ != -1) {
io_set_eventfd(cb, pollFd_);
}
int rc = io_submit(ctx_, 1, &cb);
if (rc < 0) {
decrementPending();
throwSystemErrorExplicit(-rc, "AsyncIO: io_submit failed");
}
submitted_++;
DCHECK_EQ(rc, 1);
op->start();
}
Range<AsyncIO::Op**> AsyncIO::wait(size_t minRequests) {
CHECK(ctx_);
CHECK_EQ(pollFd_, -1) << "wait() only allowed on non-pollable object";
auto p = pending_.load(std::memory_order_acquire);
CHECK_LE(minRequests, p);
return doWait(WaitType::COMPLETE, minRequests, p, completed_);
return io_submit(ctx_, 1, &cb);
}
Range<AsyncIO::Op**> AsyncIO::cancel() {
CHECK(ctx_);
auto p = pending_.load(std::memory_order_acquire);
return doWait(WaitType::CANCEL, p, p, canceled_);
}
Range<AsyncIO::Op**> AsyncIO::pollCompleted() {
CHECK(ctx_);
CHECK_NE(pollFd_, -1) << "pollCompleted() only allowed on pollable object";
uint64_t numEvents;
// This sets the eventFd counter to 0, see
// http://www.kernel.org/doc/man-pages/online/pages/man2/eventfd.2.html
ssize_t rc;
do {
rc = ::read(pollFd_, &numEvents, 8);
} while (rc == -1 && errno == EINTR);
if (UNLIKELY(rc == -1 && errno == EAGAIN)) {
return Range<Op**>(); // nothing completed
}
checkUnixError(rc, "AsyncIO: read from event fd failed");
DCHECK_EQ(rc, 8);
DCHECK_GT(numEvents, 0);
DCHECK_LE(numEvents, pending_);
// Don't reap more than numEvents, as we've just reset the counter to 0.
return doWait(WaitType::COMPLETE, numEvents, numEvents, completed_);
}
Range<AsyncIO::Op**> AsyncIO::doWait(
Range<AsyncBase::Op**> AsyncIO::doWait(
WaitType type,
size_t minRequests,
size_t maxRequests,
std::vector<Op*>& result) {
std::vector<AsyncBase::Op*>& result) {
io_event events[maxRequests];
// Unfortunately, Linux AIO doesn't implement io_cancel, so even for
......@@ -255,16 +231,16 @@ Range<AsyncIO::Op**> AsyncIO::doWait(
result.clear();
for (size_t i = 0; i < count; ++i) {
DCHECK(events[i].obj);
CHECK(events[i].obj);
Op* op = boost::intrusive::get_parent_from_member(
events[i].obj, &AsyncIOOp::iocb_);
decrementPending();
switch (type) {
case WaitType::COMPLETE:
op->complete(events[i].res);
complete(op, events[i].res);
break;
case WaitType::CANCEL:
op->cancel();
cancel(op);
break;
}
result.push_back(op);
......@@ -273,138 +249,4 @@ Range<AsyncIO::Op**> AsyncIO::doWait(
return range(result);
}
AsyncIOQueue::AsyncIOQueue(AsyncIO* asyncIO) : asyncIO_(asyncIO) {}
AsyncIOQueue::~AsyncIOQueue() {
CHECK_EQ(asyncIO_->pending(), 0);
}
void AsyncIOQueue::submit(AsyncIOOp* op) {
submit([op]() { return op; });
}
void AsyncIOQueue::submit(OpFactory op) {
queue_.push_back(op);
maybeDequeue();
}
void AsyncIOQueue::onCompleted(AsyncIOOp* /* op */) {
maybeDequeue();
}
void AsyncIOQueue::maybeDequeue() {
while (!queue_.empty() && asyncIO_->pending() < asyncIO_->capacity()) {
auto& opFactory = queue_.front();
auto op = opFactory();
queue_.pop_front();
// Interpose our completion callback
auto& nextCb = op->notificationCallback();
op->setNotificationCallback([this, nextCb](AsyncIOOp* op2) {
this->onCompleted(op2);
if (nextCb) {
nextCb(op2);
}
});
asyncIO_->submit(op);
}
}
// debugging helpers:
namespace {
#define X(c) \
case c: \
return #c
const char* asyncIoOpStateToString(AsyncIOOp::State state) {
switch (state) {
X(AsyncIOOp::State::UNINITIALIZED);
X(AsyncIOOp::State::INITIALIZED);
X(AsyncIOOp::State::PENDING);
X(AsyncIOOp::State::COMPLETED);
X(AsyncIOOp::State::CANCELED);
}
return "<INVALID AsyncIOOp::State>";
}
const char* iocbCmdToString(short int cmd_short) {
io_iocb_cmd cmd = static_cast<io_iocb_cmd>(cmd_short);
switch (cmd) {
X(IO_CMD_PREAD);
X(IO_CMD_PWRITE);
X(IO_CMD_FSYNC);
X(IO_CMD_FDSYNC);
X(IO_CMD_POLL);
X(IO_CMD_NOOP);
X(IO_CMD_PREADV);
X(IO_CMD_PWRITEV);
};
return "<INVALID io_iocb_cmd>";
}
#undef X
std::string fd2name(int fd) {
std::string path = folly::to<std::string>("/proc/self/fd/", fd);
char link[PATH_MAX];
const ssize_t length =
std::max<ssize_t>(readlink(path.c_str(), link, PATH_MAX), 0);
return path.assign(link, length);
}
std::ostream& operator<<(std::ostream& os, const iocb& cb) {
os << folly::format(
"data={}, key={}, opcode={}, reqprio={}, fd={}, f={}, ",
cb.data,
cb.key,
iocbCmdToString(cb.aio_lio_opcode),
cb.aio_reqprio,
cb.aio_fildes,
fd2name(cb.aio_fildes));
switch (cb.aio_lio_opcode) {
case IO_CMD_PREAD:
case IO_CMD_PWRITE:
os << folly::format(
"buf={}, offset={}, nbytes={}, ",
cb.u.c.buf,
cb.u.c.offset,
cb.u.c.nbytes);
break;
default:
os << "[TODO: write debug string for "
<< iocbCmdToString(cb.aio_lio_opcode) << "] ";
break;
}
return os;
}
} // namespace
std::ostream& operator<<(std::ostream& os, const AsyncIOOp& op) {
os << "{" << op.state_ << ", ";
if (op.state_ != AsyncIOOp::State::UNINITIALIZED) {
os << op.iocb_;
}
if (op.state_ == AsyncIOOp::State::COMPLETED) {
os << "result=" << op.result_;
if (op.result_ < 0) {
os << " (" << errnoStr(-op.result_) << ')';
}
os << ", ";
}
return os << "}";
}
std::ostream& operator<<(std::ostream& os, AsyncIOOp::State state) {
return os << asyncIoOpStateToString(state);
}
} // namespace folly
......@@ -16,267 +16,80 @@
#pragma once
#include <sys/types.h>
#include <atomic>
#include <cstdint>
#include <deque>
#include <functional>
#include <iosfwd>
#include <mutex>
#include <utility>
#include <vector>
#include <libaio.h>
#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/portability/SysUio.h>
#include <folly/experimental/io/AsyncBase.h>
namespace folly {
/**
* An AsyncIOOp represents a pending operation. You may set a notification
* callback or you may use this class's methods directly.
*
* The op must remain allocated until it is completed or canceled.
*/
class AsyncIOOp {
class AsyncIOOp : public AsyncBaseOp {
friend class AsyncIO;
friend std::ostream& operator<<(std::ostream& stream, const AsyncIOOp& o);
public:
typedef std::function<void(AsyncIOOp*)> NotificationCallback;
explicit AsyncIOOp(NotificationCallback cb = NotificationCallback());
AsyncIOOp(const AsyncIOOp&) = delete;
AsyncIOOp& operator=(const AsyncIOOp&) = delete;
~AsyncIOOp();
enum class State {
UNINITIALIZED,
INITIALIZED,
PENDING,
COMPLETED,
CANCELED,
};
~AsyncIOOp() override;
/**
* Initiate a read request.
*/
void pread(int fd, void* buf, size_t size, off_t start);
void pread(int fd, Range<unsigned char*> range, off_t start);
void preadv(int fd, const iovec* iov, int iovcnt, off_t start);
void pread(int fd, void* buf, size_t size, off_t start) override;
void preadv(int fd, const iovec* iov, int iovcnt, off_t start) override;
/**
* Initiate a write request.
*/
void pwrite(int fd, const void* buf, size_t size, off_t start);
void pwrite(int fd, Range<const unsigned char*> range, off_t start);
void pwritev(int fd, const iovec* iov, int iovcnt, off_t start);
void pwrite(int fd, const void* buf, size_t size, off_t start) override;
void pwritev(int fd, const iovec* iov, int iovcnt, off_t start) override;
/**
* Return the current operation state.
*/
State state() const {
return state_;
}
/**
* Reset the operation for reuse. It is an error to call reset() on
* an Op that is still pending.
*/
void reset(NotificationCallback cb = NotificationCallback());
void reset(NotificationCallback cb = NotificationCallback()) override;
void setNotificationCallback(NotificationCallback cb) {
cb_ = std::move(cb);
AsyncIOOp* getAsyncIOOp() override {
return this;
}
const NotificationCallback& notificationCallback() const {
return cb_;
IoUringOp* getIoUringOp() override {
return nullptr;
}
/**
* Retrieve the result of this operation. Returns >=0 on success,
* -errno on failure (that is, using the Linux kernel error reporting
* conventions). Use checkKernelError (folly/Exception.h) on the result to
* throw a std::system_error in case of error instead.
*
* It is an error to call this if the Op hasn't completed.
*/
ssize_t result() const;
void toStream(std::ostream& os) const override;
private:
void init();
void start();
void complete(ssize_t result);
void cancel();
NotificationCallback cb_;
iocb iocb_;
State state_;
ssize_t result_;
};
std::ostream& operator<<(std::ostream& stream, const AsyncIOOp& o);
std::ostream& operator<<(std::ostream& stream, AsyncIOOp::State state);
std::ostream& operator<<(std::ostream& stream, const AsyncIOOp& op);
/**
* C++ interface around Linux Async IO.
*/
class AsyncIO {
class AsyncIO : public AsyncBase {
public:
typedef AsyncIOOp Op;
enum PollMode {
NOT_POLLABLE,
POLLABLE,
};
using Op = AsyncIOOp;
/**
* Create an AsyncIO context capable of holding at most 'capacity' pending
* requests at the same time. As requests complete, others can be scheduled,
* as long as this limit is not exceeded.
*
* Note: the maximum number of allowed concurrent requests is controlled
* by the fs.aio-max-nr sysctl, the default value is usually 64K.
*
* If pollMode is POLLABLE, pollFd() will return a file descriptor that
* can be passed to poll / epoll / select and will become readable when
* any IOs on this AsyncIO have completed. If you do this, you must use
* pollCompleted() instead of wait() -- do not read from the pollFd()
* file descriptor directly.
*
* You may use the same AsyncIO object from multiple threads, as long as
* there is only one concurrent caller of wait() / pollCompleted() / cancel()
* (perhaps by always calling it from the same thread, or by providing
* appropriate mutual exclusion). In this case, pending() returns a snapshot
* of the current number of pending requests.
*/
explicit AsyncIO(size_t capacity, PollMode pollMode = NOT_POLLABLE);
AsyncIO(const AsyncIO&) = delete;
AsyncIO& operator=(const AsyncIO&) = delete;
~AsyncIO();
/**
* Wait for at least minRequests to complete. Returns the requests that
* have completed; the returned range is valid until the next call to
* wait(). minRequests may be 0 to not block.
*/
Range<Op**> wait(size_t minRequests);
/**
* Cancel all pending requests and return them; the returned range is
* valid until the next call to cancel().
*/
Range<Op**> cancel();
/**
* Return the number of pending requests.
*/
size_t pending() const {
return pending_;
}
/**
* Return the maximum number of requests that can be kept outstanding
* at any one time.
*/
size_t capacity() const {
return capacity_;
}
/**
* Return the accumulative number of submitted I/O, since this object
* has been created.
*/
size_t totalSubmits() const {
return submitted_;
}
/**
* If POLLABLE, return a file descriptor that can be passed to poll / epoll
* and will become readable when any async IO operations have completed.
* If NOT_POLLABLE, return -1.
*/
int pollFd() const {
return pollFd_;
}
/**
* If POLLABLE, call instead of wait after the file descriptor returned
* by pollFd() became readable. The returned range is valid until the next
* call to pollCompleted().
*/
Range<Op**> pollCompleted();
/**
* Submit an op for execution.
*/
void submit(Op* op);
~AsyncIO() override;
private:
void decrementPending();
void initializeContext();
void initializeContext() override;
int submitOne(AsyncBase::Op* op) override;
enum class WaitType { COMPLETE, CANCEL };
Range<AsyncIO::Op**> doWait(
Range<AsyncBase::Op**> doWait(
WaitType type,
size_t minRequests,
size_t maxRequests,
std::vector<Op*>& result);
std::vector<AsyncBase::Op*>& result) override;
io_context_t ctx_{nullptr};
std::atomic<bool> ctxSet_{false};
std::mutex initMutex_;
std::atomic<size_t> pending_{0};
std::atomic<size_t> submitted_{0};
const size_t capacity_;
int pollFd_{-1};
std::vector<Op*> completed_;
std::vector<Op*> canceled_;
};
/**
* Wrapper around AsyncIO that allows you to schedule more requests than
* the AsyncIO's object capacity. Other requests are queued and processed
* in a FIFO order.
*/
class AsyncIOQueue {
public:
/**
* Create a queue, using the given AsyncIO object.
* The AsyncIO object may not be used by anything else until the
* queue is destroyed.
*/
explicit AsyncIOQueue(AsyncIO* asyncIO);
~AsyncIOQueue();
size_t queued() const {
return queue_.size();
}
/**
* Submit an op to the AsyncIO queue. The op will be queued until
* the AsyncIO object has room.
*/
void submit(AsyncIOOp* op);
/**
* Submit a delayed op to the AsyncIO queue; this allows you to postpone
* creation of the Op (which may require allocating memory, etc) until
* the AsyncIO object has room.
*/
typedef std::function<AsyncIOOp*()> OpFactory;
void submit(OpFactory op);
private:
void onCompleted(AsyncIOOp* op);
void maybeDequeue();
AsyncIO* asyncIO_;
std::deque<OpFactory> queue_;
};
using AsyncIOQueue = AsyncBaseQueue;
} // namespace folly
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/io/IoUring.h>
#include <sys/eventfd.h>
#include <cerrno>
#include <ostream>
#include <stdexcept>
#include <string>
#include <boost/intrusive/parent_from_member.hpp>
#include <glog/logging.h>
#include <folly/Exception.h>
#include <folly/Format.h>
#include <folly/Likely.h>
#include <folly/String.h>
#include <folly/portability/Unistd.h>
// helpers
namespace {
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
// Round `num` up to the nearest power of two (identity for exact powers).
// Returns 0 for an input of 0, and wraps to 0 when the result would not
// fit in 32 bits (num > 2^31).
uint32_t roundUpToNextPowerOfTwo(uint32_t num) {
  if (num == 0) {
    return 0;
  }
  // Double `result` until it covers num - 1; this runs exactly
  // ceil(log2(num)) iterations and is equivalent to the classic
  // bit-smearing trick.
  uint32_t result = 1;
  for (uint32_t remaining = num - 1; remaining != 0; remaining >>= 1) {
    result <<= 1;
  }
  return result;
}
// Expands to a case label that returns the stringified opcode name.
#define X(c) \
  case c:    \
    return #c

// Map an io_uring opcode to its symbolic name; used for debug output only.
const char* ioUringOpToString(unsigned char op) {
  switch (op) {
    X(IORING_OP_NOP);
    X(IORING_OP_READV);
    X(IORING_OP_WRITEV);
    X(IORING_OP_FSYNC);
    X(IORING_OP_READ_FIXED);
    X(IORING_OP_WRITE_FIXED);
    X(IORING_OP_POLL_ADD);
    X(IORING_OP_POLL_REMOVE);
    X(IORING_OP_SYNC_FILE_RANGE);
    X(IORING_OP_SENDMSG);
    X(IORING_OP_RECVMSG);
    X(IORING_OP_TIMEOUT);
  } // note: removed the stray ';' that followed this switch block
  return "<INVALID op>";
}
#undef X
// Pretty-print one io_uring submission queue entry (SQE) for debugging.
void toStream(std::ostream& os, const struct io_uring_sqe& sqe) {
  os << folly::format(
      "user_data={}, opcode={}, ioprio={}, f={}, ",
      sqe.user_data,
      ioUringOpToString(sqe.opcode),
      sqe.ioprio,
      folly::AsyncBaseOp::fd2name(sqe.fd));
  switch (sqe.opcode) {
    case IORING_OP_READV:
    case IORING_OP_WRITEV: {
      // Vectored I/O: dump each iovec together with the file offset it
      // will be read from / written to.
      auto offset = sqe.off;
      auto* iovec = reinterpret_cast<struct iovec*>(sqe.addr);
      os << "{";
      for (unsigned int i = 0; i < sqe.len; i++) {
        if (i) {
          os << ",";
        }
        os << folly::format(
            "buf={}, offset={}, nbytes={}",
            iovec[i].iov_base,
            offset,
            iovec[i].iov_len);
        // advance the offset
        offset += iovec[i].iov_len;
      }
      os << "}";
    } break;
    default:
      // Other opcodes are printed generically for now.
      os << "[TODO: write debug string for " << ioUringOpToString(sqe.opcode)
         << "] ";
      break;
  }
}
} // namespace
namespace folly {
IoUringOp::IoUringOp(NotificationCallback cb) : AsyncBaseOp(std::move(cb)) {}

// Reuse this op for a new request. Only legal once the previous request
// has completed or been canceled (never while PENDING).
void IoUringOp::reset(NotificationCallback cb) {
  CHECK_NE(state_, State::PENDING);
  cb_ = std::move(cb);
  state_ = State::UNINITIALIZED;
  result_ = -EINVAL;
}

IoUringOp::~IoUringOp() {}

// Prepare a single-buffer read; implemented via the vectored readv path
// using the op's embedded one-element iovec.
void IoUringOp::pread(int fd, void* buf, size_t size, off_t start) {
  init();
  iov_[0].iov_base = buf;
  iov_[0].iov_len = size;
  io_uring_prep_readv(&sqe_, fd, iov_, 1, start);
  // Stash `this` in the SQE so doWait() can recover the op from the CQE.
  io_uring_sqe_set_data(&sqe_, this);
}

// Prepare a vectored read. NOTE(review): `iov` must stay alive until the
// request completes — the SQE references it; confirm against callers.
void IoUringOp::preadv(int fd, const iovec* iov, int iovcnt, off_t start) {
  init();
  io_uring_prep_readv(&sqe_, fd, iov, iovcnt, start);
  io_uring_sqe_set_data(&sqe_, this);
}

// Prepare a single-buffer write via the vectored writev path.
void IoUringOp::pwrite(int fd, const void* buf, size_t size, off_t start) {
  init();
  iov_[0].iov_base = const_cast<void*>(buf);
  iov_[0].iov_len = size;
  io_uring_prep_writev(&sqe_, fd, iov_, 1, start);
  io_uring_sqe_set_data(&sqe_, this);
}

// Prepare a vectored write; same iovec-lifetime caveat as preadv().
void IoUringOp::pwritev(int fd, const iovec* iov, int iovcnt, off_t start) {
  init();
  io_uring_prep_writev(&sqe_, fd, iov, iovcnt, start);
  io_uring_sqe_set_data(&sqe_, this);
}
// Debug-print this op: state, the prepared SQE (once initialized), and the
// result (with errno text for failures) once completed.
void IoUringOp::toStream(std::ostream& os) const {
  os << "{" << state_ << ", ";
  if (state_ != AsyncBaseOp::State::UNINITIALIZED) {
    ::toStream(os, sqe_);
  }
  if (state_ == AsyncBaseOp::State::COMPLETED) {
    os << "result=" << result_;
    if (result_ < 0) {
      // Negative results carry -errno, per io_uring conventions.
      os << " (" << errnoStr(-result_) << ')';
    }
    os << ", ";
  }
  os << "}";
}

std::ostream& operator<<(std::ostream& os, const IoUringOp& op) {
  op.toStream(os);
  return os;
}
// Construct an io_uring-backed AsyncBase. The ring itself is created lazily
// in initializeContext(); here we only record the parameters. maxSubmit is
// clamped to capacity.
IoUring::IoUring(size_t capacity, PollMode pollMode, size_t maxSubmit)
    : AsyncBase(capacity, pollMode),
      maxSubmit_((maxSubmit <= capacity) ? maxSubmit : capacity) {
  ::memset(&ioRing_, 0, sizeof(ioRing_));
  ::memset(&params_, 0, sizeof(params_));
  // Ask the kernel for a completion queue large enough to hold `capacity`
  // entries (rounded up to a power of two, as io_uring requires).
  params_.flags |= IORING_SETUP_CQSIZE;
  params_.cq_entries = roundUpToNextPowerOfTwo(capacity_);
}

IoUring::~IoUring() {
  // All requests must have completed or been canceled before teardown.
  CHECK_EQ(pending_, 0);
  if (ioRing_.ring_fd > 0) {
    ::io_uring_queue_exit(&ioRing_);
    ioRing_.ring_fd = -1;
  }
}
bool IoUring::isAvailable() {
IoUring ioUring(1);
try {
ioUring.initializeContext();
} catch (...) {
return false;
}
return true;
}
// Lazily create the io_uring instance, using double-checked locking so the
// fast path after initialization is a single acquire load.
void IoUring::initializeContext() {
  if (!init_.load(std::memory_order_acquire)) {
    std::lock_guard<std::mutex> lock(initMutex_);
    // Re-check under the lock; another thread may have initialized already.
    if (!init_.load(std::memory_order_relaxed)) {
      int rc = ::io_uring_queue_init_params(
          roundUpToNextPowerOfTwo(maxSubmit_), &ioRing_, &params_);
      checkKernelError(rc, "IoUring: io_uring_queue_init_params failed");
      DCHECK_GT(ioRing_.ring_fd, 0);
      if (pollFd_ != -1) {
        // POLLABLE mode: have the kernel signal completions via the eventfd
        // so pollFd_ becomes readable.
        CHECK_ERR(io_uring_register_eventfd(&ioRing_, pollFd_));
      }
      // Release-publish so the acquire load above sees a fully set-up ring.
      init_.store(true, std::memory_order_release);
    }
  }
}
// Submit a single op to the ring. Returns the (positive) number of SQEs
// submitted on success, or -1 when the op is not an IoUringOp or no SQE
// slot is available.
int IoUring::submitOne(AsyncBase::Op* op) {
  // -1 return here will trigger throw if op isn't an IoUringOp
  IoUringOp* iop = op->getIoUringOp();
  if (!iop) {
    return -1;
  }
  // Serialize SQE acquisition + submit; get_sqe/submit are not safe to run
  // concurrently on the same ring.
  SharedMutex::WriteHolder lk(submitMutex_);
  auto* sqe = io_uring_get_sqe(&ioRing_);
  if (!sqe) {
    return -1; // submission queue is full
  }
  // Copy the op's prepared SQE into the ring slot, then hand it to the
  // kernel.
  *sqe = iop->getSqe();
  return io_uring_submit(&ioRing_);
}
// Reap up to maxRequests completions, blocking in the kernel until at least
// minRequests have been consumed (minRequests may be 0 for a non-blocking
// poll). The returned range aliases `result` and is valid until the next
// call that reuses it.
Range<AsyncBase::Op**> IoUring::doWait(
    WaitType type,
    size_t minRequests,
    size_t maxRequests,
    std::vector<AsyncBase::Op*>& result) {
  result.clear();
  size_t count = 0;
  while (count < maxRequests) {
    struct io_uring_cqe* cqe = nullptr;
    if (!io_uring_peek_cqe(&ioRing_, &cqe) && cqe) {
      // A completion entry is ready; recover the op pointer stashed in the
      // SQE at submit time.
      count++;
      Op* op = reinterpret_cast<Op*>(io_uring_cqe_get_data(cqe));
      CHECK(op);
      auto res = cqe->res;
      // Mark the CQE consumed before invoking callbacks so the kernel can
      // reuse the ring slot.
      io_uring_cqe_seen(&ioRing_, cqe);
      decrementPending();
      switch (type) {
        case WaitType::COMPLETE:
          op->complete(res);
          break;
        case WaitType::CANCEL:
          op->cancel();
          break;
      }
      result.push_back(op);
    } else {
      if (count < minRequests) {
        // Nothing ready but the caller's minimum isn't met: block until the
        // kernel has at least (minRequests - count) more completions.
        io_uring_enter(
            ioRing_.ring_fd,
            0,
            minRequests - count,
            IORING_ENTER_GETEVENTS,
            nullptr);
      } else {
        break; // minimum satisfied; don't block for more
      }
    }
  }
  return range(result);
}
} // namespace folly
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
extern "C" {
#include <liburing.h>
}
#include <folly/SharedMutex.h>
#include <folly/experimental/io/AsyncBase.h>
namespace folly {
/**
* An IoUringOp represents a pending operation. You may set a notification
* callback or you may use this class's methods directly.
*
* The op must remain allocated until it is completed or canceled.
*/
class IoUringOp : public AsyncBaseOp {
  friend class IoUring;
  friend std::ostream& operator<<(std::ostream& stream, const IoUringOp& o);

 public:
  explicit IoUringOp(NotificationCallback cb = NotificationCallback());
  IoUringOp(const IoUringOp&) = delete;
  IoUringOp& operator=(const IoUringOp&) = delete;
  ~IoUringOp() override;

  /**
   * Initiate a read request.
   */
  void pread(int fd, void* buf, size_t size, off_t start) override;
  void preadv(int fd, const iovec* iov, int iovcnt, off_t start) override;

  /**
   * Initiate a write request.
   */
  void pwrite(int fd, const void* buf, size_t size, off_t start) override;
  void pwritev(int fd, const iovec* iov, int iovcnt, off_t start) override;

  void reset(NotificationCallback cb = NotificationCallback()) override;

  // Checked downcasts: this op is an IoUringOp, not an AsyncIOOp.
  AsyncIOOp* getAsyncIOOp() override {
    return nullptr;
  }

  IoUringOp* getIoUringOp() override {
    return this;
  }

  void toStream(std::ostream& os) const override;

  // The prepared submission queue entry; IoUring::submitOne() copies it
  // into the ring.
  const struct io_uring_sqe& getSqe() const {
    return sqe_;
  }

 private:
  struct io_uring_sqe sqe_;
  // One-element iovec backing pread()/pwrite(), which are implemented in
  // terms of the vectored variants.
  struct iovec iov_[1];
};
std::ostream& operator<<(std::ostream& stream, const IoUringOp& op);
/**
* C++ interface around Linux io_uring
*/
class IoUring : public AsyncBase {
 public:
  using Op = IoUringOp;

  /**
   * Note: the maximum number of allowed concurrent requests is controlled
   * by the kernel IORING_MAX_ENTRIES and the memlock limit,
   * The default IORING_MAX_ENTRIES value is usually 32K.
   */
  explicit IoUring(
      size_t capacity,
      PollMode pollMode = NOT_POLLABLE,
      size_t maxSubmit = 1);
  IoUring(const IoUring&) = delete;
  IoUring& operator=(const IoUring&) = delete;
  ~IoUring() override;

  // Probe whether the running kernel supports io_uring.
  static bool isAvailable();

 private:
  void initializeContext() override;
  int submitOne(AsyncBase::Op* op) override;

  Range<AsyncBase::Op**> doWait(
      WaitType type,
      size_t minRequests,
      size_t maxRequests,
      std::vector<AsyncBase::Op*>& result) override;

  // Upper bound on SQEs requested from the kernel at init; clamped to
  // capacity in the constructor.
  size_t maxSubmit_;
  struct io_uring_params params_;
  struct io_uring ioRing_;
  // Guards SQE acquisition + submission in submitOne().
  SharedMutex submitMutex_;
};
using IoUringQueue = AsyncBaseQueue;
} // namespace folly
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/io/test/AsyncBaseTestLib.h>
namespace folly {
namespace test {
// Block (indefinitely, retrying on EINTR) until `fd` becomes readable;
// crashes the test on poll errors or unexpected revents.
void TestUtil::waitUntilReadable(int fd) {
  pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  int r;
  do {
    r = poll(&pfd, 1, -1); // wait forever
  } while (r == -1 && errno == EINTR);

  PCHECK(r == 1);
  CHECK_EQ(pfd.revents, POLLIN); // no errors etc
}
// Wait for at least one completion on `reader`, using its poll fd when
// available (POLLABLE mode) and falling back to a blocking wait otherwise.
folly::Range<folly::AsyncBase::Op**> TestUtil::readerWait(
    folly::AsyncBase* reader) {
  const int pollFd = reader->pollFd();
  if (pollFd != -1) {
    waitUntilReadable(pollFd);
    return reader->pollCompleted();
  }
  return reader->wait(1);
}
// Allocate a kAlign-aligned buffer (suitable for O_DIRECT I/O) that frees
// itself via the ManagedBuffer deleter.
TestUtil::ManagedBuffer TestUtil::allocateAligned(size_t size) {
  void* buf;
  // posix_memalign returns the error code directly (it does not set errno).
  int rc = posix_memalign(&buf, kAlign, size);
  CHECK_EQ(rc, 0) << folly::errnoStr(rc);
  return ManagedBuffer(reinterpret_cast<char*>(buf), free);
}
// Create a temp file of `size` bytes (must be a multiple of 4) filled with
// deterministic pseudo-random data, so tests can regenerate and verify it.
TemporaryFile::TemporaryFile(size_t size)
    : path_(folly::fs::temp_directory_path() / folly::fs::unique_path()) {
  CHECK_EQ(size % sizeof(uint32_t), 0);
  size /= sizeof(uint32_t); // remaining uint32_t words to write
  const uint32_t seed = 42; // fixed seed => reproducible contents
  std::mt19937 rnd(seed);
  const size_t bufferSize = 1U << 16;
  // Stage writes through a heap buffer; the previous stack array was
  // 256 KiB, which risks overflowing small thread stacks.
  std::vector<uint32_t> buffer(bufferSize);
  FILE* fp = ::fopen(path_.c_str(), "wb");
  PCHECK(fp != nullptr);
  while (size) {
    size_t n = std::min(size, bufferSize);
    for (size_t i = 0; i < n; ++i) {
      buffer[i] = rnd();
    }
    size_t written = ::fwrite(buffer.data(), sizeof(uint32_t), n, fp);
    PCHECK(written == n);
    size -= written;
  }
  PCHECK(::fclose(fp) == 0);
}
// Best-effort cleanup: log (don't crash) if the file can't be removed.
TemporaryFile::~TemporaryFile() {
  try {
    folly::fs::remove(path_);
  } catch (const folly::fs::filesystem_error& e) {
    LOG(ERROR) << "fs::remove: " << folly::exceptionStr(e);
  }
}

// Shared fixture file used by all tests in this library; created once.
TemporaryFile& TemporaryFile::getTempFile() {
  static TemporaryFile sTempFile(6 << 20); // 6MiB
  return sTempFile;
}
} // namespace test
} // namespace folly
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <random>
#include <thread>
#include <vector>
#include <glog/logging.h>
#include <folly/ScopeGuard.h>
#include <folly/String.h>
#include <folly/experimental/io/FsUtil.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Sockets.h>
#include <folly/test/TestUtils.h>
#include <folly/experimental/io/AsyncBase.h>
namespace folly {
namespace test {
constexpr size_t kAlign = 4096; // align reads to 4096 B (for O_DIRECT)
struct TestSpec {
off_t start;
size_t size;
};
struct TestUtil {
static void waitUntilReadable(int fd);
static folly::Range<folly::AsyncBase::Op**> readerWait(
folly::AsyncBase* reader);
using ManagedBuffer = std::unique_ptr<char, void (*)(void*)>;
static ManagedBuffer allocateAligned(size_t size);
};
// Temporary file that is NOT kept open but is deleted on exit.
// Generate random-looking but reproduceable data.
class TemporaryFile {
public:
explicit TemporaryFile(size_t size);
~TemporaryFile();
const folly::fs::path path() const {
return path_;
}
static TemporaryFile& getTempFile();
private:
folly::fs::path path_;
};
// Issue each read one at a time through a capacity-1 reader, verifying
// submit counts, pending counts, and the number of bytes read per spec.
template <typename TAsync>
void testReadsSerially(
    const std::vector<TestSpec>& specs,
    folly::AsyncBase::PollMode pollMode) {
  TAsync aioReader(1, pollMode);
  typename TAsync::Op op;
  int fd =
      ::open(TemporaryFile::getTempFile().path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << folly::errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };

  for (size_t i = 0; i < specs.size(); i++) {
    auto buf = TestUtil::allocateAligned(specs[i].size);
    op.pread(fd, buf.get(), specs[i].size, specs[i].start);
    aioReader.submit(&op);
    EXPECT_EQ((i + 1), aioReader.totalSubmits());
    EXPECT_EQ(aioReader.pending(), 1);
    auto ops = test::TestUtil::readerWait(&aioReader);
    EXPECT_EQ(1, ops.size());
    EXPECT_TRUE(ops[0] == &op);
    EXPECT_EQ(aioReader.pending(), 0);
    ssize_t res = op.result();
    EXPECT_LE(0, res) << folly::errnoStr(-res);
    EXPECT_EQ(specs[i].size, res);
    op.reset(); // reuse the single Op for the next spec
  }
}
// Submit all reads up front (optionally from one thread per op), then reap
// completions in batches until everything finishes, verifying that each op
// completes exactly once with the expected byte count.
template <typename TAsync>
void testReadsParallel(
    const std::vector<TestSpec>& specs,
    folly::AsyncBase::PollMode pollMode,
    bool multithreaded) {
  TAsync aioReader(specs.size(), pollMode);
  std::unique_ptr<typename TAsync::Op[]> ops(new
  typename TAsync::Op[specs.size()]);
  uintptr_t sizeOf = sizeof(typename TAsync::Op);
  std::vector<TestUtil::ManagedBuffer> bufs;
  bufs.reserve(specs.size());

  int fd =
      ::open(TemporaryFile::getTempFile().path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << folly::errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };

  std::vector<std::thread> threads;
  if (multithreaded) {
    threads.reserve(specs.size());
  }
  for (size_t i = 0; i < specs.size(); i++) {
    bufs.push_back(TestUtil::allocateAligned(specs[i].size));
  }
  auto submit = [&](size_t i) {
    ops[i].pread(fd, bufs[i].get(), specs[i].size, specs[i].start);
    aioReader.submit(&ops[i]);
  };
  for (size_t i = 0; i < specs.size(); i++) {
    if (multithreaded) {
      threads.emplace_back([&submit, i] { submit(i); });
    } else {
      submit(i);
    }
  }
  for (auto& t : threads) {
    t.join();
  }
  std::vector<bool> pending(specs.size(), true);

  size_t remaining = specs.size();
  while (remaining != 0) {
    EXPECT_EQ(remaining, aioReader.pending());
    auto completed = test::TestUtil::readerWait(&aioReader);
    size_t nrRead = completed.size();
    EXPECT_NE(nrRead, 0);
    remaining -= nrRead;

    for (size_t i = 0; i < nrRead; i++) {
      // Recover the op's index from its address within the ops[] array.
      int id = (reinterpret_cast<uintptr_t>(completed[i]) -
                reinterpret_cast<uintptr_t>(ops.get())) /
          sizeOf;
      EXPECT_GE(id, 0);
      EXPECT_LT(id, specs.size());
      EXPECT_TRUE(pending[id]);
      pending[id] = false;
      ssize_t res = ops[id].result();
      EXPECT_LE(0, res) << folly::errnoStr(-res);
      EXPECT_EQ(specs[id].size, res);
    }
  }
  EXPECT_EQ(specs.size(), aioReader.totalSubmits());
  EXPECT_EQ(aioReader.pending(), 0);
  // Every op must have completed exactly once.
  for (size_t i = 0; i < pending.size(); i++) {
    EXPECT_FALSE(pending[i]);
  }
}
// Submit more ops than the reader's capacity through an AsyncBaseQueue and
// verify the queue drains in FIFO order as completions free up capacity.
template <typename TAsync>
void testReadsQueued(
    const std::vector<TestSpec>& specs,
    folly::AsyncBase::PollMode pollMode) {
  size_t readerCapacity = std::max(specs.size() / 2, size_t(1));
  TAsync aioReader(readerCapacity, pollMode);
  folly::AsyncBaseQueue aioQueue(&aioReader);
  std::unique_ptr<typename TAsync::Op[]> ops(new
  typename TAsync::Op[specs.size()]);
  uintptr_t sizeOf = sizeof(typename TAsync::Op);
  std::vector<TestUtil::ManagedBuffer> bufs;

  int fd =
      ::open(TemporaryFile::getTempFile().path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << folly::errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };
  for (size_t i = 0; i < specs.size(); i++) {
    bufs.push_back(TestUtil::allocateAligned(specs[i].size));
    ops[i].pread(fd, bufs[i].get(), specs[i].size, specs[i].start);
    aioQueue.submit(&ops[i]);
  }
  std::vector<bool> pending(specs.size(), true);

  size_t remaining = specs.size();
  while (remaining != 0) {
    // While oversubscribed, the reader should be full and the rest queued.
    if (remaining >= readerCapacity) {
      EXPECT_EQ(readerCapacity, aioReader.pending());
      EXPECT_EQ(remaining - readerCapacity, aioQueue.queued());
    } else {
      EXPECT_EQ(remaining, aioReader.pending());
      EXPECT_EQ(0, aioQueue.queued());
    }
    auto completed = test::TestUtil::readerWait(&aioReader);
    size_t nrRead = completed.size();
    EXPECT_NE(nrRead, 0);
    remaining -= nrRead;

    for (size_t i = 0; i < nrRead; i++) {
      // Recover the op's index from its address within the ops[] array.
      int id = (reinterpret_cast<uintptr_t>(completed[i]) -
                reinterpret_cast<uintptr_t>(ops.get())) /
          sizeOf;
      EXPECT_GE(id, 0);
      EXPECT_LT(id, specs.size());
      EXPECT_TRUE(pending[id]);
      pending[id] = false;
      ssize_t res = ops[id].result();
      EXPECT_LE(0, res) << folly::errnoStr(-res);
      EXPECT_EQ(specs[id].size, res);
    }
  }
  EXPECT_EQ(specs.size(), aioReader.totalSubmits());
  EXPECT_EQ(aioReader.pending(), 0);
  EXPECT_EQ(aioQueue.queued(), 0);
  // Every op must have completed exactly once.
  for (size_t i = 0; i < pending.size(); i++) {
    EXPECT_FALSE(pending[i]);
  }
}
// Run the full read-test matrix (serial, parallel single/multi-threaded,
// and queued) for one backend and poll mode.
template <typename TAsync>
void testReads(
    const std::vector<TestSpec>& specs,
    folly::AsyncBase::PollMode pollMode) {
  testReadsSerially<TAsync>(specs, pollMode);
  testReadsParallel<TAsync>(specs, pollMode, false);
  testReadsParallel<TAsync>(specs, pollMode, true);
  testReadsQueued<TAsync>(specs, pollMode);
}
// Typed test fixture: TypeParam is the async backend under test
// (e.g. folly::AsyncIO or folly::IoUring).
template <typename T>
class AsyncTest : public ::testing::Test {};
TYPED_TEST_CASE_P(AsyncTest);

// Zero-length reads should still complete.
TYPED_TEST_P(AsyncTest, ZeroAsyncDataNotPollable) {
  test::testReads<TypeParam>({{0, 0}}, folly::AsyncBase::NOT_POLLABLE);
}

TYPED_TEST_P(AsyncTest, ZeroAsyncDataPollable) {
  test::testReads<TypeParam>({{0, 0}}, folly::AsyncBase::POLLABLE);
}

// Single aligned read; run twice to exercise backend reuse.
TYPED_TEST_P(AsyncTest, SingleAsyncDataNotPollable) {
  test::testReads<TypeParam>(
      {{0, test::kAlign}}, folly::AsyncBase::NOT_POLLABLE);
  test::testReads<TypeParam>(
      {{0, test::kAlign}}, folly::AsyncBase::NOT_POLLABLE);
}

TYPED_TEST_P(AsyncTest, SingleAsyncDataPollable) {
  test::testReads<TypeParam>({{0, test::kAlign}}, folly::AsyncBase::POLLABLE);
  test::testReads<TypeParam>({{0, test::kAlign}}, folly::AsyncBase::POLLABLE);
}

// Small batches with mixed sizes, including large (MiB-scale) reads.
TYPED_TEST_P(AsyncTest, MultipleAsyncDataNotPollable) {
  test::testReads<TypeParam>(
      {{test::kAlign, 2 * test::kAlign},
       {test::kAlign, 2 * test::kAlign},
       {test::kAlign, 4 * test::kAlign}},
      folly::AsyncBase::NOT_POLLABLE);
  test::testReads<TypeParam>(
      {{test::kAlign, 2 * test::kAlign},
       {test::kAlign, 2 * test::kAlign},
       {test::kAlign, 4 * test::kAlign}},
      folly::AsyncBase::NOT_POLLABLE);

  test::testReads<TypeParam>(
      {{0, 5 * 1024 * 1024}, {test::kAlign, 5 * 1024 * 1024}},
      folly::AsyncBase::NOT_POLLABLE);

  test::testReads<TypeParam>(
      {
          {test::kAlign, 0},
          {test::kAlign, test::kAlign},
          {test::kAlign, 2 * test::kAlign},
          {test::kAlign, 20 * test::kAlign},
          {test::kAlign, 1024 * 1024},
      },
      folly::AsyncBase::NOT_POLLABLE);
}

TYPED_TEST_P(AsyncTest, MultipleAsyncDataPollable) {
  test::testReads<TypeParam>(
      {{test::kAlign, 2 * test::kAlign},
       {test::kAlign, 2 * test::kAlign},
       {test::kAlign, 4 * test::kAlign}},
      folly::AsyncBase::POLLABLE);
  test::testReads<TypeParam>(
      {{test::kAlign, 2 * test::kAlign},
       {test::kAlign, 2 * test::kAlign},
       {test::kAlign, 4 * test::kAlign}},
      folly::AsyncBase::POLLABLE);

  test::testReads<TypeParam>(
      {{0, 5 * 1024 * 1024}, {test::kAlign, 5 * 1024 * 1024}},
      folly::AsyncBase::NOT_POLLABLE);
  test::testReads<TypeParam>(
      {
          {test::kAlign, 0},
          {test::kAlign, test::kAlign},
          {test::kAlign, 2 * test::kAlign},
          {test::kAlign, 20 * test::kAlign},
          {test::kAlign, 1024 * 1024},
      },
      folly::AsyncBase::NOT_POLLABLE);
}

// High-volume batches (1000 aligned reads).
TYPED_TEST_P(AsyncTest, ManyAsyncDataNotPollable) {
  {
    std::vector<test::TestSpec> v;
    for (int i = 0; i < 1000; i++) {
      v.push_back({off_t(test::kAlign * i), test::kAlign});
    }
    test::testReads<TypeParam>(v, folly::AsyncBase::NOT_POLLABLE);
  }
}

TYPED_TEST_P(AsyncTest, ManyAsyncDataPollable) {
  {
    std::vector<test::TestSpec> v;
    for (int i = 0; i < 1000; i++) {
      v.push_back({off_t(test::kAlign * i), test::kAlign});
    }
    test::testReads<TypeParam>(v, folly::AsyncBase::POLLABLE);
  }
}
// wait(0) must not block; busy-poll until the single read completes.
TYPED_TEST_P(AsyncTest, NonBlockingWait) {
  TypeParam aioReader(1, folly::AsyncBase::NOT_POLLABLE);
  typename TypeParam::Op op;
  int fd = ::open(
      test::TemporaryFile::getTempFile().path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << folly::errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };
  size_t size = 2 * test::kAlign;
  auto buf = test::TestUtil::allocateAligned(size);
  op.pread(fd, buf.get(), size, 0);
  aioReader.submit(&op);
  EXPECT_EQ(aioReader.pending(), 1);

  folly::Range<folly::AsyncBase::Op**> completed;
  while (completed.empty()) {
    // poll without blocking until the read request completes.
    completed = aioReader.wait(0);
  }
  EXPECT_EQ(completed.size(), 1);
  EXPECT_TRUE(completed[0] == &op);
  ssize_t res = op.result();
  EXPECT_LE(0, res) << folly::errnoStr(-res);
  EXPECT_EQ(size, res);
  EXPECT_EQ(aioReader.pending(), 0);
}
// Verify cancel(): after a partial wait(), canceling the remainder should
// leave each op either COMPLETED (counted via callback) or CANCELED.
TYPED_TEST_P(AsyncTest, Cancel) {
  constexpr size_t kNumOpsBatch1 = 10;
  constexpr size_t kNumOpsBatch2 = 10;

  TypeParam aioReader(
      kNumOpsBatch1 + kNumOpsBatch2, folly::AsyncBase::NOT_POLLABLE);
  int fd = ::open(
      test::TemporaryFile::getTempFile().path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << folly::errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };

  size_t completed = 0; // bumped by each op's notification callback

  std::vector<std::unique_ptr<folly::AsyncBase::Op>> ops;
  std::vector<test::TestUtil::ManagedBuffer> bufs;
  const auto schedule = [&](size_t n) {
    for (size_t i = 0; i < n; ++i) {
      const size_t size = 2 * test::kAlign;
      bufs.push_back(test::TestUtil::allocateAligned(size));

      ops.push_back(std::make_unique<typename TypeParam::Op>());
      auto& op = *ops.back();

      op.setNotificationCallback([&](folly::AsyncBaseOp*) { ++completed; });
      op.pread(fd, bufs.back().get(), size, 0);
      aioReader.submit(&op);
    }
  };

  // Mix completed and canceled operations for this test.
  // In order to achieve that, schedule in two batches and do partial
  // wait() after the first one.
  schedule(kNumOpsBatch1);
  EXPECT_EQ(aioReader.pending(), kNumOpsBatch1);
  EXPECT_EQ(completed, 0);

  auto result = aioReader.wait(1);
  EXPECT_GE(result.size(), 1);
  EXPECT_EQ(completed, result.size());
  EXPECT_EQ(aioReader.pending(), kNumOpsBatch1 - result.size());

  schedule(kNumOpsBatch2);
  EXPECT_EQ(aioReader.pending(), ops.size() - result.size());
  EXPECT_EQ(completed, result.size());

  auto canceled = aioReader.cancel();
  EXPECT_EQ(canceled.size(), ops.size() - result.size());
  EXPECT_EQ(aioReader.pending(), 0);
  EXPECT_EQ(completed, result.size());

  // Cross-check the per-op states against the callback count.
  size_t foundCompleted = 0;
  for (auto& op : ops) {
    if (op->state() == folly::AsyncBaseOp::State::COMPLETED) {
      ++foundCompleted;
    } else {
      EXPECT_TRUE(op->state() == folly::AsyncBaseOp::State::CANCELED) << *op;
    }
  }
  EXPECT_EQ(foundCompleted, completed);
}
// Register every test above so backends can instantiate the whole suite
// via INSTANTIATE_TYPED_TEST_CASE_P.
REGISTER_TYPED_TEST_CASE_P(
    AsyncTest,
    ZeroAsyncDataNotPollable,
    ZeroAsyncDataPollable,
    SingleAsyncDataNotPollable,
    SingleAsyncDataPollable,
    MultipleAsyncDataNotPollable,
    MultipleAsyncDataPollable,
    ManyAsyncDataNotPollable,
    ManyAsyncDataPollable,
    NonBlockingWait,
    Cancel);
} // namespace test
} // namespace folly
......@@ -15,452 +15,12 @@
*/
#include <folly/experimental/io/AsyncIO.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <random>
#include <thread>
#include <vector>
#include <glog/logging.h>
#include <folly/ScopeGuard.h>
#include <folly/String.h>
#include <folly/experimental/io/FsUtil.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Sockets.h>
#include <folly/test/TestUtils.h>
namespace fs = folly::fs;
#include <folly/experimental/io/test/AsyncBaseTestLib.h>
using folly::AsyncIO;
using folly::AsyncIOOp;
using folly::AsyncIOQueue;
using folly::errnoStr;
namespace {
constexpr size_t kAlign = 4096; // align reads to 4096 B (for O_DIRECT)
struct TestSpec {
off_t start;
size_t size;
};
void waitUntilReadable(int fd) {
pollfd pfd;
pfd.fd = fd;
pfd.events = POLLIN;
int r;
do {
r = poll(&pfd, 1, -1); // wait forever
} while (r == -1 && errno == EINTR);
PCHECK(r == 1);
CHECK_EQ(pfd.revents, POLLIN); // no errors etc
}
folly::Range<AsyncIO::Op**> readerWait(AsyncIO* reader) {
int fd = reader->pollFd();
if (fd == -1) {
return reader->wait(1);
} else {
waitUntilReadable(fd);
return reader->pollCompleted();
}
}
// Temporary file that is NOT kept open but is deleted on exit.
// Generate random-looking but reproduceable data.
class TemporaryFile {
public:
explicit TemporaryFile(size_t size);
~TemporaryFile();
const fs::path path() const {
return path_;
}
private:
fs::path path_;
};
TemporaryFile::TemporaryFile(size_t size)
: path_(fs::temp_directory_path() / fs::unique_path()) {
CHECK_EQ(size % sizeof(uint32_t), 0);
size /= sizeof(uint32_t);
const uint32_t seed = 42;
std::mt19937 rnd(seed);
const size_t bufferSize = 1U << 16;
uint32_t buffer[bufferSize];
FILE* fp = ::fopen(path_.c_str(), "wb");
PCHECK(fp != nullptr);
while (size) {
size_t n = std::min(size, bufferSize);
for (size_t i = 0; i < n; ++i) {
buffer[i] = rnd();
}
size_t written = ::fwrite(buffer, sizeof(uint32_t), n, fp);
PCHECK(written == n);
size -= written;
}
PCHECK(::fclose(fp) == 0);
}
TemporaryFile::~TemporaryFile() {
try {
fs::remove(path_);
} catch (const fs::filesystem_error& e) {
LOG(ERROR) << "fs::remove: " << folly::exceptionStr(e);
}
}
TemporaryFile tempFile(6 << 20); // 6MiB
typedef std::unique_ptr<char, void (*)(void*)> ManagedBuffer;
ManagedBuffer allocateAligned(size_t size) {
void* buf;
int rc = posix_memalign(&buf, kAlign, size);
CHECK_EQ(rc, 0) << errnoStr(rc);
return ManagedBuffer(reinterpret_cast<char*>(buf), free);
}
void testReadsSerially(
const std::vector<TestSpec>& specs,
AsyncIO::PollMode pollMode) {
AsyncIO aioReader(1, pollMode);
AsyncIO::Op op;
int fd = ::open(tempFile.path().c_str(), O_DIRECT | O_RDONLY);
SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
<< errnoStr(errno);
SCOPE_EXIT {
::close(fd);
};
for (size_t i = 0; i < specs.size(); i++) {
auto buf = allocateAligned(specs[i].size);
op.pread(fd, buf.get(), specs[i].size, specs[i].start);
aioReader.submit(&op);
EXPECT_EQ((i + 1), aioReader.totalSubmits());
EXPECT_EQ(aioReader.pending(), 1);
auto ops = readerWait(&aioReader);
EXPECT_EQ(1, ops.size());
EXPECT_TRUE(ops[0] == &op);
EXPECT_EQ(aioReader.pending(), 0);
ssize_t res = op.result();
EXPECT_LE(0, res) << folly::errnoStr(-res);
EXPECT_EQ(specs[i].size, res);
op.reset();
}
}
void testReadsParallel(
const std::vector<TestSpec>& specs,
AsyncIO::PollMode pollMode,
bool multithreaded) {
AsyncIO aioReader(specs.size(), pollMode);
std::unique_ptr<AsyncIO::Op[]> ops(new AsyncIO::Op[specs.size()]);
std::vector<ManagedBuffer> bufs;
bufs.reserve(specs.size());
int fd = ::open(tempFile.path().c_str(), O_DIRECT | O_RDONLY);
SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
<< errnoStr(errno);
SCOPE_EXIT {
::close(fd);
};
std::vector<std::thread> threads;
if (multithreaded) {
threads.reserve(specs.size());
}
for (size_t i = 0; i < specs.size(); i++) {
bufs.push_back(allocateAligned(specs[i].size));
}
auto submit = [&](size_t i) {
ops[i].pread(fd, bufs[i].get(), specs[i].size, specs[i].start);
aioReader.submit(&ops[i]);
};
for (size_t i = 0; i < specs.size(); i++) {
if (multithreaded) {
threads.emplace_back([&submit, i] { submit(i); });
} else {
submit(i);
}
}
for (auto& t : threads) {
t.join();
}
std::vector<bool> pending(specs.size(), true);
size_t remaining = specs.size();
while (remaining != 0) {
EXPECT_EQ(remaining, aioReader.pending());
auto completed = readerWait(&aioReader);
size_t nrRead = completed.size();
EXPECT_NE(nrRead, 0);
remaining -= nrRead;
for (size_t i = 0; i < nrRead; i++) {
int id = completed[i] - ops.get();
EXPECT_GE(id, 0);
EXPECT_LT(id, specs.size());
EXPECT_TRUE(pending[id]);
pending[id] = false;
ssize_t res = ops[id].result();
EXPECT_LE(0, res) << folly::errnoStr(-res);
EXPECT_EQ(specs[id].size, res);
}
}
EXPECT_EQ(specs.size(), aioReader.totalSubmits());
EXPECT_EQ(aioReader.pending(), 0);
for (size_t i = 0; i < pending.size(); i++) {
EXPECT_FALSE(pending[i]);
}
}
void testReadsQueued(
const std::vector<TestSpec>& specs,
AsyncIO::PollMode pollMode) {
size_t readerCapacity = std::max(specs.size() / 2, size_t(1));
AsyncIO aioReader(readerCapacity, pollMode);
AsyncIOQueue aioQueue(&aioReader);
std::unique_ptr<AsyncIO::Op[]> ops(new AsyncIO::Op[specs.size()]);
std::vector<ManagedBuffer> bufs;
int fd = ::open(tempFile.path().c_str(), O_DIRECT | O_RDONLY);
SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
<< errnoStr(errno);
SCOPE_EXIT {
::close(fd);
};
for (size_t i = 0; i < specs.size(); i++) {
bufs.push_back(allocateAligned(specs[i].size));
ops[i].pread(fd, bufs[i].get(), specs[i].size, specs[i].start);
aioQueue.submit(&ops[i]);
}
std::vector<bool> pending(specs.size(), true);
size_t remaining = specs.size();
while (remaining != 0) {
if (remaining >= readerCapacity) {
EXPECT_EQ(readerCapacity, aioReader.pending());
EXPECT_EQ(remaining - readerCapacity, aioQueue.queued());
} else {
EXPECT_EQ(remaining, aioReader.pending());
EXPECT_EQ(0, aioQueue.queued());
}
auto completed = readerWait(&aioReader);
size_t nrRead = completed.size();
EXPECT_NE(nrRead, 0);
remaining -= nrRead;
for (size_t i = 0; i < nrRead; i++) {
int id = completed[i] - ops.get();
EXPECT_GE(id, 0);
EXPECT_LT(id, specs.size());
EXPECT_TRUE(pending[id]);
pending[id] = false;
ssize_t res = ops[id].result();
EXPECT_LE(0, res) << folly::errnoStr(-res);
EXPECT_EQ(specs[id].size, res);
}
}
EXPECT_EQ(specs.size(), aioReader.totalSubmits());
EXPECT_EQ(aioReader.pending(), 0);
EXPECT_EQ(aioQueue.queued(), 0);
for (size_t i = 0; i < pending.size(); i++) {
EXPECT_FALSE(pending[i]);
}
}
// Runs a spec list through every read strategy: serial, parallel
// (single- and multi-threaded submission), and queued.
void testReads(const std::vector<TestSpec>& specs, AsyncIO::PollMode pollMode) {
  testReadsSerially(specs, pollMode);
  for (bool multithreaded : {false, true}) {
    testReadsParallel(specs, pollMode, multithreaded);
  }
  testReadsQueued(specs, pollMode);
}
} // namespace
// A zero-length read at offset 0 must still complete cleanly in
// NOT_POLLABLE mode.
TEST(AsyncIO, ZeroAsyncDataNotPollable) {
  testReads({{0, 0}}, AsyncIO::NOT_POLLABLE);
}
// A zero-length read at offset 0 must still complete cleanly in
// POLLABLE mode.
TEST(AsyncIO, ZeroAsyncDataPollable) {
  testReads({{0, 0}}, AsyncIO::POLLABLE);
}
// One aligned-size read, NOT_POLLABLE mode. The identical call is issued
// twice back-to-back — presumably to cover repeated use of the read
// machinery; preserved from the original.
TEST(AsyncIO, SingleAsyncDataNotPollable) {
  testReads({{0, kAlign}}, AsyncIO::NOT_POLLABLE);
  testReads({{0, kAlign}}, AsyncIO::NOT_POLLABLE);
}
// One aligned-size read, POLLABLE mode; issued twice back-to-back like
// the NOT_POLLABLE variant.
TEST(AsyncIO, SingleAsyncDataPollable) {
  testReads({{0, kAlign}}, AsyncIO::POLLABLE);
  testReads({{0, kAlign}}, AsyncIO::POLLABLE);
}
// Batches of reads of varying sizes in NOT_POLLABLE mode: small aligned
// reads (same batch twice), multi-megabyte reads, and a mixed batch that
// includes a zero-length read.
TEST(AsyncIO, MultipleAsyncDataNotPollable) {
  const std::vector<TestSpec> smallBatch = {
      {kAlign, 2 * kAlign}, {kAlign, 2 * kAlign}, {kAlign, 4 * kAlign}};
  // The same batch is read twice.
  testReads(smallBatch, AsyncIO::NOT_POLLABLE);
  testReads(smallBatch, AsyncIO::NOT_POLLABLE);
  testReads(
      {{0, 5 * 1024 * 1024}, {kAlign, 5 * 1024 * 1024}}, AsyncIO::NOT_POLLABLE);
  const std::vector<TestSpec> mixedBatch = {
      {kAlign, 0},
      {kAlign, kAlign},
      {kAlign, 2 * kAlign},
      {kAlign, 20 * kAlign},
      {kAlign, 1024 * 1024},
  };
  testReads(mixedBatch, AsyncIO::NOT_POLLABLE);
}
// Batches of reads of varying sizes in POLLABLE mode; mirrors
// MultipleAsyncDataNotPollable.
TEST(AsyncIO, MultipleAsyncDataPollable) {
  testReads(
      {{kAlign, 2 * kAlign}, {kAlign, 2 * kAlign}, {kAlign, 4 * kAlign}},
      AsyncIO::POLLABLE);
  testReads(
      {{kAlign, 2 * kAlign}, {kAlign, 2 * kAlign}, {kAlign, 4 * kAlign}},
      AsyncIO::POLLABLE);
  // NOTE(review): the two calls below previously passed NOT_POLLABLE —
  // an apparent copy-paste from MultipleAsyncDataNotPollable, which left
  // POLLABLE mode uncovered for these specs. Fixed to POLLABLE so the
  // test exercises what its name claims.
  testReads(
      {{0, 5 * 1024 * 1024}, {kAlign, 5 * 1024 * 1024}}, AsyncIO::POLLABLE);
  testReads(
      {
          {kAlign, 0},
          {kAlign, kAlign},
          {kAlign, 2 * kAlign},
          {kAlign, 20 * kAlign},
          {kAlign, 1024 * 1024},
      },
      AsyncIO::POLLABLE);
}
// 1000 consecutive aligned reads in NOT_POLLABLE mode — stresses submit
// and reap paths with a large batch.
TEST(AsyncIO, ManyAsyncDataNotPollable) {
  std::vector<TestSpec> specs;
  specs.reserve(1000);
  for (int i = 0; i < 1000; ++i) {
    specs.push_back({off_t(kAlign * i), kAlign});
  }
  testReads(specs, AsyncIO::NOT_POLLABLE);
}
// 1000 consecutive aligned reads in POLLABLE mode.
TEST(AsyncIO, ManyAsyncDataPollable) {
  std::vector<TestSpec> specs;
  specs.reserve(1000);
  for (int i = 0; i < 1000; ++i) {
    specs.push_back({off_t(kAlign * i), kAlign});
  }
  testReads(specs, AsyncIO::POLLABLE);
}
// Submits one read and reaps it purely with wait(0) polling, i.e. without
// ever blocking the calling thread.
TEST(AsyncIO, NonBlockingWait) {
  AsyncIO reader(1, AsyncIO::NOT_POLLABLE);
  AsyncIO::Op op;
  int fd = ::open(tempFile.path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };
  const size_t len = 2 * kAlign;
  auto buffer = allocateAligned(len);
  op.pread(fd, buffer.get(), len, 0);
  reader.submit(&op);
  EXPECT_EQ(reader.pending(), 1);
  folly::Range<AsyncIO::Op**> done;
  do {
    // wait(0) returns immediately whether or not anything has completed;
    // spin until the read request shows up.
    done = reader.wait(0);
  } while (done.empty());
  EXPECT_EQ(done.size(), 1);
  EXPECT_TRUE(done[0] == &op);
  ssize_t res = op.result();
  EXPECT_LE(0, res) << folly::errnoStr(-res);
  EXPECT_EQ(len, res);
  EXPECT_EQ(reader.pending(), 0);
}
// Exercises cancel(): schedules two batches of reads, reaps part of the
// first batch, then cancels everything still in flight and verifies that
// each op ends in either COMPLETED or CANCELED state, with the completion
// callback having fired exactly once per completed op.
TEST(AsyncIO, Cancel) {
  constexpr size_t kNumOpsBatch1 = 10;
  constexpr size_t kNumOpsBatch2 = 10;
  AsyncIO aioReader(kNumOpsBatch1 + kNumOpsBatch2, AsyncIO::NOT_POLLABLE);
  int fd = ::open(tempFile.path().c_str(), O_DIRECT | O_RDONLY);
  SKIP_IF(fd == -1) << "Tempfile can't be opened with O_DIRECT: "
                    << errnoStr(errno);
  SCOPE_EXIT {
    ::close(fd);
  };
  // Bumped by each op's notification callback on completion.
  size_t completed = 0;
  std::vector<std::unique_ptr<AsyncIO::Op>> ops;
  std::vector<ManagedBuffer> bufs;
  // Submits n identical 2*kAlign reads at offset 0, each counting its
  // completion through the shared `completed` counter.
  const auto schedule = [&](size_t n) {
    for (size_t i = 0; i < n; ++i) {
      const size_t size = 2 * kAlign;
      bufs.push_back(allocateAligned(size));
      ops.push_back(std::make_unique<AsyncIO::Op>());
      auto& op = *ops.back();
      op.setNotificationCallback([&](AsyncIOOp*) { ++completed; });
      op.pread(fd, bufs.back().get(), size, 0);
      aioReader.submit(&op);
    }
  };
  // Mix completed and canceled operations for this test.
  // In order to achieve that, schedule in two batches and do partial
  // wait() after the first one.
  schedule(kNumOpsBatch1);
  EXPECT_EQ(aioReader.pending(), kNumOpsBatch1);
  EXPECT_EQ(completed, 0);
  // wait(1) blocks for at least one completion but may reap more.
  auto result = aioReader.wait(1);
  EXPECT_GE(result.size(), 1);
  EXPECT_EQ(completed, result.size());
  EXPECT_EQ(aioReader.pending(), kNumOpsBatch1 - result.size());
  schedule(kNumOpsBatch2);
  EXPECT_EQ(aioReader.pending(), ops.size() - result.size());
  EXPECT_EQ(completed, result.size());
  // Cancel everything still pending; canceled ops must not have run the
  // notification callback, so `completed` stays unchanged.
  auto canceled = aioReader.cancel();
  EXPECT_EQ(canceled.size(), ops.size() - result.size());
  EXPECT_EQ(aioReader.pending(), 0);
  EXPECT_EQ(completed, result.size());
  size_t foundCompleted = 0;
  for (auto& op : ops) {
    if (op->state() == AsyncIOOp::State::COMPLETED) {
      ++foundCompleted;
    } else {
      EXPECT_TRUE(op->state() == AsyncIOOp::State::CANCELED) << *op;
    }
  }
  // Every op observed in COMPLETED state must match a callback invocation.
  EXPECT_EQ(foundCompleted, completed);
}
namespace folly {
namespace test {
// Instantiate the shared typed async-IO test suite for the AsyncIO
// backend (the AsyncTest fixture is presumably declared in the shared
// test library — confirm against AsyncBaseTestLib.h).
INSTANTIATE_TYPED_TEST_CASE_P(AsyncTest, AsyncTest, AsyncIO);
} // namespace test
} // namespace folly
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/io/IoUring.h>
#include <folly/experimental/io/test/AsyncBaseTestLib.h>
#include <folly/init/Init.h>
using folly::IoUring;
namespace folly {
namespace test {
// Instantiate the same shared typed async-IO test suite for the io_uring
// backend, so AsyncIO and IoUring are exercised by identical tests.
INSTANTIATE_TYPED_TEST_CASE_P(AsyncTest, AsyncTest, IoUring);
} // namespace test
} // namespace folly
// Test entry point: initializes gtest and folly, then skips the whole
// suite when the running kernel lacks io_uring support.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  folly::init(&argc, &argv);
  // Runtime probe — io_uring needs a sufficiently recent kernel.
  if (!IoUring::isAvailable()) {
    LOG(INFO)
        << "Not running tests since this kernel version does not support io_uring";
    return 0;
  }
  return RUN_ALL_TESTS();
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment