Commit 9bb6a2a6 authored by octal's avatar octal

Http client now uses a bounded MPMC queue internally to queue up requests

parent 8228a32b
...@@ -13,11 +13,15 @@ using namespace Net::Http; ...@@ -13,11 +13,15 @@ using namespace Net::Http;
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
if (argc < 1) { if (argc < 1) {
std::cerr << "Usage: http_client page" << std::endl; std::cerr << "Usage: http_client page [count]" << std::endl;
return 1; return 1;
} }
std::string page = argv[1]; std::string page = argv[1];
int count = 1;
if (argc == 3) {
count = std::stoi(argv[2]);
}
Experimental::Client client; Experimental::Client client;
...@@ -25,18 +29,38 @@ int main(int argc, char *argv[]) { ...@@ -25,18 +29,38 @@ int main(int argc, char *argv[]) {
.threads(1) .threads(1)
.maxConnectionsPerHost(8); .maxConnectionsPerHost(8);
client.init(opts); client.init(opts);
auto resp = client.get(page).cookie(Cookie("FOO", "bar")).send();
Async::Barrier<Response> barrier(resp); std::vector<Async::Promise<Response>> responses;
std::atomic<size_t> completedRequests(0);
std::atomic<size_t> failedRequests(0);
auto start = std::chrono::system_clock::now();
resp.then([](Response response) { for (int i = 0; i < count; ++i) {
auto resp = client.get(page).cookie(Cookie("FOO", "bar")).send();
resp.then([&](Response response) {
++completedRequests;
std::cout << "Response code = " << response.code() << std::endl; std::cout << "Response code = " << response.code() << std::endl;
auto body = response.body(); auto body = response.body();
if (!body.empty()) if (!body.empty())
std::cout << "Response body = " << body << std::endl; std::cout << "Response body = " << body << std::endl;
}, Async::NoExcept); }, Async::IgnoreException);
responses.push_back(std::move(resp));
}
auto sync = Async::whenAll(responses.begin(), responses.end());
Async::Barrier<std::vector<Response>> barrier(sync);
barrier.wait_for(std::chrono::seconds(5));
barrier.wait_for(std::chrono::seconds(2)); auto end = std::chrono::system_clock::now();
std::cout << "Summary of execution" << std::endl
<< "Total number of requests sent : " << count << std::endl
<< "Total number of responses received: " << completedRequests.load() << std::endl
<< "Total number of requests failed : " << failedRequests.load() << std::endl
<< "Total time of execution : "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
client.shutdown(); client.shutdown();
} }
...@@ -590,6 +590,10 @@ namespace Async { ...@@ -590,6 +590,10 @@ namespace Async {
core_ = nullptr; core_ = nullptr;
} }
// Returns a fresh Resolver bound to the same underlying promise core, so a
// second handle to the resolving end can be handed off (e.g. stored with a
// queued request). NOTE(review): presumably the core-taking constructor is
// private; clone() is the sanctioned way to duplicate the handle — confirm.
Resolver clone() {
return Resolver(core_);
}
private: private:
std::shared_ptr<Private::Core> core_; std::shared_ptr<Private::Core> core_;
}; };
...@@ -601,6 +605,12 @@ namespace Async { ...@@ -601,6 +605,12 @@ namespace Async {
: core_(core) : core_(core)
{ } { }
// Rejection handles are move-only: copying is deleted so ownership of the
// rejecting end stays explicit; use clone() when a second handle to the
// same core is genuinely needed.
Rejection(const Rejection& other) = delete;
Rejection& operator=(const Rejection& other) = delete;
Rejection(Rejection&& other) = default;
Rejection& operator=(Rejection&& other) = default;
template<typename Exc> template<typename Exc>
bool operator()(Exc exc) const { bool operator()(Exc exc) const {
if (!core_) return false; if (!core_) return false;
...@@ -622,6 +632,10 @@ namespace Async { ...@@ -622,6 +632,10 @@ namespace Async {
core_ = nullptr; core_ = nullptr;
} }
// Returns a fresh Rejection sharing this handle's promise core — the
// explicit escape hatch now that the copy constructor is deleted.
Rejection clone() {
return Rejection(core_);
}
private: private:
std::shared_ptr<Private::Core> core_; std::shared_ptr<Private::Core> core_;
...@@ -914,11 +928,6 @@ namespace Async { ...@@ -914,11 +928,6 @@ namespace Async {
struct Any; struct Any;
} }
template<typename T>
Barrier<T> make_barrier(Promise<T>& promise) {
return Barrier<T>(promise);
}
class Any { class Any {
public: public:
friend class Impl::Any; friend class Impl::Any;
......
...@@ -340,7 +340,7 @@ private: ...@@ -340,7 +340,7 @@ private:
typedef std::lock_guard<Lock> Guard; typedef std::lock_guard<Lock> Guard;
Lock queuesLock; Lock queuesLock;
std::unordered_map<std::string, Queue<Connection::RequestData>> requestsQueues; std::unordered_map<std::string, MPMCQueue<Connection::RequestData *, 128>> requestsQueues;
RequestBuilder prepareRequest(std::string resource, Http::Method method); RequestBuilder prepareRequest(std::string resource, Http::Method method);
......
...@@ -363,6 +363,16 @@ public: ...@@ -363,6 +363,16 @@ public:
friend class Private::ResponseLineStep; friend class Private::ResponseLineStep;
friend class Private::Parser<Http::Response>; friend class Private::Parser<Http::Response>;
// Default-constructs an empty Response. Made public (previously protected,
// per this change) so response objects can be created outside the writer
// hierarchy — e.g. aggregated by Async::whenAll into a vector of Response.
Response()
: Message()
{ }
// Constructs an empty Response carrying the given HTTP version.
Response(Version version)
: Message()
{
version_ = version;
}
Response(const Response& other) = default; Response(const Response& other) = default;
Response& operator=(const Response& other) = default; Response& operator=(const Response& other) = default;
Response(Response&& other) = default; Response(Response&& other) = default;
...@@ -396,16 +406,6 @@ public: ...@@ -396,16 +406,6 @@ public:
return version_; return version_;
} }
protected:
Response()
: Message()
{ }
Response(Version version)
: Message()
{
version_ = version;
}
}; };
class ResponseWriter : public Response { class ResponseWriter : public Response {
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <sys/eventfd.h> #include <sys/eventfd.h>
#include <unistd.h> #include <unistd.h>
// Assumed CPU cache line size in bytes, used with alignas() to keep hot
// atomic members on separate lines (false-sharing avoidance).
// NOTE(review): std::hardware_destructive_interference_size could replace
// this constant where toolchain support allows.
static constexpr size_t CachelineSize = 64;
template<typename T> template<typename T>
class Mailbox { class Mailbox {
...@@ -300,3 +301,92 @@ public: ...@@ -300,3 +301,92 @@ public:
private: private:
int event_fd; int event_fd;
}; };
// A Multi-Producer Multi-Consumer bounded queue
// taken from http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
//
// Every cell carries a sequence counter encoding its state relative to the
// two cursors: sequence == index means the cell is free for the producer
// claiming `index`; sequence == index + 1 means it holds a value for the
// consumer claiming `index`. Cursors advance by CAS; data is published with
// a release store on the cell's sequence.
template<typename T, size_t Size>
class MPMCQueue {

    static_assert(Size >= 2 && ((Size & (Size - 1)) == 0), "The size must be a power of 2");

    static constexpr size_t Mask = Size - 1;

    // Cache line size used for padding; kept class-local so the queue is
    // self-contained (mirrors the file-level CachelineSize of 64 bytes).
    static constexpr size_t kCachelineSize = 64;

public:
    MPMCQueue(const MPMCQueue& other) = delete;
    MPMCQueue& operator=(const MPMCQueue& other) = delete;

    MPMCQueue() {
        // Tag each cell as "free for lap 0": the producer claiming index i
        // may fill cell i.
        for (size_t i = 0; i < Size; ++i) {
            cells_[i].sequence.store(i, std::memory_order_relaxed);
        }
        enqueueIndex.store(0, std::memory_order_relaxed);
        dequeueIndex.store(0, std::memory_order_relaxed);
    }

    // Tries to push `data`. Returns false (without blocking) if the queue
    // is full.
    template<typename U>
    bool enqueue(U&& data) {
        Cell* target;
        size_t index = enqueueIndex.load(std::memory_order_relaxed);

        for (;;) {
            target = cell(index);
            const size_t seq = target->sequence.load(std::memory_order_acquire);
            const auto diff = static_cast<std::intptr_t>(seq) - static_cast<std::intptr_t>(index);

            if (diff == 0) {
                // Cell is free for this lap: try to claim the slot. On
                // failure compare_exchange_weak refreshes `index` for us.
                if (enqueueIndex.compare_exchange_weak(index, index + 1, std::memory_order_relaxed))
                    break;
            }
            else if (diff < 0) {
                // Cell still holds the previous lap's element: queue is full.
                return false;
            }
            else {
                // Another producer raced past us: catch up with the cursor.
                index = enqueueIndex.load(std::memory_order_relaxed);
            }
        }

        target->data = std::forward<U>(data);
        // Publish: the matching consumer waits for sequence == index + 1.
        target->sequence.store(index + 1, std::memory_order_release);
        return true;
    }

    // Tries to pop into `data`. Returns false (without blocking) if the
    // queue is empty.
    bool dequeue(T& data) {
        Cell* target;
        size_t index = dequeueIndex.load(std::memory_order_relaxed);

        for (;;) {
            target = cell(index);
            const size_t seq = target->sequence.load(std::memory_order_acquire);
            const auto diff = static_cast<std::intptr_t>(seq) - static_cast<std::intptr_t>(index + 1);

            if (diff == 0) {
                // Cell is filled for this lap: try to claim it.
                if (dequeueIndex.compare_exchange_weak(index, index + 1, std::memory_order_relaxed))
                    break;
            }
            else if (diff < 0) {
                // The producer has not filled this cell yet: queue is empty.
                return false;
            }
            else {
                // Another consumer raced past us: catch up with the cursor.
                index = dequeueIndex.load(std::memory_order_relaxed);
            }
        }

        // Move the element out instead of copying it: cheaper for heavy
        // payloads and required for move-only element types.
        data = std::move(target->data);
        // Mark the cell as free for the producer one full lap ahead.
        target->sequence.store(index + Mask + 1, std::memory_order_release);
        return true;
    }

private:
    struct Cell {
        std::atomic<size_t> sequence;
        T data;
    };

    size_t cellIndex(size_t index) const {
        return index & Mask;
    }

    Cell* cell(size_t index) {
        return &cells_[cellIndex(index)];
    }

    std::array<Cell, Size> cells_;

    // Each cursor gets its own cache line so producers and consumers do not
    // invalidate each other's line on every operation (false sharing).
    alignas(kCachelineSize) std::atomic<size_t> enqueueIndex;
    alignas(kCachelineSize) std::atomic<size_t> dequeueIndex;
};
...@@ -708,9 +708,6 @@ RequestBuilder::send() { ...@@ -708,9 +708,6 @@ RequestBuilder::send() {
Client::Options& Client::Options&
Client::Options::threads(int val) { Client::Options::threads(int val) {
if (val > 1) {
throw std::invalid_argument("Multi-threaded client is not yet supported");
}
threads_ = val; threads_ = val;
return *this; return *this;
} }
...@@ -800,10 +797,14 @@ Client::doRequest( ...@@ -800,10 +797,14 @@ Client::doRequest(
return Async::Promise<Response>([=](Async::Resolver& resolve, Async::Rejection& reject) { return Async::Promise<Response>([=](Async::Resolver& resolve, Async::Rejection& reject) {
Guard guard(queuesLock); Guard guard(queuesLock);
std::unique_ptr<Connection::RequestData> data(
new Connection::RequestData(std::move(resolve), std::move(reject), request, timeout, nullptr));
auto& queue = requestsQueues[s.first]; auto& queue = requestsQueues[s.first];
auto entry = queue.allocEntry( if (!queue.enqueue(data.get()))
Connection::RequestData(std::move(resolve), std::move(reject), request, timeout, nullptr)); data->reject(std::runtime_error("Queue is full"));
queue.push(entry); else
data.release();
}); });
} }
else { else {
...@@ -836,29 +837,28 @@ Client::processRequestQueue() { ...@@ -836,29 +837,28 @@ Client::processRequestQueue() {
const auto& domain = queues.first; const auto& domain = queues.first;
auto& queue = queues.second; auto& queue = queues.second;
if (queue.empty()) continue;
for (;;) { for (;;) {
auto conn = pool.pickConnection(domain); auto conn = pool.pickConnection(domain);
if (!conn) if (!conn)
break; break;
auto& queue = queues.second; auto& queue = queues.second;
auto entry = queue.popSafe(); Connection::RequestData *data;
if (!entry) { if (!queue.dequeue(data)) {
pool.releaseConnection(conn); pool.releaseConnection(conn);
break; break;
} }
auto& req = entry->data();
conn->performImpl( conn->performImpl(
req.request, data->request,
req.timeout, data->timeout,
std::move(req.resolve), std::move(req.reject), std::move(data->resolve), std::move(data->reject),
[=]() { [=]() {
pool.releaseConnection(conn); pool.releaseConnection(conn);
processRequestQueue(); processRequestQueue();
}); });
delete data;
} }
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment