Commit 576db850 authored by Ian Roddis

Augmenting test, removing context preference from transport

parent 04a67aa7
@@ -31,30 +31,15 @@ public:
template<typename Buf>
Async::Promise<ssize_t> asyncWrite(Fd fd, const Buf& buffer, int flags = 0) {
// If the I/O operation has been initiated from another thread, we queue it and we'll process
// it in our own thread so that we make sure that every I/O operation happens in the right thread
auto ctx = context();
const bool isInRightThread = std::this_thread::get_id() == ctx.thread();
if (!isInRightThread) {
return Async::Promise<ssize_t>([=](Async::Deferred<ssize_t> deferred) mutable {
BufferHolder holder(buffer);
auto detached = holder.detach();
WriteEntry write(std::move(deferred), detached, flags);
write.peerFd = fd;
auto *e = writesQueue.allocEntry(std::move(write));
writesQueue.push(e);
});
}
return Async::Promise<ssize_t>([&](Async::Resolver& resolve, Async::Rejection& reject) {
auto it = toWrite.find(fd);
if (it != std::end(toWrite)) {
reject(Pistache::Error("Multiple writes on the same fd"));
return;
}
asyncWriteImpl(fd, flags, BufferHolder(buffer), Async::Deferred<ssize_t>(std::move(resolve), std::move(reject)));
// Always enqueue responses for sending. Giving preference to consumer
// context means chunked responses could be sent out of order.
return Async::Promise<ssize_t>([=](Async::Deferred<ssize_t> deferred) mutable {
BufferHolder holder(buffer);
auto detached = holder.detach();
WriteEntry write(std::move(deferred), detached, flags);
write.peerFd = fd;
auto *e = writesQueue.allocEntry(std::move(write));
writesQueue.push(e);
});
}
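For context, a minimal standalone sketch of the ordering argument in the comment above: when every write is pushed onto one FIFO that only the transport's own thread drains, chunks reach the socket in enqueue order regardless of which thread produced them. The names below (the WriteEntry stand-in, drainWrites) are illustrative, not the real Transport code.

#include <iostream>
#include <queue>
#include <string>
#include <utility>

// Hypothetical stand-ins for the real writesQueue/WriteEntry machinery.
struct WriteEntry { int peerFd; std::string chunk; };
std::queue<WriteEntry> writesQueue;   // producers push; only the I/O thread pops

// Runs only on the transport's own thread: entries leave in the exact
// order they were enqueued, so chunked responses cannot be reordered.
void drainWrites() {
    while (!writesQueue.empty()) {
        WriteEntry e = std::move(writesQueue.front());
        writesQueue.pop();
        std::cout << "send(fd=" << e.peerFd << "): " << e.chunk << '\n';
    }
}

int main() {
    writesQueue.push({7, "chunk-1"});
    writesQueue.push({7, "chunk-2"});
    drainWrites();   // prints chunk-1 then chunk-2: FIFO order preserved
}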
......
@@ -15,15 +15,29 @@ void dumpData(const Rest::Request&req, Http::ResponseWriter response) {
UNUSED(req);
auto stream = response.stream(Http::Code::Ok);
std::streamsize n = 1000;
char data = 'A';
char letter = 'A';
std::mutex responseGuard;
std::vector<std::thread> workers;
auto sendPayload =
[&responseGuard, &stream, n](char let) -> void {
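// Each worker writes n bytes of its letter, split into 10 chunks; holding the
// mutex for the whole loop keeps one worker's chunks contiguous on the stream.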
size_t chunk_size = n / 10;
{
std::unique_lock<std::mutex> l(responseGuard);
for (size_t chunk = 0; chunk < 10; ++chunk) {
std::string payload(chunk_size, let);
stream.write(payload.c_str(), chunk_size);
stream.flush();
}
}
};
for (size_t i = 0; i < 26; ++i) {
std::cout << "Sending " << n << " bytes of " << data << std::endl;
std::string payload(data++, n);
stream.write(payload.c_str(), n);
stream.flush();
workers.emplace_back(std::thread(sendPayload, letter + i));
}
for (auto &w : workers) { w.join(); }
stream.ends();
}
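For reference, a hedged sketch of how a handler like dumpData could be wired up so a client (such as the curl call in the test below) can fetch the chunked stream. The /dump path, port, and thread count are illustrative assumptions, not taken from the test.

#include <pistache/endpoint.h>
#include <pistache/router.h>

int main() {
    using namespace Pistache;

    Rest::Router router;
    // dumpData is the handler defined above; the route path is made up here.
    Rest::Routes::Get(router, "/dump", Rest::Routes::bind(&dumpData));

    Http::Endpoint server(Address(Ipv4::any(), Port(9080)));
    server.init(Http::Endpoint::options().threads(1));
    server.setHandler(router.handler());
    server.serve();   // blocks; the test instead forks a server and curls it
}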
@@ -87,16 +101,12 @@ TEST(stream, from_description)
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &ss);
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
std::cout << "Curl failed: " << curl_easy_strerror(res) << std::endl;
throw std::runtime_error(curl_easy_strerror(res));
}
curl_easy_cleanup(curl);
}
std::cout << "GOT HERE" << std::endl;
std::cout << ss.str() << std::endl;
std::this_thread::sleep_for(std::chrono::milliseconds(150));
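// Expect 26 workers x 1000 bytes of their letter each.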
ASSERT_EQ(ss.str().size(), 26000);
kill(pid, SIGTERM);
int r;
......