Unverified Commit 41566ea1 authored by Dennis Jenkins, committed by GitHub

Merge pull request #550 from iroddis/response_limit

Request and response limits
parents 7cb671e0 48cc34ba
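
For context, here is a minimal usage sketch of the options this change introduces. Only maxRequestSize()/maxResponseSize() and the deprecation of maxPayload() come from the diff below; the handler, port, and size values are hypothetical and chosen purely for illustration.

    #include <pistache/endpoint.h>
    #include <pistache/http.h>

    using namespace Pistache;

    // Hypothetical handler, included only to make the sketch self-contained.
    struct EchoHandler : public Http::Handler {
        HTTP_PROTOTYPE(EchoHandler)
        void onRequest(const Http::Request& request, Http::ResponseWriter response) override {
            response.send(Http::Code::Ok, request.body());
        }
    };

    int main() {
        Address addr(Ipv4::any(), Port(9080));
        auto opts = Http::Endpoint::options()
                        .threads(2)
                        .maxRequestSize(1024 * 1024)        // replaces the deprecated maxPayload()
                        .maxResponseSize(8 * 1024 * 1024);  // new cap on dynamically built responses
        Http::Endpoint server(addr);
        server.init(opts);   // init() copies both limits into the stream-buffer statics
        server.setHandler(Http::make_handler<EchoHandler>());
        server.serve();
    }

Note that both limits are stored in static members (ArrayStreamBuf<char>::maxSize and DynamicStreamBuf::maxSize), so whichever Endpoint::init() runs last applies them process-wide rather than per endpoint.
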
@@ -2,6 +2,7 @@
 #include <cstddef>
 #include <cstdint>
+#include <limits>

 // Allow compile-time overload
 namespace Pistache
@@ -16,9 +17,10 @@ namespace Const
     static constexpr size_t DefaultTimerPoolSize = 128;

     // Defined from CMakeLists.txt in project root
-    static constexpr size_t DefaultMaxPayload = 4096;
+    static constexpr size_t DefaultMaxRequestSize = 4096;
+    static constexpr size_t DefaultMaxResponseSize = std::numeric_limits<uint32_t>::max();
     static constexpr size_t ChunkSize = 1024;

     static constexpr uint16_t HTTP_STANDARD_PORT = 80;
 } // namespace Const
 } // namespace Pistache
\ No newline at end of file
@@ -21,13 +21,18 @@ public:
         Options& threads(int val);
         Options& flags(Flags<Tcp::Options> flags);
         Options& backlog(int val);
+        Options& maxRequestSize(size_t val);
+        Options& maxResponseSize(size_t val);
+
+        [[deprecated("Replaced by maxRequestSize(val)")]]
         Options& maxPayload(size_t val);

     private:
         int threads_;
         Flags<Tcp::Options> flags_;
         int backlog_;
-        size_t maxPayload_;
+        size_t maxRequestSize_;
+        size_t maxResponseSize_;
         Options();
     };

     Endpoint();
...
/* stream.h
   Mathieu Stefani, 05 September 2015

   A set of classes to control input over a sequence of bytes
*/
@@ -120,7 +120,7 @@ private:
 };

 template<typename CharT>
-size_t ArrayStreamBuf<CharT>::maxSize = Const::DefaultMaxPayload;
+size_t ArrayStreamBuf<CharT>::maxSize = Const::DefaultMaxRequestSize;

 struct RawBuffer
 {
@@ -158,11 +158,10 @@ public:
     typedef typename Base::traits_type traits_type;
     typedef typename Base::int_type int_type;

-    DynamicStreamBuf(
-        size_t size,
-        size_t maxSize = std::numeric_limits<uint32_t>::max())
-        : maxSize_(maxSize)
-        , data_()
+    static size_t maxSize;
+
+    DynamicStreamBuf( size_t size )
+        : data_()
     {
         reserve(size);
     }
@@ -171,14 +170,12 @@ public:
     DynamicStreamBuf& operator=(const DynamicStreamBuf& other) = delete;

     DynamicStreamBuf(DynamicStreamBuf&& other)
-        : maxSize_(other.maxSize_)
-        , data_(std::move(other.data_)) {
+        : data_(std::move(other.data_)) {
         setp(other.pptr(), other.epptr());
         other.setp(nullptr, nullptr);
     }

     DynamicStreamBuf& operator=(DynamicStreamBuf&& other) {
-        maxSize_ = other.maxSize_;
         data_ = std::move(other.data_);
         setp(other.pptr(), other.epptr());
         other.setp(nullptr, nullptr);
@@ -200,7 +197,6 @@ public:
 private:
     void reserve(size_t size);

-    size_t maxSize_;
     std::vector<char> data_;
 };
...
@@ -102,11 +102,13 @@ size_t FileBuffer::size() const
     return size_;
 }

+size_t DynamicStreamBuf::maxSize = Const::DefaultMaxResponseSize;
+
 DynamicStreamBuf::int_type
 DynamicStreamBuf::overflow(DynamicStreamBuf::int_type ch) {
     if (!traits_type::eq_int_type(ch, traits_type::eof())) {
         const auto size = data_.size();
-        if (size < maxSize_) {
+        if (size < maxSize) {
             reserve((size ? size : 1u) * 2);
             *pptr() = ch;
             pbump(1);
@@ -120,7 +122,7 @@ DynamicStreamBuf::overflow(DynamicStreamBuf::int_type ch) {
 void
 DynamicStreamBuf::reserve(size_t size)
 {
-    if (size > maxSize_) size = maxSize_;
+    if (size > maxSize) size = maxSize;
     const size_t oldSize = data_.size();
     data_.resize(size);
     this->setp(&data_[0] + oldSize, &data_[0] + size);
...
@@ -16,7 +16,8 @@ Endpoint::Options::Options()
     : threads_(1)
     , flags_()
     , backlog_(Const::MaxBacklog)
-    , maxPayload_(Const::DefaultMaxPayload)
+    , maxRequestSize_(Const::DefaultMaxRequestSize)
+    , maxResponseSize_(Const::DefaultMaxResponseSize)
 { }

 Endpoint::Options&
@@ -37,9 +38,20 @@ Endpoint::Options::backlog(int val) {
     return *this;
 }

 Endpoint::Options&
+Endpoint::Options::maxRequestSize(size_t val) {
+    maxRequestSize_ = val;
+    return *this;
+}
+
+Endpoint::Options&
 Endpoint::Options::maxPayload(size_t val) {
-    maxPayload_ = val;
-    return *this;
+    return maxRequestSize(val);
+}
+
+Endpoint::Options&
+Endpoint::Options::maxResponseSize(size_t val) {
+    maxResponseSize_ = val;
+    return *this;
 }
@@ -53,7 +65,8 @@ Endpoint::Endpoint(const Address& addr)
 void
 Endpoint::init(const Endpoint::Options& options) {
     listener.init(options.threads_, options.flags_);
-    ArrayStreamBuf<char>::maxSize = options.maxPayload_;
+    ArrayStreamBuf<char>::maxSize = options.maxRequestSize_;
+    DynamicStreamBuf::maxSize = options.maxResponseSize_;
 }

 void
...
@@ -25,7 +25,7 @@ if (PISTACHE_ENABLE_NETWORK_TESTS)
     pistache_test(net_test)
 endif (PISTACHE_ENABLE_NETWORK_TESTS)

 pistache_test(listener_test)
-pistache_test(payload_test)
+pistache_test(request_size_test)
 pistache_test(streaming_test)
 pistache_test(rest_server_test)
 pistache_test(string_view_test)
...
@@ -34,11 +34,11 @@ struct TestSet {
     Http::Code actualCode;
 };

-using PayloadTestSets = std::vector<TestSet>;
+using RequestSizeTestSets = std::vector<TestSet>;

-void testPayloads(const std::string& url, const PayloadTestSets& testPayloads) {
+void testRequestSizes(const std::string& url, const RequestSizeTestSets& testRequestSizes) {
     // Client tests to make sure the payload is enforced
-    PayloadTestSets testResults;
+    RequestSizeTestSets testResults;
     std::mutex resultsetMutex;

     Http::Client client;
@@ -48,8 +48,8 @@ void testPayloads(const std::string& url, const PayloadTestSets& testPayloads) {
     client.init(client_opts);

     std::vector<Async::Promise<Http::Response>> responses;
-    responses.reserve(testPayloads.size());
-    for (auto & t : testPayloads) {
+    responses.reserve(testRequestSizes.size());
+    for (auto & t : testRequestSizes) {
         std::string payload(t.bytes, 'A');
         auto response = client.post(url).body(payload).timeout(std::chrono::seconds(wait_time)).send();
         response.then([t,&testResults,&resultsetMutex](Http::Response rsp) {
@@ -78,11 +78,11 @@ void handleEcho(const Rest::Request& /*request*/, Http::ResponseWriter response)
     response.send(Http::Code::Ok, "", MIME(Text, Plain));
 }

-TEST(payload, from_description)
+TEST(request_size, from_description)
 {
     const Address addr(Ipv4::any(), Port(0));
     const size_t threads = 20;
-    const size_t maxPayload = 1024; // very small
+    const size_t maxRequestSize = 1024; // very small

     Rest::Description desc("Rest Description Test", "v1");
     Rest::Router router;
@@ -98,7 +98,7 @@ TEST(payload, from_description)
     auto opts = Http::Endpoint::options()
         .threads(threads)
         .flags(flags)
-        .maxPayload(maxPayload);
+        .maxRequestSize(maxRequestSize);

     auto endpoint = std::make_shared<Pistache::Http::Endpoint>(addr);
     endpoint->init(opts);
@@ -110,18 +110,18 @@ TEST(payload, from_description)
     std::this_thread::sleep_for(std::chrono::milliseconds(150));
     const auto port = endpoint->getPort();

-    PayloadTestSets payloads{
+    RequestSizeTestSets payloads{
         {800, Http::Code::Ok}
         , {1024, Http::Code::Request_Entity_Too_Large}
         ,{2048, Http::Code::Request_Entity_Too_Large}
     };

-    testPayloads("127.0.0.1:" + std::to_string(port), payloads);
+    testRequestSizes("127.0.0.1:" + std::to_string(port), payloads);

     endpoint->shutdown();
 }

-TEST(payload, manual_construction) {
+TEST(request_size, manual_construction) {
     class MyHandler : public Http::Handler {
     public:
         HTTP_PROTOTYPE(MyHandler)
@@ -140,14 +140,14 @@ TEST(payload, manual_construction) {
     const Address addr(Ipv4::any(), Port(0));
     const int threads = 20;
     const auto flags = Tcp::Options::ReuseAddr;
-    const size_t maxPayload = 2048;
+    const size_t maxRequestSize = 2048;

     // Build in-process server threads.
     auto endpoint = std::make_shared<Http::Endpoint>(addr);
     auto opts = Http::Endpoint::options()
         .threads(threads)
         .flags(flags)
-        .maxPayload(maxPayload);
+        .maxRequestSize(maxRequestSize);
     endpoint->init(opts);
     endpoint->setHandler(Http::make_handler<MyHandler>());
@@ -158,14 +158,14 @@ TEST(payload, manual_construction) {
     std::this_thread::sleep_for(std::chrono::milliseconds(150));
     const auto port = endpoint->getPort();

-    PayloadTestSets payloads{
+    RequestSizeTestSets payloads{
         {1024, Http::Code::Ok}
         , {1800, Http::Code::Ok}
         , {2048, Http::Code::Request_Entity_Too_Large}
         , {4096, Http::Code::Request_Entity_Too_Large}
     };

-    testPayloads("127.0.0.1:" + std::to_string(port), payloads);
+    testRequestSizes("127.0.0.1:" + std::to_string(port), payloads);

     endpoint->shutdown();
 }
...@@ -154,7 +154,7 @@ TEST(router_test, test_notfound_exactly_once) { ...@@ -154,7 +154,7 @@ TEST(router_test, test_notfound_exactly_once) {
Address addr(Ipv4::any(), 0); Address addr(Ipv4::any(), 0);
auto endpoint = std::make_shared<Http::Endpoint>(addr); auto endpoint = std::make_shared<Http::Endpoint>(addr);
auto opts = Http::Endpoint::options().threads(1).maxPayload(4096); auto opts = Http::Endpoint::options().threads(1).maxRequestSize(4096);
endpoint->init(opts); endpoint->init(opts);
int count_found = 0; int count_found = 0;
......
@@ -107,7 +107,7 @@ TEST(streaming, from_description)
     auto opts = Http::Endpoint::options()
         .threads(threads)
         .flags(flags)
-        .maxPayload(1024*1024)
+        .maxRequestSize(1024*1024)
     ;

     endpoint = std::make_shared<Pistache::Http::Endpoint>(addr);
...