Libraries / pistache · Commits · c5fd213e

Unverified commit c5fd213e, authored Oct 05, 2018 by Dennis Jenkins, committed by GitHub on Oct 05, 2018.

Merge pull request #304 from iroddis/streaming_test

Streaming test

Parents: fb27ff31 4b185059

Showing 4 changed files with 209 additions and 100 deletions (+209 -100):

    include/pistache/transport.h    +14   -35
    src/common/transport.cc         +86   -64
    tests/CMakeLists.txt             +2    -1
    tests/streaming_test.cc        +107    -0
include/pistache/transport.h

@@ -12,6 +12,8 @@
#include <pistache/async.h>
#include <pistache/stream.h>

#include <deque>

namespace Pistache {
namespace Tcp {
@@ -31,30 +33,15 @@ public:

    template<typename Buf>
    Async::Promise<ssize_t> asyncWrite(Fd fd, const Buf& buffer, int flags = 0) {
        // If the I/O operation has been initiated from an other thread, we queue it and we'll process
        // it in our own thread so that we make sure that every I/O operation happens in the right thread
        auto ctx = context();
        const bool isInRightThread = std::this_thread::get_id() == ctx.thread();
        if (!isInRightThread) {
            return Async::Promise<ssize_t>([=](Async::Deferred<ssize_t> deferred) mutable {
                BufferHolder holder(buffer);
                auto detached = holder.detach();
                WriteEntry write(std::move(deferred), detached, flags);
                write.peerFd = fd;
                auto *e = writesQueue.allocEntry(std::move(write));
                writesQueue.push(e);
            });
        }
        return Async::Promise<ssize_t>([&](Async::Resolver& resolve, Async::Rejection& reject) {
            auto it = toWrite.find(fd);
            if (it != std::end(toWrite)) {
                reject(Pistache::Error("Multiple writes on the same fd"));
                return;
            }
            asyncWriteImpl(fd, flags, BufferHolder(buffer), Async::Deferred<ssize_t>(std::move(resolve), std::move(reject)));
        // Always enqueue reponses for sending. Giving preference to consumer
        // context means chunked responses could be sent out of order.
        return Async::Promise<ssize_t>([=](Async::Deferred<ssize_t> deferred) mutable {
            BufferHolder holder(buffer);
            auto detached = holder.detach();
            WriteEntry write(std::move(deferred), detached, flags);
            write.peerFd = fd;
            auto *e = writesQueue.allocEntry(std::move(write));
            writesQueue.push(e);
        });
    }
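The new comment above ("Always enqueue reponses for sending...") is the rationale for the rewrite: every asyncWrite is now handed to writesQueue and carried out by the transport's own thread, so chunks of one response can no longer race each other. Below is a minimal standalone sketch, not Pistache code, of why funnelling every write through a single-consumer FIFO preserves the order in which each producer submitted its chunks; WriteQueue and all the names in it are invented for illustration. The real transport uses PollableQueue<WriteEntry> and the reactor thread rather than a condition variable, but the ordering argument it illustrates is the same.

#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

struct WriteQueue {
    std::mutex mtx;
    std::condition_variable cv;
    std::deque<std::string> entries;   // stands in for the queued WriteEntry objects
    bool done = false;

    void push(std::string chunk) {
        { std::lock_guard<std::mutex> lk(mtx); entries.push_back(std::move(chunk)); }
        cv.notify_one();
    }
    void close() {
        { std::lock_guard<std::mutex> lk(mtx); done = true; }
        cv.notify_one();
    }
};

int main() {
    WriteQueue q;

    // Single consumer: plays the role of the transport thread draining writesQueue.
    std::thread consumer([&] {
        std::unique_lock<std::mutex> lk(q.mtx);
        for (;;) {
            q.cv.wait(lk, [&] { return !q.entries.empty() || q.done; });
            while (!q.entries.empty()) {
                std::cout << q.entries.front();   // "send" strictly in FIFO order
                q.entries.pop_front();
            }
            if (q.done) break;
        }
    });

    // Producers enqueue their chunks; within each producer the chunks stay in
    // submission order because every write goes through the same queue.
    const char letters[] = {'A', 'B'};
    std::vector<std::thread> producers;
    for (char letter : letters) {
        producers.emplace_back([&, letter] {
            for (int chunk = 0; chunk < 3; ++chunk)
                q.push(std::string(4, letter));
        });
    }
    for (auto& p : producers) p.join();
    q.close();
    consumer.join();
    std::cout << '\n';
}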
@@ -208,13 +195,8 @@ private:

        std::shared_ptr<Peer> peer;
    };

    /* @Incomplete: this should be a std::dequeue.
       If an asyncWrite on a particular fd is initiated whereas the fd is not write-ready
       yet and some writes are still on-hold, writes should queue-up so that when the
       fd becomes ready again, we can write everything
    */
    PollableQueue<WriteEntry> writesQueue;
    std::unordered_map<Fd, WriteEntry> toWrite;
    std::unordered_map<Fd, std::deque<WriteEntry>> toWrite;

    PollableQueue<TimerEntry> timersQueue;
    std::unordered_map<Fd, TimerEntry> timers;
@@ -242,11 +224,8 @@ private:

    void armTimerMsImpl(TimerEntry entry);

    void asyncWriteImpl(Fd fd, WriteEntry& entry, WriteStatus status = FirstTry);
    void asyncWriteImpl(Fd fd, int flags, const BufferHolder& buffer, Async::Deferred<ssize_t> deferred, WriteStatus status = FirstTry);
    // This will attempt to drain the write queue for the fd
    void asyncWriteImpl(Fd fd);

    void handlePeerDisconnection(const std::shared_ptr<Peer>& peer);
    void handleIncoming(const std::shared_ptr<Peer>& peer);
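Two related declaration changes appear above: toWrite now maps each fd to a std::deque<WriteEntry> instead of a single WriteEntry, and the two old asyncWriteImpl overloads are replaced by one asyncWriteImpl(Fd fd) that drains that queue. A rough standalone sketch of the shape of this bookkeeping follows; the Fd alias and WriteEntry struct here are toy stand-ins invented for illustration, not the real Pistache definitions.

#include <cstddef>
#include <deque>
#include <string>
#include <unordered_map>

using Fd = int;                                                  // toy stand-in for Pistache's Fd
struct WriteEntry { std::string buffer; std::size_t offset; };   // toy stand-in

// One pending-write FIFO per connection, mirroring the new
// std::unordered_map<Fd, std::deque<WriteEntry>> toWrite member.
std::unordered_map<Fd, std::deque<WriteEntry>> toWrite;

void onNewPeer(Fd fd)                { toWrite.emplace(fd, std::deque<WriteEntry>{}); }
void onDisconnect(Fd fd)             { toWrite.erase(fd); }
void enqueue(Fd fd, std::string buf) { toWrite[fd].push_back(WriteEntry{std::move(buf), 0}); }

int main() {
    onNewPeer(4);
    enqueue(4, "chunk-1");   // front(): the write currently in progress for fd 4
    enqueue(4, "chunk-2");   // waits behind chunk-1 until that one is fully sent
    onDisconnect(4);         // drops whatever is still pending for fd 4
}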
src/common/transport.cc

@@ -54,6 +54,8 @@ Transport::handleNewPeer(const std::shared_ptr<Tcp::Peer>& peer) {
    }
    else {
        handlePeer(peer);
    }
    int fd = peer->fd();
    toWrite.emplace(fd, std::deque<WriteEntry>{});
}

void
@@ -99,8 +101,8 @@ Transport::onReady(const Aio::FdSet& fds) {
                reactor()->modifyFd(key(), fd, NotifyOn::Read, Polling::Mode::Edge);

                auto & write = it->second;
                asyncWriteImpl(fd, write, Retry);
                // Try to drain the queue
                asyncWriteImpl(fd);
            }
        }
    }
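In onReady, a writable notification now re-arms the fd for reads and calls asyncWriteImpl(fd) to drain the whole per-fd queue instead of retrying one stored entry. Because the reactor registers fds with Polling::Mode::Edge, a readiness edge is only delivered on transitions, so a drain has to keep writing until the socket would block. Here is a self-contained sketch of that drain-until-blocked pattern with a fake socket standing in for ::send; FakeSocket and drainUntilBlocked are invented names for illustration, not Pistache APIs.

#include <cstddef>
#include <deque>
#include <iostream>
#include <string>

struct FakeSocket {
    std::size_t budget;                  // bytes the "kernel buffer" will accept right now
    std::string sent;
    // Mimics ::send(): writes up to `budget` bytes, otherwise reports "would block".
    long trySend(const char* p, std::size_t len) {
        if (budget == 0) return -1;      // stands in for EAGAIN / EWOULDBLOCK
        std::size_t n = len < budget ? len : budget;
        sent.append(p, n);
        budget -= n;
        return static_cast<long>(n);
    }
};

// Drain the per-fd queue until it is empty or the socket would block.
// Returns true when everything was flushed.
bool drainUntilBlocked(FakeSocket& sock, std::deque<std::string>& wq) {
    while (!wq.empty()) {
        std::string& front = wq.front();
        long n = sock.trySend(front.data(), front.size());
        if (n < 0)
            return false;                // would block: stop and wait for the next edge
        front.erase(0, static_cast<std::size_t>(n));
        if (front.empty())
            wq.pop_front();
    }
    return true;
}

int main() {
    FakeSocket sock{10, ""};
    std::deque<std::string> wq{"first-chunk|", "second-chunk|"};

    bool done = drainUntilBlocked(sock, wq);          // stops after 10 bytes
    std::cout << "flushed=" << done << " sent=" << sock.sent << '\n';

    sock.budget = 100;                                // the next "writable" edge arrives
    done = drainUntilBlocked(sock, wq);
    std::cout << "flushed=" << done << " sent=" << sock.sent << '\n';
}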
@@ -164,80 +166,94 @@ Transport::handlePeerDisconnection(const std::shared_ptr<Peer>& peer) {
        throw std::runtime_error("Could not find peer to erase");

    peers.erase(it);
    toWrite.erase(fd);

    close(fd);
}

void
Transport::asyncWriteImpl(Fd fd, Transport::WriteEntry& entry, WriteStatus status)
{
    asyncWriteImpl(fd, entry.flags, entry.buffer, std::move(entry.deferred), status);
}

void
Transport::asyncWriteImpl(Fd fd, int flags, const BufferHolder& buffer, Async::Deferred<ssize_t> deferred, WriteStatus status)
{
    auto cleanUp = [&]() {
        // Clean up buffers
        auto & wq = toWrite[fd];
        while (wq.size() > 0) {
            auto & entry = wq.front();
            const BufferHolder & buffer = entry.buffer;
            if (buffer.isRaw()) {
                auto raw = buffer.raw();
                if (raw.isOwned) delete[] raw.data;
            }
        }
        toWrite.erase(fd);

        if (status == Retry)
            toWrite.erase(fd);
    };

    size_t totalWritten = buffer.offset();
    for (;;) {
        ssize_t bytesWritten = 0;
        auto len = buffer.size() - totalWritten;
        if (buffer.isRaw()) {
            auto raw = buffer.raw();
            auto ptr = raw.data + totalWritten;
            bytesWritten = ::send(fd, ptr, len, flags | MSG_NOSIGNAL);
        }
        else {
            auto file = buffer.fd();
            off_t offset = totalWritten;
            bytesWritten = ::sendfile(fd, file, &offset, len);
        }
        if (bytesWritten < 0) {
            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                // save for a future retry with the totalWritten offset.
                if (status == Retry) {
                    toWrite.erase(fd);
                }
                toWrite.insert(std::make_pair(fd, WriteEntry(std::move(deferred), buffer.detach(totalWritten), flags)));
                close(fd);
            }
            reactor()->modifyFd(key(), fd, NotifyOn::Read | NotifyOn::Write, Polling::Mode::Edge);

void
Transport::asyncWriteImpl(Fd fd)
{
    auto it = toWrite.find(fd);

    // cleanup will have been handled by handlePeerDisconnection
    if (it == std::end(toWrite)) { return; }

    auto & wq = it->second;
    while (wq.size() > 0) {
        auto & entry = wq.front();
        int flags = entry.flags;
        const BufferHolder & buffer = entry.buffer;
        Async::Deferred<ssize_t> deferred = std::move(entry.deferred);

        auto cleanUp = [&]() {
            if (buffer.isRaw()) {
                auto raw = buffer.raw();
                if (raw.isOwned) delete[] raw.data;
            }
            else {
                cleanUp();
                deferred.reject(Pistache::Error::system("Could not write data"));
                wq.pop_front();
                if (wq.size() == 0) {
                    toWrite.erase(fd);
                    reactor()->modifyFd(key(), fd, NotifyOn::Read, Polling::Mode::Edge);
                }
                break;
            }
            else {
                totalWritten += bytesWritten;
                if (totalWritten >= buffer.size()) {
                    cleanUp();
                    if (buffer.isFile()) {
                        // done with the file buffer, nothing else knows whether to
                        // close it with the way the code is written.
                        ::close(buffer.fd());
        };

        bool halt = false;
        size_t totalWritten = buffer.offset();
        for (;;) {
            ssize_t bytesWritten = 0;
            auto len = buffer.size() - totalWritten;
            if (buffer.isRaw()) {
                auto raw = buffer.raw();
                auto ptr = raw.data + totalWritten;
                bytesWritten = ::send(fd, ptr, len, flags | MSG_NOSIGNAL);
            }
            else {
                auto file = buffer.fd();
                off_t offset = totalWritten;
                bytesWritten = ::sendfile(fd, file, &offset, len);
            }
            if (bytesWritten < 0) {
                if (errno == EAGAIN || errno == EWOULDBLOCK) {
                    wq.pop_front();
                    wq.push_front(WriteEntry(std::move(deferred), buffer.detach(totalWritten), flags));
                    reactor()->modifyFd(key(), fd, NotifyOn::Read | NotifyOn::Write, Polling::Mode::Edge);
                }
                else {
                    cleanUp();
                    deferred.reject(Pistache::Error::system("Could not write data"));
                    halt = true;
                }
                // Cast to match the type of defered template
                // to avoid a BadType exception
                deferred.resolve(static_cast<ssize_t>(totalWritten));
                break;
            }
            else {
                totalWritten += bytesWritten;
                if (totalWritten >= buffer.size()) {
                    cleanUp();
                    if (buffer.isFile()) {
                        // done with the file buffer, nothing else knows whether to
                        // close it with the way the code is written.
                        ::close(buffer.fd());
                    }
                    // Cast to match the type of defered template
                    // to avoid a BadType exception
                    deferred.resolve(static_cast<ssize_t>(totalWritten));
                    break;
                }
            }
        }
        if (halt) break;
    }
}
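The comment "save for a future retry with the totalWritten offset" describes the partial-write path above: when ::send or ::sendfile reports EAGAIN/EWOULDBLOCK mid-buffer, the remaining bytes are re-queued at the front of the fd's deque (wq.pop_front() followed by wq.push_front(...) with buffer.detach(totalWritten)), so the next drain resumes at the saved offset and later entries cannot overtake the partial one. A compact standalone sketch of that idea follows; Entry and drainOnce are invented for illustration and are not the Pistache implementation.

#include <cstddef>
#include <deque>
#include <iostream>
#include <string>

struct Entry {
    std::string data;
    std::size_t offset;                // bytes already written, like buffer.detach(totalWritten)
};

// Simulates one drain pass where the "socket" accepts at most maxBytes in total.
void drainOnce(std::deque<Entry>& wq, std::size_t maxBytes, std::string& wire) {
    std::size_t budget = maxBytes;
    while (!wq.empty() && budget > 0) {
        Entry entry = wq.front();
        wq.pop_front();

        std::size_t remaining = entry.data.size() - entry.offset;
        std::size_t n = remaining < budget ? remaining : budget;
        wire.append(entry.data, entry.offset, n);
        budget -= n;
        entry.offset += n;

        if (entry.offset < entry.data.size()) {
            wq.push_front(entry);      // partially written: requeue at the front
            return;                    // and wait until the fd is writable again
        }
    }
}

int main() {
    std::deque<Entry> wq{{"AAAAAAAA", 0}, {"BBBBBBBB", 0}};
    std::string wire;

    drainOnce(wq, 5, wire);            // first pass only gets "AAAAA" out
    drainOnce(wq, 100, wire);          // second pass resumes at offset 5
    std::cout << wire << '\n';         // prints AAAAAAAABBBBBBBB, still in order
}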
@@ -299,7 +315,13 @@ Transport::handleWriteQueue() {
        if (!entry) break;

        auto & write = entry->data();
        asyncWriteImpl(write.peerFd, write);
        auto fd = write.peerFd;

        // Sometimes writes can be enqueued after a client has already disconnected.
        // In that case, clear the queue
        auto it = toWrite.find(fd);
        if (it == std::end(toWrite)) { continue; }

        it->second.push_back(std::move(write));
        reactor()->modifyFd(key(), fd, NotifyOn::Read | NotifyOn::Write, Polling::Mode::Edge);
    }
}
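handleWriteQueue() no longer writes directly; it appends each dequeued WriteEntry to the owning fd's queue and, as the new comment notes, drops it when the client has already disconnected (the fd is no longer in toWrite). A tiny sketch of that lookup-before-append guard, using invented toy types rather than the real ones:

#include <deque>
#include <string>
#include <unordered_map>

using Fd = int;
struct WriteEntry { Fd peerFd; std::string buffer; };

// Append a queued write to its peer's queue, unless the peer is already gone.
// Returns false when the entry is dropped (fd no longer in the table).
bool routeWrite(std::unordered_map<Fd, std::deque<WriteEntry>>& toWrite, WriteEntry write) {
    auto it = toWrite.find(write.peerFd);
    if (it == toWrite.end())
        return false;                        // client disconnected: drop the write
    it->second.push_back(std::move(write));  // otherwise queue it for the next drain
    return true;
}

int main() {
    std::unordered_map<Fd, std::deque<WriteEntry>> toWrite;
    toWrite.emplace(7, std::deque<WriteEntry>{});
    routeWrite(toWrite, {7, "queued"});      // accepted
    routeWrite(toWrite, {9, "dropped"});     // fd 9 never registered: silently dropped
}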
tests/CMakeLists.txt

@@ -3,7 +3,7 @@ function(pistache_test test_name)
    set(TEST_SOURCE ${test_name}.cc)
    add_executable(${TEST_EXECUTABLE} ${TEST_SOURCE})
    target_link_libraries(${TEST_EXECUTABLE} gtest gtest_main pistache)
    target_link_libraries(${TEST_EXECUTABLE} gtest gtest_main pistache curl)
    add_test(${test_name} ${TEST_EXECUTABLE})
endfunction()
@@ -20,5 +20,6 @@ pistache_test(http_client_test)
pistache_test(net_test)
pistache_test(listener_test)
pistache_test(payload_test)
pistache_test(streaming_test)
pistache_test(rest_server_test)
pistache_test(string_view_test)
\ No newline at end of file
tests/streaming_test.cc (new file, 0 → 100644)
#include "gtest/gtest.h"
#include <pistache/http.h>
#include <pistache/description.h>
#include <pistache/client.h>
#include <pistache/endpoint.h>
#include <curl/curl.h>
#include <curl/easy.h>
using
namespace
std
;
using
namespace
Pistache
;
static
const
size_t
N_LETTERS
=
26
;
static
const
size_t
LETTER_REPEATS
=
100000
;
static
const
size_t
SET_REPEATS
=
10
;
static
const
uint16_t
PORT
=
9082
;
void
dumpData
(
const
Rest
::
Request
&
req
,
Http
::
ResponseWriter
response
)
{
UNUSED
(
req
);
auto
stream
=
response
.
stream
(
Http
::
Code
::
Ok
);
char
letter
=
'A'
;
std
::
mutex
responseGuard
;
std
::
vector
<
std
::
thread
>
workers
;
for
(
size_t
s
=
0
;
s
<
SET_REPEATS
;
++
s
)
{
for
(
size_t
i
=
0
;
i
<
N_LETTERS
;
++
i
)
{
std
::
thread
job
([
&
,
i
]()
->
void
{
const
size_t
nchunks
=
10
;
size_t
chunk_size
=
LETTER_REPEATS
/
nchunks
;
std
::
string
payload
(
chunk_size
,
letter
+
i
);
{
std
::
unique_lock
<
std
::
mutex
>
lock
(
responseGuard
);
for
(
size_t
chunk
=
0
;
chunk
<
nchunks
;
++
chunk
)
{
stream
.
write
(
payload
.
c_str
(),
chunk_size
);
stream
.
flush
();
}
}
});
workers
.
push_back
(
std
::
move
(
job
));
}
}
for
(
auto
&
w
:
workers
)
{
w
.
join
();
}
stream
.
ends
();
}
TEST
(
stream
,
from_description
)
{
Address
addr
(
Ipv4
::
any
(),
PORT
);
const
size_t
threads
=
20
;
std
::
shared_ptr
<
Http
::
Endpoint
>
endpoint
;
Rest
::
Description
desc
(
"Rest Description Test"
,
"v1"
);
Rest
::
Router
router
;
desc
.
route
(
desc
.
get
(
"/"
))
.
bind
(
&
dumpData
)
.
response
(
Http
::
Code
::
Ok
,
"Response to the /ready call"
);
router
.
initFromDescription
(
desc
);
auto
flags
=
Tcp
::
Options
::
InstallSignalHandler
|
Tcp
::
Options
::
ReuseAddr
;
auto
opts
=
Http
::
Endpoint
::
options
()
.
threads
(
threads
)
.
flags
(
flags
)
.
maxPayload
(
1024
*
1024
)
;
endpoint
=
std
::
make_shared
<
Pistache
::
Http
::
Endpoint
>
(
addr
);
endpoint
->
init
(
opts
);
endpoint
->
setHandler
(
router
.
handler
());
endpoint
->
serveThreaded
();
std
::
stringstream
ss
;
// from https://stackoverflow.com/questions/6624667/can-i-use-libcurls-curlopt-writefunction-with-a-c11-lambda-expression#14720398
typedef
size_t
(
*
CURL_WRITEFUNCTION_PTR
)(
void
*
,
size_t
,
size_t
,
void
*
);
auto
curl_callback
=
[](
void
*
ptr
,
size_t
size
,
size_t
nmemb
,
void
*
stream
)
->
size_t
{
auto
ss
=
static_cast
<
std
::
stringstream
*>
(
stream
);
ss
->
write
(
static_cast
<
char
*>
(
ptr
),
size
*
nmemb
);
return
size
*
nmemb
;
};
std
::
string
url
=
"http://localhost:"
+
std
::
to_string
(
PORT
)
+
"/"
;
CURLcode
res
=
CURLE_FAILED_INIT
;
CURL
*
curl
=
curl_easy_init
();
if
(
curl
)
{
curl_easy_setopt
(
curl
,
CURLOPT_URL
,
url
.
c_str
());
curl_easy_setopt
(
curl
,
CURLOPT_WRITEFUNCTION
,
static_cast
<
CURL_WRITEFUNCTION_PTR
>
(
curl_callback
));
curl_easy_setopt
(
curl
,
CURLOPT_WRITEDATA
,
&
ss
);
res
=
curl_easy_perform
(
curl
);
curl_easy_cleanup
(
curl
);
}
endpoint
->
shutdown
();
if
(
res
!=
CURLE_OK
)
std
::
cerr
<<
curl_easy_strerror
(
res
)
<<
std
::
endl
;
ASSERT_EQ
(
res
,
CURLE_OK
);
ASSERT_EQ
(
ss
.
str
().
size
(),
SET_REPEATS
*
LETTER_REPEATS
*
N_LETTERS
);
}
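The test asserts only the total number of bytes received (SET_REPEATS * LETTER_REPEATS * N_LETTERS). A possible follow-up check, sketched below and not part of the committed test, would count each letter in the collected body to confirm that every worker's chunks arrived in full (the expected count per letter would be SET_REPEATS * LETTER_REPEATS):

#include <array>
#include <cstddef>
#include <iostream>
#include <string>

// Count occurrences of 'A'..'Z' in the body collected by the curl callback.
std::array<std::size_t, 26> letterCounts(const std::string& body) {
    std::array<std::size_t, 26> counts{};
    for (char c : body)
        if (c >= 'A' && c <= 'Z')
            ++counts[static_cast<std::size_t>(c - 'A')];
    return counts;
}

int main() {
    // In the test one could assert, for each of the N_LETTERS letters:
    //   counts[i] == SET_REPEATS * LETTER_REPEATS
    auto counts = letterCounts("ABBA");
    std::cout << counts[0] << ' ' << counts[1] << '\n';   // prints 2 2
}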