Commit e52871cb authored by Mathieu Stefani

Now providing a much more convenient API for rusage

parent 145f472c
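As a reading aid, here is a minimal sketch of how the new load API can be polled, condensed from the LoadMonitor example added in this commit. The free function, its name, and the one-second interval are illustrative only, and the relevant Net::Http / Net::Tcp headers are assumed to be included:

#include <chrono>
#include <iostream>
#include <memory>
#include <thread>

// Poll the endpoint's listener for a CPU-load sample once per second.
// The previous Load is passed back in so the listener can diff against the
// last rusage snapshot; the very first call therefore reports 0%.
void pollLoad(const std::shared_ptr<Net::Http::Endpoint>& endpoint) {
    Net::Tcp::Listener::Load old;
    while (endpoint->isBound()) {
        endpoint->requestLoad(old).then([&](const Net::Tcp::Listener::Load& load) {
            old = load;  // keep this sample so the next call can compute a delta
            std::cout << "Global load: " << load.global << "%" << std::endl;
        }, Async::NoExcept);
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }
}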
@@ -32,15 +32,11 @@ class MyHandler : public Net::Http::Handler {
.add<Header::Server>("lys")
.add<Header::ContentType>(MIME(Text, Plain));
#if 0
auto stream = response.stream(Net::Http::Code::Ok);
stream << "PO";
stream << flush;
stream << "NG";
stream << ends;
#endif
response.send(Code::Ok, "PONG");
}
}
else if (req.resource() == "/echo") {
@@ -63,6 +59,50 @@ class MyHandler : public Net::Http::Handler {
}
};
struct LoadMonitor {
LoadMonitor(const std::shared_ptr<Net::Http::Endpoint>& endpoint)
: endpoint_(endpoint)
, interval(std::chrono::seconds(1))
{ }
void setInterval(std::chrono::seconds secs) {
interval = secs;
}
void start() {
thread.reset(new std::thread(std::bind(&LoadMonitor::run, this)));
}
~LoadMonitor() {
thread->join();
}
private:
std::shared_ptr<Net::Http::Endpoint> endpoint_;
std::unique_ptr<std::thread> thread;
std::chrono::seconds interval;
void run() {
Net::Tcp::Listener::Load old;
while (endpoint_->isBound()) {
endpoint_->requestLoad(old).then([&](const Net::Tcp::Listener::Load& load) {
old = load;
double global = load.global;
if (global > 100) global = 100;
if (global > 1)
std::cout << "Global load is " << global << "%" << std::endl;
else
std::cout << "Global load is 0%" << std::endl;
},
Async::NoExcept);
std::this_thread::sleep_for(std::chrono::seconds(interval));
}
}
};
int main(int argc, char *argv[]) {
Net::Port port(9080);
@@ -81,12 +121,17 @@ int main(int argc, char *argv[]) {
cout << "Cores = " << hardware_concurrency() << endl;
cout << "Using " << thr << " threads" << endl;
Net::Http::Endpoint server(addr);
auto server = std::make_shared<Net::Http::Endpoint>(addr);
auto opts = Net::Http::Endpoint::options()
.threads(thr)
.flags(Net::Tcp::Options::InstallSignalHandler);
server.init(opts);
server.setHandler(std::make_shared<MyHandler>());
server->init(opts);
server->setHandler(std::make_shared<MyHandler>());
LoadMonitor monitor(server);
monitor.setInterval(std::chrono::seconds(5));
monitor.start();
server.serve();
server->serve();
}
@@ -586,6 +586,11 @@ Endpoint::serve()
}
}
Async::Promise<Tcp::Listener::Load>
Endpoint::requestLoad(const Tcp::Listener::Load& old) {
return listener.requestLoad(old);
}
Endpoint::Options
Endpoint::options() {
return Options();
......
@@ -530,6 +530,12 @@ public:
void setHandler(const std::shared_ptr<Handler>& handler);
void serve();
bool isBound() const {
return listener.isBound();
}
Async::Promise<Tcp::Listener::Load> requestLoad(const Tcp::Listener::Load& old);
static Options options();
private:
......
@@ -176,14 +176,14 @@ Listener::bind(const Address& address) {
io->start(handler_, options_);
}
sh = false;
loadThread.reset(new std::thread([=]() {
this->runLoadThread();
}));
return true;
}
bool
Listener::isBound() const {
return g_listen_fd != -1;
}
void
Listener::run() {
for (;;) {
@@ -211,52 +211,59 @@ Listener::run() {
}
void
Listener::runLoadThread() {
std::vector<rusage> lastUsages;
Listener::shutdown() {
for (auto &worker: ioGroup) {
worker->shutdown();
}
}
while (!sh) {
std::vector<Async::Promise<rusage>> loads;
loads.reserve(ioGroup.size());
Async::Promise<Listener::Load>
Listener::requestLoad(const Listener::Load& old) {
std::vector<Async::Promise<rusage>> loads;
loads.reserve(ioGroup.size());
for (const auto& io: ioGroup) {
loads.push_back(io->getLoad());
}
for (const auto& io: ioGroup) {
loads.push_back(io->getLoad());
}
Async::whenAll(std::begin(loads), std::end(loads))
.then([&](const std::vector<rusage>& usages) {
auto totalElapsed = [](rusage usage) {
return (usage.ru_stime.tv_sec * 1e6 + usage.ru_stime.tv_usec)
+ (usage.ru_utime.tv_sec * 1e6 + usage.ru_utime.tv_usec);
};
return Async::whenAll(std::begin(loads), std::end(loads)).then([=](const std::vector<rusage>& usages) {
if (lastUsages.empty()) lastUsages = usages;
else {
for (size_t i = 0; i < usages.size(); ++i) {
auto last = lastUsages[i];
const auto& usage = usages[i];
Load res;
res.raw = usages;
auto now = totalElapsed(usage);
auto time = now - totalElapsed(last);
if (old.raw.empty()) {
res.global = 0.0;
for (size_t i = 0; i < ioGroup.size(); ++i) res.workers.push_back(0.0);
} else {
auto load = (time * 100.0) / 1e6;
auto totalElapsed = [](rusage usage) {
return (usage.ru_stime.tv_sec * 1e6 + usage.ru_stime.tv_usec)
+ (usage.ru_utime.tv_sec * 1e6 + usage.ru_utime.tv_usec);
};
//printf("Total load for I/O thread %lu = %.3lf%%\n", i, load);
auto now = std::chrono::system_clock::now();
std::chrono::microseconds tick = now - old.tick;
res.tick = now;
}
lastUsages = usages;
}
}, Async::NoExcept);
for (size_t i = 0; i < usages.size(); ++i) {
auto last = old.raw[i];
const auto& usage = usages[i];
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
auto nowElapsed = totalElapsed(usage);
auto timeElapsed = nowElapsed - totalElapsed(last);
void
Listener::shutdown() {
for (auto &worker: ioGroup) {
worker->shutdown();
}
sh = true;
auto loadPct = (timeElapsed * 100.0) / tick.count();
res.workers.push_back(loadPct);
res.global += loadPct;
}
res.global /= usages.size();
}
return Async::Promise<Load>::resolved(std::move(res));
}, Async::NoExcept);
}
Address
......
@@ -26,10 +26,16 @@ void setSocketOptions(Fd fd, Flags<Options> options);
class Listener {
public:
struct Load {
typedef std::chrono::system_clock::time_point TimePoint;
double global;
std::vector<double> workers;
std::vector<rusage> raw;
TimePoint tick;
};
Listener();
~Listener() {
loadThread->join();
}
Listener(const Address& address);
void init(
@@ -40,9 +46,13 @@ public:
bool bind();
bool bind(const Address& adress);
bool isBound() const;
void run();
void shutdown();
Async::Promise<Load> requestLoad(const Load& old);
Options options() const;
Address address() const;
@@ -56,10 +66,6 @@ private:
Flags<Options> options_;
std::shared_ptr<Handler> handler_;
std::atomic<bool> sh;
std::unique_ptr<std::thread> loadThread;
void dispatchPeer(const std::shared_ptr<Peer>& peer);
void runLoadThread();
};
......
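In case the arithmetic in Listener::requestLoad is easier to see in isolation: each worker's figure is the CPU time (ru_utime + ru_stime) it consumed between two samples, expressed as a percentage of the wall-clock tick between those samples, and the global value is the mean across workers. A tiny self-contained illustration of that formula, with made-up numbers (the helper name is not part of the commit):

#include <cstdio>

// Per-worker load over one sampling tick:
// (CPU microseconds consumed between samples * 100) / wall-clock microseconds between samples.
double workerLoadPct(double cpuDeltaUs, double tickUs) {
    return (cpuDeltaUs * 100.0) / tickUs;
}

int main() {
    // e.g. 250,000 us of CPU time over a 1,000,000 us tick reports as 25.0%
    std::printf("%.1f%%\n", workerLoadPct(250000.0, 1000000.0));
    return 0;
}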