Commit 471b4b72 authored by James Sedgwick, committed by Anton Likhtarov

de-flake tests

Summary: Make these tests more serialized / event-based so they don't get flaky under high load.

Test Plan:
Ran under load. Caveat: I was not able to repro the flakiness @njormrod reported,
but by inspection these should be fine.

Reviewed By: njormrod@fb.com

Subscribers: fugalh, njormrod

FB internal diff: D1574640

Tasks: 5225808
parent cae6c97a
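
Note: the de-flaking in this diff hinges on replacing sleep/burn-based timing with an explicit folly::Baton handshake, so "a task is active right now" becomes an event rather than a guess. The standalone sketch below is a minimal illustration of that pattern, not part of the diff; it assumes folly is available and uses only Baton::post()/wait(), the same calls the new poolStats test uses. The include path <folly/Baton.h> matches folly of this era (newer releases have <folly/synchronization/Baton.h>).

// Minimal sketch (not from the diff): a Baton handshake makes the
// "worker is running right now" state deterministic instead of
// relying on burnMs()/sleep timing.
#include <folly/Baton.h>   // <folly/synchronization/Baton.h> in newer folly
#include <thread>

int main() {
  folly::Baton<> started, release;

  std::thread worker([&] {
    started.post();   // tell the main thread the task is definitely active
    release.wait();   // hold the task "active" until the check is done
  });

  started.wait();     // deterministic point: worker is active, no sleeps needed
  // ... in the test, pool stats are inspected at this point ...
  release.post();     // unblock the worker so it can finish
  worker.join();
  return 0;
}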
@@ -132,33 +132,24 @@ TEST(ThreadPoolExecutorTest, IOResizeUnderLoad) {
 template <class TPE>
 static void poolStats() {
-  {
-    TPE tpe(10);
-    for (int i = 0; i < 20; i++) {
-      tpe.add(burnMs(20));
-    }
-    burnMs(10)();
-    auto stats = tpe.getPoolStats();
-    EXPECT_EQ(10, stats.threadCount);
-    EXPECT_EQ(0, stats.idleThreadCount);
-    EXPECT_EQ(10, stats.activeThreadCount);
-    EXPECT_EQ(10, stats.pendingTaskCount);
-    EXPECT_EQ(20, stats.totalTaskCount);
-  }
-
-  {
-    TPE tpe(10);
-    for (int i = 0; i < 5; i++) {
-      tpe.add(burnMs(20));
-    }
-    burnMs(10)();
-    auto stats = tpe.getPoolStats();
-    EXPECT_EQ(10, stats.threadCount);
-    EXPECT_EQ(5, stats.idleThreadCount);
-    EXPECT_EQ(5, stats.activeThreadCount);
-    EXPECT_EQ(0, stats.pendingTaskCount);
-    EXPECT_EQ(5, stats.totalTaskCount);
-  }
+  folly::Baton<> startBaton, endBaton;
+  TPE tpe(1);
+  auto stats = tpe.getPoolStats();
+  EXPECT_EQ(1, stats.threadCount);
+  EXPECT_EQ(1, stats.idleThreadCount);
+  EXPECT_EQ(0, stats.activeThreadCount);
+  EXPECT_EQ(0, stats.pendingTaskCount);
+  EXPECT_EQ(0, stats.totalTaskCount);
+  tpe.add([&](){ startBaton.post(); endBaton.wait(); });
+  tpe.add([&](){});
+  startBaton.wait();
+  stats = tpe.getPoolStats();
+  EXPECT_EQ(1, stats.threadCount);
+  EXPECT_EQ(0, stats.idleThreadCount);
+  EXPECT_EQ(1, stats.activeThreadCount);
+  EXPECT_EQ(1, stats.pendingTaskCount);
+  EXPECT_EQ(2, stats.totalTaskCount);
+  endBaton.post();
 }
 
 TEST(ThreadPoolExecutorTest, CPUPoolStats) {

@@ -171,27 +162,20 @@ TEST(ThreadPoolExecutorTest, IOPoolStats) {
 template <class TPE>
 static void taskStats() {
-  TPE tpe(10);
+  TPE tpe(1);
   std::atomic<int> c(0);
   tpe.subscribeToTaskStats(Observer<ThreadPoolExecutor::TaskStats>::create(
       [&] (ThreadPoolExecutor::TaskStats stats) {
         int i = c++;
-        if (i < 10) {
-          EXPECT_GE(milliseconds(10), stats.waitTime);
-          EXPECT_LE(milliseconds(20), stats.runTime);
-        } else {
-          EXPECT_LE(milliseconds(10), stats.waitTime);
-          EXPECT_LE(milliseconds(10), stats.runTime);
+        EXPECT_LT(milliseconds(0), stats.runTime);
+        if (i == 1) {
+          EXPECT_LT(milliseconds(0), stats.waitTime);
         }
       }));
-  for (int i = 0; i < 10; i++) {
-    tpe.add(burnMs(20));
-  }
-  for (int i = 0; i < 10; i++) {
-    tpe.add(burnMs(10));
-  }
+  tpe.add(burnMs(10));
+  tpe.add(burnMs(10));
   tpe.join();
-  EXPECT_EQ(20, c);
+  EXPECT_EQ(2, c);
 }
 
 TEST(ThreadPoolExecutorTest, CPUTaskStats) {
...