Commit 69433ef3 authored by Nathan Bronson's avatar Nathan Bronson Committed by Dave Watson

make futexWaitUntil tolerant of invalid time_points

Summary:
futexWaitUntil could generate an invalid timespec when presented
with a time_point from before the epoch, which resulted in an EINVAL from
the futex syscall.  Debug builds crashed on this unexpected return value,
while release builds soldiered on.  This path happened to be exercised
by the unit test.  This diff fixes the unintentional deadline overflow
in the test, adds explicit testing of overflow behavior, and makes
futexWaitUntil handle invalid time_points in a sensible manner.

Test Plan:
1. new unit tests
2. fbmake runtests_dbg

Reviewed By: mssarang@fb.com

Subscribers: strager, njormrod, folly-diffs@

FB internal diff: D1747972

Tasks: 5853949

Signature: t1:1747972:1419382193:862c193a13428d96acb33c85f962f59203661f40
parent ef2fa20f
...@@ -59,6 +59,10 @@ struct timespec ...@@ -59,6 +59,10 @@ struct timespec
timeSpecFromTimePoint(time_point<Clock> absTime) timeSpecFromTimePoint(time_point<Clock> absTime)
{ {
auto duration = absTime.time_since_epoch(); auto duration = absTime.time_since_epoch();
if (duration.count() < 0) {
// kernel timespec_valid requires non-negative seconds and nanos in [0,1G)
duration = Clock::duration::zero();
}
auto secs = duration_cast<seconds>(duration); auto secs = duration_cast<seconds>(duration);
auto nanos = duration_cast<nanoseconds>(duration - secs); auto nanos = duration_cast<nanoseconds>(duration - secs);
struct timespec result = { secs.count(), nanos.count() }; struct timespec result = { secs.count(), nanos.count() };
...@@ -108,8 +112,11 @@ FutexResult nativeFutexWaitImpl(void* addr, ...@@ -108,8 +112,11 @@ FutexResult nativeFutexWaitImpl(void* addr,
return FutexResult::VALUE_CHANGED; return FutexResult::VALUE_CHANGED;
default: default:
assert(false); assert(false);
// EACCES, EFAULT, or EINVAL. All of these mean *addr points to // EINVAL, EACCES, or EFAULT. EINVAL means there was an invalid
// invalid memory (or I misunderstand the API). We can either // op (should be impossible) or an invalid timeout (should have
// been sanitized by timeSpecFromTimePoint). EACCES or EFAULT
// means *addr points to invalid memory, which is unlikely because
// the caller should have segfaulted already. We can either
// crash, or return a value that lets the process continue for // crash, or return a value that lets the process continue for
// a bit. We choose the latter. VALUE_CHANGED probably turns the // a bit. We choose the latter. VALUE_CHANGED probably turns the
// caller into a spin lock. // caller into a spin lock.
......
...@@ -64,9 +64,9 @@ void liveClockWaitUntilTests() { ...@@ -64,9 +64,9 @@ void liveClockWaitUntilTests() {
auto fp = &f; // workaround for t5336595 auto fp = &f; // workaround for t5336595
auto thrA = DSched::thread([fp,stress]{ auto thrA = DSched::thread([fp,stress]{
while (true) { while (true) {
auto deadline = time_point_cast<Duration>( const auto deadline = time_point_cast<Duration>(
Clock::now() + microseconds(1 << (stress % 20))); Clock::now() + microseconds(1 << (stress % 20)));
auto res = fp->futexWaitUntil(0, deadline); const auto res = fp->futexWaitUntil(0, deadline);
EXPECT_TRUE(res == FutexResult::TIMEDOUT || res == FutexResult::AWOKEN); EXPECT_TRUE(res == FutexResult::TIMEDOUT || res == FutexResult::AWOKEN);
if (res == FutexResult::AWOKEN) { if (res == FutexResult::AWOKEN) {
break; break;
...@@ -81,12 +81,26 @@ void liveClockWaitUntilTests() { ...@@ -81,12 +81,26 @@ void liveClockWaitUntilTests() {
DSched::join(thrA); DSched::join(thrA);
} }
auto start = Clock::now(); {
EXPECT_EQ(f.futexWaitUntil(0, start + milliseconds(100)), const auto start = Clock::now();
FutexResult::TIMEDOUT); const auto deadline = time_point_cast<Duration>(start + milliseconds(100));
LOG(INFO) << "Futex wait timed out after waiting for " EXPECT_EQ(f.futexWaitUntil(0, deadline), FutexResult::TIMEDOUT);
<< duration_cast<milliseconds>(Clock::now() - start).count() LOG(INFO) << "Futex wait timed out after waiting for "
<< "ms, should be ~100ms"; << duration_cast<milliseconds>(Clock::now() - start).count()
<< "ms using clock with " << Duration::period::den
<< " precision, should be ~100ms";
}
{
const auto start = Clock::now();
const auto deadline = time_point_cast<Duration>(
start - 2 * start.time_since_epoch());
EXPECT_EQ(f.futexWaitUntil(0, deadline), FutexResult::TIMEDOUT);
LOG(INFO) << "Futex wait with invalid deadline timed out after waiting for "
<< duration_cast<milliseconds>(Clock::now() - start).count()
<< "ms using clock with " << Duration::period::den
<< " precision, should be ~0ms";
}
} }
template <typename Clock> template <typename Clock>
...@@ -95,7 +109,7 @@ void deterministicAtomicWaitUntilTests() { ...@@ -95,7 +109,7 @@ void deterministicAtomicWaitUntilTests() {
// Futex wait must eventually fail with either FutexResult::TIMEDOUT or // Futex wait must eventually fail with either FutexResult::TIMEDOUT or
// FutexResult::INTERRUPTED // FutexResult::INTERRUPTED
auto res = f.futexWaitUntil(0, Clock::now() + milliseconds(100)); const auto res = f.futexWaitUntil(0, Clock::now() + milliseconds(100));
EXPECT_TRUE(res == FutexResult::TIMEDOUT || res == FutexResult::INTERRUPTED); EXPECT_TRUE(res == FutexResult::TIMEDOUT || res == FutexResult::INTERRUPTED);
} }
...@@ -104,8 +118,8 @@ void run_wait_until_tests() { ...@@ -104,8 +118,8 @@ void run_wait_until_tests() {
liveClockWaitUntilTests<Atom, system_clock, system_clock::duration>(); liveClockWaitUntilTests<Atom, system_clock, system_clock::duration>();
liveClockWaitUntilTests<Atom, steady_clock, steady_clock::duration>(); liveClockWaitUntilTests<Atom, steady_clock, steady_clock::duration>();
typedef duration<int64_t, pico> picoseconds; typedef duration<int64_t, std::ratio<1, 10000000>> decimicroseconds;
liveClockWaitUntilTests<Atom, system_clock, picoseconds>(); liveClockWaitUntilTests<Atom, system_clock, decimicroseconds>();
} }
template <> template <>
...@@ -124,7 +138,7 @@ void run_system_clock_test() { ...@@ -124,7 +138,7 @@ void run_system_clock_test() {
struct timespec ts; struct timespec ts;
const int maxIters = 1000; const int maxIters = 1000;
int iter = 0; int iter = 0;
uint64_t delta = 10000000 /* 10 ms */; const uint64_t delta = 10000000 /* 10 ms */;
/** The following loop is only to make the test more robust in the presence of /** The following loop is only to make the test more robust in the presence of
* clock adjustments that can occur. We just run the loop maxIter times and * clock adjustments that can occur. We just run the loop maxIter times and
...@@ -156,15 +170,15 @@ void run_steady_clock_test() { ...@@ -156,15 +170,15 @@ void run_steady_clock_test() {
* for the time_points */ * for the time_points */
EXPECT_TRUE(steady_clock::is_steady); EXPECT_TRUE(steady_clock::is_steady);
uint64_t A = duration_cast<nanoseconds>(steady_clock::now() const uint64_t A = duration_cast<nanoseconds>(steady_clock::now()
.time_since_epoch()).count(); .time_since_epoch()).count();
struct timespec ts; struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts); clock_gettime(CLOCK_MONOTONIC, &ts);
uint64_t B = ts.tv_sec * 1000000000ULL + ts.tv_nsec; const uint64_t B = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
uint64_t C = duration_cast<nanoseconds>(steady_clock::now() const uint64_t C = duration_cast<nanoseconds>(steady_clock::now()
.time_since_epoch()).count(); .time_since_epoch()).count();
EXPECT_TRUE(A <= B && B <= C); EXPECT_TRUE(A <= B && B <= C);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment