Commit 377e3f4c authored by Jim Meyering's avatar Jim Meyering Committed by Viswanath Sivakumar

folly: fix many -Wsign-compare warning/errors reported by gcc-4.9

Summary:
With the upgrade to gcc-4.9, fbcode enables
-Wsign-compare by default.  That exposes nontrivial technical debt.
This is part of that clean-up.  Some of these changes fix the problem
where there's an "int" for-loop index, yet an unsigned limit.
The fix is to use an unsigned type when the limit is also unsigned.
I usually choose size_t because of the general recommendation (when
writing portable code) to avoid size-tied types like uint32_t and
uint64_t unless you have a very good reason to require them.

Test Plan:
Run this and note there are fewer errors than before:
fbconfig --platform-all=gcc-4.9-glibc-2.20 tao/server && fbmake dbgo

Also, run this, compiling and running tests with gcc-4.8.1.
Verify that there are no more failures than without this patch:
fbmake opt -k && fbmake runtests_opt
Here's the tail of that process (same with and without the patch):
Summary (total time 47.45s):
PASS: 1949
FAIL: 1
SKIP: 0
FATAL: 9
TIMEOUT: 0

Reviewed By: hans@fb.com

Subscribers: trunkagent, fugalh, njormrod, folly-diffs@

FB internal diff: D1766722

Tasks: 5941250

Signature: t1:1766722:1420762240:a8545648ddb4fd0b2adf0d3147a84409c0f706a8
parent 7f0c4bda
......@@ -84,7 +84,8 @@ class Arena {
size = roundUp(size);
bytesUsed_ += size;
if (LIKELY(end_ - ptr_ >= size)) {
assert(ptr_ <= end_);
if (LIKELY((size_t)(end_ - ptr_) >= size)) {
// Fast path: there's enough room in the current block
char* r = ptr_;
ptr_ += size;
......
......@@ -826,7 +826,7 @@ private:
template <class ForwardIterator>
void assign(ForwardIterator first, ForwardIterator last,
std::forward_iterator_tag) {
auto const newSize = std::distance(first, last);
const size_t newSize = std::distance(first, last);
if (newSize > capacity()) {
impl_.reset(newSize);
M_uninitialized_copy_e(first, last);
......
......@@ -358,14 +358,15 @@ class MPMCQueue : boost::noncopyable {
/// This is how many times we will spin before using FUTEX_WAIT when
/// the queue is full on enqueue, adaptively computed by occasionally
/// spinning for longer and smoothing with an exponential moving average
Atom<int> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;
Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;
/// The adaptive spin cutoff when the queue is empty on dequeue
Atom<int> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;
Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;
/// Alignment doesn't prevent false sharing at the end of the struct,
/// so fill out the last cache line
char padding_[detail::CacheLocality::kFalseSharingRange - sizeof(Atom<int>)];
char padding_[detail::CacheLocality::kFalseSharingRange -
sizeof(Atom<uint32_t>)];
/// We assign tickets in increasing order, but we don't want to
......@@ -602,13 +603,13 @@ struct TurnSequencer {
/// before blocking and will adjust spinCutoff based on the results,
/// otherwise it will spin for at most spinCutoff spins.
void waitForTurn(const uint32_t turn,
Atom<int>& spinCutoff,
Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff) noexcept {
int prevThresh = spinCutoff.load(std::memory_order_relaxed);
const int effectiveSpinCutoff =
uint32_t prevThresh = spinCutoff.load(std::memory_order_relaxed);
const uint32_t effectiveSpinCutoff =
updateSpinCutoff || prevThresh == 0 ? kMaxSpins : prevThresh;
int tries;
uint32_t tries;
const uint32_t sturn = turn << kTurnShift;
for (tries = 0; ; ++tries) {
uint32_t state = state_.load(std::memory_order_acquire);
......@@ -647,13 +648,14 @@ struct TurnSequencer {
if (updateSpinCutoff || prevThresh == 0) {
// if we hit kMaxSpins then spinning was pointless, so the right
// spinCutoff is kMinSpins
int target;
uint32_t target;
if (tries >= kMaxSpins) {
target = kMinSpins;
} else {
// to account for variations, we allow ourself to spin 2*N when
// we think that N is actually required in order to succeed
target = std::min(int{kMaxSpins}, std::max(int{kMinSpins}, tries * 2));
target = std::min<uint32_t>(kMaxSpins,
std::max<uint32_t>(kMinSpins, tries * 2));
}
if (prevThresh == 0) {
......@@ -759,7 +761,7 @@ struct SingleElementQueue {
typename = typename std::enable_if<
std::is_nothrow_constructible<T,Args...>::value>::type>
void enqueue(const uint32_t turn,
Atom<int>& spinCutoff,
Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff,
Args&&... args) noexcept {
sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
......@@ -775,7 +777,7 @@ struct SingleElementQueue {
boost::has_nothrow_constructor<T>::value) ||
std::is_nothrow_constructible<T,T&&>::value>::type>
void enqueue(const uint32_t turn,
Atom<int>& spinCutoff,
Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff,
T&& goner) noexcept {
if (std::is_nothrow_constructible<T,T&&>::value) {
......@@ -798,7 +800,7 @@ struct SingleElementQueue {
}
void dequeue(uint32_t turn,
Atom<int>& spinCutoff,
Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff,
T& elem) noexcept {
if (folly::IsRelocatable<T>::value) {
......
......@@ -179,14 +179,14 @@ T TimeseriesHistogram<T, TT, C>::rate(int level) const {
template <typename T, typename TT, typename C>
void TimeseriesHistogram<T, TT, C>::clear() {
  // Reset every bucket. The index is size_t so it matches the unsigned
  // value returned by getNumBuckets() and does not trigger -Wsign-compare.
  // (The diff rendering had left both the old `int` and new `size_t`
  // loop headers in place; only the unsigned version is kept.)
  for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
    buckets_.getByIndex(i).clear();
  }
}
template <typename T, typename TT, typename C>
void TimeseriesHistogram<T, TT, C>::update(TimeType now) {
  // Advance every bucket to `now`. The index is size_t so the comparison
  // against the unsigned bucket count is sign-correct (-Wsign-compare).
  // (The diff rendering had left both the old `int` and new `size_t`
  // loop headers in place; only the unsigned version is kept.)
  for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
    buckets_.getByIndex(i).update(now);
  }
}
......@@ -195,7 +195,7 @@ template <typename T, typename TT, typename C>
std::string TimeseriesHistogram<T, TT, C>::getString(int level) const {
std::string result;
for (int i = 0; i < buckets_.getNumBuckets(); i++) {
for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
if (i > 0) {
toAppend(",", &result);
}
......@@ -213,7 +213,7 @@ std::string TimeseriesHistogram<T, TT, C>::getString(TimeType start,
TimeType end) const {
std::string result;
for (int i = 0; i < buckets_.getNumBuckets(); i++) {
for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
if (i > 0) {
toAppend(",", &result);
}
......
......@@ -45,7 +45,7 @@ void run_mt_sequencer_thread(
int numOps,
uint32_t init,
TurnSequencer<Atom>& seq,
Atom<int>& spinThreshold,
Atom<uint32_t>& spinThreshold,
int& prev,
int i) {
for (int op = i; op < numOps; op += numThreads) {
......@@ -59,7 +59,7 @@ void run_mt_sequencer_thread(
template <template<typename> class Atom>
void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
TurnSequencer<Atom> seq(init);
Atom<int> spinThreshold(0);
Atom<uint32_t> spinThreshold(0);
int prev = -1;
std::vector<std::thread> threads(numThreads);
......
......@@ -27,7 +27,7 @@ class PriorityLifoSemMPMCQueue : public BlockingQueue<T> {
explicit PriorityLifoSemMPMCQueue(uint32_t numPriorities, size_t capacity) {
  // Build one MPMCQueue of the given capacity per priority level.
  // numPriorities is uint32_t, so the loop index must also be unsigned
  // to avoid a signed/unsigned comparison (-Wsign-compare).
  // (The diff rendering had left both the old `int` and new `uint32_t`
  // loop headers in place; only the unsigned version is kept.)
  CHECK(numPriorities > 0);
  queues_.reserve(numPriorities);
  for (uint32_t i = 0; i < numPriorities; i++) {
    queues_.push_back(MPMCQueue<T>(capacity));
  }
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment