Commit 377e3f4c authored by Jim Meyering's avatar Jim Meyering Committed by Viswanath Sivakumar

folly: fix many -Wsign-compare warning/errors reported by gcc-4.9

Summary:
With the upgrade to gcc-4.9, fbcode enables
-Wsign-compare by default.  That exposes nontrivial technical debt.
This is part of that clean-up.  Some of these changes fix the problem
where there's an "int" for-loop index, yet an unsigned limit.
The fix is to use an unsigned type when the limit is also unsigned.
I usually choose size_t because of the general recommendation (when
writing portable code) to avoid size-tied types like uint32_t and
uint64_t unless you have a very good reason to require them.

Test Plan:
Run this and note there are fewer errors than before:
fbconfig --platform-all=gcc-4.9-glibc-2.20 tao/server && fbmake dbgo

Also, run this, compiling and running tests with gcc-4.8.1.
Verify that there are no more failures than without this patch:
fbmake opt -k && fbmake runtests_opt
Here's the tail of that process (same with and without the patch):
Summary (total time 47.45s):
PASS: 1949
FAIL: 1
SKIP: 0
FATAL: 9
TIMEOUT: 0

Reviewed By: hans@fb.com

Subscribers: trunkagent, fugalh, njormrod, folly-diffs@

FB internal diff: D1766722

Tasks: 5941250

Signature: t1:1766722:1420762240:a8545648ddb4fd0b2adf0d3147a84409c0f706a8
parent 7f0c4bda
...@@ -84,7 +84,8 @@ class Arena { ...@@ -84,7 +84,8 @@ class Arena {
size = roundUp(size); size = roundUp(size);
bytesUsed_ += size; bytesUsed_ += size;
if (LIKELY(end_ - ptr_ >= size)) { assert(ptr_ <= end_);
if (LIKELY((size_t)(end_ - ptr_) >= size)) {
// Fast path: there's enough room in the current block // Fast path: there's enough room in the current block
char* r = ptr_; char* r = ptr_;
ptr_ += size; ptr_ += size;
......
...@@ -826,7 +826,7 @@ private: ...@@ -826,7 +826,7 @@ private:
template <class ForwardIterator> template <class ForwardIterator>
void assign(ForwardIterator first, ForwardIterator last, void assign(ForwardIterator first, ForwardIterator last,
std::forward_iterator_tag) { std::forward_iterator_tag) {
auto const newSize = std::distance(first, last); const size_t newSize = std::distance(first, last);
if (newSize > capacity()) { if (newSize > capacity()) {
impl_.reset(newSize); impl_.reset(newSize);
M_uninitialized_copy_e(first, last); M_uninitialized_copy_e(first, last);
......
...@@ -358,14 +358,15 @@ class MPMCQueue : boost::noncopyable { ...@@ -358,14 +358,15 @@ class MPMCQueue : boost::noncopyable {
/// This is how many times we will spin before using FUTEX_WAIT when /// This is how many times we will spin before using FUTEX_WAIT when
/// the queue is full on enqueue, adaptively computed by occasionally /// the queue is full on enqueue, adaptively computed by occasionally
/// spinning for longer and smoothing with an exponential moving average /// spinning for longer and smoothing with an exponential moving average
Atom<int> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_; Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushSpinCutoff_;
/// The adaptive spin cutoff when the queue is empty on dequeue /// The adaptive spin cutoff when the queue is empty on dequeue
Atom<int> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_; Atom<uint32_t> FOLLY_ALIGN_TO_AVOID_FALSE_SHARING popSpinCutoff_;
/// Alignment doesn't prevent false sharing at the end of the struct, /// Alignment doesn't prevent false sharing at the end of the struct,
/// so fill out the last cache line /// so fill out the last cache line
char padding_[detail::CacheLocality::kFalseSharingRange - sizeof(Atom<int>)]; char padding_[detail::CacheLocality::kFalseSharingRange -
sizeof(Atom<uint32_t>)];
/// We assign tickets in increasing order, but we don't want to /// We assign tickets in increasing order, but we don't want to
...@@ -602,13 +603,13 @@ struct TurnSequencer { ...@@ -602,13 +603,13 @@ struct TurnSequencer {
/// before blocking and will adjust spinCutoff based on the results, /// before blocking and will adjust spinCutoff based on the results,
/// otherwise it will spin for at most spinCutoff spins. /// otherwise it will spin for at most spinCutoff spins.
void waitForTurn(const uint32_t turn, void waitForTurn(const uint32_t turn,
Atom<int>& spinCutoff, Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff) noexcept { const bool updateSpinCutoff) noexcept {
int prevThresh = spinCutoff.load(std::memory_order_relaxed); uint32_t prevThresh = spinCutoff.load(std::memory_order_relaxed);
const int effectiveSpinCutoff = const uint32_t effectiveSpinCutoff =
updateSpinCutoff || prevThresh == 0 ? kMaxSpins : prevThresh; updateSpinCutoff || prevThresh == 0 ? kMaxSpins : prevThresh;
int tries;
uint32_t tries;
const uint32_t sturn = turn << kTurnShift; const uint32_t sturn = turn << kTurnShift;
for (tries = 0; ; ++tries) { for (tries = 0; ; ++tries) {
uint32_t state = state_.load(std::memory_order_acquire); uint32_t state = state_.load(std::memory_order_acquire);
...@@ -647,13 +648,14 @@ struct TurnSequencer { ...@@ -647,13 +648,14 @@ struct TurnSequencer {
if (updateSpinCutoff || prevThresh == 0) { if (updateSpinCutoff || prevThresh == 0) {
// if we hit kMaxSpins then spinning was pointless, so the right // if we hit kMaxSpins then spinning was pointless, so the right
// spinCutoff is kMinSpins // spinCutoff is kMinSpins
int target; uint32_t target;
if (tries >= kMaxSpins) { if (tries >= kMaxSpins) {
target = kMinSpins; target = kMinSpins;
} else { } else {
// to account for variations, we allow ourself to spin 2*N when // to account for variations, we allow ourself to spin 2*N when
// we think that N is actually required in order to succeed // we think that N is actually required in order to succeed
target = std::min(int{kMaxSpins}, std::max(int{kMinSpins}, tries * 2)); target = std::min<uint32_t>(kMaxSpins,
std::max<uint32_t>(kMinSpins, tries * 2));
} }
if (prevThresh == 0) { if (prevThresh == 0) {
...@@ -759,7 +761,7 @@ struct SingleElementQueue { ...@@ -759,7 +761,7 @@ struct SingleElementQueue {
typename = typename std::enable_if< typename = typename std::enable_if<
std::is_nothrow_constructible<T,Args...>::value>::type> std::is_nothrow_constructible<T,Args...>::value>::type>
void enqueue(const uint32_t turn, void enqueue(const uint32_t turn,
Atom<int>& spinCutoff, Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff, const bool updateSpinCutoff,
Args&&... args) noexcept { Args&&... args) noexcept {
sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff); sequencer_.waitForTurn(turn * 2, spinCutoff, updateSpinCutoff);
...@@ -775,7 +777,7 @@ struct SingleElementQueue { ...@@ -775,7 +777,7 @@ struct SingleElementQueue {
boost::has_nothrow_constructor<T>::value) || boost::has_nothrow_constructor<T>::value) ||
std::is_nothrow_constructible<T,T&&>::value>::type> std::is_nothrow_constructible<T,T&&>::value>::type>
void enqueue(const uint32_t turn, void enqueue(const uint32_t turn,
Atom<int>& spinCutoff, Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff, const bool updateSpinCutoff,
T&& goner) noexcept { T&& goner) noexcept {
if (std::is_nothrow_constructible<T,T&&>::value) { if (std::is_nothrow_constructible<T,T&&>::value) {
...@@ -798,7 +800,7 @@ struct SingleElementQueue { ...@@ -798,7 +800,7 @@ struct SingleElementQueue {
} }
void dequeue(uint32_t turn, void dequeue(uint32_t turn,
Atom<int>& spinCutoff, Atom<uint32_t>& spinCutoff,
const bool updateSpinCutoff, const bool updateSpinCutoff,
T& elem) noexcept { T& elem) noexcept {
if (folly::IsRelocatable<T>::value) { if (folly::IsRelocatable<T>::value) {
......
...@@ -179,14 +179,14 @@ T TimeseriesHistogram<T, TT, C>::rate(int level) const { ...@@ -179,14 +179,14 @@ T TimeseriesHistogram<T, TT, C>::rate(int level) const {
template <typename T, typename TT, typename C> template <typename T, typename TT, typename C>
void TimeseriesHistogram<T, TT, C>::clear() { void TimeseriesHistogram<T, TT, C>::clear() {
for (int i = 0; i < buckets_.getNumBuckets(); i++) { for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
buckets_.getByIndex(i).clear(); buckets_.getByIndex(i).clear();
} }
} }
template <typename T, typename TT, typename C> template <typename T, typename TT, typename C>
void TimeseriesHistogram<T, TT, C>::update(TimeType now) { void TimeseriesHistogram<T, TT, C>::update(TimeType now) {
for (int i = 0; i < buckets_.getNumBuckets(); i++) { for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
buckets_.getByIndex(i).update(now); buckets_.getByIndex(i).update(now);
} }
} }
...@@ -195,7 +195,7 @@ template <typename T, typename TT, typename C> ...@@ -195,7 +195,7 @@ template <typename T, typename TT, typename C>
std::string TimeseriesHistogram<T, TT, C>::getString(int level) const { std::string TimeseriesHistogram<T, TT, C>::getString(int level) const {
std::string result; std::string result;
for (int i = 0; i < buckets_.getNumBuckets(); i++) { for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
if (i > 0) { if (i > 0) {
toAppend(",", &result); toAppend(",", &result);
} }
...@@ -213,7 +213,7 @@ std::string TimeseriesHistogram<T, TT, C>::getString(TimeType start, ...@@ -213,7 +213,7 @@ std::string TimeseriesHistogram<T, TT, C>::getString(TimeType start,
TimeType end) const { TimeType end) const {
std::string result; std::string result;
for (int i = 0; i < buckets_.getNumBuckets(); i++) { for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
if (i > 0) { if (i > 0) {
toAppend(",", &result); toAppend(",", &result);
} }
......
...@@ -45,7 +45,7 @@ void run_mt_sequencer_thread( ...@@ -45,7 +45,7 @@ void run_mt_sequencer_thread(
int numOps, int numOps,
uint32_t init, uint32_t init,
TurnSequencer<Atom>& seq, TurnSequencer<Atom>& seq,
Atom<int>& spinThreshold, Atom<uint32_t>& spinThreshold,
int& prev, int& prev,
int i) { int i) {
for (int op = i; op < numOps; op += numThreads) { for (int op = i; op < numOps; op += numThreads) {
...@@ -59,7 +59,7 @@ void run_mt_sequencer_thread( ...@@ -59,7 +59,7 @@ void run_mt_sequencer_thread(
template <template<typename> class Atom> template <template<typename> class Atom>
void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) { void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
TurnSequencer<Atom> seq(init); TurnSequencer<Atom> seq(init);
Atom<int> spinThreshold(0); Atom<uint32_t> spinThreshold(0);
int prev = -1; int prev = -1;
std::vector<std::thread> threads(numThreads); std::vector<std::thread> threads(numThreads);
......
...@@ -27,7 +27,7 @@ class PriorityLifoSemMPMCQueue : public BlockingQueue<T> { ...@@ -27,7 +27,7 @@ class PriorityLifoSemMPMCQueue : public BlockingQueue<T> {
explicit PriorityLifoSemMPMCQueue(uint32_t numPriorities, size_t capacity) { explicit PriorityLifoSemMPMCQueue(uint32_t numPriorities, size_t capacity) {
CHECK(numPriorities > 0); CHECK(numPriorities > 0);
queues_.reserve(numPriorities); queues_.reserve(numPriorities);
for (int i = 0; i < numPriorities; i++) { for (uint32_t i = 0; i < numPriorities; i++) {
queues_.push_back(MPMCQueue<T>(capacity)); queues_.push_back(MPMCQueue<T>(capacity));
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment