Commit 047e9e39 authored by Christopher Dykes's avatar Christopher Dykes Committed by Facebook Github Bot

Begin making folly compile cleanly with a few of MSVC's sign mismatch warnings enabled

Summary:
This makes the changes required to allow folly to compile cleanly with the sign/unsigned mismatch warnings 4388 and 4389, as well as with warnings 4804 and 4805, which are about comparisons between scalars and bool.
Most of the changes in this diff are to `DCHECK_*` calls. These translate into a templated call that performs the comparison internally based on the inferred type of each argument; for a literal `0` that type is `int`, so the warnings were generated when the comparison was done against an unsigned value.

Reviewed By: yfeldblum

Differential Revision: D4253427

fbshipit-source-id: cd17973a78e948a62c886a2959f9abf40a69f9f5
parent 853adb4f
...@@ -1175,6 +1175,19 @@ parseTo(StringPiece src, Tgt& out) { ...@@ -1175,6 +1175,19 @@ parseTo(StringPiece src, Tgt& out) {
namespace detail { namespace detail {
/**
* Bool to integral doesn't need any special checks, and this
* overload means we aren't trying to see if a bool is less than
* an integer.
*/
// Overload chosen when the source is exactly `bool` and the target is any
// non-bool integral type. Per the comment above, a bool always fits in an
// integral target, so no range checking is required; per the commit summary,
// the explicit ternary also sidesteps MSVC's bool/scalar comparison warnings
// (4804/4805) that the generic integral-to-integral path would trigger.
template <class Tgt>
typename std::enable_if<
!std::is_same<Tgt, bool>::value && std::is_integral<Tgt>::value,
Expected<Tgt, ConversionCode>>::type
convertTo(const bool& value) noexcept {
// true -> 1, false -> 0; the result implicitly wraps into the
// success state of the returned Expected.
return static_cast<Tgt>(value ? 1 : 0);
}
/** /**
* Checked conversion from integral to integral. The checks are only * Checked conversion from integral to integral. The checks are only
* performed when meaningful, e.g. conversion from int to long goes * performed when meaningful, e.g. conversion from int to long goes
......
...@@ -309,7 +309,7 @@ MemoryMapping::~MemoryMapping() { ...@@ -309,7 +309,7 @@ MemoryMapping::~MemoryMapping() {
void MemoryMapping::advise(int advice) const { advise(advice, 0, mapLength_); } void MemoryMapping::advise(int advice) const { advise(advice, 0, mapLength_); }
void MemoryMapping::advise(int advice, size_t offset, size_t length) const { void MemoryMapping::advise(int advice, size_t offset, size_t length) const {
CHECK_LE(offset + length, mapLength_) CHECK_LE(offset + length, size_t(mapLength_))
<< " offset: " << offset << " offset: " << offset
<< " length: " << length << " length: " << length
<< " mapLength_: " << mapLength_; << " mapLength_: " << mapLength_;
......
...@@ -540,7 +540,7 @@ size_t hexDumpLine(const void* ptr, size_t offset, size_t size, ...@@ -540,7 +540,7 @@ size_t hexDumpLine(const void* ptr, size_t offset, size_t size,
} }
line.append(16 - n, ' '); line.append(16 - n, ' ');
line.push_back('|'); line.push_back('|');
DCHECK_EQ(line.size(), 78); DCHECK_EQ(line.size(), 78u);
return n; return n;
} }
......
...@@ -423,11 +423,13 @@ struct is_negative_impl<T, false> { ...@@ -423,11 +423,13 @@ struct is_negative_impl<T, false> {
// inside what are really static ifs (not executed because of the templated // inside what are really static ifs (not executed because of the templated
// types) that violate -Wsign-compare and/or -Wbool-compare so suppress them // types) that violate -Wsign-compare and/or -Wbool-compare so suppress them
// in order to not prevent all calling code from using it. // in order to not prevent all calling code from using it.
#pragma GCC diagnostic push FOLLY_PUSH_WARNING
#pragma GCC diagnostic ignored "-Wsign-compare" FOLLY_GCC_DISABLE_WARNING(sign-compare)
#if __GNUC_PREREQ(5, 0) #if __GNUC_PREREQ(5, 0)
#pragma GCC diagnostic ignored "-Wbool-compare" FOLLY_GCC_DISABLE_WARNING(bool-compare)
#endif #endif
FOLLY_MSVC_DISABLE_WARNING(4388) // sign-compare
FOLLY_MSVC_DISABLE_WARNING(4804) // bool-compare
template <typename RHS, RHS rhs, typename LHS> template <typename RHS, RHS rhs, typename LHS>
bool less_than_impl(LHS const lhs) { bool less_than_impl(LHS const lhs) {
...@@ -445,7 +447,7 @@ bool greater_than_impl(LHS const lhs) { ...@@ -445,7 +447,7 @@ bool greater_than_impl(LHS const lhs) {
lhs > rhs; lhs > rhs;
} }
#pragma GCC diagnostic pop FOLLY_POP_WARNING
} // namespace detail { } // namespace detail {
......
...@@ -88,9 +88,9 @@ static size_t qfind_first_byte_of_needles16(const StringPieceLite haystack, ...@@ -88,9 +88,9 @@ static size_t qfind_first_byte_of_needles16(const StringPieceLite haystack,
// helper method for case where needles.size() <= 16 // helper method for case where needles.size() <= 16
size_t qfind_first_byte_of_needles16(const StringPieceLite haystack, size_t qfind_first_byte_of_needles16(const StringPieceLite haystack,
const StringPieceLite needles) { const StringPieceLite needles) {
DCHECK_GT(haystack.size(), 0); DCHECK_GT(haystack.size(), 0u);
DCHECK_GT(needles.size(), 0); DCHECK_GT(needles.size(), 0u);
DCHECK_LE(needles.size(), 16); DCHECK_LE(needles.size(), 16u);
if ((needles.size() <= 2 && haystack.size() >= 256) || if ((needles.size() <= 2 && haystack.size() >= 256) ||
// must bail if we can't even SSE-load a single segment of haystack // must bail if we can't even SSE-load a single segment of haystack
(haystack.size() < 16 && (haystack.size() < 16 &&
...@@ -142,7 +142,7 @@ template <bool HAYSTACK_ALIGNED> ...@@ -142,7 +142,7 @@ template <bool HAYSTACK_ALIGNED>
size_t scanHaystackBlock(const StringPieceLite haystack, size_t scanHaystackBlock(const StringPieceLite haystack,
const StringPieceLite needles, const StringPieceLite needles,
uint64_t blockStartIdx) { uint64_t blockStartIdx) {
DCHECK_GT(needles.size(), 16); // should handled by *needles16() method DCHECK_GT(needles.size(), 16u); // should handled by *needles16() method
DCHECK(blockStartIdx + 16 <= haystack.size() || DCHECK(blockStartIdx + 16 <= haystack.size() ||
(page_for(haystack.data() + blockStartIdx) == (page_for(haystack.data() + blockStartIdx) ==
page_for(haystack.data() + blockStartIdx + 15))); page_for(haystack.data() + blockStartIdx + 15)));
......
...@@ -34,7 +34,7 @@ void StaticMetaBase::onThreadExit(void* ptr) { ...@@ -34,7 +34,7 @@ void StaticMetaBase::onThreadExit(void* ptr) {
#else #else
std::unique_ptr<ThreadEntry> threadEntry(static_cast<ThreadEntry*>(ptr)); std::unique_ptr<ThreadEntry> threadEntry(static_cast<ThreadEntry*>(ptr));
#endif #endif
DCHECK_GT(threadEntry->elementsCapacity, 0); DCHECK_GT(threadEntry->elementsCapacity, 0u);
auto& meta = *threadEntry->meta; auto& meta = *threadEntry->meta;
// Make sure this ThreadEntry is available if ThreadLocal A is accessed in // Make sure this ThreadEntry is available if ThreadLocal A is accessed in
......
...@@ -108,7 +108,7 @@ void DynamicParser::ParserStack::Pop::operator()() noexcept { ...@@ -108,7 +108,7 @@ void DynamicParser::ParserStack::Pop::operator()() noexcept {
stackPtr_->value_ = value_; stackPtr_->value_ = value_;
if (stackPtr_->unmaterializedSubErrorKeys_.empty()) { if (stackPtr_->unmaterializedSubErrorKeys_.empty()) {
// There should be the current error, and the root. // There should be the current error, and the root.
CHECK_GE(stackPtr_->subErrors_.size(), 2) CHECK_GE(stackPtr_->subErrors_.size(), 2u)
<< "Internal bug: out of suberrors"; << "Internal bug: out of suberrors";
stackPtr_->subErrors_.pop_back(); stackPtr_->subErrors_.pop_back();
} else { } else {
......
...@@ -44,11 +44,11 @@ struct Default { ...@@ -44,11 +44,11 @@ struct Default {
return __builtin_popcountll(value); return __builtin_popcountll(value);
} }
static FOLLY_ALWAYS_INLINE int ctz(uint64_t value) { static FOLLY_ALWAYS_INLINE int ctz(uint64_t value) {
DCHECK_GT(value, 0); DCHECK_GT(value, 0u);
return __builtin_ctzll(value); return __builtin_ctzll(value);
} }
static FOLLY_ALWAYS_INLINE int clz(uint64_t value) { static FOLLY_ALWAYS_INLINE int clz(uint64_t value) {
DCHECK_GT(value, 0); DCHECK_GT(value, 0u);
return __builtin_clzll(value); return __builtin_clzll(value);
} }
static FOLLY_ALWAYS_INLINE uint64_t blsr(uint64_t value) { static FOLLY_ALWAYS_INLINE uint64_t blsr(uint64_t value) {
......
...@@ -211,15 +211,15 @@ std::unique_ptr<folly::IOBuf> toBserIOBuf(folly::dynamic const& dyn, ...@@ -211,15 +211,15 @@ std::unique_ptr<folly::IOBuf> toBserIOBuf(folly::dynamic const& dyn,
auto magicptr = hdrbuf + sizeof(kMagic); auto magicptr = hdrbuf + sizeof(kMagic);
auto lenptr = hdrbuf + hdrlen; auto lenptr = hdrbuf + hdrlen;
if (len > std::numeric_limits<int32_t>::max()) { if (len > uint64_t(std::numeric_limits<int32_t>::max())) {
*magicptr = (int8_t)BserType::Int64; *magicptr = (int8_t)BserType::Int64;
*(int64_t*)lenptr = (int64_t)len; *(int64_t*)lenptr = (int64_t)len;
hdrlen += sizeof(int64_t); hdrlen += sizeof(int64_t);
} else if (len > std::numeric_limits<int16_t>::max()) { } else if (len > uint64_t(std::numeric_limits<int16_t>::max())) {
*magicptr = (int8_t)BserType::Int32; *magicptr = (int8_t)BserType::Int32;
*(int32_t*)lenptr = (int32_t)len; *(int32_t*)lenptr = (int32_t)len;
hdrlen += sizeof(int32_t); hdrlen += sizeof(int32_t);
} else if (len > std::numeric_limits<int8_t>::max()) { } else if (len > uint64_t(std::numeric_limits<int8_t>::max())) {
*magicptr = (int8_t)BserType::Int16; *magicptr = (int8_t)BserType::Int16;
*(int16_t*)lenptr = (int16_t)len; *(int16_t*)lenptr = (int16_t)len;
hdrlen += sizeof(int16_t); hdrlen += sizeof(int16_t);
......
...@@ -40,7 +40,7 @@ class GraphCycleDetector { ...@@ -40,7 +40,7 @@ class GraphCycleDetector {
} }
auto& nodes = edges_[from]; auto& nodes = edges_[from];
DCHECK_EQ(0, nodes.count(to)); DCHECK_EQ(nodes.count(to), 0u);
nodes.insert(to); nodes.insert(to);
return true; return true;
......
...@@ -38,8 +38,8 @@ std::thread::id localThreadId() { ...@@ -38,8 +38,8 @@ std::thread::id localThreadId() {
/* Size of the region from p + nBytes down to the last non-magic value */ /* Size of the region from p + nBytes down to the last non-magic value */
static size_t nonMagicInBytes(unsigned char* stackLimit, size_t stackSize) { static size_t nonMagicInBytes(unsigned char* stackLimit, size_t stackSize) {
CHECK_EQ(0, reinterpret_cast<intptr_t>(stackLimit) % sizeof(uint64_t)); CHECK_EQ(reinterpret_cast<intptr_t>(stackLimit) % sizeof(uint64_t), 0u);
CHECK_EQ(0, stackSize % sizeof(uint64_t)); CHECK_EQ(stackSize % sizeof(uint64_t), 0u);
uint64_t* begin = reinterpret_cast<uint64_t*>(stackLimit); uint64_t* begin = reinterpret_cast<uint64_t*>(stackLimit);
uint64_t* end = reinterpret_cast<uint64_t*>(stackLimit + stackSize); uint64_t* end = reinterpret_cast<uint64_t*>(stackLimit + stackSize);
...@@ -82,8 +82,8 @@ void Fiber::init(bool recordStackUsed) { ...@@ -82,8 +82,8 @@ void Fiber::init(bool recordStackUsed) {
recordStackUsed_ = recordStackUsed; recordStackUsed_ = recordStackUsed;
if (UNLIKELY(recordStackUsed_ && !stackFilledWithMagic_)) { if (UNLIKELY(recordStackUsed_ && !stackFilledWithMagic_)) {
CHECK_EQ( CHECK_EQ(
0, reinterpret_cast<intptr_t>(fiberStackLimit_) % sizeof(uint64_t)); reinterpret_cast<intptr_t>(fiberStackLimit_) % sizeof(uint64_t), 0u);
CHECK_EQ(0, fiberStackSize_ % sizeof(uint64_t)); CHECK_EQ(fiberStackSize_ % sizeof(uint64_t), 0u);
std::fill( std::fill(
reinterpret_cast<uint64_t*>(fiberStackLimit_), reinterpret_cast<uint64_t*>(fiberStackLimit_),
reinterpret_cast<uint64_t*>(fiberStackLimit_ + fiberStackSize_), reinterpret_cast<uint64_t*>(fiberStackLimit_ + fiberStackSize_),
......
...@@ -76,7 +76,7 @@ class GlobalCache { ...@@ -76,7 +76,7 @@ class GlobalCache {
std::unique_ptr<FiberManager> eraseImpl(EventBaseT& evb) { std::unique_ptr<FiberManager> eraseImpl(EventBaseT& evb) {
std::lock_guard<std::mutex> lg(mutex_); std::lock_guard<std::mutex> lg(mutex_);
DCHECK_EQ(1, map_.count(&evb)); DCHECK_EQ(map_.count(&evb), 1u);
auto ret = std::move(map_[&evb]); auto ret = std::move(map_[&evb]);
map_.erase(&evb); map_.erase(&evb);
......
...@@ -25,7 +25,7 @@ Barrier::Barrier(uint32_t n) ...@@ -25,7 +25,7 @@ Barrier::Barrier(uint32_t n)
Barrier::~Barrier() { Barrier::~Barrier() {
auto block = controlBlock_.load(std::memory_order_relaxed); auto block = controlBlock_.load(std::memory_order_relaxed);
auto prev = block->valueAndReaderCount.load(std::memory_order_relaxed); auto prev = block->valueAndReaderCount.load(std::memory_order_relaxed);
DCHECK_EQ(prev >> kReaderShift, 0); DCHECK_EQ(prev >> kReaderShift, 0u);
auto val = prev & kValueMask; auto val = prev & kValueMask;
auto p = promises(block); auto p = promises(block);
......
...@@ -995,7 +995,7 @@ bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const { ...@@ -995,7 +995,7 @@ bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const {
return false; return false;
} }
size_t n = std::min(ba.size(), bb.size()); size_t n = std::min(ba.size(), bb.size());
DCHECK_GT(n, 0); DCHECK_GT(n, 0u);
if (memcmp(ba.data(), bb.data(), n)) { if (memcmp(ba.data(), bb.data(), n)) {
return false; return false;
} }
......
...@@ -1331,8 +1331,8 @@ class IOBuf { ...@@ -1331,8 +1331,8 @@ class IOBuf {
static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags, static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags,
SharedInfo* info) { SharedInfo* info) {
uintptr_t uinfo = reinterpret_cast<uintptr_t>(info); uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
DCHECK_EQ(flags & ~kFlagMask, 0); DCHECK_EQ(flags & ~kFlagMask, 0u);
DCHECK_EQ(uinfo & kFlagMask, 0); DCHECK_EQ(uinfo & kFlagMask, 0u);
return flags | uinfo; return flags | uinfo;
} }
...@@ -1342,7 +1342,7 @@ class IOBuf { ...@@ -1342,7 +1342,7 @@ class IOBuf {
inline void setSharedInfo(SharedInfo* info) { inline void setSharedInfo(SharedInfo* info) {
uintptr_t uinfo = reinterpret_cast<uintptr_t>(info); uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
DCHECK_EQ(uinfo & kFlagMask, 0); DCHECK_EQ(uinfo & kFlagMask, 0u);
flagsAndSharedInfo_ = (flagsAndSharedInfo_ & kFlagMask) | uinfo; flagsAndSharedInfo_ = (flagsAndSharedInfo_ & kFlagMask) | uinfo;
} }
...@@ -1352,12 +1352,12 @@ class IOBuf { ...@@ -1352,12 +1352,12 @@ class IOBuf {
// flags_ are changed from const methods // flags_ are changed from const methods
inline void setFlags(uintptr_t flags) const { inline void setFlags(uintptr_t flags) const {
DCHECK_EQ(flags & ~kFlagMask, 0); DCHECK_EQ(flags & ~kFlagMask, 0u);
flagsAndSharedInfo_ |= flags; flagsAndSharedInfo_ |= flags;
} }
inline void clearFlags(uintptr_t flags) const { inline void clearFlags(uintptr_t flags) const {
DCHECK_EQ(flags & ~kFlagMask, 0); DCHECK_EQ(flags & ~kFlagMask, 0u);
flagsAndSharedInfo_ &= ~flags; flagsAndSharedInfo_ &= ~flags;
} }
......
...@@ -337,7 +337,7 @@ ssize_t sendmsg(int s, const struct msghdr* message, int fl) { ...@@ -337,7 +337,7 @@ ssize_t sendmsg(int s, const struct msghdr* message, int fl) {
(int)message->msg_iov[i].iov_len, (int)message->msg_iov[i].iov_len,
message->msg_flags); message->msg_flags);
} }
if (r == -1 || r != message->msg_iov[i].iov_len) { if (r == -1 || size_t(r) != message->msg_iov[i].iov_len) {
errno = translate_wsa_error(WSAGetLastError()); errno = translate_wsa_error(WSAGetLastError());
if (WSAGetLastError() == WSAEWOULDBLOCK && bytesSent > 0) { if (WSAGetLastError() == WSAEWOULDBLOCK && bytesSent > 0) {
return bytesSent; return bytesSent;
......
...@@ -95,7 +95,7 @@ static ssize_t doVecOperation(int fd, const iovec* iov, int count) { ...@@ -95,7 +95,7 @@ static ssize_t doVecOperation(int fd, const iovec* iov, int count) {
return -1; return -1;
} }
if (res == curLen) { if (size_t(res) == curLen) {
curIov++; curIov++;
if (curIov < count) { if (curIov < count) {
curBase = iov[curIov].iov_base; curBase = iov[curIov].iov_base;
......
...@@ -27,7 +27,7 @@ MultiLevelTimeSeries<VT, CT>::MultiLevelTimeSeries( ...@@ -27,7 +27,7 @@ MultiLevelTimeSeries<VT, CT>::MultiLevelTimeSeries(
size_t nLevels, size_t nLevels,
const Duration levelDurations[]) const Duration levelDurations[])
: cachedTime_(), cachedSum_(0), cachedCount_(0) { : cachedTime_(), cachedSum_(0), cachedCount_(0) {
CHECK_GT(nLevels, 0); CHECK_GT(nLevels, 0u);
CHECK(levelDurations); CHECK(levelDurations);
levels_.reserve(nLevels); levels_.reserve(nLevels);
...@@ -46,10 +46,10 @@ MultiLevelTimeSeries<VT, CT>::MultiLevelTimeSeries( ...@@ -46,10 +46,10 @@ MultiLevelTimeSeries<VT, CT>::MultiLevelTimeSeries(
size_t nBuckets, size_t nBuckets,
std::initializer_list<Duration> durations) std::initializer_list<Duration> durations)
: cachedTime_(), cachedSum_(0), cachedCount_(0) { : cachedTime_(), cachedSum_(0), cachedCount_(0) {
CHECK_GT(durations.size(), 0); CHECK_GT(durations.size(), 0u);
levels_.reserve(durations.size()); levels_.reserve(durations.size());
int i = 0; size_t i = 0;
Duration prev{0}; Duration prev{0};
for (auto dur : durations) { for (auto dur : durations) {
if (dur == Duration(0)) { if (dur == Duration(0)) {
......
...@@ -100,7 +100,7 @@ class MultiLevelTimeSeries { ...@@ -100,7 +100,7 @@ class MultiLevelTimeSeries {
*/ */
const Level& getLevel(int level) const { const Level& getLevel(int level) const {
CHECK(level >= 0); CHECK(level >= 0);
CHECK_LT(level, levels_.size()); CHECK_LT(size_t(level), levels_.size());
return levels_[level]; return levels_[level];
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment