Commit 1c609e4e authored by Giuseppe Ottaviano's avatar Giuseppe Ottaviano Committed by Facebook Github Bot

Implement partial loadUnaligned

Summary:
Neither GCC nor Clang is able to optimize a `memcpy` where
the length is known to be smaller than a word: a library call is
generated. This is a simple implementation of a subword
`loadUnaligned` to be used to read the tail of a buffer when reading
one word at a time.

Reviewed By: luciang

Differential Revision: D7631978

fbshipit-source-id: d5719580e6bef82aa8e3fbdce8f873b569527a3a
parent 1bd232cd
...@@ -671,7 +671,7 @@ void escapeString( ...@@ -671,7 +671,7 @@ void escapeString(
if (avail >= 8) { if (avail >= 8) {
word = folly::loadUnaligned<uint64_t>(firstEsc); word = folly::loadUnaligned<uint64_t>(firstEsc);
} else { } else {
memcpy(static_cast<void*>(&word), firstEsc, avail); word = folly::partialLoadUnaligned<uint64_t>(firstEsc, avail);
} }
auto prefix = firstEscapableInWord(word); auto prefix = firstEscapableInWord(word);
DCHECK_LE(prefix, avail); DCHECK_LE(prefix, avail);
......
...@@ -352,6 +352,44 @@ inline T loadUnaligned(const void* p) { ...@@ -352,6 +352,44 @@ inline T loadUnaligned(const void* p) {
} }
} }
/**
 * Read l bytes into the low bits of a value of an unsigned integral
 * type T, where l < sizeof(T).
 *
 * This is intended as a complement to loadUnaligned to read the tail
 * of a buffer when it is processed one word at a time.
 */
template <class T>
inline T partialLoadUnaligned(const void* p, size_t l) {
  static_assert(
      std::is_integral<T>::value && std::is_unsigned<T>::value &&
          sizeof(T) <= 8,
      "Invalid type");
  assume(l < sizeof(T));

  auto bytes = static_cast<const char*>(p);
  T result = 0;
  if (!kHasUnalignedAccess || !kIsLittleEndian) {
    // No fast subword path on this platform; fall back to memcpy.
    memcpy(&result, bytes, l);
    return result;
  }
  // Decompose l into its set bits and assemble the value from at most
  // three power-of-two-sized unaligned loads, walking from the low end
  // of the buffer upwards: 1-byte chunk first, then 2, then 4.
  size_t offset = 0;
  if (l & 1) {
    result = loadUnaligned<uint8_t>(bytes);
    offset = 1;
  }
  if (l & 2) {
    result |= static_cast<T>(loadUnaligned<uint16_t>(bytes + offset))
        << (offset * 8);
    offset += 2;
  }
  if (l & 4) {
    result |= static_cast<T>(loadUnaligned<uint32_t>(bytes + offset))
        << (offset * 8);
  }
  return result;
}
/** /**
* Write an unaligned value of type T. * Write an unaligned value of type T.
*/ */
...@@ -365,7 +403,7 @@ inline void storeUnaligned(void* p, T value) { ...@@ -365,7 +403,7 @@ inline void storeUnaligned(void* p, T value) {
// if p is a nullptr. By assuming it's not a nullptr, we get a // if p is a nullptr. By assuming it's not a nullptr, we get a
// nice loud segfault in optimized builds if p is nullptr, rather // nice loud segfault in optimized builds if p is nullptr, rather
// than just silently doing nothing. // than just silently doing nothing.
folly::assume(p != nullptr); assume(p != nullptr);
new (p) Unaligned<T>(value); new (p) Unaligned<T>(value);
} else { } else {
memcpy(p, &value, sizeof(T)); memcpy(p, &value, sizeof(T));
......
...@@ -16,6 +16,12 @@ ...@@ -16,6 +16,12 @@
// @author Tudor Bosman (tudorb@fb.com) // @author Tudor Bosman (tudorb@fb.com)
#include <algorithm>
#include <vector>
#include <folly/CppAttributes.h>
#include <folly/Random.h>
#include <folly/lang/Assume.h>
#include <folly/lang/Bits.h> #include <folly/lang/Bits.h>
#include <folly/Benchmark.h> #include <folly/Benchmark.h>
...@@ -24,7 +30,7 @@ using namespace folly; ...@@ -24,7 +30,7 @@ using namespace folly;
BENCHMARK(nextPowTwoClz, iters) { BENCHMARK(nextPowTwoClz, iters) {
for (unsigned long i = 0; i < iters; ++i) { for (unsigned long i = 0; i < iters; ++i) {
auto x = folly::nextPowTwo(iters); auto x = folly::nextPowTwo(i);
folly::doNotOptimizeAway(x); folly::doNotOptimizeAway(x);
} }
} }
...@@ -47,6 +53,96 @@ BENCHMARK(reverse, iters) { ...@@ -47,6 +53,96 @@ BENCHMARK(reverse, iters) {
} }
} }
namespace {
/**
 * Benchmark driver: repeatedly apply a partial-load implementation `f`
 * to random positions in a small random buffer, cycling the load
 * length through 0..7.
 */
template <class F>
void testPartialLoadUnaligned(F f, size_t iters) {
  constexpr size_t kBufSize = 32;
  std::vector<char> buf;
  BENCHMARK_SUSPEND {
    // Extra slack so reads of up to 7 bytes past any position stay in bounds.
    buf.resize(kBufSize + 7);
    std::generate(
        buf.begin(), buf.end(), [] { return folly::Random::rand32(255); });
  }
  uint64_t acc = 0;
  for (size_t iter = 0; iter < iters; ++iter) {
    // Derive each read position from the previous result so successive
    // loads form a dependency chain and cannot be pipelined away.
    const auto offset = acc % kBufSize;
    acc = f(buf.data() + offset, iter % 8);
    folly::doNotOptimizeAway(acc);
  }
}
/**
 * An alternative implementation of partialLoadUnaligned that has
 * comparable performance. Not worth the extra complexity and code
 * size, leaving it here for future consideration in case the relative
 * performance changes.
 */
uint64_t partialLoadUnalignedSwitch(const char* p, size_t l) {
  folly::assume(l < 8);

  uint64_t r = 0;
  switch (l) {
    case 7:
      r = static_cast<uint64_t>(folly::loadUnaligned<uint32_t>(p + 3)) << 24;
      FOLLY_FALLTHROUGH;
    case 3:
      r |= static_cast<uint64_t>(folly::loadUnaligned<uint16_t>(p + 1)) << 8;
      FOLLY_FALLTHROUGH;
    case 1:
      // Load the low byte as unsigned: `r |= *p` would sign-extend on
      // platforms where char is signed, corrupting the high bits.
      r |= folly::loadUnaligned<uint8_t>(p);
      break;
    case 6:
      r = static_cast<uint64_t>(folly::loadUnaligned<uint16_t>(p + 4)) << 32;
      FOLLY_FALLTHROUGH;
    case 4:
      r |= folly::loadUnaligned<uint32_t>(p);
      break;
    case 5:
      // One byte at offset 4 plus a 4-byte load at the start. The previous
      // code read a full uint32 at p + 4 (3 bytes beyond the valid range)
      // and OR'ed in only the (sign-extended) byte 0, losing bytes 1-3.
      r = static_cast<uint64_t>(folly::loadUnaligned<uint8_t>(p + 4)) << 32;
      r |= folly::loadUnaligned<uint32_t>(p);
      break;
    case 2:
      r = folly::loadUnaligned<uint16_t>(p);
      break;
    case 0:
      break;
  }
  return r;
}
} // namespace
BENCHMARK_DRAW_LINE();
// Baseline: the library implementation being introduced.
BENCHMARK(PartialLoadUnaligned, iters) {
  testPartialLoadUnaligned(folly::partialLoadUnaligned<uint64_t>, iters);
}
// Comparison point: the naive memcpy-based tail load that
// partialLoadUnaligned is intended to replace.
BENCHMARK(PartialLoadUnalignedMemcpy, iters) {
  testPartialLoadUnaligned(
      [](const char* p, size_t l) {
        folly::assume(l < 8);
        // Zero-initialize: memcpy fills only the low l < 8 bytes, and the
        // harness feeds the whole returned word back into the next read
        // position, so leftover indeterminate bytes would both be UB to
        // read and make the benchmark non-deterministic.
        uint64_t ret = 0;
        memcpy(&ret, p, l);
        return ret;
      },
      iters);
}
// Alternative switch-based implementation defined above.
BENCHMARK(PartialLoadUnalignedSwitch, iters) {
  testPartialLoadUnaligned(partialLoadUnalignedSwitch, iters);
}
int main(int argc, char** argv) { int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true); gflags::ParseCommandLineFlags(&argc, &argv, true);
folly::runBenchmarks(); folly::runBenchmarks();
...@@ -54,16 +150,19 @@ int main(int argc, char** argv) { ...@@ -54,16 +150,19 @@ int main(int argc, char** argv) {
} }
/* /*
Benchmarks run on dual Xeon X5650's @ 2.67GHz w/hyperthreading enabled Benchmarks run on Intel Xeon CPU E5-2678 v3 @ 2.50GHz with --bm_min_usec=500000
(12 physical cores, 12 MB cache, 72 GB RAM)
============================================================================ ============================================================================
folly/test/BitsBenchmark.cpp relative time/iter iters/s folly/lang/test/BitsBenchmark.cpp relative time/iter iters/s
============================================================================ ============================================================================
nextPowTwoClz 0.00fs Infinity nextPowTwoClz 0.00fs Infinity
---------------------------------------------------------------------------- ----------------------------------------------------------------------------
isPowTwo 731.61ps 1.37G isPowTwo 0.00fs Infinity
----------------------------------------------------------------------------
reverse 4.18ns 239.14M
---------------------------------------------------------------------------- ----------------------------------------------------------------------------
reverse 4.84ns 206.58M PartialLoadUnaligned 2.22ns 449.80M
PartialLoadUnalignedMemcpy 7.53ns 132.78M
PartialLoadUnalignedSwitch 2.04ns 491.30M
============================================================================ ============================================================================
*/ */
...@@ -16,10 +16,12 @@ ...@@ -16,10 +16,12 @@
// @author Tudor Bosman (tudorb@fb.com) // @author Tudor Bosman (tudorb@fb.com)
#include <folly/lang/Bits.h> #include <algorithm>
#include <random>
#include <vector>
#include <folly/Random.h> #include <folly/Random.h>
#include <random> #include <folly/lang/Bits.h>
#include <folly/portability/GTest.h> #include <folly/portability/GTest.h>
...@@ -208,3 +210,25 @@ TEST(Bits, BitReverse) { ...@@ -208,3 +210,25 @@ TEST(Bits, BitReverse) {
EXPECT_EQ(folly::bitReverse(b), reverse_simple(b) >> 32); EXPECT_EQ(folly::bitReverse(b), reverse_simple(b) >> 32);
} }
} }
TEST(Bits, PartialLoadUnaligned) {
  // Fill a buffer with random bytes, then check every (length, offset)
  // combination against a memcpy-based reference load.
  std::vector<char> buf(128);
  std::generate(
      buf.begin(), buf.end(), [] { return folly::Random::rand32(255); });
  for (size_t len = 0; len < 8; ++len) {
    for (size_t start = 0; start <= buf.size() - len; ++start) {
      const auto ptr = buf.data() + start;
      uint64_t reference = 0;
      memcpy(&reference, ptr, len);
      EXPECT_EQ(folly::partialLoadUnaligned<uint64_t>(ptr, len), reference);
      if (len < 4) {
        // Narrower result types must agree with the truncated reference.
        EXPECT_EQ(
            folly::partialLoadUnaligned<uint32_t>(ptr, len),
            static_cast<uint32_t>(reference));
      }
    }
  }
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment