Commit 9f646cf0 authored by Marc Celani, committed by Facebook Github Bot

Significantly improve merge performance by leveraging inplace_merge

Summary:
The sorting approach used in TDigest::merge is inefficient because it does not take advantage of the fact that the input consists of k already-sorted subarrays. This diff uses std::inplace_merge to exploit that structure and improve performance.

The cost of merging k digests of size m drops from O(km * log(km)) to O(km * log(k)).
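For illustration only, here is a minimal standalone sketch of the bottom-up pairwise merge this diff performs (the function mergeSortedRuns and its signature are made up for this example; the real code operates on Centroid vectors inside TDigest::merge). Adjacent sorted runs are merged in passes of doubling block size, so each element takes part in roughly log2(k) calls to std::inplace_merge, and each call is linear in the block size when std::inplace_merge can allocate its temporary buffer, which is where the O(km * log(k)) bound comes from.

// Illustrative sketch, not the folly implementation.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<double> mergeSortedRuns(
    const std::vector<std::vector<double>>& runs) {
  // Concatenate the k sorted runs and remember where each run begins.
  std::vector<double> buf;
  std::vector<size_t> starts;
  for (const auto& run : runs) {
    starts.push_back(buf.size());
    buf.insert(buf.end(), run.begin(), run.end());
  }
  // Bottom-up passes: after each pass, every block of 2 * runsPerBlock
  // consecutive runs is sorted.
  for (size_t runsPerBlock = 1; runsPerBlock < starts.size();
       runsPerBlock *= 2) {
    for (size_t i = 0; i + runsPerBlock < starts.size();
         i += 2 * runsPerBlock) {
      auto first = buf.begin() + starts[i];
      auto middle = buf.begin() + starts[i + runsPerBlock];
      // The trailing block may be incomplete; in that case merge to buf.end().
      auto last = (i + 2 * runsPerBlock < starts.size())
          ? buf.begin() + starts[i + 2 * runsPerBlock]
          : buf.end();
      std::inplace_merge(first, middle, last);
    }
  }
  assert(std::is_sorted(buf.begin(), buf.end()));
  return buf;
}

Note that the actual diff records each run's start as centroids.end() while the buffer is still being filled; that is safe only because centroids.reserve(nCentroids) runs first, so no reallocation invalidates the saved iterators. The sketch above sidesteps the issue by storing offsets instead of iterators.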

Reviewed By: anakryiko

Differential Revision: D7690143

fbshipit-source-id: 1307db153b3cae0bb952d4b872aede8c40ce292c
parent bd1c3af4
folly/stats/TDigest.cpp
@@ -18,6 +18,7 @@
 #include <emmintrin.h>

+#include <algorithm>
 #include <cmath>

 namespace folly {
@@ -149,15 +150,43 @@ TDigest TDigest::merge(Range<const TDigest*> digests) {
   std::vector<Centroid> centroids;
   centroids.reserve(nCentroids);

+  std::vector<std::vector<Centroid>::iterator> starts;
+  starts.reserve(digests.size());
+
   double count = 0;
   for (auto it = digests.begin(); it != digests.end(); it++) {
+    starts.push_back(centroids.end());
     count += it->count();
     for (const auto& centroid : it->centroids_) {
       centroids.push_back(centroid);
     }
   }
-  std::sort(centroids.begin(), centroids.end());
+
+  for (size_t digestsPerBlock = 1; digestsPerBlock < starts.size();
+       digestsPerBlock *= 2) {
+    // Each sorted block is digestsPerBlock digests big. For each step, try to
+    // merge two blocks together.
+    for (size_t i = 0; i < starts.size(); i += (digestsPerBlock * 2)) {
+      // It is possible that this block is incomplete (less than
+      // digestsPerBlock big). In that case, the rest of the block is already
+      // sorted, so leave it alone.
+      if (i + digestsPerBlock < starts.size()) {
+        auto first = starts[i];
+        auto middle = starts[i + digestsPerBlock];
+        // It is possible that the next block is incomplete (less than
+        // digestsPerBlock big). In that case, merge to the end. Otherwise,
+        // merge to the end of that block.
+        std::vector<Centroid>::iterator last =
+            (i + (digestsPerBlock * 2) < starts.size())
+            ? *(starts.begin() + i + 2 * digestsPerBlock)
+            : centroids.end();
+        std::inplace_merge(first, middle, last);
+      }
+    }
+  }
+
+  DCHECK(std::is_sorted(centroids.begin(), centroids.end()));
+
   size_t maxSize = digests.begin()->maxSize_;
   TDigest result(maxSize);
...
folly/stats/test/TDigestBenchmark.cpp
@@ -112,7 +112,9 @@ BENCHMARK_RELATIVE_NAMED_PARAM(merge, 1000x1, 1000, 1000)
 BENCHMARK_RELATIVE_NAMED_PARAM(merge, 1000x5, 1000, 5000)
 BENCHMARK_RELATIVE_NAMED_PARAM(merge, 1000x10, 1000, 10000)
 BENCHMARK_DRAW_LINE();
-BENCHMARK_NAMED_PARAM(mergeDigests, 100x60, 100, 60)
+BENCHMARK_NAMED_PARAM(mergeDigests, 100x10, 100, 10)
+BENCHMARK_RELATIVE_NAMED_PARAM(mergeDigests, 100x30, 100, 30)
+BENCHMARK_RELATIVE_NAMED_PARAM(mergeDigests, 100x60, 100, 60)
 BENCHMARK_RELATIVE_NAMED_PARAM(mergeDigests, 1000x60, 1000, 60)
 BENCHMARK_DRAW_LINE();
 BENCHMARK_NAMED_PARAM(estimateQuantile, 100x1_p001, 100, 0.001)
@@ -136,31 +138,33 @@ BENCHMARK_RELATIVE_NAMED_PARAM(estimateQuantile, 1000_p999, 1000, 0.999)
 * ============================================================================
 * folly/stats/test/TDigestBenchmark.cpp          relative  time/iter  iters/s
 * ============================================================================
-* merge(100x1)                                               2.21us   451.95K
-* merge(100x5)                                     57.80%    3.83us   261.23K
-* merge(100x10)                                    42.26%    5.24us   191.01K
-* merge(1000x1)                                    10.43%   21.21us    47.15K
-* merge(1000x5)                                     6.54%   33.85us    29.54K
-* merge(1000x10)                                    4.52%   48.97us    20.42K
+* merge(100x1)                                               2.19us   455.86K
+* merge(100x5)                                     58.77%    3.73us   267.92K
+* merge(100x10)                                    42.00%    5.22us   191.48K
+* merge(1000x1)                                    10.52%   20.86us    47.95K
+* merge(1000x5)                                     6.54%   33.54us    29.81K
+* merge(1000x10)                                    4.43%   49.54us    20.19K
 * ----------------------------------------------------------------------------
-* mergeDigests(100x60)                                     278.92us     3.59K
-* mergeDigests(1000x60)                             8.98%    3.10ms    322.11
+* mergeDigests(100x10)                                      25.29us    39.55K
+* mergeDigests(100x30)                             21.71%  116.50us     8.58K
+* mergeDigests(100x60)                              9.22%  274.32us     3.65K
+* mergeDigests(1000x60)                             0.90%    2.81ms    356.45
 * ----------------------------------------------------------------------------
-* estimateQuantile(100x1_p001)                               8.51ns   117.49M
-* estimateQuantile(100_p01)                        61.35%   13.87ns    72.08M
-* estimateQuantile(100_p25)                        13.71%   62.08ns    16.11M
-* estimateQuantile(100_p50)                        10.37%   82.09ns    12.18M
-* estimateQuantile(100_p75)                        13.92%   61.14ns    16.36M
-* estimateQuantile(100_p99)                        67.06%   12.69ns    78.79M
-* estimateQuantile(100_p999)                      110.81%    7.68ns   130.20M
+* estimateQuantile(100x1_p001)                               8.48ns   117.88M
+* estimateQuantile(100_p01)                        61.32%   13.83ns    72.29M
+* estimateQuantile(100_p25)                        11.66%   72.73ns    13.75M
+* estimateQuantile(100_p50)                         9.55%   88.79ns    11.26M
+* estimateQuantile(100_p75)                        13.88%   61.14ns    16.36M
+* estimateQuantile(100_p99)                        66.88%   12.68ns    78.83M
+* estimateQuantile(100_p999)                      110.57%    7.67ns   130.34M
 * ----------------------------------------------------------------------------
-* estimateQuantile(1000_p001)                      26.56%   32.05ns    31.20M
-* estimateQuantile(1000_p01)                        7.72%  110.22ns     9.07M
-* estimateQuantile(1000_p25)                        1.74%  488.18ns     2.05M
-* estimateQuantile(1000_p50)                        1.24%  684.06ns     1.46M
-* estimateQuantile(1000_p75)                        1.76%  483.38ns     2.07M
-* estimateQuantile(1000_p99)                        7.98%  106.66ns     9.38M
-* estimateQuantile(1000_p999)                      32.64%   26.08ns    38.35M
+* estimateQuantile(1000_p001)                      26.46%   32.06ns    31.19M
+* estimateQuantile(1000_p01)                        7.78%  108.97ns     9.18M
+* estimateQuantile(1000_p25)                        1.74%  488.35ns     2.05M
+* estimateQuantile(1000_p50)                        1.24%  683.10ns     1.46M
+* estimateQuantile(1000_p75)                        1.75%  483.58ns     2.07M
+* estimateQuantile(1000_p99)                        8.06%  105.29ns     9.50M
+* estimateQuantile(1000_p999)                      32.98%   25.72ns    38.87M
 * ============================================================================
 */
...
folly/stats/test/TDigestTest.cpp
@@ -59,14 +59,12 @@ TEST(TDigest, Merge) {
   for (int i = 1; i <= 100; ++i) {
     values.push_back(i);
   }
   digest = digest.merge(values);
   values.clear();
   for (int i = 101; i <= 200; ++i) {
     values.push_back(i);
   }
   digest = digest.merge(values);
   EXPECT_EQ(200, digest.count());
@@ -106,7 +104,6 @@ TEST(TDigest, MergeLarge) {
   for (int i = 1; i <= 1000; ++i) {
     values.push_back(i);
   }
   digest = digest.merge(values);
   EXPECT_EQ(1000, digest.count());
@@ -124,12 +121,17 @@ TEST(TDigest, MergeLargeAsDigests) {
   std::vector<TDigest> digests;
   TDigest digest(100);
+  std::vector<double> values;
+  for (int i = 1; i <= 1000; ++i) {
+    values.push_back(i);
+  }
+  // Ensure that the values do not monotonically increase across digests.
+  std::random_shuffle(values.begin(), values.end());
   for (int i = 0; i < 10; ++i) {
-    std::vector<double> values;
-    for (int j = 1; j <= 100; ++j) {
-      values.push_back(100 * i + j);
-    }
-    digests.push_back(digest.merge(values));
+    std::vector<double> sorted(
+        values.begin() + (i * 100), values.begin() + (i + 1) * 100);
+    std::sort(sorted.begin(), sorted.end());
+    digests.push_back(digest.merge(sorted));
   }
   digest = TDigest::merge(digests);
@@ -140,21 +142,32 @@ TEST(TDigest, MergeLargeAsDigests) {
   EXPECT_EQ(1.5, digest.estimateQuantile(0.001));
   EXPECT_EQ(10.5, digest.estimateQuantile(0.01));
+  EXPECT_EQ(500.25, digest.estimateQuantile(0.5));
   EXPECT_EQ(990.25, digest.estimateQuantile(0.99));
   EXPECT_EQ(999.5, digest.estimateQuantile(0.999));
 }

-class DistributionTest : public ::testing::TestWithParam<
-    std::tuple<bool, size_t, double, double>> {};
+class DistributionTest
+    : public ::testing::TestWithParam<
+          std::tuple<std::pair<bool, size_t>, double, bool>> {};

 TEST_P(DistributionTest, ReasonableError) {
+  std::pair<bool, size_t> underlyingDistribution;
   bool logarithmic;
   size_t modes;
   double quantile;
-  double reasonableError;
-  std::tie(logarithmic, modes, quantile, reasonableError) = GetParam();
+  double reasonableError = 0;
+  bool digestMerge;
+  std::tie(underlyingDistribution, quantile, digestMerge) = GetParam();
+  std::tie(logarithmic, modes) = underlyingDistribution;
+  if (quantile == 0.001 || quantile == 0.999) {
+    reasonableError = 0.0005;
+  } else if (quantile == 0.01 || quantile == 0.99) {
+    reasonableError = 0.005;
+  } else if (quantile == 0.25 || quantile == 0.5 || quantile == 0.75) {
+    reasonableError = 0.02;
+  }

   std::vector<double> errors;
@@ -186,16 +199,25 @@ TEST_P(DistributionTest, ReasonableError) {
     }
   }

+  std::vector<TDigest> digests;
   for (size_t i = 0; i < kNumSamples / 1000; ++i) {
     auto it_l = values.begin() + (i * 1000);
     auto it_r = it_l + 1000;
     std::sort(it_l, it_r);
     folly::Range<const double*> r(values, i * 1000, 1000);
-    digest = digest.merge(r);
+    if (digestMerge) {
+      digests.push_back(digest.merge(r));
+    } else {
+      digest = digest.merge(r);
+    }
   }

   std::sort(values.begin(), values.end());
+
+  if (digestMerge) {
+    digest = TDigest::merge(digests);
+  }

   double est = digest.estimateQuantile(quantile);
   auto it = std::lower_bound(values.begin(), values.end(), est);
   int32_t actualRank = std::distance(values.begin(), it);
@@ -224,32 +246,11 @@ TEST_P(DistributionTest, ReasonableError) {
 INSTANTIATE_TEST_CASE_P(
     ReasonableErrors,
     DistributionTest,
-    ::testing::Values(
-        std::make_tuple(true, 1, 0.001, 0.0005),
-        std::make_tuple(true, 1, 0.01, 0.005),
-        std::make_tuple(true, 1, 0.25, 0.02),
-        std::make_tuple(true, 1, 0.50, 0.02),
-        std::make_tuple(true, 1, 0.75, 0.02),
-        std::make_tuple(true, 1, 0.99, 0.005),
-        std::make_tuple(true, 1, 0.999, 0.0005),
-        std::make_tuple(true, 3, 0.001, 0.0005),
-        std::make_tuple(true, 3, 0.01, 0.005),
-        std::make_tuple(true, 3, 0.25, 0.02),
-        std::make_tuple(true, 3, 0.50, 0.02),
-        std::make_tuple(true, 3, 0.75, 0.02),
-        std::make_tuple(true, 3, 0.99, 0.005),
-        std::make_tuple(true, 3, 0.999, 0.0005),
-        std::make_tuple(false, 1, 0.001, 0.0005),
-        std::make_tuple(false, 1, 0.01, 0.005),
-        std::make_tuple(false, 1, 0.25, 0.02),
-        std::make_tuple(false, 1, 0.50, 0.02),
-        std::make_tuple(false, 1, 0.75, 0.02),
-        std::make_tuple(false, 1, 0.99, 0.005),
-        std::make_tuple(false, 1, 0.999, 0.0005),
-        std::make_tuple(false, 10, 0.001, 0.0005),
-        std::make_tuple(false, 10, 0.01, 0.005),
-        std::make_tuple(false, 10, 0.25, 0.02),
-        std::make_tuple(false, 10, 0.50, 0.02),
-        std::make_tuple(false, 10, 0.75, 0.02),
-        std::make_tuple(false, 10, 0.99, 0.005),
-        std::make_tuple(false, 10, 0.999, 0.0005)));
+    ::testing::Combine(
+        ::testing::Values(
+            std::make_pair(true, 1),
+            std::make_pair(true, 3),
+            std::make_pair(false, 1),
+            std::make_pair(false, 10)),
+        ::testing::Values(0.001, 0.01, 0.25, 0.50, 0.75, 0.99, 0.999),
+        ::testing::Bool()));