Commit 5bd3bab0 authored by Christopher Dykes, committed by Facebook Github Bot

Switch implicit references of folly::make_unique to std::make_unique

Summary: It's *almost* dead. This switches things to explicitly reference `std::make_unique` so that `folly::make_unique` can be marked as deprecated until mobile catches up and it can be killed completely.

Reviewed By: yfeldblum

Differential Revision: D5026584

fbshipit-source-id: aefc8cb3de84583fd3722fdb9dfea620884590c5
parent 7d151d2c
......@@ -98,7 +98,7 @@ namespace test {
// of its destruction.
struct EnvVarSaver {
EnvVarSaver()
: state_(make_unique<experimental::EnvironmentState>(
: state_(std::make_unique<experimental::EnvironmentState>(
experimental::EnvironmentState::fromCurrentEnvironment())) {}
EnvVarSaver(EnvVarSaver&& other) noexcept : state_(std::move(other.state_)) {}
......
......@@ -117,7 +117,7 @@ struct SchemaValidator final : IValidator, public Validator {
// We break apart the constructor and actually loading the schema so that
// we can handle the case where a schema refers to itself, e.g. via
// "$ref": "#".
auto v = make_unique<SchemaValidator>();
auto v = std::make_unique<SchemaValidator>();
v->loadSchema(context, schema);
return v;
}
......@@ -667,7 +667,7 @@ void SchemaValidator::loadSchema(SchemaValidatorContext& context,
if (p->isString() && p->stringPiece()[0] == '#') {
auto it = context.refs.find(p->getString());
if (it != context.refs.end()) {
validators_.emplace_back(make_unique<RefValidator>(it->second));
validators_.emplace_back(std::make_unique<RefValidator>(it->second));
return;
}
......@@ -704,7 +704,7 @@ void SchemaValidator::loadSchema(SchemaValidatorContext& context,
// future references to it will just see that pointer and won't try to
// keep parsing further.
if (s) {
auto v = make_unique<SchemaValidator>();
auto v = std::make_unique<SchemaValidator>();
context.refs[p->getString()] = v.get();
v->loadSchema(context, *s);
validators_.emplace_back(std::move(v));
......@@ -715,34 +715,34 @@ void SchemaValidator::loadSchema(SchemaValidatorContext& context,
// Numeric validators
if (const auto* p = schema.get_ptr("multipleOf")) {
validators_.emplace_back(make_unique<MultipleOfValidator>(*p));
validators_.emplace_back(std::make_unique<MultipleOfValidator>(*p));
}
if (const auto* p = schema.get_ptr("maximum")) {
validators_.emplace_back(
make_unique<ComparisonValidator>(*p,
schema.get_ptr("exclusiveMaximum"),
ComparisonValidator::Type::MAX));
validators_.emplace_back(std::make_unique<ComparisonValidator>(
*p,
schema.get_ptr("exclusiveMaximum"),
ComparisonValidator::Type::MAX));
}
if (const auto* p = schema.get_ptr("minimum")) {
validators_.emplace_back(
make_unique<ComparisonValidator>(*p,
schema.get_ptr("exclusiveMinimum"),
ComparisonValidator::Type::MIN));
validators_.emplace_back(std::make_unique<ComparisonValidator>(
*p,
schema.get_ptr("exclusiveMinimum"),
ComparisonValidator::Type::MIN));
}
// String validators
if (const auto* p = schema.get_ptr("maxLength")) {
validators_.emplace_back(
make_unique<SizeValidator<std::greater_equal<int64_t>>>(
std::make_unique<SizeValidator<std::greater_equal<int64_t>>>(
*p, dynamic::Type::STRING));
}
if (const auto* p = schema.get_ptr("minLength")) {
validators_.emplace_back(
make_unique<SizeValidator<std::less_equal<int64_t>>>(
std::make_unique<SizeValidator<std::less_equal<int64_t>>>(
*p, dynamic::Type::STRING));
}
if (const auto* p = schema.get_ptr("pattern")) {
validators_.emplace_back(make_unique<StringPatternValidator>(*p));
validators_.emplace_back(std::make_unique<StringPatternValidator>(*p));
}
// Array validators
......@@ -750,20 +750,20 @@ void SchemaValidator::loadSchema(SchemaValidatorContext& context,
const auto* additionalItems = schema.get_ptr("additionalItems");
if (items || additionalItems) {
validators_.emplace_back(
make_unique<ArrayItemsValidator>(context, items, additionalItems));
std::make_unique<ArrayItemsValidator>(context, items, additionalItems));
}
if (const auto* p = schema.get_ptr("maxItems")) {
validators_.emplace_back(
make_unique<SizeValidator<std::greater_equal<int64_t>>>(
std::make_unique<SizeValidator<std::greater_equal<int64_t>>>(
*p, dynamic::Type::ARRAY));
}
if (const auto* p = schema.get_ptr("minItems")) {
validators_.emplace_back(
make_unique<SizeValidator<std::less_equal<int64_t>>>(
std::make_unique<SizeValidator<std::less_equal<int64_t>>>(
*p, dynamic::Type::ARRAY));
}
if (const auto* p = schema.get_ptr("uniqueItems")) {
validators_.emplace_back(make_unique<ArrayUniqueValidator>(*p));
validators_.emplace_back(std::make_unique<ArrayUniqueValidator>(*p));
}
// Object validators
......@@ -771,46 +771,47 @@ void SchemaValidator::loadSchema(SchemaValidatorContext& context,
const auto* patternProperties = schema.get_ptr("patternProperties");
const auto* additionalProperties = schema.get_ptr("additionalProperties");
if (properties || patternProperties || additionalProperties) {
validators_.emplace_back(make_unique<PropertiesValidator>(
validators_.emplace_back(std::make_unique<PropertiesValidator>(
context, properties, patternProperties, additionalProperties));
}
if (const auto* p = schema.get_ptr("maxProperties")) {
validators_.emplace_back(
make_unique<SizeValidator<std::greater_equal<int64_t>>>(
std::make_unique<SizeValidator<std::greater_equal<int64_t>>>(
*p, dynamic::Type::OBJECT));
}
if (const auto* p = schema.get_ptr("minProperties")) {
validators_.emplace_back(
make_unique<SizeValidator<std::less_equal<int64_t>>>(
std::make_unique<SizeValidator<std::less_equal<int64_t>>>(
*p, dynamic::Type::OBJECT));
}
if (const auto* p = schema.get_ptr("required")) {
validators_.emplace_back(make_unique<RequiredValidator>(*p));
validators_.emplace_back(std::make_unique<RequiredValidator>(*p));
}
// Misc validators
if (const auto* p = schema.get_ptr("dependencies")) {
validators_.emplace_back(make_unique<DependencyValidator>(context, *p));
validators_.emplace_back(
std::make_unique<DependencyValidator>(context, *p));
}
if (const auto* p = schema.get_ptr("enum")) {
validators_.emplace_back(make_unique<EnumValidator>(*p));
validators_.emplace_back(std::make_unique<EnumValidator>(*p));
}
if (const auto* p = schema.get_ptr("type")) {
validators_.emplace_back(make_unique<TypeValidator>(*p));
validators_.emplace_back(std::make_unique<TypeValidator>(*p));
}
if (const auto* p = schema.get_ptr("allOf")) {
validators_.emplace_back(make_unique<AllOfValidator>(context, *p));
validators_.emplace_back(std::make_unique<AllOfValidator>(context, *p));
}
if (const auto* p = schema.get_ptr("anyOf")) {
validators_.emplace_back(make_unique<AnyOfValidator>(
validators_.emplace_back(std::make_unique<AnyOfValidator>(
context, *p, AnyOfValidator::Type::ONE_OR_MORE));
}
if (const auto* p = schema.get_ptr("oneOf")) {
validators_.emplace_back(make_unique<AnyOfValidator>(
validators_.emplace_back(std::make_unique<AnyOfValidator>(
context, *p, AnyOfValidator::Type::EXACTLY_ONE));
}
if (const auto* p = schema.get_ptr("not")) {
validators_.emplace_back(make_unique<NotValidator>(context, *p));
validators_.emplace_back(std::make_unique<NotValidator>(context, *p));
}
}
......@@ -1014,7 +1015,7 @@ folly::Singleton<Validator> schemaValidator([]() {
Validator::~Validator() = default;
std::unique_ptr<Validator> makeValidator(const dynamic& schema) {
auto v = make_unique<SchemaValidator>();
auto v = std::make_unique<SchemaValidator>();
SchemaValidatorContext context(schema);
context.refs["#"] = v.get();
v->loadSchema(context, schema);
......
......@@ -166,8 +166,8 @@ class ObserverManager::NextQueue {
};
ObserverManager::ObserverManager() {
currentQueue_ = make_unique<CurrentQueue>();
nextQueue_ = make_unique<NextQueue>(*this);
currentQueue_ = std::make_unique<CurrentQueue>();
nextQueue_ = std::make_unique<NextQueue>(*this);
}
ObserverManager::~ObserverManager() {
......
......@@ -385,7 +385,7 @@ StackTracePrinter::StackTracePrinter(size_t minSignalSafeElfCacheSize, int fd)
fd,
SymbolizePrinter::COLOR_IF_TTY,
size_t(64) << 10), // 64KiB
addresses_(make_unique<FrameArray<kMaxStackTraceDepth>>()) {}
addresses_(std::make_unique<FrameArray<kMaxStackTraceDepth>>()) {}
void StackTracePrinter::flush() {
printer_.flush();
......
......@@ -45,7 +45,7 @@ TEST(EnvVarSaverTest, ExampleNew) {
PCHECK(0 == unsetenv(key));
EXPECT_EQ(nullptr, getenv(key));
auto saver = make_unique<EnvVarSaver>();
auto saver = std::make_unique<EnvVarSaver>();
PCHECK(0 == setenv(key, "blah", true));
EXPECT_STREQ("blah", getenv(key));
saver = nullptr;
......@@ -57,7 +57,7 @@ TEST(EnvVarSaverTest, ExampleExisting) {
EXPECT_NE(nullptr, getenv(key));
auto value = std::string{getenv(key)};
auto saver = make_unique<EnvVarSaver>();
auto saver = std::make_unique<EnvVarSaver>();
PCHECK(0 == setenv(key, "blah", true));
EXPECT_STREQ("blah", getenv(key));
saver = nullptr;
......@@ -180,7 +180,7 @@ TEST(EnvVarSaverTest, ExampleDeleting) {
EXPECT_NE(nullptr, getenv(key));
auto value = std::string{getenv(key)};
auto saver = make_unique<EnvVarSaver>();
auto saver = std::make_unique<EnvVarSaver>();
PCHECK(0 == unsetenv(key));
EXPECT_EQ(nullptr, getenv(key));
saver = nullptr;
......
......@@ -63,11 +63,12 @@ class GlobalCache {
auto& fmPtrRef = map_[&evb];
if (!fmPtrRef) {
auto loopController = make_unique<EventBaseLoopController>();
auto loopController = std::make_unique<EventBaseLoopController>();
loopController->attachEventBase(evb);
evb.runOnDestruction(new EventBaseOnDestructionCallback<EventBaseT>(evb));
fmPtrRef = make_unique<FiberManager>(std::move(loopController), opts);
fmPtrRef =
std::make_unique<FiberManager>(std::move(loopController), opts);
}
return *fmPtrRef;
......
......@@ -108,7 +108,7 @@ TEST(FutureSplitter, splitFutureMoveAssignable) {
TEST(FutureSplitter, splitFutureScope) {
Promise<int> p;
auto pSP = make_unique<FutureSplitter<int>>(p.getFuture());
auto pSP = std::make_unique<FutureSplitter<int>>(p.getFuture());
auto f1 = pSP->getFuture();
EXPECT_FALSE(f1.isReady());
pSP.reset();
......
......@@ -100,20 +100,22 @@ TEST(Pmap, Rvalues) {
// apply
{
auto mapResult
= seq(1)
| map([](int x) { return make_unique<int>(x); })
| map([](std::unique_ptr<int> x) { return make_unique<int>(*x * *x); })
| map([](std::unique_ptr<int> x) { return *x; })
| take(1000)
| sum;
= seq(1)
| map([](int x) { return std::make_unique<int>(x); })
| map([](std::unique_ptr<int> x) {
return std::make_unique<int>(*x * *x); })
| map([](std::unique_ptr<int> x) { return *x; })
| take(1000)
| sum;
auto pmapResult
= seq(1)
| pmap([](int x) { return make_unique<int>(x); })
| pmap([](std::unique_ptr<int> x) { return make_unique<int>(*x * *x); })
| pmap([](std::unique_ptr<int> x) { return *x; })
| take(1000)
| sum;
= seq(1)
| pmap([](int x) { return std::make_unique<int>(x); })
| pmap([](std::unique_ptr<int> x) {
return std::make_unique<int>(*x * *x); })
| pmap([](std::unique_ptr<int> x) { return *x; })
| take(1000)
| sum;
EXPECT_EQ(pmapResult, mapResult);
}
......@@ -121,18 +123,20 @@ TEST(Pmap, Rvalues) {
// foreach
{
auto mapResult
= seq(1, 1000)
| map([](int x) { return make_unique<int>(x); })
| map([](std::unique_ptr<int> x) { return make_unique<int>(*x * *x); })
| map([](std::unique_ptr<int> x) { return *x; })
| sum;
= seq(1, 1000)
| map([](int x) { return std::make_unique<int>(x); })
| map([](std::unique_ptr<int> x) {
return std::make_unique<int>(*x * *x); })
| map([](std::unique_ptr<int> x) { return *x; })
| sum;
auto pmapResult
= seq(1, 1000)
| pmap([](int x) { return make_unique<int>(x); })
| pmap([](std::unique_ptr<int> x) { return make_unique<int>(*x * *x); })
| pmap([](std::unique_ptr<int> x) { return *x; })
| sum;
= seq(1, 1000)
| pmap([](int x) { return std::make_unique<int>(x); })
| pmap([](std::unique_ptr<int> x) {
return std::make_unique<int>(*x * *x); })
| pmap([](std::unique_ptr<int> x) { return *x; })
| sum;
EXPECT_EQ(pmapResult, mapResult);
}
......
......@@ -194,7 +194,7 @@ class NoCompressionCodec final : public Codec {
};
std::unique_ptr<Codec> NoCompressionCodec::create(int level, CodecType type) {
return make_unique<NoCompressionCodec>(level, type);
return std::make_unique<NoCompressionCodec>(level, type);
}
NoCompressionCodec::NoCompressionCodec(int level, CodecType type)
......@@ -323,7 +323,7 @@ class LZ4Codec final : public Codec {
};
std::unique_ptr<Codec> LZ4Codec::create(int level, CodecType type) {
return make_unique<LZ4Codec>(level, type);
return std::make_unique<LZ4Codec>(level, type);
}
LZ4Codec::LZ4Codec(int level, CodecType type) : Codec(type) {
......@@ -468,7 +468,7 @@ class LZ4FrameCodec final : public Codec {
/* static */ std::unique_ptr<Codec> LZ4FrameCodec::create(
int level,
CodecType type) {
return make_unique<LZ4FrameCodec>(level, type);
return std::make_unique<LZ4FrameCodec>(level, type);
}
static constexpr uint32_t kLZ4FrameMagicLE = 0x184D2204;
......@@ -666,7 +666,7 @@ class SnappyCodec final : public Codec {
};
std::unique_ptr<Codec> SnappyCodec::create(int level, CodecType type) {
return make_unique<SnappyCodec>(level, type);
return std::make_unique<SnappyCodec>(level, type);
}
SnappyCodec::SnappyCodec(int level, CodecType type) : Codec(type) {
......@@ -820,7 +820,7 @@ bool ZlibCodec::canUncompress(const IOBuf* data, Optional<uint64_t>) const {
}
std::unique_ptr<Codec> ZlibCodec::create(int level, CodecType type) {
return make_unique<ZlibCodec>(level, type);
return std::make_unique<ZlibCodec>(level, type);
}
ZlibCodec::ZlibCodec(int level, CodecType type) : Codec(type) {
......@@ -1109,7 +1109,7 @@ bool LZMA2Codec::canUncompress(const IOBuf* data, Optional<uint64_t>) const {
}
std::unique_ptr<Codec> LZMA2Codec::create(int level, CodecType type) {
return make_unique<LZMA2Codec>(level, type);
return std::make_unique<LZMA2Codec>(level, type);
}
LZMA2Codec::LZMA2Codec(int level, CodecType type) : Codec(type) {
......@@ -1353,7 +1353,7 @@ bool ZSTDCodec::canUncompress(const IOBuf* data, Optional<uint64_t>) const {
}
std::unique_ptr<Codec> ZSTDCodec::create(int level, CodecType type) {
return make_unique<ZSTDCodec>(level, type);
return std::make_unique<ZSTDCodec>(level, type);
}
ZSTDCodec::ZSTDCodec(int level, CodecType type) : Codec(type) {
......@@ -1568,7 +1568,7 @@ class Bzip2Codec final : public Codec {
/* static */ std::unique_ptr<Codec> Bzip2Codec::create(
int level,
CodecType type) {
return make_unique<Bzip2Codec>(level, type);
return std::make_unique<Bzip2Codec>(level, type);
}
Bzip2Codec::Bzip2Codec(int level, CodecType type) : Codec(type) {
......@@ -1817,7 +1817,7 @@ void AutomaticCodec::addCodecIfSupported(CodecType type) {
/* static */ std::unique_ptr<Codec> AutomaticCodec::create(
std::vector<std::unique_ptr<Codec>> customCodecs) {
return make_unique<AutomaticCodec>(std::move(customCodecs));
return std::make_unique<AutomaticCodec>(std::move(customCodecs));
}
AutomaticCodec::AutomaticCodec(std::vector<std::unique_ptr<Codec>> customCodecs)
......
......@@ -439,7 +439,7 @@ class CursorBase {
size_t cloneAtMost(std::unique_ptr<folly::IOBuf>& buf, size_t len) {
if (!buf) {
buf = make_unique<folly::IOBuf>();
buf = std::make_unique<folly::IOBuf>();
}
return cloneAtMost(*buf, len);
}
......
......@@ -258,7 +258,7 @@ unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
}
unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
return make_unique<IOBuf>(CREATE, capacity);
return std::make_unique<IOBuf>(CREATE, capacity);
}
unique_ptr<IOBuf> IOBuf::createChain(
......@@ -309,9 +309,9 @@ unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint64_t capacity,
//
// Note that we always pass freeOnError as false to the constructor.
// If the constructor throws we'll handle it below. (We have to handle
// allocation failures from make_unique too.)
return make_unique<IOBuf>(TAKE_OWNERSHIP, buf, capacity, length,
freeFn, userData, false);
// allocation failures from std::make_unique too.)
return std::make_unique<IOBuf>(
TAKE_OWNERSHIP, buf, capacity, length, freeFn, userData, false);
} catch (...) {
takeOwnershipError(freeOnError, buf, freeFn, userData);
throw;
......@@ -332,7 +332,7 @@ IOBuf::IOBuf(WrapBufferOp op, ByteRange br)
}
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
return make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}
IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
......@@ -506,15 +506,15 @@ void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
}
unique_ptr<IOBuf> IOBuf::clone() const {
return make_unique<IOBuf>(cloneAsValue());
return std::make_unique<IOBuf>(cloneAsValue());
}
unique_ptr<IOBuf> IOBuf::cloneOne() const {
return make_unique<IOBuf>(cloneOneAsValue());
return std::make_unique<IOBuf>(cloneOneAsValue());
}
unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
return make_unique<IOBuf>(cloneCoalescedAsValue());
return std::make_unique<IOBuf>(cloneCoalescedAsValue());
}
IOBuf IOBuf::cloneAsValue() const {
......
......@@ -51,7 +51,7 @@ void EventBaseThread::start() {
if (th_) {
return;
}
th_ = make_unique<ScopedEventBaseThread>(ebm_);
th_ = std::make_unique<ScopedEventBaseThread>(ebm_);
}
void EventBaseThread::stop() {
......
......@@ -1189,7 +1189,7 @@ TEST(EventBaseTest, RunInEventBaseThreadAndWait) {
vector<unique_ptr<atomic<size_t>>> atoms(c);
for (size_t i = 0; i < c; ++i) {
auto& atom = atoms.at(i);
atom = make_unique<atomic<size_t>>(0);
atom = std::make_unique<atomic<size_t>>(0);
}
vector<thread> threads;
for (size_t i = 0; i < c; ++i) {
......
......@@ -488,7 +488,7 @@ namespace {
class CustomCodec : public Codec {
public:
static std::unique_ptr<Codec> create(std::string prefix, CodecType type) {
return make_unique<CustomCodec>(std::move(prefix), type);
return std::make_unique<CustomCodec>(std::move(prefix), type);
}
explicit CustomCodec(std::string prefix, CodecType type)
: Codec(CodecType::USER_DEFINED),
......
......@@ -482,18 +482,16 @@ void runMtProdConsDeterministic(long seed) {
// we use the Bench method, but perf results are meaningless under DSched
DSched sched(DSched::uniform(seed));
vector<unique_ptr<WriteMethodCaller<MPMCQueue<int, DeterministicAtomic,
Dynamic>>>> callers;
callers.emplace_back(make_unique<BlockingWriteCaller<MPMCQueue<int,
DeterministicAtomic, Dynamic>>>());
callers.emplace_back(make_unique<WriteIfNotFullCaller<MPMCQueue<int,
DeterministicAtomic, Dynamic>>>());
callers.emplace_back(make_unique<WriteCaller<MPMCQueue<int,
DeterministicAtomic, Dynamic>>>());
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
DeterministicAtomic, Dynamic>>>(milliseconds(1)));
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
DeterministicAtomic, Dynamic>>>(seconds(2)));
using QueueType = MPMCQueue<int, DeterministicAtomic, Dynamic>;
vector<unique_ptr<WriteMethodCaller<QueueType>>> callers;
callers.emplace_back(std::make_unique<BlockingWriteCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteIfNotFullCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteCaller<QueueType>>());
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(milliseconds(1)));
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(seconds(2)));
size_t cap;
for (const auto& caller : callers) {
......@@ -562,18 +560,16 @@ void runMtProdConsDeterministicDynamic(
// we use the Bench method, but perf results are meaningless under DSched
DSched sched(DSched::uniform(seed));
vector<unique_ptr<WriteMethodCaller<MPMCQueue<int, DeterministicAtomic,
true>>>> callers;
callers.emplace_back(make_unique<BlockingWriteCaller<MPMCQueue<int,
DeterministicAtomic, true>>>());
callers.emplace_back(make_unique<WriteIfNotFullCaller<MPMCQueue<int,
DeterministicAtomic, true>>>());
callers.emplace_back(make_unique<WriteCaller<MPMCQueue<int,
DeterministicAtomic, true>>>());
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
DeterministicAtomic, true>>>(milliseconds(1)));
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
DeterministicAtomic, true>>>(seconds(2)));
using QueueType = MPMCQueue<int, DeterministicAtomic, true>;
vector<unique_ptr<WriteMethodCaller<QueueType>>> callers;
callers.emplace_back(std::make_unique<BlockingWriteCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteIfNotFullCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteCaller<QueueType>>());
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(milliseconds(1)));
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(seconds(2)));
for (const auto& caller : callers) {
LOG(INFO) <<
......@@ -628,39 +624,29 @@ TEST(MPMCQueue, mt_prod_cons_deterministic_dynamic_with_arguments) {
template <bool Dynamic = false>
void runMtProdCons() {
using QueueType = MPMCQueue<int, std::atomic, Dynamic>;
int n = 100000;
setFromEnv(n, "NUM_OPS");
vector<unique_ptr<WriteMethodCaller<MPMCQueue<int, std::atomic, Dynamic>>>>
vector<unique_ptr<WriteMethodCaller<QueueType>>>
callers;
callers.emplace_back(make_unique<BlockingWriteCaller<MPMCQueue<int,
std::atomic, Dynamic>>>());
callers.emplace_back(make_unique<WriteIfNotFullCaller<MPMCQueue<int,
std::atomic, Dynamic>>>());
callers.emplace_back(make_unique<WriteCaller<MPMCQueue<int, std::atomic,
Dynamic>>>());
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
std::atomic, Dynamic>>>(milliseconds(1)));
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
std::atomic, Dynamic>>>(seconds(2)));
callers.emplace_back(std::make_unique<BlockingWriteCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteIfNotFullCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteCaller<QueueType>>());
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(milliseconds(1)));
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(seconds(2)));
for (const auto& caller : callers) {
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10)),
1, 1, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10)),
10, 1, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10)),
1, 10, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10)),
10, 10, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10000)),
1, 1, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10000)),
10, 1, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10000)),
1, 10, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(10000)),
10, 10, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, std::atomic, Dynamic>(100000)),
32, 100, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 1, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 10, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 1, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 10, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 1, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 10, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 1, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 10, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(100000)), 32, 100, n, *caller);
}
}
......@@ -674,38 +660,27 @@ TEST(MPMCQueue, mt_prod_cons_dynamic) {
template <bool Dynamic = false>
void runMtProdConsEmulatedFutex() {
using QueueType = MPMCQueue<int, EmulatedFutexAtomic, Dynamic>;
int n = 100000;
vector<unique_ptr<WriteMethodCaller<MPMCQueue<int, EmulatedFutexAtomic,
Dynamic>>>> callers;
callers.emplace_back(make_unique<BlockingWriteCaller<MPMCQueue<int,
EmulatedFutexAtomic, Dynamic>>>());
callers.emplace_back(make_unique<WriteIfNotFullCaller<MPMCQueue<int,
EmulatedFutexAtomic, Dynamic>>>());
callers.emplace_back(make_unique<WriteCaller<MPMCQueue<int,
EmulatedFutexAtomic, Dynamic>>>());
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
EmulatedFutexAtomic, Dynamic>>>(milliseconds(1)));
callers.emplace_back(make_unique<TryWriteUntilCaller<MPMCQueue<int,
EmulatedFutexAtomic, Dynamic>>>(seconds(2)));
vector<unique_ptr<WriteMethodCaller<QueueType>>> callers;
callers.emplace_back(std::make_unique<BlockingWriteCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteIfNotFullCaller<QueueType>>());
callers.emplace_back(std::make_unique<WriteCaller<QueueType>>());
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(milliseconds(1)));
callers.emplace_back(
std::make_unique<TryWriteUntilCaller<QueueType>>(seconds(2)));
for (const auto& caller : callers) {
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10)), 1, 1, n, *caller);
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10)), 10, 1, n, *caller);
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10)), 1, 10, n, *caller);
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10)), 10, 10, n, *caller);
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10000)), 1, 1, n, *caller);
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10000)), 10, 1, n, *caller);
LOG(INFO) << PC_BENCH(
(MPMCQueue<int, EmulatedFutexAtomic, Dynamic>(10000)), 1, 10, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, EmulatedFutexAtomic, Dynamic>
(10000)), 10, 10, n, *caller);
LOG(INFO) << PC_BENCH((MPMCQueue<int, EmulatedFutexAtomic, Dynamic>
(100000)), 32, 100, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 1, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 10, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 1, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10)), 10, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 1, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 10, 1, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 1, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(10000)), 10, 10, n, *caller);
LOG(INFO) << PC_BENCH((QueueType(100000)), 32, 100, n, *caller);
}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment