Commit 56aa5c9a authored by Yicheng Wang's avatar Yicheng Wang Committed by Facebook GitHub Bot

Remove setMaxReadAtOnce() API in MeteredExecutor

Summary:
There is no current use case for this API anywhere in the codebase,
and it is causing some confusion about how this class is meant to be used,
so we are removing it for now. It is easy to add back if we need it
in the future.

Reviewed By: yfeldblum

Differential Revision: D33112001

fbshipit-source-id: c711f5ab0ad2324e478f6eb18155d367d2cce1f2
parent b3bb10b5
...@@ -34,10 +34,6 @@ MeteredExecutor::MeteredExecutor( ...@@ -34,10 +34,6 @@ MeteredExecutor::MeteredExecutor(
ownedExecutor_ = std::move(executor); ownedExecutor_ = std::move(executor);
} }
// Forwards the limit to the underlying queue's setMaxReadAtOnce.
// Presumably this caps how many tasks the queue hands out per drain
// pass — TODO confirm against the queue type's documentation; the
// queue's declared type is not visible in this hunk.
// NOTE(review): this definition is on the removed side of the diff —
// the commit deletes it because no in-tree callers remain.
void MeteredExecutor::setMaxReadAtOnce(uint32_t maxAtOnce) {
queue_.setMaxReadAtOnce(maxAtOnce);
}
std::unique_ptr<QueueObserver> MeteredExecutor::setupQueueObserver() { std::unique_ptr<QueueObserver> MeteredExecutor::setupQueueObserver() {
if (options_.enableQueueObserver) { if (options_.enableQueueObserver) {
std::string name = "unk"; std::string name = "unk";
...@@ -98,16 +94,9 @@ void MeteredExecutor::Consumer::operator()( ...@@ -98,16 +94,9 @@ void MeteredExecutor::Consumer::operator()(
if (self_.queueObserver_) { if (self_.queueObserver_) {
self_.queueObserver_->onDequeued(task.getQueueObserverPayload()); self_.queueObserver_->onDequeued(task.getQueueObserverPayload());
} }
if (!first_) { DCHECK(!first_);
first_ = std::make_optional<Task>(std::move(task)); first_ = std::make_optional<Task>(std::move(task));
firstRctx_ = std::move(rctx); firstRctx_ = std::move(rctx);
} else {
self_.kaInner_->add(
[task = std::move(task), rctx = std::move(rctx)]() mutable {
RequestContextScopeGuard guard(std::move(rctx));
task.run();
});
}
} }
} // namespace folly } // namespace folly
...@@ -59,8 +59,6 @@ class MeteredExecutor : public DefaultKeepAliveExecutor { ...@@ -59,8 +59,6 @@ class MeteredExecutor : public DefaultKeepAliveExecutor {
explicit MeteredExecutor(KeepAlive keepAlive, Options options = Options()); explicit MeteredExecutor(KeepAlive keepAlive, Options options = Options());
~MeteredExecutor() override; ~MeteredExecutor() override;
void setMaxReadAtOnce(uint32_t maxAtOnce);
void add(Func func) override; void add(Func func) override;
size_t pendingTasks() const { return queue_.size(); } size_t pendingTasks() const { return queue_.size(); }
......
...@@ -28,14 +28,12 @@ class MeteredExecutorTest : public testing::Test { ...@@ -28,14 +28,12 @@ class MeteredExecutorTest : public testing::Test {
protected: protected:
void createAdapter( void createAdapter(
int numLevels, int numLevels,
int maxReadAtOnce = 1,
std::unique_ptr<Executor> exc = std::unique_ptr<Executor> exc =
std::make_unique<CPUThreadPoolExecutor>(1)) { std::make_unique<CPUThreadPoolExecutor>(1)) {
executors_.resize(numLevels + 1); executors_.resize(numLevels + 1);
executors_[0] = exc.get(); executors_[0] = exc.get();
for (int i = 0; i < numLevels; i++) { for (int i = 0; i < numLevels; i++) {
auto mlsa = std::make_unique<MeteredExecutor>(std::move(exc)); auto mlsa = std::make_unique<MeteredExecutor>(std::move(exc));
mlsa->setMaxReadAtOnce(maxReadAtOnce);
exc = std::move(mlsa); exc = std::move(mlsa);
executors_[i + 1] = exc.get(); executors_[i + 1] = exc.get();
} }
...@@ -168,10 +166,8 @@ TEST_P(MeteredExecutorTestP, TwoLevelsWithKeepAlives) { ...@@ -168,10 +166,8 @@ TEST_P(MeteredExecutorTestP, TwoLevelsWithKeepAlives) {
auto hipri_exec = std::make_unique<CPUThreadPoolExecutor>(1); auto hipri_exec = std::make_unique<CPUThreadPoolExecutor>(1);
auto hipri_ka = getKeepAliveToken(hipri_exec.get()); auto hipri_ka = getKeepAliveToken(hipri_exec.get());
auto mipri_exec = std::make_unique<MeteredExecutor>(hipri_ka); auto mipri_exec = std::make_unique<MeteredExecutor>(hipri_ka);
mipri_exec->setMaxReadAtOnce(maxReadAtOnce);
auto mipri_ka = getKeepAliveToken(mipri_exec.get()); auto mipri_ka = getKeepAliveToken(mipri_exec.get());
auto lopri_exec = std::make_unique<MeteredExecutor>(mipri_ka); auto lopri_exec = std::make_unique<MeteredExecutor>(mipri_ka);
lopri_exec->setMaxReadAtOnce(maxReadAtOnce);
executors_ = {hipri_exec.get(), mipri_exec.get(), lopri_exec.get()}; executors_ = {hipri_exec.get(), mipri_exec.get(), lopri_exec.get()};
int32_t v = 0; int32_t v = 0;
...@@ -201,7 +197,7 @@ TEST_P(MeteredExecutorTestP, TwoLevelsWithKeepAlives) { ...@@ -201,7 +197,7 @@ TEST_P(MeteredExecutorTestP, TwoLevelsWithKeepAlives) {
} }
TEST_P(MeteredExecutorTestP, RequestContext) { TEST_P(MeteredExecutorTestP, RequestContext) {
createAdapter(3, maxReadAtOnce); createAdapter(3);
folly::Baton baton; folly::Baton baton;
add([&] { baton.wait(); }); add([&] { baton.wait(); });
...@@ -256,7 +252,7 @@ TEST_F(MeteredExecutorTest, ResetJoins) { ...@@ -256,7 +252,7 @@ TEST_F(MeteredExecutorTest, ResetJoins) {
TEST_F(MeteredExecutorTest, ConcurrentShutdown) { TEST_F(MeteredExecutorTest, ConcurrentShutdown) {
// ensure no data races on shutdown when executor has 2 threads // ensure no data races on shutdown when executor has 2 threads
createAdapter(2, 1, std::make_unique<CPUThreadPoolExecutor>(2)); createAdapter(2, std::make_unique<CPUThreadPoolExecutor>(2));
} }
TEST_F(MeteredExecutorTest, CostOfMeteredExecutors) { TEST_F(MeteredExecutorTest, CostOfMeteredExecutors) {
...@@ -287,7 +283,7 @@ TEST_F(MeteredExecutorTest, CostOfMeteredExecutors) { ...@@ -287,7 +283,7 @@ TEST_F(MeteredExecutorTest, CostOfMeteredExecutors) {
auto drive = [exc = exc.get()] { exc->drive(); }; auto drive = [exc = exc.get()] { exc->drive(); };
auto getCount = [exc = exc.get()] { return std::exchange(exc->count, 0); }; auto getCount = [exc = exc.get()] { return std::exchange(exc->count, 0); };
auto driveOnAdd = [exc = exc.get()] { exc->driveWhenAdded = true; }; auto driveOnAdd = [exc = exc.get()] { exc->driveWhenAdded = true; };
createAdapter(3, 1, std::move(exc)); createAdapter(3, std::move(exc));
// When queues are empty, we will schedule as many tasks on the main // When queues are empty, we will schedule as many tasks on the main
// executor as there are executors in the chain. // executor as there are executors in the chain.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment