Commit cd3552c1 authored by Sam Akhavan, committed by Facebook GitHub Bot

Back out "Fix a shutdown race in CPUThreadPoolExecutor::add" (#1561)

Summary:
Pull Request resolved: https://github.com/facebook/folly/pull/1561

As title.

Reviewed By: jdq

Differential Revision: D27736665

fbshipit-source-id: 2f09ea4dad25389a46faccca4a5803ea9e077e0d
parent d31902b9
@@ -14,10 +14,8 @@
  * limitations under the License.
  */
-#include <folly/Executor.h>
 #include <folly/executors/CPUThreadPoolExecutor.h>
-#include <atomic>
 #include <folly/Memory.h>
 #include <folly/concurrency/QueueObserver.h>
 #include <folly/executors/task_queue/PriorityLifoSemMPMCQueue.h>
@@ -52,7 +50,7 @@ CPUThreadPoolExecutor::CPUThreadPoolExecutor(
     std::shared_ptr<ThreadFactory> threadFactory)
     : ThreadPoolExecutor(
           numThreads,
-          FLAGS_dynamic_cputhreadpoolexecutor ? 1 : numThreads,
+          FLAGS_dynamic_cputhreadpoolexecutor ? 0 : numThreads,
           std::move(threadFactory)),
       taskQueue_(taskQueue.release()) {
   setNumThreads(numThreads);
@@ -74,7 +72,7 @@ CPUThreadPoolExecutor::CPUThreadPoolExecutor(
     size_t numThreads, std::shared_ptr<ThreadFactory> threadFactory)
     : ThreadPoolExecutor(
           numThreads,
-          FLAGS_dynamic_cputhreadpoolexecutor ? 1 : numThreads,
+          FLAGS_dynamic_cputhreadpoolexecutor ? 0 : numThreads,
           std::move(threadFactory)),
       taskQueue_(std::allocate_shared<default_queue>(default_queue_alloc{})) {
   setNumThreads(numThreads);
@@ -85,9 +83,7 @@ CPUThreadPoolExecutor::CPUThreadPoolExecutor(
     std::pair<size_t, size_t> numThreads,
     std::shared_ptr<ThreadFactory> threadFactory)
     : ThreadPoolExecutor(
-          std::max(numThreads.first, size_t{1}),
-          numThreads.second,
-          std::move(threadFactory)),
+          numThreads.first, numThreads.second, std::move(threadFactory)),
       taskQueue_(std::allocate_shared<default_queue>(default_queue_alloc{})) {
   setNumThreads(numThreads.first);
   registerThreadPoolExecutor(this);
@@ -163,7 +159,15 @@ void CPUThreadPoolExecutor::add(Func func) {
 void CPUThreadPoolExecutor::add(
     Func func, std::chrono::milliseconds expiration, Func expireCallback) {
-  addImpl<false>(std::move(func), 0, expiration, std::move(expireCallback));
+  CPUTask task{std::move(func), expiration, std::move(expireCallback), 0};
+  if (auto queueObserver = getQueueObserver(0)) {
+    task.queueObserverPayload() =
+        queueObserver->onEnqueued(task.context_.get());
+  }
+  auto result = taskQueue_->add(std::move(task));
+  if (!result.reusedThread) {
+    ensureActiveThreads();
+  }
 }
 
 void CPUThreadPoolExecutor::addWithPriority(Func func, int8_t priority) {
@@ -175,43 +179,15 @@ void CPUThreadPoolExecutor::add(
     int8_t priority,
     std::chrono::milliseconds expiration,
     Func expireCallback) {
-  addImpl<true>(
-      std::move(func), priority, expiration, std::move(expireCallback));
-}
-
-template <bool withPriority>
-void CPUThreadPoolExecutor::addImpl(
-    Func func,
-    int8_t priority,
-    std::chrono::milliseconds expiration,
-    Func expireCallback) {
-  if (withPriority) {
-    CHECK(getNumPriorities() > 0);
-  }
+  CHECK(getNumPriorities() > 0);
   CPUTask task(
       std::move(func), expiration, std::move(expireCallback), priority);
   if (auto queueObserver = getQueueObserver(priority)) {
     task.queueObserverPayload() =
         queueObserver->onEnqueued(task.context_.get());
   }
-
-  // It's not safe to expect that the executor is alive after a task is added to
-  // the queue (this task could be holding the last KeepAlive and when finished
-  // - it may unblock the executor shutdown).
-  // If we need executor to be alive after adding into the queue, we have to
-  // acquire a KeepAlive.
-  bool mayNeedToAddThreads = minThreads_.load(std::memory_order_relaxed) == 0 ||
-      activeThreads_.load(std::memory_order_relaxed) <
-          maxThreads_.load(std::memory_order_relaxed);
-  folly::Executor::KeepAlive<> ka = mayNeedToAddThreads
-      ? getKeepAliveToken(this)
-      : folly::Executor::KeepAlive<>{};
-  auto result = withPriority
-      ? taskQueue_->addWithPriority(std::move(task), priority)
-      : taskQueue_->add(std::move(task));
-  if (mayNeedToAddThreads && !result.reusedThread) {
+  auto result = taskQueue_->addWithPriority(std::move(task), priority);
+  if (!result.reusedThread) {
     ensureActiveThreads();
   }
 }
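Note, for context only: the comment removed in the hunk above explains why the reverted fix took a KeepAlive before enqueueing (the enqueued task may hold the last external reference, so the executor could otherwise begin shutting down while add() is still running). The following is a minimal caller-side sketch of folly's public KeepAlive API, not part of this commit; the pool size and the empty task are illustrative assumptions.

#include <folly/Executor.h>
#include <folly/executors/CPUThreadPoolExecutor.h>

int main() {
  folly::CPUThreadPoolExecutor pool(2); // illustrative size

  {
    // Holding a KeepAlive guarantees the executor stays alive for this scope,
    // even if work submitted through it releases other references while running.
    folly::Executor::KeepAlive<> ka = folly::getKeepAliveToken(&pool);
    ka->add([] { /* illustrative no-op task */ });
  } // KeepAlive released here; join() below waits for outstanding keep-alives.

  pool.join();
  return 0;
}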
@@ -162,13 +162,6 @@ class CPUThreadPoolExecutor : public ThreadPoolExecutor {
   bool tryDecrToStop();
   bool taskShouldStop(folly::Optional<CPUTask>&);
 
-  template <bool withPriority>
-  void addImpl(
-      Func func,
-      int8_t priority,
-      std::chrono::milliseconds expiration,
-      Func expireCallback);
-
   std::unique_ptr<folly::QueueObserverFactory> createQueueObserverFactory();
   QueueObserver* FOLLY_NULLABLE getQueueObserver(int8_t pri);
@@ -92,38 +92,28 @@ TEST(CleanupTest, EnsureCleanupAfterTaskBasic) {
 }
 
 TEST(CleanupTest, Errors) {
-  auto runTest = [](auto releaseCleanedFunc) {
-    auto cleaned = std::make_unique<Cleaned>();
+  auto cleaned = std::make_unique<Cleaned>();
 
-    cleaned->addCleanup(folly::makeSemiFuture().deferValue(
-        [](folly::Unit) { EXPECT_TRUE(false); }));
+  cleaned->addCleanup(folly::makeSemiFuture().deferValue(
+      [](folly::Unit) { EXPECT_TRUE(false); }));
 
-    cleaned->addCleanup(folly::makeSemiFuture<folly::Unit>(
-        std::runtime_error("failed cleanup")));
+  cleaned->addCleanup(
+      folly::makeSemiFuture<folly::Unit>(std::runtime_error("failed cleanup")));
 
-    releaseCleanedFunc(cleaned);
-  };
+  folly::ManualExecutor exec;
 
   EXPECT_EXIT(
-      runTest([](auto& cleaned) {
-        folly::ManualExecutor exec;
-        cleaned->cleanup()
-            .within(1s)
-            .via(folly::getKeepAliveToken(exec))
-            .getVia(&exec);
-      }),
+      cleaned->cleanup()
+          .within(1s)
+          .via(folly::getKeepAliveToken(exec))
+          .getVia(&exec),
       testing::KilledBySignal(SIGABRT),
       ".*noexcept.*");
 
   EXPECT_EXIT(
-      runTest([](auto& cleaned) { cleaned.reset(); }),
-      testing::KilledBySignal(SIGABRT),
-      ".*destructed.*");
+      cleaned.reset(), testing::KilledBySignal(SIGABRT), ".*destructed.*");
 
-  runTest([](auto& cleaned) {
-    // must leak the Cleaned as its destructor will abort.
-    (void)cleaned.release();
-  });
+  // must leak the Cleaned as its destructor will abort.
+  (void)cleaned.release();
 }
 
 TEST(CleanupTest, Invariants) {