Commit 323f42e1 authored by Eric Niebler, committed by Facebook Github Bot

bring bit-rotted pushmi examples back from the dead

Summary: Several of the pushmi examples needed a working thread pool. Use the new one in pushmi/examples/pool.h, and bring the examples up-to-date with recent pushmi changes.

Reviewed By: yfeldblum

Differential Revision: D14563479

fbshipit-source-id: 70def2adc90fdf3eb5eef929245f39b294209cbe
parent 594ff7a5
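
The updated examples all follow the same pattern: build a thread pool from pushmi/examples/pool.h, grab its executor, and turn the executor into a sender with schedule(). A minimal sketch of that pattern, assembled from the hunks below (not code from this commit; the o/submit.h include path is an assumption, the other names appear in the hunks):

#include <algorithm>
#include <iostream>
#include <thread>

#include <folly/experimental/pushmi/examples/pool.h>
#include <folly/experimental/pushmi/o/submit.h> // assumed include path
#include <folly/experimental/pushmi/o/transform.h>

using namespace folly::pushmi::aliases;

int main() {
  // a pool sized to the hardware, as the updated examples do
  mi::pool cpuPool{std::max(1u, std::thread::hardware_concurrency())};
  auto cpu = cpuPool.executor();

  // schedule() turns the executor into a single sender that can be piped
  cpu.schedule() | op::transform([](auto) { return 42; }) |
      op::submit([](int v) { std::cout << v << std::endl; });

  cpuPool.stop();
  cpuPool.wait();
}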
@@ -37,15 +37,21 @@ PUSHMI_INLINE_VAR constexpr struct bulk_fn {
Target&& driver,
IF&& initFunc,
RS&& selector) const {
return [func, sb, se, driver, initFunc, selector](auto in) {
return [func, sb, se, driver, initFunc, selector](auto in) mutable {
return make_single_sender(
[in, func, sb, se, driver, initFunc, selector](auto out) mutable {
using Out = decltype(out);
struct data : Out {
data(Out out) : Out(std::move(out)) {}
bool empty = true;
};
submit(
in,
make_receiver(
std::move(out),
data{std::move(out)},
[func, sb, se, driver, initFunc, selector](
auto& out_, auto input) {
auto& out_, auto input) mutable noexcept {
out_.empty = false;
driver(
initFunc,
selector,
@@ -53,8 +59,12 @@ PUSHMI_INLINE_VAR constexpr struct bulk_fn {
func,
sb,
se,
std::move(out_));
}));
std::move(static_cast<Out&>(out_)));
},
// forward to output
[](auto o, auto e) noexcept {set_error(o, e);},
// only pass done through when empty
[](auto o){ if (o.empty) { set_done(o); }}));
});
};
}
......
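
The new data wrapper and done handler implement the "only pass done through when empty" rule above: the downstream receiver is extended with an empty flag that is cleared on the first value, so a bare done is only forwarded when the source never produced anything. A standalone sketch of that wrapper pattern (plain C++, not pushmi; the receiver type is illustrative):

#include <iostream>
#include <utility>

// illustrative downstream receiver
struct cout_receiver {
  void value(int v) { std::cout << "value: " << v << '\n'; }
  void done() { std::cout << "done without any value\n"; }
};

// the wrapper used above: inherit from the receiver and add an empty flag
template <class Out>
struct data : Out {
  explicit data(Out out) : Out(std::move(out)) {}
  bool empty = true;
};

int main() {
  data<cout_receiver> out{cout_receiver{}};

  // on each incoming value: clear the flag and deliver the value
  out.empty = false;
  static_cast<cout_receiver&>(out).value(42);

  // on upstream completion: only forward done when no value was seen
  if (out.empty) {
    static_cast<cout_receiver&>(out).done();
  }
}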
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <iostream>
@@ -41,32 +42,32 @@ void lisp(CPUExecutor cpu, IOExecutor io) {
// f on cpu - g on cpu (implicit: a single task on the cpu executor runs all
// the functions)
op::submit([](g_t) {})(op::transform([](f_t ft) { return g(ft); })(
op::transform([](auto) { return f(); })(cpu)));
op::transform([](auto) { return f(); })(cpu.schedule())));
// f on cpu - g on cpu (explicit: the first cpu task runs f and a second cpu
// task runs g)
op::submit([](g_t) {})(op::transform([](f_t ft) { return g(ft); })(
op::via(mi::strands(cpu))(op::transform([](auto) { return f(); })(cpu))));
op::via(mi::strands(cpu))(op::transform([](auto) { return f(); })(cpu.schedule()))));
// f on io - g on cpu
op::submit([](g_t) {})(op::transform([](f_t ft) { return g(ft); })(
op::via(mi::strands(cpu))(op::transform([](auto) { return f(); })(io))));
op::via(mi::strands(cpu))(op::transform([](auto) { return f(); })(io.schedule()))));
}
template <class CPUExecutor, class IOExecutor>
void sugar(CPUExecutor cpu, IOExecutor io) {
// f on cpu - g on cpu (implicit: a single task on the cpu executor runs all
// the functions)
cpu | op::transform([](auto) { return f(); }) |
cpu.schedule() | op::transform([](auto) { return f(); }) |
op::transform([](f_t ft) { return g(ft); }) | op::submit([](g_t) {});
// f on cpu - g on cpu (explicit: the first cpu task runs f and a second cpu
// task runs g)
cpu | op::transform([](auto) { return f(); }) | op::via(mi::strands(cpu)) |
cpu.schedule() | op::transform([](auto) { return f(); }) | op::via(mi::strands(cpu)) |
op::transform([](f_t ft) { return g(ft); }) | op::submit([](g_t) {});
// f on io - g on cpu
io | op::transform([](auto) { return f(); }) | op::via(mi::strands(cpu)) |
io.schedule() | op::transform([](auto) { return f(); }) | op::via(mi::strands(cpu)) |
op::transform([](f_t ft) { return g(ft); }) | op::submit([](g_t) {});
}
@@ -75,7 +76,7 @@ void pipe(CPUExecutor cpu, IOExecutor io) {
// f on cpu - g on cpu (implicit: a single task on the cpu executor runs all
// the functions)
mi::pipe(
cpu,
cpu.schedule(),
op::transform([](auto) { return f(); }),
op::transform([](f_t ft) { return g(ft); }),
op::submit([](g_t) {}));
@@ -83,7 +84,7 @@ void pipe(CPUExecutor cpu, IOExecutor io) {
// f on cpu - g on cpu (explicit: the first cpu task runs f and a second cpu
// task runs g)
mi::pipe(
cpu,
cpu.schedule(),
op::transform([](auto) { return f(); }),
op::via(mi::strands(cpu)),
op::transform([](f_t ft) { return g(ft); }),
@@ -91,7 +92,7 @@ void pipe(CPUExecutor cpu, IOExecutor io) {
// f on io - g on cpu
mi::pipe(
io,
io.schedule(),
op::transform([](auto) { return f(); }),
op::via(mi::strands(cpu)),
op::transform([](f_t ft) { return g(ft); }),
......
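
The three spellings above (nested calls, operator| sugar, and mi::pipe) now all start from cpu.schedule() or io.schedule(), since an executor only becomes a sender once schedule() is called. A sketch of a driver for them, assuming two pools as in the other examples in this commit (the example's own main() is not shown in these hunks):

#include <algorithm>
#include <thread>

#include <folly/experimental/pushmi/examples/pool.h>

int main() {
  mi::pool cpuPool{std::max(1u, std::thread::hardware_concurrency())};
  mi::pool ioPool{std::max(1u, std::thread::hardware_concurrency())};

  // exercise the three spellings with a cpu and an io executor
  // (qualified with :: so ADL does not pick up mi::pipe by accident)
  ::lisp(cpuPool.executor(), ioPool.executor());
  ::sugar(cpuPool.executor(), ioPool.executor());
  ::pipe(cpuPool.executor(), ioPool.executor());

  ioPool.wait();
  cpuPool.wait();
}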
@@ -13,26 +13,23 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <iostream>
#include <vector>
#include <folly/experimental/pushmi/examples/pool.h>
#include <folly/experimental/pushmi/strand.h>
#include <folly/experimental/pushmi/o/request_via.h>
#include <folly/experimental/pushmi/o/tap.h>
#include <folly/experimental/pushmi/o/transform.h>
#include <folly/experimental/pushmi/examples/pool.h>
using namespace folly::pushmi::aliases;
template <class Io>
auto io_operation(Io io) {
return io | op::transform([](auto) { return 42; }) |
op::tap([](int v) { printf("io pool producing, %d\n", v); }) |
return io.schedule() | op::transform([](auto) { return 42; }) |
op::tap([](int v) { std::printf("io pool producing, %d\n", v); }) |
op::request_via();
}
@@ -44,15 +41,18 @@ int main() {
auto cpu = cpuPool.executor();
io_operation(io).via(mi::strands(cpu)) |
op::tap([](int v) { printf("cpu pool processing, %d\n", v); }) |
op::tap([](int v) { std::printf("cpu pool processing, %d\n", v); }) |
op::submit();
// when the caller is not going to process the result (only side-effect
// matters) or the caller is just going to push the result into a queue.
// matters) or the caller is just going to push the result into a queue,
// provide a way to skip the transition to a different executor and make it
// stand out so that it has to be justified in code reviews.
mi::via_cast<mi::is_sender<>>(io_operation(io)) | op::submit();
io = mi::pool_executor{};
cpu = mi::pool_executor{};
ioPool.wait();
cpuPool.wait();
......
@@ -32,7 +32,7 @@ auto naive_executor_bulk_target(Executor e, Allocator a = Allocator{}) {
auto&& func,
auto sb,
auto se,
auto out) {
auto out) mutable {
using RS = decltype(selector);
using F = std::conditional_t<
std::is_lvalue_reference<decltype(func)>::value,
@@ -42,63 +42,70 @@ auto naive_executor_bulk_target(Executor e, Allocator a = Allocator{}) {
try {
typename std::allocator_traits<Allocator>::template rebind_alloc<char>
allocState(a);
auto shared_state = std::allocate_shared<std::tuple<
std::exception_ptr, // first exception
Out, // destination
RS, // selector
F, // func
std::atomic<decltype(init(input))>, // accumulation
std::atomic<std::size_t>, // pending
std::atomic<std::size_t> // exception count (protects assignment to
// first exception)
>>(
using Acc = decltype(init(input));
struct shared_state_type {
std::exception_ptr first_exception_{};
Out destination_;
RS selector_;
F func_;
std::atomic<Acc> accumulation_;
std::atomic<std::size_t> pending_{1};
std::atomic<std::size_t> exception_count_{0}; // protects assignment to
// first exception
shared_state_type(Out&& destination, RS&& selector, F&& func, Acc acc)
: destination_((Out&&) destination)
, selector_((RS&&) selector)
, func_((F&&) func)
, accumulation_(acc)
{}
};
auto shared_state = std::allocate_shared<shared_state_type>(
allocState,
std::exception_ptr{},
std::move(out),
std::move(selector),
(decltype(func)&&)func,
init(std::move(input)),
1,
0);
e | op::submit([e, sb, se, shared_state](auto) {
init(std::move(input)));
e.schedule() | op::submit([e, sb, se, shared_state](auto) mutable {
auto stepDone = [](auto shared_state) {
// pending
if (--std::get<5>(*shared_state) == 0) {
if (--shared_state->pending_ == 0) {
// first exception
if (std::get<0>(*shared_state)) {
if (shared_state->first_exception_) {
mi::set_error(
std::get<1>(*shared_state), std::get<0>(*shared_state));
shared_state->destination_, shared_state->first_exception_);
return;
}
try {
// selector(accumulation)
auto result = std::get<2>(*shared_state)(
std::move(std::get<4>(*shared_state).load()));
mi::set_value(std::get<1>(*shared_state), std::move(result));
auto result = shared_state->selector_(
std::move(shared_state->accumulation_.load()));
mi::set_value(shared_state->destination_, std::move(result));
mi::set_done(shared_state->destination_);
} catch (...) {
mi::set_error(
std::get<1>(*shared_state), std::current_exception());
shared_state->destination_, std::current_exception());
}
}
};
for (decltype(sb) idx{sb}; idx != se;
++idx, ++std::get<5>(*shared_state)) {
e | op::submit([shared_state, idx, stepDone](auto ex) {
for (decltype(sb) idx{sb}; idx != se; ++idx) {
++shared_state->pending_;
e.schedule() | op::submit([shared_state, idx, stepDone](auto) {
try {
// this indicates to me that bulk is not the right abstraction
auto old = std::get<4>(*shared_state).load();
auto step = old;
auto old = shared_state->accumulation_.load();
Acc step;
do {
step = old;
// func(accumulation, idx)
std::get<3> (*shared_state)(step, idx);
} while (!std::get<4>(*shared_state)
shared_state->func_(step, idx);
} while (!shared_state->accumulation_
.compare_exchange_strong(old, step));
} catch (...) {
// exception count
if (std::get<6>(*shared_state)++ == 0) {
if (shared_state->exception_count_++ == 0) {
// store first exception
std::get<0>(*shared_state) = std::current_exception();
shared_state->first_exception_ = std::current_exception();
} // else eat the exception
}
stepDone(shared_state);
@@ -107,7 +114,7 @@ auto naive_executor_bulk_target(Executor e, Allocator a = Allocator{}) {
stepDone(shared_state);
});
} catch (...) {
e |
e.schedule() |
op::submit([out = std::move(out), ep = std::current_exception()](
auto) mutable { mi::set_error(out, ep); });
}
......
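
Each per-index task above folds its contribution into the shared accumulation with a compare-exchange retry loop: recompute the step from a fresh snapshot, then publish it only if no other task got there first. A standalone sketch of the same retry pattern with a plain std::atomic (no pushmi):

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> accumulation{0};
  auto func = [](int& acc, int idx) { acc += idx; }; // func(accumulation, idx)

  for (int idx = 0; idx < 8; ++idx) {
    int old = accumulation.load();
    int step;
    do {
      step = old;      // start from the latest published value
      func(step, idx); // apply this index's contribution
      // publish; on failure, old is refreshed and the step is recomputed
    } while (!accumulation.compare_exchange_strong(old, step));
  }

  std::printf("sum: %d\n", accumulation.load()); // prints 28
}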
@@ -37,6 +37,7 @@ auto inline_bulk_target() {
}
auto result = selector(std::move(acc));
mi::set_value(out, std::move(result));
mi::set_done(out);
} catch (...) {
mi::set_error(out, std::current_exception());
}
......
@@ -78,6 +78,7 @@ void inline_driver(
}
auto result = selector(std::move(acc));
mi::set_value(out, std::move(result));
mi::set_done(out);
} catch(...) {
mi::set_error(out, std::current_exception());
}
......
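
Both inline drivers above now complete the receiver explicitly: after mi::set_value delivers the result, mi::set_done marks the end of the sequence, and any exception is routed to mi::set_error instead. A minimal sketch of that value/done/error protocol with a hand-written receiver (the receiver type is illustrative, not pushmi's):

#include <exception>
#include <iostream>
#include <utility>

// illustrative receiver with the value/error/done protocol used above
struct print_receiver {
  void value(int v) { std::cout << "value " << v << '\n'; }
  void error(std::exception_ptr) noexcept { std::cout << "error\n"; }
  void done() { std::cout << "done\n"; }
};

template <class Out, class Selector, class Acc>
void finish(Out& out, Selector& selector, Acc acc) {
  try {
    auto result = selector(std::move(acc));
    out.value(std::move(result)); // set_value: deliver the result
    out.done();                   // set_done: then signal completion
  } catch (...) {
    out.error(std::current_exception()); // set_error on failure
  }
}

int main() {
  print_receiver out;
  auto selector = [](int acc) { return acc * 2; };
  finish(out, selector, 21); // prints "value 42" then "done"
}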
@@ -34,7 +34,7 @@ auto naive_executor_bulk_target(Executor e, Allocator a = Allocator{}) {
auto&& func,
auto sb,
auto se,
auto out) {
auto out) mutable {
using RS = decltype(selector);
using F = std::conditional_t<
std::is_lvalue_reference<decltype(func)>::value,
@@ -44,63 +44,70 @@ auto naive_executor_bulk_target(Executor e, Allocator a = Allocator{}) {
try {
typename std::allocator_traits<Allocator>::template rebind_alloc<char>
allocState(a);
auto shared_state = std::allocate_shared<std::tuple<
std::exception_ptr, // first exception
Out, // destination
RS, // selector
F, // func
std::atomic<decltype(init(input))>, // accumulation
std::atomic<std::size_t>, // pending
std::atomic<std::size_t> // exception count (protects assignment to
// first exception)
>>(
using Acc = decltype(init(input));
struct shared_state_type {
std::exception_ptr first_exception_{};
Out destination_;
RS selector_;
F func_;
std::atomic<Acc> accumulation_;
std::atomic<std::size_t> pending_{1};
std::atomic<std::size_t> exception_count_{0}; // protects assignment to
// first exception
shared_state_type(Out&& destination, RS&& selector, F&& func, Acc acc)
: destination_((Out&&) destination)
, selector_((RS&&) selector)
, func_((F&&) func)
, accumulation_(acc)
{}
};
auto shared_state = std::allocate_shared<shared_state_type>(
allocState,
std::exception_ptr{},
std::move(out),
std::move(selector),
(decltype(func)&&)func,
init(std::move(input)),
1,
0);
e | op::submit([e, sb, se, shared_state](auto) {
init(std::move(input)));
e.schedule() | op::submit([e, sb, se, shared_state](auto) mutable {
auto stepDone = [](auto shared_state) {
// pending
if (--std::get<5>(*shared_state) == 0) {
if (--shared_state->pending_ == 0) {
// first exception
if (std::get<0>(*shared_state)) {
if (shared_state->first_exception_) {
mi::set_error(
std::get<1>(*shared_state), std::get<0>(*shared_state));
shared_state->destination_, shared_state->first_exception_);
return;
}
try {
// selector(accumulation)
auto result = std::get<2>(*shared_state)(
std::move(std::get<4>(*shared_state).load()));
mi::set_value(std::get<1>(*shared_state), std::move(result));
auto result = shared_state->selector_(
std::move(shared_state->accumulation_.load()));
mi::set_value(shared_state->destination_, std::move(result));
mi::set_done(shared_state->destination_);
} catch (...) {
mi::set_error(
std::get<1>(*shared_state), std::current_exception());
shared_state->destination_, std::current_exception());
}
}
};
for (decltype(sb) idx{sb}; idx != se;
++idx, ++std::get<5>(*shared_state)) {
e | op::submit([shared_state, idx, stepDone](auto ex) {
for (decltype(sb) idx{sb}; idx != se; ++idx) {
++shared_state->pending_;
e.schedule() | op::submit([shared_state, idx, stepDone](auto) {
try {
// this indicates to me that bulk is not the right abstraction
auto old = std::get<4>(*shared_state).load();
auto step = old;
auto old = shared_state->accumulation_.load();
Acc step;
do {
step = old;
// func(accumulation, idx)
std::get<3> (*shared_state)(step, idx);
} while (!std::get<4>(*shared_state)
shared_state->func_(step, idx);
} while (!shared_state->accumulation_
.compare_exchange_strong(old, step));
} catch (...) {
// exception count
if (std::get<6>(*shared_state)++ == 0) {
if (shared_state->exception_count_++ == 0) {
// store first exception
std::get<0>(*shared_state) = std::current_exception();
shared_state->first_exception_ = std::current_exception();
} // else eat the exception
}
stepDone(shared_state);
@@ -109,7 +116,7 @@ auto naive_executor_bulk_target(Executor e, Allocator a = Allocator{}) {
stepDone(shared_state);
});
} catch (...) {
e |
e.schedule() |
op::submit([out = std::move(out), ep = std::current_exception()](
auto) mutable { mi::set_error(out, ep); });
}
......
@@ -38,6 +38,7 @@ auto inline_bulk_target() {
}
auto result = selector(std::move(acc));
mi::set_value(out, std::move(result));
mi::set_done(out);
} catch (...) {
mi::set_error(out, std::current_exception());
}
......
@@ -14,24 +14,25 @@
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
#include <futures.h>
#include <futures_static_thread_pool.h>
#include <atomic>
#include <cassert>
#include <functional>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
#include <folly/experimental/pushmi/examples/pool.h>
#include <folly/experimental/pushmi/strand.h>
#include <folly/experimental/pushmi/o/just.h>
#include <folly/experimental/pushmi/o/transform.h>
#include <folly/experimental/pushmi/o/via.h>
#include <folly/experimental/pushmi/strand.h>
#include <folly/experimental/pushmi/examples/pool.h>
// // See https://github.com/executors/futures-impl
// #include <futures.h>
// #include <futures_static_thread_pool.h>
using namespace folly::pushmi::aliases;
@@ -51,54 +52,54 @@ struct inline_executor {
void execute(Function f) const noexcept {
f();
}
constexpr bool query(std::experimental::execution::oneway_t) {
return true;
}
constexpr bool query(std::experimental::execution::twoway_t) {
return false;
}
constexpr bool query(std::experimental::execution::single_t) {
return true;
}
// constexpr bool query(std::experimental::execution::oneway_t) {
// return true;
// }
// constexpr bool query(std::experimental::execution::twoway_t) {
// return false;
// }
// constexpr bool query(std::experimental::execution::single_t) {
// return true;
// }
};
namespace p1054 {
// A promise refers to a promise and is associated with a future,
// either through type-erasure or through construction of an
// underlying promise with an overload of make_promise_contract().
// make_promise_contract() cannot be written to produce a lazy future.
// the promise has to exist prior to .then() getting a continuation.
// there must be a shared allocation to connect the promise and future.
template <class T, class Executor>
std::pair<
std::experimental::standard_promise<T>,
std::experimental::standard_future<T, std::decay_t<Executor>>>
make_promise_contract(const Executor& e) {
std::experimental::standard_promise<T> promise;
auto ex = e;
return {promise, promise.get_future(std::move(ex))};
}
template <class Executor, class Function, class Future>
std::experimental::standard_future<
std::result_of_t<
Function(std::decay_t<typename std::decay_t<Future>::value_type>&&)>,
std::decay_t<Executor>>
then_execute(Executor&& e, Function&& f, Future&& pred) {
using V = std::decay_t<typename std::decay_t<Future>::value_type>;
using T = std::result_of_t<Function(V &&)>;
auto pc = make_promise_contract<T>(e);
auto p = std::get<0>(pc);
auto r = std::get<1>(pc);
((Future &&) pred).then([e, p, f](V v) mutable {
e.execute([p, f, v]() mutable { p.set_value(f(v)); });
return 0;
});
return r;
}
} // namespace p1054
// namespace p1054 {
// // A promise refers to a promise and is associated with a future,
// // either through type-erasure or through construction of an
// // underlying promise with an overload of make_promise_contract().
//
// // make_promise_contract() cannot be written to produce a lazy future.
// // The promise has to exist prior to .then() getting a continuation.
// // there must be a shared allocation to connect the promise and future.
// template <class T, class Executor>
// std::pair<
// std::experimental::standard_promise<T>,
// std::experimental::standard_future<T, std::decay_t<Executor>>>
// make_promise_contract(const Executor& e) {
// std::experimental::standard_promise<T> promise;
// auto ex = e;
// return {promise, promise.get_future(std::move(ex))};
// }
//
// template <class Executor, class Function, class Future>
// std::experimental::standard_future<
// std::result_of_t<
// Function(std::decay_t<typename std::decay_t<Future>::value_type>&&)>,
// std::decay_t<Executor>>
// then_execute(Executor&& e, Function&& f, Future&& pred) {
// using V = std::decay_t<typename std::decay_t<Future>::value_type>;
// using T = std::result_of_t<Function(V &&)>;
// auto pc = make_promise_contract<T>(e);
// auto p = std::get<0>(pc);
// auto r = std::get<1>(pc);
// ((Future &&) pred).then([e, p, f](V v) mutable {
// e.execute([p, f, v]() mutable { p.set_value(f(v)); });
// return 0;
// });
// return r;
// }
//
// } // namespace p1054
namespace p1055 {
@@ -113,24 +114,25 @@ auto then_execute(Executor&& e, Function&& f, Future&& pred) {
int main() {
mi::pool p{std::max(1u, std::thread::hardware_concurrency())};
std::experimental::futures_static_thread_pool sp{
std::max(1u, std::thread::hardware_concurrency())};
auto pc = p1054::make_promise_contract<int>(inline_executor{});
auto& pr = std::get<0>(pc);
auto& r = std::get<1>(pc);
auto f = p1054::then_execute(
sp.executor(), [](int v) { return v * 2; }, std::move(r));
pr.set_value(42);
f.get();
p1055::then_execute(p.executor(), [](int v) { return v * 2; }, op::just(21)) |
op::get<int>;
sp.stop();
sp.wait();
p.stop();
p.wait();
// std::experimental::futures_static_thread_pool sp{
// std::max(1u, std::thread::hardware_concurrency())};
//
// auto pc = p1054::make_promise_contract<int>(inline_executor{});
// auto& pr = std::get<0>(pc);
// auto& r = std::get<1>(pc);
// auto f = p1054::then_execute(
// sp.executor(), [](int v) { return v * 2; }, std::move(r));
// pr.set_value(42);
// f.get();
//
// sp.stop();
// sp.wait();
std::cout << "OK" << std::endl;
}
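
The body of p1055::then_execute is collapsed in the hunk above. Purely as a hypothetical sketch of the sender-based shape it could take (not code from this commit), following the via/strands pattern used in the lisp example: take the upstream sender, continue on a strand of the executor, and apply the continuation.

// hypothetical sketch only -- the real p1055::then_execute is not shown here
template <class Executor, class Function, class In>
auto then_execute_sketch(Executor&& e, Function&& f, In&& pred) {
  return ((In&&) pred)                // upstream sender, e.g. op::just(21)
      | op::via(mi::strands(e))       // continue on a strand of the executor
      | op::transform([f](auto&& v) { // apply the continuation to the value
          return f((decltype(v)&&) v);
        });
}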
@@ -14,60 +14,61 @@
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
#include <futures.h>
#include <atomic>
#include <cassert>
#include <functional>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <folly/experimental/pushmi/examples/pool.h>
#include <vector>
#include <folly/experimental/pushmi/o/transform.h>
using namespace folly::pushmi::aliases;
#include <folly/experimental/pushmi/examples/pool.h>
namespace p1054 {
// A promise refers to a promise and is associated with a future,
// either through type-erasure or through construction of an
// underlying promise with an overload of make_promise_contract().
using namespace folly::pushmi::aliases;
// make_promise_contract() cannot be written to produce a lazy future.
// the promise has to exist prior to .then() getting a continuation.
// there must be a shared allocation to connect the promise and future.
template <class T, class Executor>
std::pair<
std::experimental::standard_promise<T>,
std::experimental::standard_future<T, std::decay_t<Executor>>>
make_promise_contract(const Executor& e) {
std::experimental::standard_promise<T> promise;
auto ex = e;
return {promise, promise.get_future(std::move(ex))};
}
template <class Executor, class Function>
std::experimental::standard_future<
std::result_of_t<std::decay_t<Function>()>,
std::decay_t<Executor>>
twoway_execute(Executor&& e, Function&& f) {
using T = std::result_of_t<std::decay_t<Function>()>;
auto pc = make_promise_contract<T>(e);
auto p = std::get<0>(pc);
auto r = std::get<1>(pc);
e.execute([p, f]() mutable { p.set_value(f()); });
return r;
}
} // namespace p1054
// // See https://github.com/executors/futures-impl
// #include <futures.h>
//
// namespace p1054 {
// // A promise refers to a promise and is associated with a future,
// // either through type-erasure or through construction of an
// // underlying promise with an overload of make_promise_contract().
//
// // make_promise_contract() cannot be written to produce a lazy future.
// // the promise has to exist prior to .then() getting a continuation.
// // there must be a shared allocation to connect the promise and future.
// template <class T, class Executor>
// std::pair<
// std::experimental::standard_promise<T>,
// std::experimental::standard_future<T, std::decay_t<Executor>>>
// make_promise_contract(const Executor& e) {
// std::experimental::standard_promise<T> promise;
// auto ex = e;
// return {promise, promise.get_future(std::move(ex))};
// }
//
// template <class Executor, class Function>
// std::experimental::standard_future<
// std::result_of_t<std::decay_t<Function>()>,
// std::decay_t<Executor>>
// twoway_execute(Executor&& e, Function&& f) {
// using T = std::result_of_t<std::decay_t<Function>()>;
// auto pc = make_promise_contract<T>(e);
// auto p = std::get<0>(pc);
// auto r = std::get<1>(pc);
// e.execute([p, f]() mutable { p.set_value(f()); });
// return r;
// }
// } // namespace p1054
namespace p1055 {
template <class Executor, class Function>
auto twoway_execute(Executor&& e, Function&& f) {
return e | op::transform([f](auto) { return f(); });
return e.schedule() | op::transform([f](auto) { return f(); });
}
} // namespace p1055
@@ -75,17 +76,18 @@ auto twoway_execute(Executor&& e, Function&& f) {
int main() {
mi::pool p{std::max(1u, std::thread::hardware_concurrency())};
std::experimental::static_thread_pool sp{
std::max(1u, std::thread::hardware_concurrency())};
p1054::twoway_execute(sp.executor(), []() { return 42; }).get();
p1055::twoway_execute(p.executor(), []() { return 42; }) | op::get<int>;
sp.stop();
sp.wait();
p.stop();
p.wait();
// std::experimental::static_thread_pool sp{
// std::max(1u, std::thread::hardware_concurrency())};
//
// p1054::twoway_execute(sp.executor(), []() { return 42; }).get();
//
// sp.stop();
// sp.wait();
std::cout << "OK" << std::endl;
}