Commit 02a55afa authored by Dominik Charousset

Remove `scheduler::abstract_worker`

By moving the implementation of `stop` into the concrete coordinator, the
`abstract_coordinator` base class no longer needs any virtual member function
dispatch for its workers.
parent 6241921e
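The change boils down to replacing a virtual worker interface with direct calls on a concrete, policy-parameterized worker type. Below is a minimal sketch of that pattern with simplified names (these are not the actual CAF classes); it only illustrates why `abstract_coordinator` can declare `stop()` as pure virtual and drop all knowledge of its workers:

```cpp
// Minimal sketch of the resulting design (simplified, not the real CAF code):
// the base class keeps no notion of workers, while the policy-based
// coordinator owns concrete worker objects and calls them without any
// virtual worker interface in between.
#include <cstddef>
#include <vector>

struct resumable; // stand-in for caf::resumable

class abstract_coordinator {
public:
  virtual ~abstract_coordinator() = default;
protected:
  virtual void stop() = 0; // implemented by the concrete coordinator
};

template <class Policy>
class coordinator : public abstract_coordinator {
public:
  class worker {
  public:
    void external_enqueue(resumable* job) {
      // policy-specific enqueue would go here
      (void)job;
    }
  };

  worker* worker_by_id(size_t id) { return &m_workers[id]; }

protected:
  void stop() override {
    // direct, non-virtual calls on the concrete worker type
    for (auto& w : m_workers) {
      w.external_enqueue(nullptr /* the real code enqueues a shutdown job */);
    }
  }

private:
  std::vector<worker> m_workers;
};
```

The real `coordinator<Policy>::stop` in the diff below enqueues a `shutdown_helper` job into each worker rather than a null pointer; the sketch merely shows the shape of the dispatch.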
```diff
@@ -67,19 +67,12 @@ class scheduler_policy {
   template <class Worker>
   void resume_job_later(Worker* self, resumable* job);
 
-  /**
-   * Returns `nullptr` if no element could be dequeued immediately.
-   * Called by external sources to try to dequeue an element.
-   */
-  template <class Worker>
-  resumable* try_external_dequeue(Worker* self);
-
   /**
    * Blocks until a job could be dequeued.
    * Called by the worker itself to acquire a new job.
    */
   template <class Worker>
-  resumable* internal_dequeue(Worker* self);
+  resumable* dequeue(Worker* self);
 
   /**
    * Performs cleanup action before a shutdown takes place.
```
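With `try_external_dequeue` gone and `internal_dequeue` renamed to `dequeue`, the policy concept a worker relies on becomes smaller. As a rough illustration only (a mutex-protected FIFO with none of CAF's lock-free machinery, and a made-up `data()` accessor on the worker), a minimal policy could provide something like:

```cpp
// Hypothetical minimal policy satisfying the reduced interface; the data()
// accessor and the blocking strategy are assumptions of this sketch, not
// CAF's actual implementation.
#include <condition_variable>
#include <deque>
#include <mutex>

class resumable; // stand-in for caf::resumable

class simple_fifo {
public:
  struct worker_data {
    std::mutex mtx;
    std::condition_variable cv;
    std::deque<resumable*> jobs;
  };

  // called from any thread to hand a job to a worker
  template <class Worker>
  void external_enqueue(Worker* self, resumable* job) {
    auto& d = self->data();
    std::unique_lock<std::mutex> guard(d.mtx);
    d.jobs.push_back(job);
    d.cv.notify_one();
  }

  // blocks until a job could be dequeued (replaces the former
  // try_external_dequeue / internal_dequeue pair)
  template <class Worker>
  resumable* dequeue(Worker* self) {
    auto& d = self->data();
    std::unique_lock<std::mutex> guard(d.mtx);
    d.cv.wait(guard, [&] { return !d.jobs.empty(); });
    auto* job = d.jobs.front();
    d.jobs.pop_front();
    return job;
  }
};
```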
```diff
@@ -95,7 +95,7 @@ class work_stealing {
   resumable* try_steal(Worker* self) {
     auto p = self->parent();
     auto victim = d(self).rengine() % p->num_workers();
-    return try_external_dequeue(p->worker_by_id(victim));
+    return d(p->worker_by_id(victim)).exposed_queue.try_pop();
   }
 
   template <class Coordinator>
@@ -131,12 +131,7 @@ class work_stealing {
   }
 
   template <class Worker>
-  resumable* try_external_dequeue(Worker* self) {
-    return d(self).exposed_queue.try_pop();
-  }
-
-  template <class Worker>
-  resumable* internal_dequeue(Worker* self) {
+  resumable* dequeue(Worker* self) {
     // we wait for new jobs by polling our external queue: first, we
     // assume an active work load on the machine and perform aggresive
     // polling, then we relax our polling a bit and wait 50 us between
```
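The comment kept in `dequeue` describes a two-phase strategy: poll aggressively first, then back off and sleep briefly between attempts. A hypothetical, self-contained version of that idea is sketched below; the iteration count and sleep interval are illustrative, and the real work-stealing policy additionally tries to steal jobs from other workers between polls, which this sketch omits:

```cpp
// Hypothetical two-phase polling loop; Queue is any type whose try_pop()
// returns a Job* or nullptr. Constants are illustrative, not CAF's tuning.
#include <chrono>
#include <thread>

template <class Job, class Queue>
Job* dequeue_with_backoff(Queue& q) {
  // phase 1: aggressive polling, assuming the machine is under load
  for (int i = 0; i < 1000; ++i) {
    if (Job* job = q.try_pop()) {
      return job;
    }
  }
  // phase 2: relaxed polling with a short sleep between attempts
  for (;;) {
    if (Job* job = q.try_pop()) {
      return job;
    }
    std::this_thread::sleep_for(std::chrono::microseconds(50));
  }
}
```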
```diff
@@ -39,41 +39,14 @@
 #include "caf/spawn_options.hpp"
 #include "caf/execution_unit.hpp"
 
+#include "caf/policy/work_stealing.hpp"
+
 #include "caf/detail/logging.hpp"
 #include "caf/detail/producer_consumer_list.hpp"
 
 namespace caf {
 namespace scheduler {
 
-class abstract_coordinator;
-
-/**
- * Base class for work-stealing workers.
- */
-class abstract_worker : public execution_unit {
-
-  friend class abstract_coordinator;
-
-public:
-
-  /**
-   * Attempt to steal an element from this worker.
-   */
-  virtual resumable* try_steal() = 0;
-
-  /**
-   * Enqueues a new job to the worker's queue from an external
-   * source, i.e., from any other thread.
-   */
-  virtual void external_enqueue(resumable*) = 0;
-
-  /**
-   * Starts the thread of this worker.
-   */
-  //virtual void start(size_t id, abstract_coordinator* parent) = 0;
-
-};
-
 /**
  * A coordinator creates the workers, manages delayed sends and
  * the central printer instance for {@link aout}. It also forwards
@@ -112,7 +85,7 @@ class abstract_coordinator {
     return m_num_workers;
   }
 
-  virtual abstract_worker* worker_by_id(size_t id) = 0;
+  //virtual execution_unit* worker_by_id(size_t id) = 0;
 
 protected:
@@ -120,9 +93,8 @@ class abstract_coordinator {
 
   virtual void initialize();
 
-  virtual void stop();
+  virtual void stop() = 0;
 
- private:
   // Creates a default instance.
   static abstract_coordinator* create_singleton();
@@ -149,7 +121,7 @@ class coordinator;
  * Policy-based implementation of the abstract worker base class.
  */
 template <class Policy>
-class worker : public abstract_worker {
+class worker : public execution_unit {
 public:
   worker(const worker&) = delete;
   worker& operator=(const worker&) = delete;
@@ -181,20 +153,11 @@ class worker : public abstract_worker {
 
   using policy_data = typename Policy::worker_data;
 
-  /**
-   * Attempt to steal an element from the exposed job queue.
-   */
-  job_ptr try_steal() override {
-    auto result = m_policy.try_external_dequeue(this);
-    CAF_LOG_DEBUG_IF(result, "stole actor with id " << id_of(result));
-    return result;
-  }
-
   /**
    * Enqueues a new job to the worker's queue from an external
    * source, i.e., from any other thread.
   */
-  void external_enqueue(job_ptr job) override {
+  void external_enqueue(job_ptr job) {
     CAF_REQUIRE(job != nullptr);
     CAF_LOG_TRACE("id = " << id() << " actor id " << id_of(job));
     m_policy.external_enqueue(this, job);
@@ -268,7 +231,7 @@ class worker : public abstract_worker {
     CAF_LOG_TRACE("worker with ID " << m_id);
     // scheduling loop
     for (;;) {
-      auto job = m_policy.internal_dequeue(this);
+      auto job = m_policy.dequeue(this);
       CAF_REQUIRE(job != nullptr);
       CAF_LOG_DEBUG("resume actor " << id_of(job));
       CAF_PUSH_AID_FROM_PTR(dynamic_cast<abstract_actor*>(job));
@@ -323,7 +286,7 @@ class coordinator : public abstract_coordinator {
 
   using worker_type = worker<Policy>;
 
-  worker_type* worker_by_id(size_t id) override {
+  worker_type* worker_by_id(size_t id) {//override {
     return &m_workers[id];
   }
@@ -343,7 +306,59 @@ class coordinator : public abstract_coordinator {
   void stop() override {
     // perform cleanup code of base classe
-    super::stop();
+    CAF_LOG_TRACE("");
+    // shutdown workers
+    class shutdown_helper : public resumable {
+    public:
+      void attach_to_scheduler() override {
+        // nop
+      }
+      void detach_from_scheduler() override {
+        // nop
+      }
+      resumable::resume_result resume(execution_unit* ptr, size_t) override {
+        CAF_LOG_DEBUG("shutdown_helper::resume => shutdown worker");
+        CAF_REQUIRE(ptr != nullptr);
+        std::unique_lock<std::mutex> guard(mtx);
+        last_worker = ptr;
+        cv.notify_all();
+        return resumable::shutdown_execution_unit;
+      }
+      shutdown_helper() : last_worker(nullptr) {
+        // nop
+      }
+      std::mutex mtx;
+      std::condition_variable cv;
+      execution_unit* last_worker;
+    };
+    shutdown_helper sh;
+    std::vector<worker_type*> alive_workers;
+    auto num = num_workers();
+    for (size_t i = 0; i < num; ++i) {
+      alive_workers.push_back(worker_by_id(i));
+    }
+    CAF_LOG_DEBUG("enqueue shutdown_helper into each worker");
+    while (!alive_workers.empty()) {
+      alive_workers.back()->external_enqueue(&sh);
+      // since jobs can be stolen, we cannot assume that we have
+      // actually shut down the worker we've enqueued sh to
+      { // lifetime scope of guard
+        std::unique_lock<std::mutex> guard(sh.mtx);
+        sh.cv.wait(guard, [&] { return sh.last_worker != nullptr; });
+      }
+      auto last = alive_workers.end();
+      auto i = std::find(alive_workers.begin(), last, sh.last_worker);
+      sh.last_worker = nullptr;
+      alive_workers.erase(i);
+    }
+    // shutdown utility actors
+    CAF_LOG_DEBUG("send exit messages to timer & printer");
+    anon_send_exit(this->m_timer->address(), exit_reason::user_shutdown);
+    anon_send_exit(this->m_printer->address(), exit_reason::user_shutdown);
+    CAF_LOG_DEBUG("join threads of utility actors");
+    // join each worker thread for good manners
+    m_timer_thread.join();
+    m_printer_thread.join();
     // wait until all workers are done
     for (auto& w : m_workers) {
       w.get_thread().join();
```
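The `stop` implementation moved into `coordinator<Policy>` keeps the previous shutdown handshake: enqueue a sentinel job, block on a condition variable until whichever worker ends up resuming it reports back, then remove that worker from the set and repeat. A stripped-down illustration of just that rendezvous, with a plain thread standing in for a worker and all queueing and stealing omitted:

```cpp
// Stripped-down illustration of the condition-variable rendezvous used by
// stop(); a plain thread stands in for the worker, queues are omitted.
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::mutex mtx;
  std::condition_variable cv;
  const char* last_worker = nullptr; // stands in for the execution_unit*
  std::thread worker([&] {
    // a worker "resumes" the sentinel job and announces itself
    std::unique_lock<std::mutex> guard(mtx);
    last_worker = "worker-0";
    cv.notify_all();
  });
  { // the coordinator waits until some worker has reported back
    std::unique_lock<std::mutex> guard(mtx);
    cv.wait(guard, [&] { return last_worker != nullptr; });
  }
  worker.join();
  return 0;
}
```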
```diff
@@ -391,7 +406,7 @@ void set_scheduler(scheduler::abstract_coordinator* ptr);
  * changing the scheduler at runtime is not supported.
  * @throws std::logic_error if a scheduler is already defined
  */
-template <class Policy>
+template <class Policy = policy::work_stealing>
 void set_scheduler(size_t nw = std::thread::hardware_concurrency(),
                    size_t max_throughput = 0) {
   set_scheduler(new scheduler::coordinator<Policy>(nw, max_throughput));
```
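With the new default template argument, callers get the work-stealing policy without naming it. A hypothetical call site (the header and worker count are illustrative, and the call must happen before the scheduler is first used):

```cpp
// Hypothetical call site; header and worker count are illustrative only.
#include "caf/all.hpp"

int main() {
  // Policy now defaults to caf::policy::work_stealing:
  caf::set_scheduler(4 /* workers */);
  // an explicit policy still works:
  // caf::set_scheduler<caf::policy::work_stealing>(4);
  // ... spawn and run actors as usual ...
  return 0;
}
```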
```diff
@@ -32,9 +32,6 @@
 #include "caf/scoped_actor.hpp"
 #include "caf/system_messages.hpp"
 
-#include "caf/actor_ostream.hpp"
-
-#include "caf/policy/work_stealing.hpp"
 #include "caf/policy/no_resume.hpp"
 #include "caf/policy/no_scheduling.hpp"
 #include "caf/policy/actor_policies.hpp"
@@ -44,11 +41,6 @@
 #include "caf/detail/logging.hpp"
 #include "caf/detail/proper_actor.hpp"
 
-#include "caf/actor_ostream.hpp"
-
 namespace caf {
 namespace scheduler {
@@ -210,36 +202,6 @@ void printer_loop(blocking_actor* self) {
  * implementation of coordinator                                             *
  ******************************************************************************/
 
-class shutdown_helper : public resumable {
-public:
-  void attach_to_scheduler() override {
-    // nop
-  }
-  void detach_from_scheduler() override {
-    // nop
-  }
-  resumable::resume_result resume(execution_unit* ptr, size_t) override {
-    CAF_LOG_DEBUG("shutdown_helper::resume => shutdown worker");
-    auto wptr = dynamic_cast<abstract_worker*>(ptr);
-    CAF_REQUIRE(wptr != nullptr);
-    std::unique_lock<std::mutex> guard(mtx);
-    last_worker = wptr;
-    cv.notify_all();
-    return resumable::shutdown_execution_unit;
-  }
-  shutdown_helper() : last_worker(nullptr) {
-    // nop
-  }
-  ~shutdown_helper();
-  std::mutex mtx;
-  std::condition_variable cv;
-  abstract_worker* last_worker;
-};
-
-shutdown_helper::~shutdown_helper() {
-  // nop
-}
-
 abstract_coordinator::~abstract_coordinator() {
   // nop
 }
@@ -256,39 +218,6 @@ void abstract_coordinator::initialize() {
   m_printer_thread = std::thread{printer_loop, m_printer.get()};
 }
 
-void abstract_coordinator::stop() {
-  CAF_LOG_TRACE("");
-  // shutdown workers
-  shutdown_helper sh;
-  std::vector<abstract_worker*> alive_workers;
-  auto num = num_workers();
-  for (size_t i = 0; i < num; ++i) {
-    alive_workers.push_back(worker_by_id(i));
-  }
-  CAF_LOG_DEBUG("enqueue shutdown_helper into each worker");
-  while (!alive_workers.empty()) {
-    alive_workers.back()->external_enqueue(&sh);
-    // since jobs can be stolen, we cannot assume that we have
-    // actually shut down the worker we've enqueued sh to
-    { // lifetime scope of guard
-      std::unique_lock<std::mutex> guard(sh.mtx);
-      sh.cv.wait(guard, [&] { return sh.last_worker != nullptr; });
-    }
-    auto last = alive_workers.end();
-    auto i = std::find(alive_workers.begin(), last, sh.last_worker);
-    sh.last_worker = nullptr;
-    alive_workers.erase(i);
-  }
-  // shutdown utility actors
-  CAF_LOG_DEBUG("send exit messages to timer & printer");
-  anon_send_exit(m_timer->address(), exit_reason::user_shutdown);
-  anon_send_exit(m_printer->address(), exit_reason::user_shutdown);
-  CAF_LOG_DEBUG("join threads of utility actors");
-  m_timer_thread.join();
-  m_printer_thread.join();
-  // join each worker thread for good manners
-}
-
 abstract_coordinator::abstract_coordinator(size_t nw)
   : m_timer(new timer_actor),
     m_printer(true),
```