Commit f42f143e authored by Dominik Charousset's avatar Dominik Charousset

Provide abstraction for pool of worker actors

parent d479260b
...@@ -25,6 +25,7 @@ set (LIBCAF_CORE_SRCS ...@@ -25,6 +25,7 @@ set (LIBCAF_CORE_SRCS
src/actor_companion.cpp src/actor_companion.cpp
src/actor_namespace.cpp src/actor_namespace.cpp
src/actor_ostream.cpp src/actor_ostream.cpp
src/actor_pool.cpp
src/actor_proxy.cpp src/actor_proxy.cpp
src/actor_registry.cpp src/actor_registry.cpp
src/attachable.cpp src/attachable.cpp
......
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2015 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_ACTOR_POOL_HPP
#define CAF_ACTOR_POOL_HPP
#include <atomic>
#include <random>
#include <vector>
#include <functional>
#include "caf/locks.hpp"
#include "caf/actor.hpp"
#include "caf/abstract_actor.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/detail/shared_spinlock.hpp"
namespace caf {
/**
* An actor pool is a lightweight abstraction for a set of workers.
* The pool itself is an actor, meaning that it can be passed
* around in an actor system to hide the actual set of workers.
*
* After construction, new workers can be added via `{'SYS', 'PUT', actor}`
* messages, e.g., `send(my_pool, sys_atom::value, put_atom::value, worker)`.
* `{'SYS', 'DELETE', actor}` messages remove a worker from the set,
* whereas `{'SYS', 'GET'}` returns a `vector<actor>` containing all workers.
*
* Note that the pool *always* sends exit messages to all of its workers
* when forced to quit. The pool monitors all of its workers. Messages queued
* up in a worker's mailbox are lost, i.e., the pool itself does not buffer
* and resend messages. Advanced caching or resend strategies can be
* implemented in a policy.
*
* It is worth mentioning that the pool is *not* an event-based actor.
* Neither does it live in its own thread. Messages are dispatched immediately
* during the enqueue operation. Any user-defined policy thus has to dispatch
* messages with as little overhead as possible, because the dispatching
* runs in the context of the sender.
*/
class actor_pool : public abstract_actor {
public:
// Lock type handed to policies; can be upgraded to an exclusive lock.
using uplock = upgrade_lock<detail::shared_spinlock>;
using actor_vec = std::vector<actor>;
// Functor creating a single worker actor.
using factory = std::function<actor ()>;
// Dispatch strategy: receives the shared-locked worker set and the
// message to deliver; runs in the context of the sending actor.
using policy = std::function<void (uplock&, const actor_vec&,
mailbox_element_ptr&, execution_unit*)>;
/**
* Default policy class implementing simple round robin dispatching.
*/
class round_robin {
public:
round_robin();
round_robin(const round_robin&);
void operator()(uplock&, const actor_vec&,
mailbox_element_ptr&, execution_unit*);
private:
// index of the next worker; atomic because enqueues may run concurrently
std::atomic<size_t> m_pos;
};
/**
* Default policy class implementing broadcast dispatching.
*/
class broadcast {
public:
void operator()(uplock&, const actor_vec&,
mailbox_element_ptr&, execution_unit*);
};
/**
* Default policy class implementing random dispatching.
*/
class random {
public:
random();
random(const random&);
void operator()(uplock&, const actor_vec&,
mailbox_element_ptr&, execution_unit*);
private:
// std::random_device is neither copyable nor movable; see copy ctor
std::random_device m_rd;
};
~actor_pool();
/**
* Returns an actor pool without workers using the dispatch policy `pol`.
*/
static actor make(policy pol);
/**
* Returns an actor pool with `n` workers created by the factory
* function `fac` using the dispatch policy `pol`.
*/
static actor make(size_t n, factory fac, policy pol);
// Dispatches a message to a worker unless it is a pool-management
// message (see filter). Both overloads run in the caller's context.
void enqueue(const actor_addr& sender, message_id mid,
message content, execution_unit* host) override;
void enqueue(mailbox_element_ptr what, execution_unit* host) override;
private:
actor_pool();
// Intercepts system messages (exit/down/put/delete/get) and replies to
// sync messages when the pool is empty; returns true if the message was
// consumed and must not be forwarded to the policy.
bool filter(upgrade_lock<detail::shared_spinlock>&, const actor_addr& sender,
message_id mid, const message& content, execution_unit* host);
detail::shared_spinlock m_mtx; // guards m_workers and m_planned_reason
std::vector<actor> m_workers;
policy m_policy;
uint32_t m_planned_reason; // exit reason once the pool received an exit_msg
};
} // namespace caf
#endif // CAF_ACTOR_POOL_HPP
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include "caf/spawn_fwd.hpp" #include "caf/spawn_fwd.hpp"
#include "caf/to_string.hpp" #include "caf/to_string.hpp"
#include "caf/actor_addr.hpp" #include "caf/actor_addr.hpp"
#include "caf/actor_pool.hpp"
#include "caf/attachable.hpp" #include "caf/attachable.hpp"
#include "caf/message_id.hpp" #include "caf/message_id.hpp"
#include "caf/replies_to.hpp" #include "caf/replies_to.hpp"
......
...@@ -94,6 +94,11 @@ using ok_atom = atom_constant<atom("OK")>; ...@@ -94,6 +94,11 @@ using ok_atom = atom_constant<atom("OK")>;
*/ */
using error_atom = atom_constant<atom("ERROR")>; using error_atom = atom_constant<atom("ERROR")>;
/**
* Generic 'SYS' atom.
*/
using sys_atom = atom_constant<atom("SYS")>;
} // namespace caf } // namespace caf
#endif // CAF_ATOM_HPP #endif // CAF_ATOM_HPP
...@@ -41,6 +41,7 @@ class duration; ...@@ -41,6 +41,7 @@ class duration;
class behavior; class behavior;
class resumable; class resumable;
class actor_addr; class actor_addr;
class actor_pool;
class message_id; class message_id;
class local_actor; class local_actor;
class actor_proxy; class actor_proxy;
......
...@@ -29,9 +29,7 @@ using unique_lock = std::unique_lock<Lockable>; ...@@ -29,9 +29,7 @@ using unique_lock = std::unique_lock<Lockable>;
template <class SharedLockable> template <class SharedLockable>
class shared_lock { class shared_lock {
public: public:
using lockable = SharedLockable; using lockable = SharedLockable;
explicit shared_lock(lockable& arg) : m_lockable(&arg) { explicit shared_lock(lockable& arg) : m_lockable(&arg) {
...@@ -39,13 +37,20 @@ class shared_lock { ...@@ -39,13 +37,20 @@ class shared_lock {
} }
~shared_lock() { ~shared_lock() {
if (m_lockable) m_lockable->unlock_shared(); unlock();
} }
bool owns_lock() const { bool owns_lock() const {
return m_lockable != nullptr; return m_lockable != nullptr;
} }
void unlock() {
if (m_lockable) {
m_lockable->unlock_shared();
m_lockable = nullptr;
}
}
lockable* release() { lockable* release() {
auto result = m_lockable; auto result = m_lockable;
m_lockable = nullptr; m_lockable = nullptr;
...@@ -53,9 +58,7 @@ class shared_lock { ...@@ -53,9 +58,7 @@ class shared_lock {
} }
private: private:
lockable* m_lockable; lockable* m_lockable;
}; };
template <class SharedLockable> template <class SharedLockable>
...@@ -63,9 +66,7 @@ using upgrade_lock = shared_lock<SharedLockable>; ...@@ -63,9 +66,7 @@ using upgrade_lock = shared_lock<SharedLockable>;
template <class UpgradeLockable> template <class UpgradeLockable>
class upgrade_to_unique_lock { class upgrade_to_unique_lock {
public: public:
using lockable = UpgradeLockable; using lockable = UpgradeLockable;
template <class LockType> template <class LockType>
...@@ -75,17 +76,22 @@ class upgrade_to_unique_lock { ...@@ -75,17 +76,22 @@ class upgrade_to_unique_lock {
} }
~upgrade_to_unique_lock() { ~upgrade_to_unique_lock() {
if (m_lockable) m_lockable->unlock(); unlock();
} }
bool owns_lock() const { bool owns_lock() const {
return m_lockable != nullptr; return m_lockable != nullptr;
} }
private: void unlock() {
if (m_lockable) {
m_lockable->unlock();
m_lockable = nullptr;
}
}
private:
lockable* m_lockable; lockable* m_lockable;
}; };
} // namespace caf } // namespace caf
......
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2015 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#include "caf/actor_pool.hpp"
#include "caf/send.hpp"
#include "caf/default_attachable.hpp"
#include "caf/detail/sync_request_bouncer.hpp"
namespace caf {
// Start dispatching at the first worker.
actor_pool::round_robin::round_robin() : m_pos(0) {
// nop
}
// std::atomic is not copyable; a copied policy restarts at position 0.
actor_pool::round_robin::round_robin(const round_robin&) : m_pos(0) {
// nop
}
// Round robin dispatch: deliver to the next worker in sequence. The atomic
// fetch-add lets concurrent enqueues claim distinct slots.
void actor_pool::round_robin::operator()(uplock& guard, const actor_vec& vec,
                                         mailbox_element_ptr& ptr,
                                         execution_unit* host) {
  CAF_REQUIRE(!vec.empty());
  auto worker = vec[m_pos.fetch_add(1) % vec.size()];
  // never hold the spinlock while enqueueing into a worker's mailbox
  guard.unlock();
  worker->enqueue(std::move(ptr), host);
}
// Broadcast dispatch: every worker receives the message. Workers after the
// first get a copy; the first worker receives the original element, which
// avoids one copy of the message content.
void actor_pool::broadcast::operator()(uplock&, const actor_vec& vec,
                                       mailbox_element_ptr& ptr,
                                       execution_unit* host) {
  CAF_REQUIRE(!vec.empty());
  auto first = vec.begin();
  for (auto i = first + 1; i != vec.end(); ++i) {
    (*i)->enqueue(ptr->sender, ptr->mid, ptr->msg, host);
  }
  (*first)->enqueue(std::move(ptr), host);
}
// Default-constructs the random device.
actor_pool::random::random() {
// nop
}
// std::random_device is not copyable; a copied policy simply
// default-constructs a fresh device.
actor_pool::random::random(const random&) {
// nop
}
// Random dispatch: picks a uniformly distributed worker. Upgrades to an
// exclusive lock before drawing because std::random_device is stateful and
// enqueues may run concurrently in the contexts of multiple senders.
void actor_pool::random::operator()(uplock& guard, const actor_vec& vec,
                                    mailbox_element_ptr& ptr,
                                    execution_unit* host) {
  // consistent with the other policies; also guards against the size_t
  // wraparound in `vec.size() - 1` for an empty worker set
  CAF_REQUIRE(!vec.empty());
  std::uniform_int_distribution<size_t> dis(0, vec.size() - 1);
  upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
  actor selected = vec[dis(m_rd)];
  unique_guard.unlock();
  selected->enqueue(std::move(ptr), host);
}
// Out-of-line empty destructor matching the declaration in the header.
actor_pool::~actor_pool() {
// nop
}
// Creates an empty pool that dispatches via `pol`. Workers can be added
// later through {'SYS', 'PUT', actor} messages.
actor actor_pool::make(policy pol) {
  intrusive_ptr<actor_pool> result;
  result.reset(new actor_pool);
  result->m_policy = std::move(pol);
  return actor_cast<actor>(result);
}
// Creates a pool with `num_workers` workers produced by `fac`, dispatching
// via `pol`. Each worker is monitored so the pool can remove it from the
// set when it terminates.
actor actor_pool::make(size_t num_workers, factory fac, policy pol) {
  auto res = make(std::move(pol));
  auto ptr = static_cast<actor_pool*>(actor_cast<abstract_actor*>(res));
  auto res_addr = ptr->address();
  // no lock needed: the pool is not visible to anyone else yet
  ptr->m_workers.reserve(num_workers); // one allocation instead of O(log n)
  for (size_t i = 0; i < num_workers; ++i) {
    auto worker = fac();
    worker->attach(default_attachable::make_monitor(res_addr));
    // move instead of copy: avoids a superfluous ref count bump
    ptr->m_workers.push_back(std::move(worker));
  }
  return res;
}
// Wraps the message into a mailbox element and hands it to the policy,
// unless `filter` consumed it as a pool-management message.
void actor_pool::enqueue(const actor_addr& sender, message_id mid,
                         message content, execution_unit* eu) {
  upgrade_lock<detail::shared_spinlock> guard{m_mtx};
  if (!filter(guard, sender, mid, content, eu)) {
    auto element = mailbox_element::make(sender, mid, std::move(content));
    m_policy(guard, m_workers, element, eu);
  }
}
// Hands an already-wrapped mailbox element to the policy, unless `filter`
// consumed it as a pool-management message.
void actor_pool::enqueue(mailbox_element_ptr what, execution_unit* eu) {
  upgrade_lock<detail::shared_spinlock> guard{m_mtx};
  if (!filter(guard, what->sender, what->mid, what->msg, eu)) {
    m_policy(guard, m_workers, what, eu);
  }
}
// Pools start in the "not exited" state and register themselves with the
// runtime; presumably this ties the pool into the actor registry so that
// shutdown bookkeeping sees it — confirm against abstract_actor.
actor_pool::actor_pool() : m_planned_reason(caf::exit_reason::not_exited) {
is_registered(true);
}
// Handles all pool-management and lifecycle messages. Returns true if the
// message was consumed here, i.e., must not be dispatched to a worker.
// Called while `guard` holds m_mtx in shared mode; branches that mutate
// state upgrade the lock, branches that leave the critical section unlock.
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
const actor_addr& sender, message_id mid,
const message& msg, execution_unit* eu) {
auto rsn = m_planned_reason;
if (rsn != caf::exit_reason::not_exited) {
// the pool already terminated: bounce sync requests so senders do not
// block forever, silently drop everything else
guard.unlock();
if (mid.valid()) {
detail::sync_request_bouncer srq{rsn};
srq(sender, mid);
}
return true;
}
if (msg.match_elements<exit_msg>()) {
std::vector<actor> workers;
// send exit messages *always* to all workers and clear vector afterwards
// but first swap m_workers out of the critical section
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
m_workers.swap(workers);
m_planned_reason = msg.get_as<exit_msg>(0).reason;
unique_guard.unlock();
for (auto& w : workers) {
anon_send(w, msg);
}
// we can safely run our cleanup code here
// because abstract_actor has its own lock
cleanup(m_planned_reason);
is_registered(false);
return true;
}
if (msg.match_elements<down_msg>()) {
// remove failed worker from pool
auto& dm = msg.get_as<down_msg>(0);
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
auto last = m_workers.end();
auto i = std::find(m_workers.begin(), m_workers.end(), dm.source);
if (i != last) {
m_workers.erase(i);
}
return true;
}
if (msg.match_elements<sys_atom, put_atom, actor>()) {
// {'SYS', 'PUT', actor}: add a new worker to the pool
auto& worker = msg.get_as<actor>(2);
if (worker == invalid_actor) {
return true;
}
// attach before publishing the worker, so a dying worker is guaranteed
// to produce a down_msg for this pool
worker->attach(default_attachable::make_monitor(address()));
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
m_workers.push_back(worker);
return true;
}
if (msg.match_elements<sys_atom, delete_atom, actor>()) {
// {'SYS', 'DELETE', actor}: remove a worker from the pool
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
auto& what = msg.get_as<actor>(2);
auto last = m_workers.end();
auto i = std::find(m_workers.begin(), last, what);
if (i != last) {
m_workers.erase(i);
}
return true;
}
if (msg.match_elements<sys_atom, get_atom>()) {
// {'SYS', 'GET'}: reply with a copy of the current worker set
auto cpy = m_workers;
guard.unlock();
// NOTE(review): sender is not checked against invalid_actor_addr here;
// an anonymous 'GET' would dereference a null pointer — confirm callers
actor_cast<abstract_actor*>(sender)->enqueue(invalid_actor_addr,
mid.response_id(),
make_message(std::move(cpy)),
eu);
return true;
}
if (m_workers.empty()) {
guard.unlock();
if (sender != invalid_actor_addr && mid.valid()) {
// tell client we have ignored this sync message by sending
// an empty message back
auto ptr = actor_cast<abstract_actor_ptr>(sender);
ptr->enqueue(invalid_actor_addr, mid.response_id(), message{}, eu);
}
return true;
}
return false;
}
} // namespace caf
...@@ -44,6 +44,7 @@ add_unit_test(typed_remote_actor) ...@@ -44,6 +44,7 @@ add_unit_test(typed_remote_actor)
add_unit_test(unpublish) add_unit_test(unpublish)
add_unit_test(optional) add_unit_test(optional)
add_unit_test(fixed_stack_actor) add_unit_test(fixed_stack_actor)
add_unit_test(actor_pool)
if (NOT WIN32) if (NOT WIN32)
add_unit_test(profiled_coordinator) add_unit_test(profiled_coordinator)
endif () endif ()
...@@ -20,7 +20,7 @@ class watchdog { ...@@ -20,7 +20,7 @@ class watchdog {
watchdog() { watchdog() {
m_thread = thread([&] { m_thread = thread([&] {
auto tp = chrono::high_resolution_clock::now() + chrono::seconds(10); auto tp = chrono::high_resolution_clock::now() + chrono::seconds(10);
unique_lock<mutex> guard{m_mtx}; std::unique_lock<mutex> guard{m_mtx};
while (!m_canceled while (!m_canceled
&& m_cv.wait_until(guard, tp) != cv_status::timeout) { && m_cv.wait_until(guard, tp) != cv_status::timeout) {
// spin // spin
......
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2015 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#include "test.hpp"
#include "caf/all.hpp"
using namespace caf;
namespace {
// construction/destruction counters used by main() to detect leaked workers
std::atomic<size_t> s_ctors;
std::atomic<size_t> s_dtors;
} // namespace <anonymous>
// Minimal worker: replies to (int, int) messages with their sum; ctor and
// dtor update the global counters so main() can check for leaked instances.
class worker : public event_based_actor {
public:
worker();
~worker();
behavior make_behavior() override;
};
// Count constructions for the leak check in main().
worker::worker() {
  s_ctors.fetch_add(1);
}
// Count destructions; main() asserts s_dtors == s_ctors after shutdown.
worker::~worker() {
  s_dtors.fetch_add(1);
}
// Single handler: reply to an (int, int) message with the sum.
behavior worker::make_behavior() {
  auto add = [](int lhs, int rhs) {
    return lhs + rhs;
  };
  return {add};
}
// Factory passed to actor_pool::make; each call spawns a fresh worker.
actor spawn_worker() {
return spawn<worker>();
}
// Tests the round robin policy together with the pool-management messages
// ('SYS' PUT/GET), worker monitoring, and pool shutdown semantics.
void test_actor_pool() {
  scoped_actor self;
  auto w = actor_pool::make(5, spawn_worker, actor_pool::round_robin{});
  self->monitor(w);
  // grow the pool to six workers via a 'SYS' 'PUT' message
  self->send(w, sys_atom::value, put_atom::value, spawn_worker());
  std::vector<actor_addr> workers;
  for (int i = 0; i < 6; ++i) {
    self->sync_send(w, i, i).await(
      [&](int res) {
        CAF_CHECK_EQUAL(res, i + i);
        auto sender = self->current_sender();
        self->monitor(sender);
        workers.push_back(sender);
      }
    );
  }
  CAF_CHECK(workers.size() == 6);
  // round robin must have hit each of the six workers exactly once;
  // std::unique only removes *adjacent* duplicates, so the vector has to
  // be sorted first or duplicates could go undetected
  std::sort(workers.begin(), workers.end());
  CAF_CHECK(std::unique(workers.begin(), workers.end()) == workers.end());
  auto is_invalid = [](const actor_addr& addr) {
    return addr == invalid_actor_addr;
  };
  CAF_CHECK(std::none_of(workers.begin(), workers.end(), is_invalid));
  // a 'SYS' 'GET' must return exactly the workers we have seen
  self->sync_send(w, sys_atom::value, get_atom::value).await(
    [&](std::vector<actor>& ws) {
      std::sort(ws.begin(), ws.end());
      CAF_CHECK(workers.size() == ws.size()
                && std::equal(workers.begin(), workers.end(), ws.begin()));
    }
  );
  // kill one worker and expect the pool to drop it from its set
  anon_send_exit(workers.back(), exit_reason::user_shutdown);
  self->receive(
    after(std::chrono::milliseconds(25)) >> [] {
      // wait some time to give the pool time to remove the failed worker
    }
  );
  self->receive(
    [&](const down_msg& dm) {
      CAF_CHECK(dm.source == workers.back());
      workers.pop_back();
      // check whether actor pool removed failed worker
      self->sync_send(w, sys_atom::value, get_atom::value).await(
        [&](std::vector<actor>& ws) {
          std::sort(ws.begin(), ws.end());
          CAF_CHECK(workers.size() == ws.size()
                    && std::equal(workers.begin(), workers.end(), ws.begin()));
        }
      );
    },
    after(std::chrono::milliseconds(250)) >> [] {
      CAF_PRINTERR("didn't receive a down message");
    }
  );
  CAF_CHECKPOINT();
  // killing the pool must produce a down message for each of the five
  // remaining workers plus one for the pool itself
  self->send_exit(w, exit_reason::user_shutdown);
  for (int i = 0; i < 6; ++i) {
    self->receive(
      [&](const down_msg& dm) {
        auto last = workers.end();
        auto src = dm.source;
        CAF_CHECK(src != invalid_actor_addr);
        auto pos = std::find(workers.begin(), last, src);
        CAF_CHECK(pos != last || src == w);
        if (pos != last) {
          workers.erase(pos);
        }
      },
      after(std::chrono::milliseconds(250)) >> [] {
        CAF_PRINTERR("didn't receive a down message");
      }
    );
  }
}
// Tests the broadcast policy using a pool of pools, i.e., one message
// fans out to 5 * 5 = 25 workers that each reply with 1 + 2 = 3.
void test_broadcast_actor_pool() {
  scoped_actor self;
  auto make_nested = []() {
    return actor_pool::make(5, spawn_worker, actor_pool::broadcast{});
  };
  auto pool = actor_pool::make(5, make_nested, actor_pool::broadcast{});
  self->send(pool, 1, 2);
  std::vector<int> replies;
  int received = 0;
  self->receive_for(received, 25)(
    [&](int value) {
      replies.push_back(value);
    },
    after(std::chrono::milliseconds(250)) >> [] {
      CAF_PRINTERR("didn't receive a result");
    }
  );
  CAF_CHECK_EQUAL(replies.size(), 25);
  auto is_three = [](int value) { return value == 3; };
  CAF_CHECK(std::all_of(replies.begin(), replies.end(), is_three));
  self->send_exit(pool, exit_reason::user_shutdown);
  self->await_all_other_actors_done();
}
// Tests the random policy: five sync requests must each yield a result
// from *some* worker (the distribution itself is not asserted).
void test_random_actor_pool() {
  scoped_actor self;
  auto w = actor_pool::make(5, spawn_worker, actor_pool::random{});
  for (int i = 0; i < 5; ++i) {
    self->sync_send(w, 1, 2).await(
      [&](int res) {
        CAF_CHECK_EQUAL(res, 3);
      },
      after(std::chrono::milliseconds(250)) >> [] {
        // fixed copy-and-paste diagnostic: this handler awaits a result,
        // not a down message
        CAF_PRINTERR("didn't receive a result");
      }
    );
  }
  self->send_exit(w, exit_reason::user_shutdown);
  self->await_all_other_actors_done();
}
// Runs all pool tests, then verifies that every worker spawned during the
// tests was also destroyed (s_dtors == s_ctors, i.e., no leaked actors).
int main() {
CAF_TEST(test_actor_pool);
test_actor_pool();
test_broadcast_actor_pool();
test_random_actor_pool();
await_all_actors_done();
shutdown();
CAF_CHECK_EQUAL(s_dtors.load(), s_ctors.load());
return CAF_TEST_RESULT();
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment