Commit ad406f57 authored by Dominik Charousset's avatar Dominik Charousset

implemented serialization of local groups with a broker-based forwarding strategy

parent c65f4cd3
......@@ -183,15 +183,6 @@ class group : public channel {
virtual void unsubscribe(const channel_ptr& who) = 0;
/**
* @brief Called whenever a message was received via network. If @p this
* is a proxy, it should not send the message
* back to the original group but forward the message to its local
* subscribers. This member function should call @p enqueue for
* all non-proxy instances.
*/
virtual void remote_enqueue(actor* sender, any_tuple msg);
module_ptr m_module;
std::string m_identifier;
......
......@@ -93,8 +93,4 @@ const std::string& group::module_name() const {
return get_module()->name();
}
void group::remote_enqueue(actor* sender, any_tuple msg) {
enqueue(sender, std::move(msg));
}
} // namespace cppa
......@@ -31,11 +31,14 @@
#include <set>
#include <stdexcept>
#include "cppa/cppa.hpp"
#include "cppa/any_tuple.hpp"
#include "cppa/serializer.hpp"
#include "cppa/deserializer.hpp"
#include "cppa/event_based_actor.hpp"
#include "cppa/detail/middleman.hpp"
#include "cppa/detail/types_array.hpp"
#include "cppa/detail/group_manager.hpp"
#include "cppa/detail/addressed_message.hpp"
......@@ -51,30 +54,48 @@ typedef std::lock_guard<util::shared_spinlock> exclusive_guard;
typedef util::shared_lock_guard<util::shared_spinlock> shared_guard;
typedef util::upgrade_lock_guard<util::shared_spinlock> upgrade_guard;
class local_broker;
class local_group_module;
class local_group : public group {
public:
void enqueue(actor* sender, any_tuple msg) {
// Delivers @p msg to every local subscriber of this group.
// Only a shared (read) lock is taken; subscribe/unsubscribe use the
// exclusive lock on the same spinlock, so the set cannot change mid-loop.
void send_all_subscribers(actor* sender, const any_tuple& msg) {
shared_guard guard(m_shared_mtx);
for (auto& s : m_subscribers) {
s->enqueue(sender, msg);
}
}
group::subscription subscribe(const channel_ptr& who) {
// Delivers @p msg to all local subscribers and additionally hands it to
// the group's broker, which relays it to remote nodes (acquaintances).
void enqueue(actor* sender, any_tuple msg) {
send_all_subscribers(sender, msg);
m_broker->enqueue(sender, std::move(msg));
}
// Inserts @p who into the subscriber set (exclusive lock).
// @returns {true, new set size} on insertion,
//          {false, current set size} if @p who was already subscribed.
std::pair<bool, size_t> add_subscriber(const channel_ptr& who) {
exclusive_guard guard(m_shared_mtx);
if (m_subscribers.insert(who).second) {
return {true, m_subscribers.size()};
}
return {false, m_subscribers.size()};
}
// Removes @p who from the subscriber set (exclusive lock).
// @returns {whether an element was erased, remaining set size};
//          the size lets callers detect "last subscriber left".
std::pair<bool, size_t> erase_subscriber(const channel_ptr& who) {
exclusive_guard guard(m_shared_mtx);
auto erased_one = m_subscribers.erase(who) > 0;
return {erased_one, m_subscribers.size()};
}
// Subscribes @p who to this group.
// @returns a valid subscription on success, or a default-constructed
//          (empty) subscription if @p who was already subscribed.
group::subscription subscribe(const channel_ptr& who) {
if (add_subscriber(who).first) {
return {who, this};
}
return {};
}
void unsubscribe(const channel_ptr& who) {
exclusive_guard guard(m_shared_mtx);
m_subscribers.erase(who);
erase_subscriber(who);
}
void serialize(serializer* sink);
......@@ -87,17 +108,72 @@ class local_group : public group {
return m_process;
}
local_group(local_group_module* mod, std::string id,
const actor_ptr& broker() const {
return m_broker;
}
local_group(bool spawn_local_broker,
local_group_module* mod, std::string id,
process_information_ptr parent = process_information::get());
private:
protected:
process_information_ptr m_process;
util::shared_spinlock m_shared_mtx;
std::set<channel_ptr> m_subscribers;
process_information_ptr m_process;
actor_ptr m_broker;
};
typedef intrusive_ptr<local_group> local_group_ptr;
// Broker actor of a non-proxy local_group. It keeps a set of remote
// "acquaintances" (proxy brokers on other nodes) and relays group
// traffic to them; it also feeds remote traffic back into the local
// subscriber set via the owning group.
class local_broker : public event_based_actor {
public:
local_broker(local_group_ptr g) : m_group(std::move(g)) { }
void init() {
become (
// a remote proxy broker registers interest in this group;
// monitor it so we can drop it when it terminates
on(atom("JOIN"), arg_match) >> [=](const actor_ptr& other) {
if (other && m_acquaintances.insert(other).second) {
monitor(other);
}
},
// a remote proxy broker lost its last local subscriber
on(atom("LEAVE"), arg_match) >> [=](const actor_ptr& other) {
if (other && m_acquaintances.erase(other) > 0) {
demonitor(other);
}
},
// message sent by a proxy group on another node: deliver it
// to this node's local subscribers
on(atom("FORWARD"), arg_match) >> [=](const any_tuple& what) {
m_group->send_all_subscribers(last_sender().get(), what);
},
// down message of a monitored acquaintance: remove it
on<atom("DOWN"), std::uint32_t>() >> [=] {
actor_ptr other = last_sender();
if (other) m_acquaintances.erase(other);
},
// any other message is regular group traffic: relay it to all
// remote acquaintances (local delivery is done by the group)
others() >> [=] {
auto sender = last_sender().get();
for (auto& acquaintance : m_acquaintances) {
acquaintance->enqueue(sender, last_dequeued());
}
}
);
}
private:
local_group_ptr m_group;           // group this broker belongs to
std::set<actor_ptr> m_acquaintances; // remote proxy brokers
};
// A proxy group sends a "JOIN" message to the original group's broker
// when it gains its first local subscriber, and a "LEAVE" message
// once its last local subscriber is gone.
class proxy_broker;
class local_group_proxy : public local_group {
typedef local_group super;
......@@ -105,19 +181,72 @@ class local_group_proxy : public local_group {
public:
template<typename... Args>
local_group_proxy(Args&&... args) : super(std::forward<Args>(args)...) { }
local_group_proxy(actor_ptr remote_broker, Args&&... args)
: super(false, std::forward<Args>(args)...) {
CPPA_REQUIRE(m_broker == nullptr);
CPPA_REQUIRE(remote_broker != nullptr);
CPPA_REQUIRE(remote_broker->is_proxy());
m_broker = std::move(remote_broker);
m_proxy_broker = spawn_hidden<proxy_broker>(this);
}
void enqueue(actor* sender, any_tuple msg) {
detail::middleman_enqueue(process_ptr(), sender, this, std::move(msg));
// Subscribes @p who to this proxy group. When the first local
// subscriber appears, announce this node's interest to the remote
// broker by sending it a "JOIN" with our proxy broker.
group::subscription subscribe(const channel_ptr& who) {
auto res = add_subscriber(who);
if (res.first) {
if (res.second == 1) {
// join the remote source
m_broker->enqueue(nullptr,
make_any_tuple(atom("JOIN"), m_proxy_broker));
}
return {who, this};
}
// already subscribed: return an empty subscription
return {};
}
// Unsubscribes @p who. When the last local subscriber is removed,
// tell the remote broker to stop forwarding to this node.
void unsubscribe(const channel_ptr& who) {
auto res = erase_subscriber(who);
if (res.first && res.second == 0) {
// leave the remote source,
// because there's no more subscriber on this node
m_broker->enqueue(nullptr,
make_any_tuple(atom("LEAVE"), m_proxy_broker));
}
}
void remote_enqueue(actor* sender, any_tuple msg) {
super::enqueue(sender, std::move(msg));
// Sending through a proxy never delivers locally; the message is
// wrapped in a "FORWARD" and routed via the remote broker, which
// echoes it back to this node's proxy broker for local delivery.
void enqueue(actor* sender, any_tuple msg) {
// forward message to the broker
m_broker->enqueue(sender,
make_any_tuple(atom("FORWARD"), std::move(msg)));
}
private:
actor_ptr m_proxy_broker;
};
typedef intrusive_ptr<local_group> local_group_ptr;
typedef intrusive_ptr<local_group_proxy> local_group_proxy_ptr;
// Broker actor of a local_group_proxy. It receives messages relayed by
// the remote group's local_broker and delivers them to the local
// subscribers of the proxy group.
class proxy_broker : public event_based_actor {
public:
proxy_broker(local_group_proxy_ptr grp) : m_group(std::move(grp)) { }
void init() {
become (
// every incoming message is group traffic from the remote
// broker: hand it to the local subscriber set
others() >> [=] {
m_group->send_all_subscribers(last_sender().get(),
last_dequeued());
}
);
}
private:
local_group_proxy_ptr m_group; // proxy group this broker serves
};
class local_group_module : public group::module {
......@@ -126,7 +255,8 @@ class local_group_module : public group::module {
public:
local_group_module()
: super("local"), m_process(process_information::get()) { }
: super("local"), m_process(process_information::get())
, m_actor_utype(uniform_typeid<actor_ptr>()){ }
group_ptr get(const std::string& identifier) {
shared_guard guard(m_instances_mtx);
......@@ -135,7 +265,7 @@ class local_group_module : public group::module {
return i->second;
}
else {
local_group_ptr tmp(new local_group(this, identifier));
local_group_ptr tmp(new local_group(true, this, identifier));
{ // lifetime scope of uguard
upgrade_guard uguard(guard);
auto p = m_instances.insert(std::make_pair(identifier, tmp));
......@@ -146,18 +276,19 @@ class local_group_module : public group::module {
}
intrusive_ptr<group> deserialize(deserializer* source) {
primitive_variant ptup[3];
primitive_type ptypes[] = {pt_u8string, pt_uint32, pt_u8string};
source->read_tuple(3, ptypes, ptup);
auto& identifier = cppa::get<std::string>(ptup[0]);
auto process_id = cppa::get<std::uint32_t>(ptup[1]);
auto& node_id = cppa::get<std::string>(ptup[2]);
if ( process_id == process().process_id()
&& equal(node_id, process().node_id())) {
// deserialize {identifier, process_id, node_id}
auto pv_identifier = source->read_value(pt_u8string);
auto& identifier = cppa::get<std::string>(pv_identifier);
// deserialize broker
actor_ptr broker;
m_actor_utype->deserialize(&broker, source);
CPPA_REQUIRE(broker != nullptr);
if (!broker) return nullptr;
if (broker->parent_process() == process()) {
return this->get(identifier);
}
else {
process_information pinf(process_id, node_id);
auto& pinf = broker->parent_process();
shared_guard guard(m_proxies_mtx);
auto& node_map = m_proxies[pinf];
auto i = node_map.find(identifier);
......@@ -165,14 +296,9 @@ class local_group_module : public group::module {
return i->second;
}
else {
local_group_ptr tmp(new local_group_proxy(this, identifier));
process_information_ptr piptr;
// re-use process_information_ptr from another proxy if possible
if (node_map.empty()) {
piptr.reset(new process_information(pinf));
} else {
piptr = node_map.begin()->second->process_ptr();
}
local_group_ptr tmp(new local_group_proxy(broker, this,
identifier,
broker->parent_process_ptr()));
upgrade_guard uguard(guard);
auto p = node_map.insert(std::make_pair(identifier, tmp));
// someone might preempt us
......@@ -182,11 +308,10 @@ class local_group_module : public group::module {
}
void serialize(local_group* ptr, serializer* sink) {
primitive_variant ptup[3];
ptup[0] = ptr->identifier();
ptup[1] = ptr->process().process_id();
ptup[2] = to_string(ptr->process().node_id());
sink->write_tuple(3, ptup);
// serialize identifier & broker
sink->write_value(ptr->identifier());
CPPA_REQUIRE(ptr->broker() != nullptr);
m_actor_utype->serialize(&ptr->broker(), sink);
}
inline const process_information& process() const {
......@@ -198,6 +323,7 @@ class local_group_module : public group::module {
typedef std::map<std::string, local_group_ptr> local_group_map;
process_information_ptr m_process;
const uniform_type_info* m_actor_utype;
util::shared_spinlock m_instances_mtx;
local_group_map m_instances;
util::shared_spinlock m_proxies_mtx;
......@@ -205,10 +331,15 @@ class local_group_module : public group::module {
};
local_group::local_group(local_group_module* mod,
local_group::local_group(bool spawn_local_broker,
local_group_module* mod,
std::string id,
process_information_ptr parent)
: group(mod, std::move(id)), m_process(std::move(parent)) { }
: group(mod, std::move(id)), m_process(std::move(parent)) {
if (spawn_local_broker) {
m_broker = spawn_hidden<local_broker>(this);
}
}
void local_group::serialize(serializer* sink) {
// this cast is safe, because the only available constructor accepts
......
......@@ -35,6 +35,17 @@ std::vector<string_pair> get_kv_pairs(int argc, char** argv, int begin = 1) {
return result;
}
// Test helper: echoes the first received message back to its sender
// (as an identical tuple), then terminates.
struct reflector : public event_based_actor {
void init() {
become (
others() >> [=] {
reply_tuple(last_dequeued());
quit();
}
);
}
};
int client_part(const std::vector<string_pair>& args) {
CPPA_TEST(test__remote_actor_client_part);
auto i = std::find_if(args.begin(), args.end(),
......@@ -93,13 +104,38 @@ int client_part(const std::vector<string_pair>& args) {
}
);
}
// test group communication
auto grp = group::anonymous();
spawn_in_group<reflector>(grp);
spawn_in_group<reflector>(grp);
receive_response (sync_send(server, atom("Spawn5"), grp)) (
on(atom("ok")) >> [&] {
send(grp, "Hello reflectors!", 5.0);
},
after(std::chrono::seconds(10)) >> [&] {
CPPA_ERROR("unexpected timeout!");
}
);
// receive seven reply messages (2 local, 5 remote)
int x = 0;
receive_for(x, 7) (
on("Hello reflectors!", 5.0) >> [] { },
others() >> [&] {
CPPA_ERROR("unexpected message; "
<< __FILE__ << " line " << __LINE__ << ": "
<< to_string(self->last_dequeued()));
}
);
// wait for locally spawned reflectors
await_all_others_done();
send(server, atom("farewell"));
shutdown();
return CPPA_TEST_RESULT;
}
} // namespace <anonymous>
int main(int argc, char** argv) {
cout << "argv[0] = " << argv[0] << endl;
std::string app_path = argv[0];
bool run_remote_actor = true;
if (argc > 1) {
......@@ -169,13 +205,30 @@ cout << "argv[0] = " << argv[0] << endl;
}
);
// test 100 sync messages
cout << "test 100 synchronous messages" << endl;
int i = 0;
receive_for(i, 100) (
others() >> [] {
reply_tuple(self->last_dequeued());
}
);
cout << "test group communication via network" << endl;
// group test
receive (
on(atom("Spawn5"), arg_match) >> [](const group_ptr& grp) {
for (int i = 0; i < 5; ++i) {
spawn_in_group<reflector>(grp);
}
reply(atom("ok"));
}
);
await_all_others_done();
cout << "wait for a last goodbye" << endl;
receive (
on(atom("farewell")) >> [] { }
);
// wait until separate process (in sep. thread) finished execution
if (run_remote_actor) child.join();
shutdown();
return CPPA_TEST_RESULT;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment