Commit 993ad2c2 authored by Dominik Charousset's avatar Dominik Charousset

replaced post_office and mailman with a single middleman that handles all asynchronous I/O

parent 808b96c7
......@@ -99,14 +99,12 @@ set(LIBCPPA_SRC
src/ipv4_acceptor.cpp
src/ipv4_io_stream.cpp
src/local_actor.cpp
src/mailman.cpp
src/middleman.cpp
src/network_manager.cpp
src/object.cpp
src/object_array.cpp
src/partial_function.cpp
src/pattern.cpp
src/post_office.cpp
src/primitive_variant.cpp
src/process_information.cpp
src/receive.cpp
......
......@@ -55,14 +55,12 @@ cppa/detail/get_behavior.hpp
cppa/detail/group_manager.hpp
cppa/detail/implicit_conversions.hpp
cppa/detail/list_member.hpp
cppa/detail/mailman.hpp
cppa/detail/map_member.hpp
cppa/detail/matches.hpp
cppa/detail/network_manager.hpp
cppa/detail/object_array.hpp
cppa/detail/object_impl.hpp
cppa/detail/pair_member.hpp
cppa/detail/post_office.hpp
cppa/detail/primitive_member.hpp
cppa/detail/projection.hpp
cppa/detail/pseudo_tuple.hpp
......@@ -199,13 +197,11 @@ src/fiber.cpp
src/group.cpp
src/group_manager.cpp
src/local_actor.cpp
src/mailman.cpp
src/network_manager.cpp
src/object.cpp
src/object_array.cpp
src/partial_function.cpp
src/pattern.cpp
src/post_office.cpp
src/primitive_variant.cpp
src/process_information.cpp
src/receive.cpp
......
......@@ -31,10 +31,79 @@
#ifndef MIDDLEMAN_HPP
#define MIDDLEMAN_HPP
class middleman
{
public:
middleman();
#include <memory>
#include "cppa/actor.hpp"
#include "cppa/process_information.hpp"
#include "cppa/util/acceptor.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/addressed_message.hpp"
#include "cppa/detail/singleton_manager.hpp"
namespace cppa { namespace detail {
// Tags the active union member of a middleman_message and selects the
// action the middleman loop performs when the message is dequeued.
enum class middleman_message_type {
    add_peer,          // attach an already-connected stream pair as a peer
    publish,           // start accepting connections for a published actor
    unpublish,         // stop accepting connections for a published actor
    outgoing_message,  // forward a serialized message to a known peer
    shutdown           // terminate the middleman loop
};
// Intrusive queue node carrying one command for the middleman thread.
// Exactly one union member is active, selected by `type`; the
// constructors activate it via call_ctor and the destructor tears it
// down again (see middleman.cpp).
struct middleman_message {
    middleman_message* next;            // intrusive link for single_reader_queue
    const middleman_message_type type;  // selects the active union member
    union {
        // type == add_peer
        std::pair<util::io_stream_ptr_pair, process_information_ptr> new_peer;
        // type == publish
        std::pair<std::unique_ptr<util::acceptor>, actor_ptr> new_published_actor;
        // type == unpublish
        actor_ptr published_actor;
        // type == outgoing_message
        std::pair<process_information_ptr, addressed_message> out_msg;
    };
    middleman_message();  // shutdown message (no payload)
    middleman_message(util::io_stream_ptr_pair, process_information_ptr);
    middleman_message(std::unique_ptr<util::acceptor>, actor_ptr);
    middleman_message(process_information_ptr, addressed_message);
    middleman_message(actor_ptr);
    ~middleman_message();
    // Convenience factory: forwards to the matching constructor and
    // wraps the heap-allocated message for ownership transfer.
    template<typename... Args>
    static inline std::unique_ptr<middleman_message> create(Args&&... args) {
        return std::unique_ptr<middleman_message>(new middleman_message(std::forward<Args>(args)...));
    }
};
typedef intrusive::single_reader_queue<middleman_message> middleman_queue;
void middleman_loop(int pipe_rd, middleman_queue& queue);
// Builds a middleman_message from the given arguments and hands it to
// the network manager, which enqueues it for the middleman thread and
// wakes the thread through its pipe.
template<typename... Args>
inline void send2mm(Args&&... args) {
    auto nm = singleton_manager::get_network_manager();
    nm->send_to_middleman(middleman_message::create(std::forward<Args>(args)...));
}
// Asks the middleman to manage an already-connected peer (used by
// remote_actor() after the handshake completed).
inline void middleman_add_peer(util::io_stream_ptr_pair peer_streams,
                               process_information_ptr peer_ptr) {
    send2mm(std::move(peer_streams), std::move(peer_ptr));
}

// Transfers ownership of `server` to the middleman, which accepts
// incoming connections on behalf of `published_actor`.
inline void middleman_publish(std::unique_ptr<util::acceptor> server,
                              actor_ptr published_actor) {
    send2mm(std::move(server), std::move(published_actor));
}

// Asks the middleman to close the acceptor publishing `whom`.
inline void middleman_unpublish(actor_ptr whom) {
    send2mm(std::move(whom));
}

// Asks the middleman to deliver `outgoing_message` to `peer`.
inline void middleman_enqueue(process_information_ptr peer,
                              addressed_message outgoing_message) {
    send2mm(std::move(peer), std::move(outgoing_message));
}
} } // namespace cppa::detail
#endif // MIDDLEMAN_HPP
......@@ -37,8 +37,7 @@
namespace cppa { namespace detail {
struct po_message;
struct mm_message;
struct middleman_message;
class network_manager {
......@@ -50,9 +49,7 @@ class network_manager {
virtual void stop() = 0;
virtual void send_to_post_office(std::unique_ptr<po_message> msg) = 0;
virtual void send_to_mailman(std::unique_ptr<mm_message> msg) = 0;
virtual void send_to_middleman(std::unique_ptr<middleman_message> msg) = 0;
static network_manager* create_singleton();
......
......@@ -29,14 +29,17 @@
#include "cppa/atom.hpp"
#include "cppa/to_string.hpp"
#include "cppa/any_tuple.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/middleman.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/singleton_manager.hpp"
#include <iostream>
namespace cppa {
actor_proxy::actor_proxy(std::uint32_t mid, const process_information_ptr& pptr)
......@@ -49,8 +52,7 @@ void actor_proxy::forward_message(const process_information_ptr& piptr,
any_tuple&& msg,
message_id_t id ) {
detail::addressed_message amsg{sender, this, std::move(msg), id};
detail::singleton_manager::get_network_manager()
->send_to_mailman(detail::mm_message::create(piptr, std::move(amsg)));
detail::middleman_enqueue(piptr, std::move(amsg));
}
void actor_proxy::enqueue(actor* sender, any_tuple msg) {
......
......@@ -28,90 +28,168 @@
\******************************************************************************/
#include <set>
#include <map>
#include <vector>
#include <cstring>
#include <sstream>
#include <iostream>
#include "cppa/on.hpp"
#include "cppa/actor.hpp"
#include "cppa/match.hpp"
#include "cppa/config.hpp"
#include "cppa/to_string.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/binary_serializer.hpp"
#include "cppa/uniform_type_info.hpp"
#include "cppa/binary_deserializer.hpp"
#include "cppa/process_information.hpp"
#include "cppa/util/buffer.hpp"
#include "cppa/util/acceptor.hpp"
#include "cppa/util/input_stream.hpp"
#include "cppa/util/output_stream.hpp"
#include "cppa/detail/middleman.hpp"
#include "cppa/detail/addressed_message.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/util/buffer.hpp"
#include "cppa/util/input_stream.hpp"
#include "cppa/util/output_stream.hpp"
using namespace std;
#define DEBUG(arg) \
{ \
std::ostringstream oss; \
#define DEBUG(arg) { \
ostringstream oss; \
oss << "[process id: " \
<< cppa::process_information::get()->process_id() \
<< "] " << arg << std::endl; \
std::cout << oss.str(); \
} (void) 0
<< "] " << arg << endl; \
cout << oss.str(); \
} (void) 0
#undef DEBUG
#define DEBUG(unused) ((void) 0)
namespace cppa { namespace detail {
namespace { const size_t ui32_size = sizeof(std::uint32_t); }
namespace {
const size_t ui32_size = sizeof(uint32_t);
// Constructs a T in place at `var` via global placement new, perfectly
// forwarding the constructor arguments. Used to activate a member of
// the non-trivial union in middleman_message.
// Fixes: qualify std::forward (the original relied on the file-scope
// `using namespace std`) and use ::new on a void* so a class-specific
// operator new cannot hijack the placement form.
template<typename T, typename... Args>
void call_ctor(T& var, Args&&... args) {
    ::new (static_cast<void*>(&var)) T(std::forward<Args>(args)...);
}
// Runs T's destructor on `var` without releasing its storage; the
// counterpart of call_ctor used when switching the active union member.
template<typename T>
void call_dtor(T& var) {
    var.~T();
}
// Removes the first element of `haystack` comparing equal to `needle`;
// no-op when no such element exists.
// Fixes: replaces the hand-rolled find_if-with-equality-lambda (and its
// now-unused value_type typedef) with std::find, which expresses the
// same search directly.
template<class Container, class Element>
void erase_from(Container& haystack, const Element& needle) {
    auto last = std::end(haystack);
    auto i = std::find(std::begin(haystack), last, needle);
    if (i != last) haystack.erase(i);
}
// Removes the first element of `container` satisfying `predicate`,
// leaving the container untouched when nothing matches.
template<class Container, class UnaryPredicate>
void erase_from_if(Container& container, const UnaryPredicate& predicate) {
    auto first = std::begin(container);
    auto sentinel = std::end(container);
    auto hit = std::find_if(first, sentinel, predicate);
    if (hit != sentinel) container.erase(hit);
}
} // namespace <anonmyous>
// middleman_message constructors: each one activates exactly one union
// member via call_ctor and records the choice in `type`, which the
// destructor uses to run the matching teardown.
// Fix: initialize the intrusive `next` link with nullptr instead of 0,
// consistent with the rest of the file.

// shutdown command — carries no payload
middleman_message::middleman_message()
: next(nullptr), type(middleman_message_type::shutdown) { }

// add_peer — established stream pair plus the peer's identity
middleman_message::middleman_message(util::io_stream_ptr_pair a0,
                                     process_information_ptr a1)
: next(nullptr), type(middleman_message_type::add_peer) {
    call_ctor(new_peer, move(a0), move(a1));
}

// publish — acceptor plus the actor it publishes
middleman_message::middleman_message(unique_ptr<util::acceptor> a0,
                                     actor_ptr a1)
: next(nullptr), type(middleman_message_type::publish) {
    call_ctor(new_published_actor, move(a0), move(a1));
}

// unpublish — actor whose acceptor should be closed
middleman_message::middleman_message(actor_ptr a0)
: next(nullptr), type(middleman_message_type::unpublish) {
    call_ctor(published_actor, move(a0));
}

// outgoing_message — destination peer plus the serializable message
middleman_message::middleman_message(process_information_ptr a0,
                                     addressed_message a1)
: next(nullptr), type(middleman_message_type::outgoing_message) {
    call_ctor(out_msg, move(a0), move(a1));
}
// Destroys whichever union member the constructor activated, as
// recorded by `type`; shutdown messages carry no payload, so nothing
// needs to be torn down for them.
middleman_message::~middleman_message() {
    switch (type) {
        case middleman_message_type::add_peer:
            call_dtor(new_peer);
            break;
        case middleman_message_type::publish:
            call_dtor(new_published_actor);
            break;
        case middleman_message_type::unpublish:
            call_dtor(published_actor);
            break;
        case middleman_message_type::outgoing_message:
            call_dtor(out_msg);
            break;
        default:
            break;
    }
}
class middleman;
class connection {
typedef intrusive::single_reader_queue<middleman_message> middleman_queue;
class channel : public ref_counted {
public:
connection(middleman* ptr, native_socket_type ifd, native_socket_type ofd)
: m_parent(ptr), m_has_unwritten_data(false)
, m_read_handle(ifd), m_write_handle(ofd) { }
channel(middleman* ptr, native_socket_type read_fd)
: m_parent(ptr), m_read_handle(read_fd) { }
virtual bool continue_reading() = 0;
virtual bool continue_writing() = 0;
virtual void write(const addressed_message& msg);
inline native_socket_type read_handle() const {
return m_read_handle;
}
inline native_socket_type write_handle() const {
return m_write_handle;
}
virtual bool is_acceptor_of(const actor_ptr&) const {
return false;
}
inline bool has_unwritten_data() const {
return m_has_unwritten_data;
}
protected:
inline middleman* parent() { return m_parent; }
inline const middleman* parent() const { return m_parent; }
inline void has_unwritten_data(bool value) {
m_has_unwritten_data = value;
}
private:
middleman* m_parent;
bool m_has_unwritten_data;
native_socket_type m_read_handle;
native_socket_type m_write_handle;
};
class peer_connection : public connection {
typedef intrusive_ptr<channel> channel_ptr;
typedef vector<channel_ptr> channel_ptr_vector;
class peer_connection : public channel {
typedef connection super;
typedef channel super;
public:
......@@ -119,10 +197,12 @@ class peer_connection : public connection {
util::input_stream_ptr istream,
util::output_stream_ptr ostream,
process_information_ptr peer_ptr = nullptr)
: super(parent, istream->read_file_handle(), ostream->write_file_handle())
: super(parent, istream->read_file_handle())
, m_istream(istream), m_ostream(ostream), m_peer(peer_ptr)
, m_rd_state((peer_ptr) ? wait_for_msg_size : wait_for_process_info)
, m_meta_msg(uniform_typeid<addressed_message>()) {
, m_meta_msg(uniform_typeid<addressed_message>())
, m_has_unwritten_data(false)
, m_write_handle(ostream->write_file_handle()) {
m_rd_buf.reset(m_rd_state == wait_for_process_info
? ui32_size + process_information::node_id_size
: ui32_size);
......@@ -131,12 +211,12 @@ class peer_connection : public connection {
~peer_connection() {
if (m_peer) {
// collect all children (proxies to actors of m_peer)
std::vector<actor_proxy_ptr> children;
vector<actor_proxy_ptr> children;
children.reserve(20);
get_actor_proxy_cache().erase_all(m_peer->node_id(),
m_peer->process_id(),
[&](actor_proxy_ptr& pptr) {
children.push_back(std::move(pptr));
children.push_back(move(pptr));
});
// kill all proxies
for (actor_proxy_ptr& pptr: children) {
......@@ -147,15 +227,53 @@ class peer_connection : public connection {
}
}
bool continue_reading() {
for (;;) {
try { m_rd_buf.append_from(m_istream.get()); }
catch (std::exception& e) {
DEBUG(e.what());
return false;
inline native_socket_type write_handle() const {
return m_write_handle;
}
if (!m_rd_buf.full()) return true; // try again later
bool continue_reading();
// Flushes as much of the pending write buffer as the socket accepts.
// Partial writes keep the unwritten tail (and the dirty flag) so the
// middleman keeps polling this connection for writability; a full
// write clears both. Always returns true — write errors surface as
// exceptions from write_some and are handled by the event loop.
// Fix: initialize `written` at declaration instead of declare-then-assign.
bool continue_writing() {
    DEBUG("peer_connection::continue_writing");
    if (has_unwritten_data()) {
        size_t written = m_ostream->write_some(m_wr_buf.data(),
                                               m_wr_buf.size());
        if (written != m_wr_buf.size()) {
            // keep the unsent tail for the next writability event
            m_wr_buf.erase_leading(written);
        }
        else {
            m_wr_buf.reset();
            has_unwritten_data(false);
        }
    }
    return true;
}
// Serializes `msg` into the write buffer. When no data is pending,
// attempts an immediate send; a partial send keeps the tail buffered
// and marks the connection dirty so the event loop polls it for
// writability. When data is already pending, the new bytes simply
// queue up behind it.
void write(const addressed_message& msg) {
    binary_serializer bs(&m_wr_buf);
    bs << msg;
    if (has_unwritten_data()) {
        // already waiting for writability; bytes stay buffered
        return;
    }
    size_t written = m_ostream->write_some(m_wr_buf.data(),
                                           m_wr_buf.size());
    if (written == m_wr_buf.size()) {
        m_wr_buf.reset();
    }
    else {
        m_wr_buf.erase_leading(written);
        has_unwritten_data(true);
    }
}
inline bool has_unwritten_data() const {
return m_has_unwritten_data;
}
protected:
inline void has_unwritten_data(bool value) {
m_has_unwritten_data = value;
}
private:
......@@ -174,10 +292,445 @@ class peer_connection : public connection {
process_information_ptr m_peer;
read_state m_rd_state;
const uniform_type_info* m_meta_msg;
bool m_has_unwritten_data;
native_socket_type m_write_handle;
util::buffer m_rd_buf;
util::buffer m_wr_buf;
};
typedef intrusive_ptr<peer_connection> peer_connection_ptr;
typedef map<process_information, peer_connection_ptr> peer_map;
// Multiplexes all network I/O of this process on a single thread: owns
// every channel (peer connections, acceptors, the pipe overseer),
// tracks which peers still have unwritten data, and defers channel
// insertion/erasure so iteration over m_channels stays safe while
// handlers run.
class middleman {

 public:

    middleman() : m_done(false), m_pself(process_information::get()) { }

    // Constructs a new channel in place; insertion into m_channels is
    // deferred until the end of the current event-loop iteration.
    template<class Connection, typename... Args>
    inline void add_channel(Args&&... args) {
        m_new_channels.emplace_back(new Connection(this, forward<Args>(args)...));
    }

    // Stages an existing channel for insertion (same deferral as above).
    inline void add_channel_ptr(channel_ptr ptr) {
        m_new_channels.push_back(std::move(ptr));
    }

    // Registers cptr as the connection for pinf; keeps the existing
    // entry when the peer is already known.
    inline void add_peer(const process_information& pinf, peer_connection_ptr cptr) {
        auto& ptrref = m_peers[pinf];
        if (ptrref) {
            DEBUG("peer already defined!");
        }
        else {
            ptrref = cptr;
        }
    }

    // The select()-based event loop (defined below); runs until quit().
    void operator()(int pipe_fd, middleman_queue& queue);

    // Identity of this process (cached process_information singleton).
    inline const process_information_ptr& pself() {
        return m_pself;
    }

    // Ends the event loop after the current iteration.
    inline void quit() {
        m_done = true;
    }

    // Returns the connection to pinf, or nullptr for unknown peers.
    peer_connection_ptr peer(const process_information& pinf) {
        auto i = m_peers.find(pinf);
        if (i != m_peers.end()) {
            CPPA_REQUIRE(i->second != nullptr);
            return i->second;
        }
        return nullptr;
    }

    // Returns the acceptor channel publishing `whom`, or nullptr.
    channel_ptr acceptor_of(const actor_ptr& whom) {
        auto last = m_channels.end();
        auto i = find_if(m_channels.begin(), last, [=](channel_ptr& ptr) {
            return ptr->is_acceptor_of(whom);
        });
        return (i != last) ? *i : nullptr;
    }

    // Polls ptr for writability until its buffer drains.
    void continue_writing(peer_connection_ptr ptr) {
        m_peers_with_unwritten_data.insert(move(ptr));
    }

    // Marks ptr for removal; erasure is deferred (see operator()).
    void erase(channel_ptr ptr) {
        m_erased_channels.insert(move(ptr));
    }

 private:

    bool m_done;                        // set by quit(); ends the loop
    process_information_ptr m_pself;    // this process' identity
    peer_map m_peers;                   // process -> connection
    channel_ptr_vector m_channels;      // active channels
    channel_ptr_vector m_new_channels;  // staged for insertion
    set<peer_connection_ptr> m_peers_with_unwritten_data;
    set<channel_ptr> m_erased_channels; // staged for removal

};
bool peer_connection::continue_reading() {
DEBUG("peer_connection::continue_reading");
for (;;) {
m_rd_buf.append_from(m_istream.get());
if (!m_rd_buf.full()) return true; // try again later
switch (m_rd_state) {
case wait_for_process_info: {
DEBUG("peer_connection::continue_reading: "
"wait_for_process_info");
uint32_t process_id;
process_information::node_id_type node_id;
memcpy(&process_id, m_rd_buf.data(), sizeof(uint32_t));
memcpy(node_id.data(), m_rd_buf.data() + sizeof(uint32_t),
process_information::node_id_size);
m_peer.reset(new process_information(process_id, node_id));
parent()->add_peer(*m_peer, this);
// initialization done
m_rd_state = wait_for_msg_size;
m_rd_buf.reset(sizeof(uint32_t));
DEBUG("pinfo read: "
<< m_peer->process_id()
<< "@"
<< to_string(m_peer->node_id()));
break;
}
case wait_for_msg_size: {
DEBUG("peer_connection::continue_reading: wait_for_msg_size");
uint32_t msg_size;
memcpy(&msg_size, m_rd_buf.data(), sizeof(uint32_t));
DEBUG("msg_size: " << msg_size);
m_rd_buf.reset(msg_size);
m_rd_state = read_message;
break;
}
case read_message: {
DEBUG("peer_connection::continue_reading: read_message");
addressed_message msg;
binary_deserializer bd(m_rd_buf.data(), m_rd_buf.size());
m_meta_msg->deserialize(&msg, &bd);
auto& content = msg.content();
DEBUG("<-- " << to_string(msg));
match(content) (
on(atom("MONITOR")) >> [&]() {
auto receiver = msg.receiver().downcast<actor>();
CPPA_REQUIRE(receiver.get() != nullptr);
if (!receiver) {
DEBUG("empty receiver");
}
else if (receiver->parent_process() == *process_information::get()) {
auto mpeer = m_peer;
// this message was send from a proxy
receiver->attach_functor([mpeer, receiver](uint32_t reason) {
addressed_message kmsg{receiver, receiver, make_any_tuple(atom("KILL_PROXY"), reason)};
middleman_enqueue(mpeer, kmsg);
});
}
else {
DEBUG("MONITOR received for a remote actor");
}
},
on(atom("LINK"), arg_match) >> [&](actor_ptr ptr) {
if (msg.sender()->is_proxy() == false) {
DEBUG("msg.sender() is not a proxy");
return;
}
auto whom = msg.sender().downcast<actor_proxy>();
if ((whom) && (ptr)) whom->local_link_to(ptr);
},
on(atom("UNLINK"), arg_match) >> [](actor_ptr ptr) {
if (ptr->is_proxy() == false) {
DEBUG("msg.sender() is not a proxy");
return;
}
auto whom = ptr.downcast<actor_proxy>();
if ((whom) && (ptr)) whom->local_unlink_from(ptr);
},
others() >> [&]() {
auto receiver = msg.receiver().get();
if (receiver) {
if (msg.id().valid()) {
auto ra = dynamic_cast<actor*>(receiver);
DEBUG("sync message for actor "
<< ra->id());
if (ra) {
ra->sync_enqueue(
msg.sender().get(),
msg.id(),
move(msg.content()));
}
else{
DEBUG("ERROR: sync message to a non-actor");
}
}
else {
DEBUG("async message (sender is "
<< (msg.sender() ? "valid" : "NULL")
<< ")");
receiver->enqueue(
msg.sender().get(),
move(msg.content()));
}
}
else {
DEBUG("empty receiver");
}
}
);
m_rd_buf.reset(sizeof(uint32_t));
m_rd_state = wait_for_msg_size;
break;
}
default: {
CPPA_CRITICAL("illegal state");
}
}
// try to read more (next iteration)
}
}
class peer_acceptor : public channel {
typedef channel super;
public:
peer_acceptor(middleman* parent,
actor_id aid,
unique_ptr<util::acceptor> acceptor)
: super(parent, acceptor->acceptor_file_handle())
, m_actor_id(aid)
, m_acceptor(move(acceptor)) { }
bool is_doorman_of(actor_id aid) const {
return m_actor_id == aid;
}
bool continue_reading() {
DEBUG("peer_acceptor::continue_reading");
// accept as many connections as possible
for (;;) {
auto opt = m_acceptor->try_accept_connection();
if (opt) {
auto& pair = *opt;
auto& pself = parent()->pself();
uint32_t process_id = pself->process_id();
pair.second->write(&m_actor_id, sizeof(actor_id));
pair.second->write(&process_id, sizeof(uint32_t));
pair.second->write(pself->node_id().data(),
pself->node_id().size());
parent()->add_channel<peer_connection>(pair.first,
pair.second);
}
else {
return true;
}
}
}
private:
actor_id m_actor_id;
unique_ptr<util::acceptor> m_acceptor;
};
// Bridges the middleman_queue into the event loop: other threads push a
// middleman_message and write one wakeup word to the pipe; the loop
// sees the pipe become readable, and this channel pops and executes
// exactly one command per wakeup.
class middleman_overseer : public channel {

    typedef channel super;

 public:

    middleman_overseer(middleman* parent, int pipe_fd, middleman_queue& q)
    : super(parent, pipe_fd), m_queue(q) { }

    bool continue_reading() {
        DEBUG("middleman_overseer::continue_reading");
        // consume the wakeup word matching the queued message
        uint32_t dummy;
        if (::read(read_handle(), &dummy, sizeof(dummy)) != sizeof(dummy)) {
            CPPA_CRITICAL("cannot read from pipe");
        }
        // pair with the fence in send_to_middleman so the enqueued
        // message is visible on this thread
        atomic_thread_fence(memory_order_seq_cst);
        unique_ptr<middleman_message> msg(m_queue.try_pop());
        if (!msg) { CPPA_CRITICAL("nullptr dequeued"); }
        switch (msg->type) {
            // wrap an established stream pair into a peer_connection
            case middleman_message_type::add_peer: {
                DEBUG("middleman_overseer: add_peer: "
                      << to_string(*(msg->new_peer.second)));
                auto& new_peer = msg->new_peer;
                auto& io_ptrs = new_peer.first;
                peer_connection_ptr peer;
                peer.reset(new peer_connection(parent(),
                                               io_ptrs.first,
                                               io_ptrs.second,
                                               new_peer.second));
                parent()->add_channel_ptr(peer);
                parent()->add_peer(*(new_peer.second), peer);
                break;
            }
            // start accepting connections for a published actor
            case middleman_message_type::publish: {
                DEBUG("middleman_overseer: publish");
                auto& ptrs = msg->new_published_actor;
                parent()->add_channel<peer_acceptor>(ptrs.second->id(),
                                                     move(ptrs.first));
                break;
            }
            // close the acceptor of a previously published actor
            case middleman_message_type::unpublish: {
                if (msg->published_actor) {
                    DEBUG("middleman_overseer: unpublish actor id "
                          << msg->published_actor->id());
                    auto channel = parent()->acceptor_of(msg->published_actor);
                    if (channel) {
                        parent()->erase(channel);
                    }
                }
                break;
            }
            // serialize and send (or buffer) a message to a known peer
            case middleman_message_type::outgoing_message: {
                DEBUG("middleman_overseer: outgoing_message");
                auto& target_peer = msg->out_msg.first;
                auto& out_msg = msg->out_msg.second;
                CPPA_REQUIRE(target_peer != nullptr);
                auto peer = parent()->peer(*target_peer);
                if (!peer) {
                    DEBUG("message to an unknown peer: " << to_string(out_msg));
                    break;
                }
                DEBUG("--> " << to_string(out_msg));
                auto had_unwritten_data = peer->has_unwritten_data();
                try {
                    peer->write(out_msg);
                    // write() left data buffered: poll for writability
                    if (!had_unwritten_data && peer->has_unwritten_data()) {
                        parent()->continue_writing(peer);
                    }
                }
                catch (exception& e) {
                    DEBUG("peer disconnected: " << e.what());
                    parent()->erase(peer);
                }
                break;
            }
            // end the event loop
            case middleman_message_type::shutdown: {
                DEBUG("middleman: shutdown");
                parent()->quit();
                break;
            }
        }
        return true;
    }

 private:

    middleman_queue& m_queue;

};
// The event loop: select()s over all channel read handles (plus write
// handles of peers with buffered output), dispatches readable/writable
// channels, then applies deferred channel insertions and erasures.
// Runs until quit() is called (shutdown message via the overseer).
// Fixes: (1) wrset was never FD_ZERO'd, so write-fd bits accumulated
// across iterations and stale/closed descriptors could be passed to
// select(); (2) dropped the redundant `if (wrset_ptr)` guard before
// resetting the pointer.
void middleman::operator()(int pipe_fd, middleman_queue& queue) {
    DEBUG("pself: " << to_string(*m_pself));
    int maxfd = 0;
    fd_set rdset;
    fd_set wrset;
    fd_set* wrset_ptr = nullptr;
    m_channels.emplace_back(new middleman_overseer(this, pipe_fd, queue));
    do {
        // rebuild the read set from scratch every iteration
        FD_ZERO(&rdset);
        maxfd = 0;
        CPPA_REQUIRE(m_channels.size() > 0);
        for (auto& channel : m_channels) {
            auto fd = channel->read_handle();
            maxfd = max(maxfd, fd);
            FD_SET(fd, &rdset);
        }
        if (m_peers_with_unwritten_data.empty()) {
            wrset_ptr = nullptr;
        }
        else {
            // rebuild the write set as well (the original never
            // cleared it, leaving stale bits from prior iterations)
            FD_ZERO(&wrset);
            for (auto& peer : m_peers_with_unwritten_data) {
                auto fd = peer->write_handle();
                maxfd = max(maxfd, fd);
                FD_SET(fd, &wrset);
            }
            wrset_ptr = &wrset;
        }
        CPPA_REQUIRE(maxfd > 0);
        DEBUG("select()");
        int sresult;
        do {
            // NOTE(review): an EINTR-interrupted select() also lands
            // here and aborts — confirm signals are blocked on this
            // thread or retry on EINTR
            sresult = select(maxfd + 1, &rdset, wrset_ptr, nullptr, nullptr);
            if (sresult < 0) {
                CPPA_CRITICAL("select() failed");
            }
        }
        while (sresult == 0);
        DEBUG("continue reading ...");
        { // iterate over all channels and remove channels as needed
            for (auto& channel : m_channels) {
                if (FD_ISSET(channel->read_handle(), &rdset)) {
                    bool erase_channel = false;
                    try { erase_channel = !channel->continue_reading(); }
                    catch (std::exception& e) {
                        DEBUG(demangle(typeid(e).name()) << ": " << e.what());
                        erase_channel = true;
                    }
                    if (erase_channel) {
                        DEBUG("erase worker");
                        m_erased_channels.insert(channel);
                    }
                }
            }
        }
        if (wrset_ptr) { // iterate over peers with unwritten data
            DEBUG("continue writing ...");
            for (auto& peer : m_peers_with_unwritten_data) {
                if (FD_ISSET(peer->write_handle(), &wrset)) {
                    bool erase_channel = false;
                    try { erase_channel = !peer->continue_writing(); }
                    catch (std::exception& e) {
                        DEBUG(demangle(typeid(e).name()) << ": " << e.what());
                        erase_channel = true;
                    }
                    if (erase_channel) {
                        DEBUG("erase worker");
                        m_erased_channels.insert(peer);
                    }
                }
            }
        }
        // insert new handlers staged during this iteration
        if (m_new_channels.empty() == false) {
            DEBUG("insert new channel(s)");
            move(m_new_channels.begin(), m_new_channels.end(),
                 back_inserter(m_channels));
            m_new_channels.clear();
        }
        if (!m_erased_channels.empty()) {
            DEBUG("erase channel(s)");
            // erase all marked channels from every bookkeeping container
            for (channel_ptr channel : m_erased_channels) {
                erase_from(m_channels, channel);
                erase_from(m_peers_with_unwritten_data, channel);
                erase_from_if(m_peers, [=](const peer_map::value_type& kvp) {
                    return kvp.second == channel;
                });
            }
            m_erased_channels.clear();
        }
    }
    while (m_done == false);
    DEBUG("middleman done");
}
void middleman_loop(int pipe_fd, middleman_queue& queue) {
DEBUG("run middleman loop");
middleman mm;
mm(pipe_fd, queue);
DEBUG("middleman loop done");
}
} } // namespace cppa::detail
......@@ -44,8 +44,7 @@
#include "cppa/intrusive/single_reader_queue.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/middleman.hpp"
#include "cppa/detail/network_manager.hpp"
namespace {
......@@ -55,11 +54,8 @@ using namespace cppa::detail;
struct network_manager_impl : network_manager {
intrusive::single_reader_queue<mm_message> m_mailman_queue;
std::thread m_mailman_thread;
intrusive::single_reader_queue<po_message> m_post_office_queue;
std::thread m_post_office_thread;
middleman_queue m_middleman_queue;
std::thread m_middleman_thread;
int pipe_fd[2];
......@@ -70,47 +66,35 @@ struct network_manager_impl : network_manager {
// store pipe read handle in local variables for lambda expression
int pipe_fd0 = pipe_fd[0];
// start threads
m_post_office_thread = std::thread([this, pipe_fd0] {
post_office_loop(pipe_fd0, this->m_post_office_queue);
});
m_mailman_thread = std::thread([this] {
mailman_loop(this->m_mailman_queue);
m_middleman_thread = std::thread([this, pipe_fd0] {
middleman_loop(pipe_fd0, this->m_middleman_queue);
});
}
void stop() { // override
//m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE")));
m_mailman_thread.join();
// wait until mailman is done; post_office closes all sockets
std::atomic_thread_fence(std::memory_order_seq_cst);
m_post_office_queue.push_back(new po_message);
//send_to_post_office(po_message{atom("DONE"), -1, 0});
m_post_office_thread.join();
send_to_middleman(middleman_message::create());
m_middleman_thread.join();
close(pipe_fd[0]);
close(pipe_fd[1]);
}
void send_to_post_office(std::unique_ptr<po_message> msg) {
m_post_office_queue.push_back(msg.release());
    // Enqueues msg for the middleman thread and wakes it by writing one
    // word to the pipe the middleman_overseer reads from.
    void send_to_middleman(std::unique_ptr<middleman_message> msg) {
        // queue takes ownership; _push_back is presumably the
        // non-notifying push variant of single_reader_queue — verify
        m_middleman_queue._push_back(msg.release());
        // make the enqueued node visible before the wakeup is written
        // (pairs with the fence in middleman_overseer::continue_reading)
        std::atomic_thread_fence(std::memory_order_seq_cst);
        std::uint32_t dummy = 0;
        if (write(pipe_fd[1], &dummy, sizeof(dummy)) != sizeof(dummy)) {
            CPPA_CRITICAL("cannot write to pipe");
        }
    }
void send_to_mailman(std::unique_ptr<mm_message> msg) {
m_mailman_queue.push_back(msg.release());
//m_mailman->enqueue(nullptr, std::move(msg));
}
};
} // namespace <anonymous>
namespace cppa { namespace detail {
network_manager::~network_manager() {
}
network_manager::~network_manager() { }
network_manager* network_manager::create_singleton() {
return new network_manager_impl;
......
......@@ -50,8 +50,7 @@
#include "cppa/intrusive/single_reader_queue.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/middleman.hpp"
#include "cppa/detail/ipv4_acceptor.hpp"
#include "cppa/detail/ipv4_io_stream.hpp"
#include "cppa/detail/actor_registry.hpp"
......@@ -68,7 +67,7 @@ namespace cppa {
// Makes `whom` reachable for remote processes through `acceptor`:
// registers the actor in the registry and transfers the acceptor to
// the middleman, which starts accepting connections on its behalf.
// Fix: reject EITHER argument being null — the original guard only
// returned when both were null, so a null `whom` with a valid acceptor
// dereferenced null at whom->id().
void publish(actor_ptr whom, std::unique_ptr<util::acceptor> acceptor) {
    if (!whom || !acceptor) return;
    detail::singleton_manager::get_actor_registry()->put(whom->id(), whom);
    detail::middleman_publish(std::move(acceptor), whom);
}
actor_ptr remote_actor(util::io_stream_ptr_pair peer) {
......@@ -85,8 +84,7 @@ actor_ptr remote_actor(util::io_stream_ptr_pair peer) {
peer.first->read(peer_node_id.data(), peer_node_id.size());
process_information_ptr pinfptr(new process_information(peer_pid, peer_node_id));
//auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
detail::mailman_add_peer(peer, pinfptr);
detail::post_office_add_peer(peer, pinfptr);
detail::middleman_add_peer(peer, pinfptr);
return detail::get_actor_proxy_cache().get(remote_actor_id,
pinfptr->process_id(),
pinfptr->node_id());
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment