Commit 783cd02d authored by neverlord

select()

parent 69ef1b78
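The diff below replaces the thread-per-connection post office (one po_peer thread per socket plus a po_doorman thread, fed through post_office_msg queues) with a single loop that multiplexes a control pipe and all peer/doorman sockets via select(). The following standalone sketch shows only that pattern, not the library itself; control_msg, watched, and the event code are illustrative names, not part of the libcppa API.

#include <sys/select.h>
#include <unistd.h>
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct control_msg { std::uint32_t event; std::int32_t fd; };

int main()
{
    int ctrl[2];
    if (pipe(ctrl) != 0) { perror("pipe"); return 1; }
    std::vector<int> watched; // sockets owned by the loop (empty in this sketch)
    // another thread would write fixed-size control messages to ctrl[1];
    // here we enqueue a single "shutdown" request up front
    control_msg shutdown_req{1u, -1};
    if (write(ctrl[1], &shutdown_req, sizeof(shutdown_req)) != sizeof(shutdown_req))
    {
        perror("write"); return 1;
    }
    bool done = false;
    while (!done)
    {
        fd_set readset;
        FD_ZERO(&readset);
        FD_SET(ctrl[0], &readset);
        int maxfd = ctrl[0];
        for (int fd : watched) { FD_SET(fd, &readset); maxfd = std::max(maxfd, fd); }
        if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0)
        {
            perror("select"); return 1;
        }
        // sockets flagged in readset would be handled here (read_and_continue style)
        if (FD_ISSET(ctrl[0], &readset))
        {
            control_msg msg;
            if (read(ctrl[0], &msg, sizeof(msg)) != sizeof(msg)) { perror("read"); return 1; }
            if (msg.event == 1u) done = true; // shutdown request
            // other event codes would add fds to / remove fds from `watched`
        }
    }
    close(ctrl[0]);
    close(ctrl[1]);
    return 0;
}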
......@@ -39,7 +39,6 @@ libcppa_la_SOURCES = \
src/partial_function.cpp \
src/pattern.cpp \
src/post_office.cpp \
src/post_office_msg.cpp \
src/primitive_variant.cpp \
src/process_information.cpp \
src/receive.cpp \
......@@ -102,6 +101,7 @@ nobase_library_include_HEADERS = \
cppa/detail/demangle.hpp \
cppa/detail/disablable_delete.hpp \
cppa/detail/empty_tuple.hpp \
cppa/detail/filter_result.hpp \
cppa/detail/get_behavior.hpp \
cppa/detail/group_manager.hpp \
cppa/detail/implicit_conversions.hpp \
......@@ -112,12 +112,12 @@ nobase_library_include_HEADERS = \
cppa/detail/matches.hpp \
cppa/detail/mock_scheduler.hpp \
cppa/detail/native_socket.hpp \
cppa/detail/nestable_receive_actor.hpp \
cppa/detail/network_manager.hpp \
cppa/detail/object_array.hpp \
cppa/detail/object_impl.hpp \
cppa/detail/pair_member.hpp \
cppa/detail/post_office.hpp \
cppa/detail/post_office_msg.hpp \
cppa/detail/primitive_member.hpp \
cppa/detail/projection.hpp \
cppa/detail/pseudo_tuple.hpp \
......
......@@ -169,8 +169,6 @@ src/actor_registry.cpp
cppa/detail/uniform_type_info_map.hpp
cppa/detail/network_manager.hpp
src/network_manager.cpp
cppa/detail/post_office_msg.hpp
src/post_office_msg.cpp
cppa/detail/group_manager.hpp
src/group_manager.cpp
cppa/detail/empty_tuple.hpp
......
......@@ -31,8 +31,7 @@
#ifndef NETWORK_MANAGER_HPP
#define NETWORK_MANAGER_HPP
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office_msg.hpp"
#include "cppa/detail/post_office.hpp"
namespace cppa { namespace detail {
......@@ -47,6 +46,8 @@ class network_manager
virtual void stop() = 0;
virtual void send_to_post_office(po_message const& msg) = 0;
virtual void send_to_post_office(any_tuple msg) = 0;
virtual void send_to_mailman(any_tuple msg) = 0;
......
......@@ -33,12 +33,20 @@
#include <memory>
#include "cppa/atom.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/detail/native_socket.hpp"
namespace cppa { namespace detail {
void post_office_loop();
struct po_message
{
atom_value flag;
native_socket_type fd;
actor_id aid;
};
void post_office_loop(int input_fd);
void post_office_add_peer(native_socket_type peer_socket,
process_information_ptr const& peer_ptr);
......
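The new po_message above is delivered to the loop by writing the raw struct to a pipe (see send_to_post_office further down in this diff) and reading it back in a single call; the struct is far smaller than PIPE_BUF, so the write is atomic and no partial-read handling is needed. A minimal sketch of that convention, with pipe_message as an illustrative stand-in for po_message (atom_value, native_socket_type, and actor_id replaced by plain integer types):

#include <unistd.h>
#include <cstdint>
#include <cstdio>

// illustrative stand-in for po_message; well below PIPE_BUF, so one
// write() delivers it atomically and one read() retrieves it whole
struct pipe_message
{
    std::uint64_t flag; // stands in for the atom_value tag
    std::int32_t  fd;   // socket the message refers to, or -1
    std::uint32_t aid;  // actor id, or 0
};

int main()
{
    int fds[2];
    if (pipe(fds) != 0) { perror("pipe"); return 1; }
    pipe_message out{42u, -1, 7u};
    if (write(fds[1], &out, sizeof(out)) != sizeof(out)) { perror("write"); return 1; }
    pipe_message in{};
    if (read(fds[0], &in, sizeof(in)) != sizeof(in)) { perror("read"); return 1; }
    std::printf("flag=%llu fd=%d aid=%u\n",
                static_cast<unsigned long long>(in.flag),
                static_cast<int>(in.fd),
                static_cast<unsigned>(in.aid));
    close(fds[0]);
    close(fds[1]);
    return 0;
}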
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef POST_OFFICE_MSG_HPP
#define POST_OFFICE_MSG_HPP
#include "cppa/attachable.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/intrusive_ptr.hpp"
#include "cppa/process_information.hpp"
#include "cppa/detail/native_socket.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa { namespace detail {
class post_office_msg
{
public:
enum msg_type
{
invalid_type,
add_peer_type,
add_server_socket_type,
proxy_exited_type
};
struct add_peer
{
native_socket_type sockfd;
process_information_ptr peer;
actor_proxy_ptr first_peer_actor;
std::unique_ptr<attachable> attachable_ptr;
add_peer(native_socket_type peer_socket,
process_information_ptr const& peer_ptr,
actor_proxy_ptr const& peer_actor_ptr,
std::unique_ptr<attachable>&& peer_observer);
};
struct add_server_socket
{
native_socket_type server_sockfd;
actor_ptr published_actor;
add_server_socket(native_socket_type ssockfd, actor_ptr const& whom);
};
struct proxy_exited
{
actor_proxy_ptr proxy_ptr;
inline proxy_exited(actor_proxy_ptr const& who) : proxy_ptr(who) { }
};
inline post_office_msg() : next(nullptr), m_type(invalid_type) { }
post_office_msg(native_socket_type arg0,
process_information_ptr const& arg1,
actor_proxy_ptr const& arg2,
std::unique_ptr<attachable>&& arg3);
post_office_msg(native_socket_type arg0, actor_ptr const& arg1);
post_office_msg(actor_proxy_ptr const& proxy_ptr);
inline bool is_add_peer_msg() const
{
return m_type == add_peer_type;
}
inline bool is_add_server_socket_msg() const
{
return m_type == add_server_socket_type;
}
inline bool is_proxy_exited_msg() const
{
return m_type == proxy_exited_type;
}
inline add_peer& as_add_peer_msg()
{
return m_add_peer_msg;
}
inline add_server_socket& as_add_server_socket_msg()
{
return m_add_server_socket;
}
inline proxy_exited& as_proxy_exited_msg()
{
return m_proxy_exited;
}
~post_office_msg();
post_office_msg* next;
private:
msg_type m_type;
union
{
add_peer m_add_peer_msg;
add_server_socket m_add_server_socket;
proxy_exited m_proxy_exited;
};
};
constexpr std::uint32_t rd_queue_event = 0x00;
constexpr std::uint32_t unpublish_actor_event = 0x01;
constexpr std::uint32_t close_socket_event = 0x02;
constexpr std::uint32_t shutdown_event = 0x03;
typedef std::uint32_t pipe_msg[2];
constexpr size_t pipe_msg_size = 2 * sizeof(std::uint32_t);
} } // namespace cppa::detail
#endif // POST_OFFICE_MSG_HPP
......@@ -86,14 +86,11 @@ actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
}
m_entries.insert(std::make_pair(key, result));
}
auto msg = make_any_tuple(atom("ADD_PROXY"), result);
singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
result->enqueue(nullptr, make_any_tuple(atom("MONITOR")));
result->attach_functor([result](std::uint32_t)
{
auto msg = make_any_tuple(atom("RM_PROXY"), result);
singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
get_actor_proxy_cache().erase(result);
});
result->enqueue(nullptr, make_any_tuple(atom("MONITOR")));
return result;
}
......
......@@ -41,7 +41,6 @@
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/post_office_msg.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/converted_thread_context.hpp"
......@@ -59,10 +58,17 @@ struct network_manager_impl : network_manager
local_actor_ptr m_post_office;
thread m_post_office_thread;
int pipe_fd[2];
void start() // override
{
if (pipe(pipe_fd) != 0)
{
CPPA_CRITICAL("cannot create pipe");
}
m_post_office.reset(new converted_thread_context);
m_post_office_thread = mock_scheduler::spawn_hidden_impl(post_office_loop, m_post_office);
m_post_office_thread = mock_scheduler::spawn_hidden_impl(std::bind(post_office_loop, pipe_fd[0]), m_post_office);
m_mailman.reset(new converted_thread_context);
m_mailman_thread = mock_scheduler::spawn_hidden_impl(mailman_loop, m_mailman);
......@@ -74,6 +80,16 @@ struct network_manager_impl : network_manager
m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE")));
m_post_office_thread.join();
m_mailman_thread.join();
close(pipe_fd[0]);
close(pipe_fd[1]);
}
void send_to_post_office(po_message const& msg)
{
if (write(pipe_fd[1], &msg, sizeof(po_message)) != sizeof(po_message))
{
CPPA_CRITICAL("cannot write to pipe");
}
}
void send_to_post_office(any_tuple msg)
......
......@@ -63,7 +63,6 @@
#include "cppa/detail/native_socket.hpp"
#include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/post_office_msg.hpp"
#include "cppa/detail/singleton_manager.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/detail/addressed_message.hpp"
......@@ -73,8 +72,8 @@
<< cppa::process_information::get()->process_id() \
<< "] " << arg << std::endl
//#undef DEBUG
//#define DEBUG(unused) ((void) 0)
#undef DEBUG
#define DEBUG(unused) ((void) 0)
using std::cout;
using std::cerr;
......@@ -100,11 +99,22 @@ static_assert(sizeof(cppa::detail::native_socket_type) == sizeof(std::uint32_t),
namespace cppa { namespace detail {
inline void send2po_(network_manager*) { }
template<typename Arg0, typename... Args>
inline void send2po_(network_manager* nm, Arg0&& arg0, Args&&... args)
{
nm->send_to_post_office(make_any_tuple(std::forward<Arg0>(arg0),
std::forward<Args>(args)...));
}
template<typename... Args>
inline void send2po(Args&&... args)
inline void send2po(po_message const& msg, Args&&... args)
{
singleton_manager::get_network_manager()
->send_to_post_office(make_any_tuple(std::forward<Args>(args)...));
auto nm = singleton_manager::get_network_manager();
nm->send_to_post_office(msg);
send2po_(nm, std::forward<Args>(args)...);
}
template<class Fun>
......@@ -118,8 +128,36 @@ struct scope_guard
template<class Fun>
scope_guard<Fun> make_scope_guard(Fun fun) { return {std::move(fun)}; }
class po_socket_handler
{
public:
po_socket_handler(native_socket_type fd) : m_socket(fd) { }
virtual ~po_socket_handler() { }
// returns false if the handler is done or an error occurred; true otherwise
virtual bool read_and_continue() = 0;
native_socket_type get_socket() const
{
return m_socket;
}
virtual bool is_doorman_of(actor_id) const { return false; }
protected:
native_socket_type m_socket;
};
typedef std::unique_ptr<po_socket_handler> po_socket_handler_ptr;
typedef std::vector<po_socket_handler_ptr> po_socket_handler_vector;
// represents a TCP connection to another peer
class po_peer
class po_peer : public po_socket_handler
{
enum state
......@@ -133,61 +171,49 @@ class po_peer
};
state m_state;
// TCP socket to remote peer
native_socket_type m_socket;
// caches process_information::get()
process_information_ptr m_pself;
// the process information or our remote peer
// the process information of our remote peer
process_information_ptr m_peer;
thread m_thread;
// caches uniform_typeid<addressed_message>()
uniform_type_info const* m_meta_msg;
// manages socket input
buffer<512, (16 * 1024 * 1024)> m_buf;
public:
po_peer(native_socket_type fd, process_information_ptr peer)
: m_state((peer) ? wait_for_msg_size : wait_for_process_info)
, m_socket(fd)
po_peer(native_socket_type fd, process_information_ptr peer = nullptr)
: po_socket_handler(fd)
, m_state((peer) ? wait_for_msg_size : wait_for_process_info)
, m_pself(process_information::get())
, m_peer(std::move(peer))
, m_meta_msg(uniform_typeid<addressed_message>())
{
m_buf.reset(m_state == wait_for_process_info
? sizeof(std::uint32_t) + process_information::node_id_size
: sizeof(std::uint32_t));
}
~po_peer()
{
closesocket(m_socket);
m_thread.join();
}
inline native_socket_type get_socket() const { return m_socket; }
void start()
{
m_thread = thread{std::bind(&po_peer::operator(), this)};
}
// @returns false if an error occurred; otherwise true
void operator()()
bool read_and_continue()
{
auto nm = singleton_manager::get_network_manager();
auto meta_msg = uniform_typeid<addressed_message>();
auto guard = make_scope_guard([&]()
{
DEBUG("po_peer loop done");
send2po(atom("RM_PEER"), m_socket);
});
DEBUG("po_peer loop started");
buffer<512, (16 * 1024 * 1024)> m_buf;
m_buf.reset(m_state == wait_for_process_info ? sizeof(std::uint32_t) + process_information::node_id_size
: sizeof(std::uint32_t));
for (;;)
{
while(m_buf.ready() == false)
if (m_buf.append_from(m_socket) == false)
{
if (m_buf.append_from(m_socket, 0) == false)
{
DEBUG("cannot read from socket");
return;
}
DEBUG("cannot read from socket");
return false;
}
if (m_buf.ready() == false)
{
return true; // try again later
}
switch (m_state)
{
......@@ -200,13 +226,14 @@ class po_peer
process_information::node_id_size);
m_peer.reset(new process_information(process_id, node_id));
// inform mailman about new peer
nm->send_to_mailman(make_any_tuple(m_socket, m_peer));
singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(m_socket, m_peer));
m_state = wait_for_msg_size;
m_buf.reset(sizeof(std::uint32_t));
DEBUG("pinfo read: "
<< m_peer->process_id()
<< "@"
<< to_string(m_peer->node_id()));
m_buf.reset(sizeof(std::uint32_t));
break;
}
case wait_for_msg_size:
......@@ -223,13 +250,13 @@ class po_peer
binary_deserializer bd(m_buf.data(), m_buf.size());
try
{
meta_msg->deserialize(&msg, &bd);
m_meta_msg->deserialize(&msg, &bd);
}
catch (std::exception& e)
{
// unable to deserialize message (format error)
DEBUG(to_uniform_name(typeid(e)) << ": " << e.what());
return;
return false;
}
auto& content = msg.content();
DEBUG("<-- " << to_string(msg));
......@@ -249,7 +276,8 @@ class po_peer
receiver->attach_functor([=](std::uint32_t reason)
{
addressed_message kmsg{receiver, receiver, make_any_tuple(atom("KILL_PROXY"), reason)};
nm->send_to_mailman(make_any_tuple(m_peer, kmsg));
singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(m_peer, kmsg));
});
}
else
......@@ -299,256 +327,214 @@ class po_peer
CPPA_CRITICAL("illegal state");
}
}
// try to read more (next iteration)
}
}
};
// accepts new connections to a published actor
class po_doorman
class po_doorman : public po_socket_handler
{
actor_ptr published_actor;
actor_id m_actor_id;
// caches process_information::get()
process_information_ptr m_pself;
int m_pipe_rd;
int m_pipe_wr;
thread m_thread;
struct socket_aid_pair
{
native_socket_type fd;
actor_id aid;
};
po_socket_handler_vector* new_handler;
public:
po_doorman() : m_pself(process_information::get())
po_doorman(actor_id aid, native_socket_type fd, po_socket_handler_vector* v)
: po_socket_handler(fd)
, m_actor_id(aid), m_pself(process_information::get())
, new_handler(v)
{
int mpipe[2];
if (pipe(mpipe) < 0)
{
CPPA_CRITICAL("cannot open pipe");
}
m_pipe_rd = mpipe[0];
m_pipe_wr = mpipe[1];
}
~po_doorman()
bool is_doorman_of(actor_id aid) const
{
DEBUG(__PRETTY_FUNCTION__);
socket_aid_pair msg{-1, 0};
if (write(m_pipe_wr, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
{
CPPA_CRITICAL("cannot write to pipe");
}
m_thread.join();
close(m_pipe_rd);
close(m_pipe_wr);
return m_actor_id == aid;
}
void start()
~po_doorman()
{
m_thread = thread{std::bind(&po_doorman::operator(), this)};
closesocket(m_socket);
}
void add(native_socket_type fd, actor_id aid)
bool read_and_continue()
{
DEBUG("add, aid = " << aid);
CPPA_REQUIRE(fd != -1);
CPPA_REQUIRE(aid != 0);
socket_aid_pair msg{fd, aid};
if (write(m_pipe_wr, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
for (;;)
{
CPPA_CRITICAL("cannot write to pipe");
sockaddr addr;
socklen_t addrlen;
memset(&addr, 0, sizeof(addr));
memset(&addrlen, 0, sizeof(addrlen));
auto sfd = ::accept(m_socket, &addr, &addrlen);
if (sfd < 0)
{
switch (errno)
{
case EAGAIN:
# if EAGAIN != EWOULDBLOCK
case EWOULDBLOCK:
# endif
// just try again
return true;
default:
perror("accept()");
DEBUG("accept failed (actor unpublished?)");
return false;
}
}
int flags = 1;
setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
flags = fcntl(sfd, F_GETFL, 0);
if (flags == -1)
{
throw network_error("unable to get socket flags");
}
if (fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0)
{
throw network_error("unable to set socket to nonblock");
}
auto id = m_actor_id;
std::uint32_t process_id = m_pself->process_id();
::send(sfd, &id, sizeof(std::uint32_t), 0);
::send(sfd, &process_id, sizeof(std::uint32_t), 0);
::send(sfd, m_pself->node_id().data(), m_pself->node_id().size(), 0);
new_handler->emplace_back(new po_peer(sfd));
DEBUG("socket accepted; published actor: " << id);
}
}
void rm(actor_id aid)
};
inline constexpr std::uint64_t valof(atom_value val)
{
return static_cast<std::uint64_t>(val);
}
void post_office_loop(int input_fd)
{
int maxfd = 0;
fd_set readset;
bool done = false;
po_socket_handler_vector handler;
po_socket_handler_vector new_handler;
do
{
CPPA_REQUIRE(aid != 0);
socket_aid_pair msg{-1, aid};
if (write(m_pipe_wr, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
FD_ZERO(&readset);
FD_SET(input_fd, &readset);
maxfd = input_fd;
for (auto& hptr : handler)
{
CPPA_CRITICAL("cannot write to pipe");
auto fd = hptr->get_socket();
maxfd = std::max(maxfd, fd);
FD_SET(fd, &readset);
}
}
void operator()()
{
sockaddr addr;
socklen_t addrlen;
std::vector<socket_aid_pair> pvec;
int maxfd = 0;
fd_set readset;
auto guard = make_scope_guard([&]()
if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0)
{
DEBUG(__PRETTY_FUNCTION__);
for (auto& pm : pvec)
{
closesocket(pm.fd);
}
});
for (;;)
// must not happen
DEBUG("select failed!");
perror("select()");
exit(3);
}
// iterate over all handlers and remove them if needed
{
FD_ZERO(&readset);
FD_SET(m_pipe_rd, &readset);
maxfd = m_pipe_rd;
for (auto i = pvec.begin(); i != pvec.end(); ++i)
auto i = handler.begin();
while (i != handler.end())
{
maxfd = std::max(maxfd, i->fd);
FD_SET(i->fd, &readset);
if ( FD_ISSET((*i)->get_socket(), &readset)
&& (*i)->read_and_continue() == false)
{
DEBUG("handler erased");
i = handler.erase(i);
}
else
{
++i;
}
}
if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0)
}
if (FD_ISSET(input_fd, &readset))
{
DEBUG("post_office: read from pipe");
po_message msg;
if (read(input_fd, &msg, sizeof(po_message)) != sizeof(po_message))
{
// must not happen
perror("select()");
exit(3);
CPPA_CRITICAL("cannot read from pipe");
}
// iterate over sockets
switch (valof(msg.flag))
{
auto i = pvec.begin();
while (i != pvec.end())
case valof(atom("ADD_PEER")):
{
if (FD_ISSET(i->fd, &readset))
{
memset(&addr, 0, sizeof(addr));
memset(&addrlen, 0, sizeof(addrlen));
auto sfd = ::accept(i->fd, &addr, &addrlen);
if (sfd < 0)
{
switch (errno)
{
case EAGAIN:
# if EAGAIN != EWOULDBLOCK
case EWOULDBLOCK:
# endif
// just try again
++i;
break;
default:
// remove socket on failure
i = pvec.erase(i);
break;
}
}
else
receive
(
on_arg_match >> [&](native_socket_type fd,
process_information_ptr piptr)
{
int flags = 1;
setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
auto id = i->aid;
std::uint32_t process_id = m_pself->process_id();
::send(sfd, &id, sizeof(std::uint32_t), 0);
::send(sfd, &process_id, sizeof(std::uint32_t), 0);
::send(sfd, m_pself->node_id().data(), m_pself->node_id().size(), 0);
send2po(atom("ADD_PEER"), sfd, process_information_ptr{});
DEBUG("socket accepted; published actor: " << id);
++i;
DEBUG("post_office: add_peer");
handler.emplace_back(new po_peer(fd, piptr));
}
}
);
break;
}
}
if (FD_ISSET(m_pipe_rd, &readset))
{
DEBUG("po_doorman: read from pipe");
socket_aid_pair msg;
if (read(m_pipe_rd, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
case valof(atom("RM_PEER")):
{
CPPA_CRITICAL("cannot read from pipe");
DEBUG("post_office: rm_peer");
auto i = std::find_if(handler.begin(), handler.end(),
[&](po_socket_handler_ptr const& hp)
{
return hp->get_socket() == msg.fd;
});
if (i != handler.end()) handler.erase(i);
break;
}
if (msg.fd == -1)
case valof(atom("PUBLISH")):
{
if (msg.aid == 0)
{
DEBUG("fd == -1 && aid == 0 [done]");
return;
}
else
{
auto i = std::find_if(pvec.begin(), pvec.end(), [&](socket_aid_pair const& m)
{
return m.aid == msg.aid;
});
if (i != pvec.end())
receive
(
on_arg_match >> [&](native_socket_type sockfd,
actor_ptr whom)
{
DEBUG("removed socket of actor" << i->aid);
closesocket(i->fd);
pvec.erase(i);
DEBUG("post_office: publish_actor");
CPPA_REQUIRE(sockfd > 0);
CPPA_REQUIRE(whom.get() != nullptr);
handler.emplace_back(new po_doorman(whom->id(), sockfd, &new_handler));
}
}
);
break;
}
else
case valof(atom("UNPUBLISH")):
{
DEBUG("post_office: unpublish_actor");
auto i = std::find_if(handler.begin(), handler.end(),
[&](po_socket_handler_ptr const& hp)
{
return hp->is_doorman_of(msg.aid);
});
if (i != handler.end()) handler.erase(i);
break;
}
case valof(atom("DONE")):
{
done = true;
break;
}
default:
{
DEBUG("added socket for actor" << msg.aid);
pvec.push_back(msg);
CPPA_CRITICAL("illegal pipe message");
}
}
}
}
};
void post_office_loop()
{
po_doorman doorman;
doorman.start();
bool done = false;
// list of all peers to which we established a connection via remote_actor()
std::list<po_peer> peers;
do_receive
(
on(atom("ADD_PEER"), arg_match) >> [&](native_socket_type fd,
process_information_ptr piptr)
if (new_handler.empty() == false)
{
DEBUG("post_office: add_peer");
peers.emplace_back(fd, std::move(piptr));
peers.back().start();
},
on(atom("RM_PEER"), arg_match) >> [&](native_socket_type fd)
{
DEBUG("post_office: rm_peer");
auto i = std::find_if(peers.begin(), peers.end(), [fd](po_peer& pp)
{
return pp.get_socket() == fd;
});
if (i != peers.end()) peers.erase(i);
},
on(atom("ADD_PROXY"), arg_match) >> [&](actor_proxy_ptr)
{
DEBUG("post_office: add_proxy");
},
on(atom("RM_PROXY"), arg_match) >> [&](actor_proxy_ptr pptr)
{
DEBUG("post_office: rm_proxy");
CPPA_REQUIRE(pptr.get() != nullptr);
get_actor_proxy_cache().erase(pptr);
},
on(atom("PUBLISH"), arg_match) >> [&](native_socket_type sockfd,
actor_ptr whom)
{
DEBUG("post_office: publish_actor");
CPPA_REQUIRE(whom.get() != nullptr);
doorman.add(sockfd, whom->id());
},
on(atom("UNPUBLISH"), arg_match) >> [&](actor_id whom)
{
DEBUG("post_office: unpublish_actor");
doorman.rm(whom);
},
on(atom("DONE")) >> [&]()
{
done = true;
},
others() >> []()
{
std::string str = "unexpected message in post_office: ";
str += to_string(self->last_dequeued());
CPPA_CRITICAL(str.c_str());
std::move(new_handler.begin(), new_handler.end(),
std::back_inserter(handler));
new_handler.clear();
}
)
.until(gref(done));
}
while (done == false);
}
/******************************************************************************
......@@ -559,24 +545,27 @@ void post_office_loop()
void post_office_add_peer(native_socket_type a0,
process_information_ptr const& a1)
{
DEBUG("post_office_add_peer(" << a0 << ", " << to_string(a1) << ")");
send2po(atom("ADD_PEER"), a0, a1);
po_message msg{atom("ADD_PEER"), -1, 0};
send2po(msg, a0, a1);
}
void post_office_publish(native_socket_type server_socket,
actor_ptr const& published_actor)
{
send2po(atom("PUBLISH"), server_socket, published_actor);
po_message msg{atom("PUBLISH"), -1, 0};
send2po(msg, server_socket, published_actor);
}
void post_office_unpublish(actor_id whom)
{
send2po(atom("UNPUBLISH"), whom);
po_message msg{atom("UNPUBLISH"), -1, whom};
send2po(msg);
}
void post_office_close_socket(native_socket_type sfd)
{
send2po(atom("RM_PEER"), sfd);
po_message msg{atom("RM_PEER"), sfd, 0};
send2po(msg);
}
} } // namespace cppa::detail
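The po_doorman above accepts connections on a nonblocking listen socket and treats EAGAIN/EWOULDBLOCK as "nothing pending, try again on the next select() round". A self-contained sketch of that accept pattern under the same assumption of a nonblocking listen socket; make_nonblocking and accept_step are illustrative helpers, not libcppa functions:

#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

// make a file descriptor nonblocking; returns false on failure
static bool make_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL, 0);
    if (flags == -1) return false;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0;
}

// one doorman-style accept step: returns true if the listen socket should
// stay registered (a connection was accepted or none was pending),
// false on a real error
static bool accept_step(int listen_fd)
{
    int client = ::accept(listen_fd, nullptr, nullptr);
    if (client >= 0)
    {
        make_nonblocking(client); // hand `client` to a po_peer-style handler here
        close(client);
        return true;
    }
    if (errno == EAGAIN || errno == EWOULDBLOCK)
    {
        return true; // nothing pending; select() will report the next connection
    }
    perror("accept");
    return false; // remove the handler, e.g. after the actor was unpublished
}

int main()
{
    int listen_fd = ::socket(AF_INET, SOCK_STREAM, 0);
    if (listen_fd < 0) { perror("socket"); return 1; }
    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0; // let the kernel pick an ephemeral port
    if (bind(listen_fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != 0
        || listen(listen_fd, 10) != 0
        || !make_nonblocking(listen_fd))
    {
        perror("setup"); return 1;
    }
    // no client is connecting, so this returns true via the EAGAIN branch
    std::printf("keep handler: %s\n", accept_step(listen_fd) ? "yes" : "no");
    close(listen_fd);
    return 0;
}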
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#include "cppa/detail/post_office_msg.hpp"
namespace cppa { namespace detail {
post_office_msg::add_peer::add_peer(native_socket_type peer_socket,
const process_information_ptr& peer_ptr,
const actor_proxy_ptr& peer_actor_ptr,
std::unique_ptr<attachable>&& peer_observer)
: sockfd(peer_socket)
, peer(peer_ptr)
, first_peer_actor(peer_actor_ptr)
, attachable_ptr(std::move(peer_observer))
{
}
post_office_msg::add_server_socket::add_server_socket(native_socket_type ssockfd,
const actor_ptr& whom)
: server_sockfd(ssockfd)
, published_actor(whom)
{
}
post_office_msg::post_office_msg(native_socket_type arg0,
const process_information_ptr& arg1,
const actor_proxy_ptr& arg2,
std::unique_ptr<attachable>&& arg3)
: next(nullptr)
, m_type(add_peer_type)
{
new (&m_add_peer_msg) add_peer(arg0, arg1, arg2, std::move(arg3));
}
post_office_msg::post_office_msg(native_socket_type arg0, const actor_ptr& arg1)
: next(nullptr)
, m_type(add_server_socket_type)
{
new (&m_add_server_socket) add_server_socket(arg0, arg1);
}
post_office_msg::post_office_msg(const actor_proxy_ptr& proxy_ptr)
: next(nullptr)
, m_type(proxy_exited_type)
{
new (&m_proxy_exited) proxy_exited(proxy_ptr);
}
post_office_msg::~post_office_msg()
{
switch (m_type)
{
case add_peer_type:
{
m_add_peer_msg.~add_peer();
break;
}
case add_server_socket_type:
{
m_add_server_socket.~add_server_socket();
break;
}
case proxy_exited_type:
{
m_proxy_exited.~proxy_exited();
break;
}
default: break;
}
}
} } // namespace cppa::detail
......@@ -129,6 +129,14 @@ void publish(actor_ptr& whom, std::uint16_t port)
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY;
serv_addr.sin_port = htons(port);
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
{
throw bind_failure(errno);
}
if (listen(sockfd, 10) != 0)
{
throw network_error("listen() failed");
}
int flags = fcntl(sockfd, F_GETFL, 0);
if (flags == -1)
{
......@@ -140,14 +148,6 @@ void publish(actor_ptr& whom, std::uint16_t port)
}
flags = 1;
setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
{
throw bind_failure(errno);
}
if (listen(sockfd, 10) != 0)
{
throw network_error("listen() failed");
}
// ok, no exceptions
sguard.release();
detail::post_office_publish(sockfd, whom);
......@@ -195,6 +195,17 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
read_from_socket(sockfd, &remote_actor_id, sizeof(remote_actor_id));
read_from_socket(sockfd, &peer_pid, sizeof(std::uint32_t));
read_from_socket(sockfd, peer_node_id.data(), peer_node_id.size());
flags = fcntl(sockfd, F_GETFL, 0);
if (flags == -1)
{
throw network_error("unable to get socket flags");
}
if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) < 0)
{
throw network_error("unable to set socket to nonblock");
}
auto peer_pinf = new process_information(peer_pid, peer_node_id);
process_information_ptr pinfptr(peer_pinf);
auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
......