Commit 783cd02d authored by neverlord's avatar neverlord

select()

parent 69ef1b78
...@@ -39,7 +39,6 @@ libcppa_la_SOURCES = \ ...@@ -39,7 +39,6 @@ libcppa_la_SOURCES = \
src/partial_function.cpp \ src/partial_function.cpp \
src/pattern.cpp \ src/pattern.cpp \
src/post_office.cpp \ src/post_office.cpp \
src/post_office_msg.cpp \
src/primitive_variant.cpp \ src/primitive_variant.cpp \
src/process_information.cpp \ src/process_information.cpp \
src/receive.cpp \ src/receive.cpp \
...@@ -102,6 +101,7 @@ nobase_library_include_HEADERS = \ ...@@ -102,6 +101,7 @@ nobase_library_include_HEADERS = \
cppa/detail/demangle.hpp \ cppa/detail/demangle.hpp \
cppa/detail/disablable_delete.hpp \ cppa/detail/disablable_delete.hpp \
cppa/detail/empty_tuple.hpp \ cppa/detail/empty_tuple.hpp \
cppa/detail/filter_result.hpp \
cppa/detail/get_behavior.hpp \ cppa/detail/get_behavior.hpp \
cppa/detail/group_manager.hpp \ cppa/detail/group_manager.hpp \
cppa/detail/implicit_conversions.hpp \ cppa/detail/implicit_conversions.hpp \
...@@ -112,12 +112,12 @@ nobase_library_include_HEADERS = \ ...@@ -112,12 +112,12 @@ nobase_library_include_HEADERS = \
cppa/detail/matches.hpp \ cppa/detail/matches.hpp \
cppa/detail/mock_scheduler.hpp \ cppa/detail/mock_scheduler.hpp \
cppa/detail/native_socket.hpp \ cppa/detail/native_socket.hpp \
cppa/detail/nestable_receive_actor.hpp \
cppa/detail/network_manager.hpp \ cppa/detail/network_manager.hpp \
cppa/detail/object_array.hpp \ cppa/detail/object_array.hpp \
cppa/detail/object_impl.hpp \ cppa/detail/object_impl.hpp \
cppa/detail/pair_member.hpp \ cppa/detail/pair_member.hpp \
cppa/detail/post_office.hpp \ cppa/detail/post_office.hpp \
cppa/detail/post_office_msg.hpp \
cppa/detail/primitive_member.hpp \ cppa/detail/primitive_member.hpp \
cppa/detail/projection.hpp \ cppa/detail/projection.hpp \
cppa/detail/pseudo_tuple.hpp \ cppa/detail/pseudo_tuple.hpp \
......
...@@ -169,8 +169,6 @@ src/actor_registry.cpp ...@@ -169,8 +169,6 @@ src/actor_registry.cpp
cppa/detail/uniform_type_info_map.hpp cppa/detail/uniform_type_info_map.hpp
cppa/detail/network_manager.hpp cppa/detail/network_manager.hpp
src/network_manager.cpp src/network_manager.cpp
cppa/detail/post_office_msg.hpp
src/post_office_msg.cpp
cppa/detail/group_manager.hpp cppa/detail/group_manager.hpp
src/group_manager.cpp src/group_manager.cpp
cppa/detail/empty_tuple.hpp cppa/detail/empty_tuple.hpp
......
...@@ -31,8 +31,7 @@ ...@@ -31,8 +31,7 @@
#ifndef NETWORK_MANAGER_HPP #ifndef NETWORK_MANAGER_HPP
#define NETWORK_MANAGER_HPP #define NETWORK_MANAGER_HPP
#include "cppa/detail/mailman.hpp" #include "cppa/detail/post_office.hpp"
#include "cppa/detail/post_office_msg.hpp"
namespace cppa { namespace detail { namespace cppa { namespace detail {
...@@ -47,6 +46,8 @@ class network_manager ...@@ -47,6 +46,8 @@ class network_manager
virtual void stop() = 0; virtual void stop() = 0;
virtual void send_to_post_office(po_message const& msg) = 0;
virtual void send_to_post_office(any_tuple msg) = 0; virtual void send_to_post_office(any_tuple msg) = 0;
virtual void send_to_mailman(any_tuple msg) = 0; virtual void send_to_mailman(any_tuple msg) = 0;
......
...@@ -33,12 +33,20 @@ ...@@ -33,12 +33,20 @@
#include <memory> #include <memory>
#include "cppa/atom.hpp"
#include "cppa/actor_proxy.hpp" #include "cppa/actor_proxy.hpp"
#include "cppa/detail/native_socket.hpp" #include "cppa/detail/native_socket.hpp"
namespace cppa { namespace detail { namespace cppa { namespace detail {
void post_office_loop(); struct po_message
{
atom_value flag;
native_socket_type fd;
actor_id aid;
};
void post_office_loop(int input_fd);
void post_office_add_peer(native_socket_type peer_socket, void post_office_add_peer(native_socket_type peer_socket,
process_information_ptr const& peer_ptr); process_information_ptr const& peer_ptr);
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef POST_OFFICE_MSG_HPP
#define POST_OFFICE_MSG_HPP
#include "cppa/attachable.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/intrusive_ptr.hpp"
#include "cppa/process_information.hpp"
#include "cppa/detail/native_socket.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa { namespace detail {
// Tagged-union message type consumed by the post office loop. Exactly one
// union member is active at a time, selected by m_type; the out-of-line
// constructors activate a member via placement new and the destructor
// dispatches on m_type (see post_office_msg.cpp). The public `next` pointer
// makes instances linkable, presumably for
// intrusive::single_reader_queue (included above) — TODO confirm at the
// queue's usage site.
class post_office_msg
{
public:
// Discriminator for the union below; invalid_type means no member is active.
enum msg_type
{
invalid_type,
add_peer_type,
add_server_socket_type,
proxy_exited_type
};
// Payload: a new remote peer connection.
struct add_peer
{
// TCP socket connected to the peer.
native_socket_type sockfd;
// Process information of the remote peer.
process_information_ptr peer;
// First proxy created for an actor of that peer.
actor_proxy_ptr first_peer_actor;
// Observer to attach; ownership is transferred into the message.
std::unique_ptr<attachable> attachable_ptr;
add_peer(native_socket_type peer_socket,
process_information_ptr const& peer_ptr,
actor_proxy_ptr const& peer_actor_ptr,
std::unique_ptr<attachable>&& peer_observer);
};
// Payload: a server (listen) socket publishing a local actor.
struct add_server_socket
{
native_socket_type server_sockfd;
actor_ptr published_actor;
add_server_socket(native_socket_type ssockfd, actor_ptr const& whom);
};
// Payload: a proxy that exited and should be cleaned up.
struct proxy_exited
{
actor_proxy_ptr proxy_ptr;
inline proxy_exited(actor_proxy_ptr const& who) : proxy_ptr(who) { }
};
// Creates an empty (invalid_type) message; no union member is constructed.
inline post_office_msg() : next(nullptr), m_type(invalid_type) { }
// Constructs an add_peer message (defined in the .cpp via placement new).
post_office_msg(native_socket_type arg0,
process_information_ptr const& arg1,
actor_proxy_ptr const& arg2,
std::unique_ptr<attachable>&& arg3);
// Constructs an add_server_socket message.
post_office_msg(native_socket_type arg0, actor_ptr const& arg1);
// Constructs a proxy_exited message.
post_office_msg(actor_proxy_ptr const& proxy_ptr);
// Type queries; callers must check these before using the as_*() accessors.
inline bool is_add_peer_msg() const
{
return m_type == add_peer_type;
}
inline bool is_add_server_socket_msg() const
{
return m_type == add_server_socket_type;
}
inline bool is_proxy_exited_msg() const
{
return m_type == proxy_exited_type;
}
// Unchecked accessors: behavior is undefined unless the matching
// is_*_msg() returned true (wrong member of an active union).
inline add_peer& as_add_peer_msg()
{
return m_add_peer_msg;
}
inline add_server_socket& as_add_server_socket_msg()
{
return m_add_server_socket;
}
inline proxy_exited& as_proxy_exited_msg()
{
return m_proxy_exited;
}
// Destroys the active union member according to m_type.
~post_office_msg();
// Intrusive "next" link; public so a queue implementation can chain messages.
post_office_msg* next;
private:
msg_type m_type;
// C++11 unrestricted union of non-trivial types; lifetime is managed
// manually (placement new in constructors, explicit destructor calls).
union
{
add_peer m_add_peer_msg;
add_server_socket m_add_server_socket;
proxy_exited m_proxy_exited;
};
};
// Event codes written over the post office's control pipe.
constexpr std::uint32_t rd_queue_event = 0x00;
constexpr std::uint32_t unpublish_actor_event = 0x01;
constexpr std::uint32_t close_socket_event = 0x02;
constexpr std::uint32_t shutdown_event = 0x03;
// A pipe message is two 32-bit words: [event code, argument].
// NOTE(review): the meaning of the second word depends on the event code
// (likely a socket fd or actor id) — confirm against the pipe reader.
typedef std::uint32_t pipe_msg[2];
// Byte size of one pipe_msg, used for read()/write() length checks.
constexpr size_t pipe_msg_size = 2 * sizeof(std::uint32_t);
} } // namespace cppa::detail
#endif // POST_OFFICE_MSG_HPP
...@@ -86,14 +86,11 @@ actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key) ...@@ -86,14 +86,11 @@ actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
} }
m_entries.insert(std::make_pair(key, result)); m_entries.insert(std::make_pair(key, result));
} }
auto msg = make_any_tuple(atom("ADD_PROXY"), result);
singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
result->enqueue(nullptr, make_any_tuple(atom("MONITOR")));
result->attach_functor([result](std::uint32_t) result->attach_functor([result](std::uint32_t)
{ {
auto msg = make_any_tuple(atom("RM_PROXY"), result); get_actor_proxy_cache().erase(result);
singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
}); });
result->enqueue(nullptr, make_any_tuple(atom("MONITOR")));
return result; return result;
} }
......
...@@ -41,7 +41,6 @@ ...@@ -41,7 +41,6 @@
#include "cppa/detail/mailman.hpp" #include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp" #include "cppa/detail/post_office.hpp"
#include "cppa/detail/mock_scheduler.hpp" #include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/post_office_msg.hpp"
#include "cppa/detail/network_manager.hpp" #include "cppa/detail/network_manager.hpp"
#include "cppa/detail/converted_thread_context.hpp" #include "cppa/detail/converted_thread_context.hpp"
...@@ -59,10 +58,17 @@ struct network_manager_impl : network_manager ...@@ -59,10 +58,17 @@ struct network_manager_impl : network_manager
local_actor_ptr m_post_office; local_actor_ptr m_post_office;
thread m_post_office_thread; thread m_post_office_thread;
int pipe_fd[2];
void start() // override void start() // override
{ {
if (pipe(pipe_fd) != 0)
{
CPPA_CRITICAL("cannot create pipe");
}
m_post_office.reset(new converted_thread_context); m_post_office.reset(new converted_thread_context);
m_post_office_thread = mock_scheduler::spawn_hidden_impl(post_office_loop, m_post_office); m_post_office_thread = mock_scheduler::spawn_hidden_impl(std::bind(post_office_loop, pipe_fd[0]), m_post_office);
m_mailman.reset(new converted_thread_context); m_mailman.reset(new converted_thread_context);
m_mailman_thread = mock_scheduler::spawn_hidden_impl(mailman_loop, m_mailman); m_mailman_thread = mock_scheduler::spawn_hidden_impl(mailman_loop, m_mailman);
...@@ -74,6 +80,16 @@ struct network_manager_impl : network_manager ...@@ -74,6 +80,16 @@ struct network_manager_impl : network_manager
m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE"))); m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE")));
m_post_office_thread.join(); m_post_office_thread.join();
m_mailman_thread.join(); m_mailman_thread.join();
close(pipe_fd[0]);
close(pipe_fd[0]);
}
void send_to_post_office(po_message const& msg)
{
if (write(pipe_fd[1], &msg, sizeof(po_message)) != sizeof(po_message))
{
CPPA_CRITICAL("cannot write to pipe");
}
} }
void send_to_post_office(any_tuple msg) void send_to_post_office(any_tuple msg)
......
...@@ -63,7 +63,6 @@ ...@@ -63,7 +63,6 @@
#include "cppa/detail/native_socket.hpp" #include "cppa/detail/native_socket.hpp"
#include "cppa/detail/actor_registry.hpp" #include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/network_manager.hpp" #include "cppa/detail/network_manager.hpp"
#include "cppa/detail/post_office_msg.hpp"
#include "cppa/detail/singleton_manager.hpp" #include "cppa/detail/singleton_manager.hpp"
#include "cppa/detail/actor_proxy_cache.hpp" #include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/detail/addressed_message.hpp" #include "cppa/detail/addressed_message.hpp"
...@@ -73,8 +72,8 @@ ...@@ -73,8 +72,8 @@
<< cppa::process_information::get()->process_id() \ << cppa::process_information::get()->process_id() \
<< "] " << arg << std::endl << "] " << arg << std::endl
//#undef DEBUG #undef DEBUG
//#define DEBUG(unused) ((void) 0) #define DEBUG(unused) ((void) 0)
using std::cout; using std::cout;
using std::cerr; using std::cerr;
...@@ -100,11 +99,22 @@ static_assert(sizeof(cppa::detail::native_socket_type) == sizeof(std::uint32_t), ...@@ -100,11 +99,22 @@ static_assert(sizeof(cppa::detail::native_socket_type) == sizeof(std::uint32_t),
namespace cppa { namespace detail { namespace cppa { namespace detail {
inline void send2po_(network_manager*) { }
template<typename Arg0, typename... Args>
inline void send2po_(network_manager* nm, Arg0&& arg0, Args&&... args)
{
nm->send_to_post_office(make_any_tuple(std::forward<Arg0>(arg0),
std::forward<Args>(args)...));
}
template<typename... Args> template<typename... Args>
inline void send2po(Args&&... args) inline void send2po(po_message const& msg, Args&&... args)
{ {
singleton_manager::get_network_manager() auto nm = singleton_manager::get_network_manager();
->send_to_post_office(make_any_tuple(std::forward<Args>(args)...)); nm->send_to_post_office(msg);
send2po_(nm, std::forward<Args>(args)...);
} }
template<class Fun> template<class Fun>
...@@ -118,8 +128,36 @@ struct scope_guard ...@@ -118,8 +128,36 @@ struct scope_guard
template<class Fun> template<class Fun>
scope_guard<Fun> make_scope_guard(Fun fun) { return {std::move(fun)}; } scope_guard<Fun> make_scope_guard(Fun fun) { return {std::move(fun)}; }
class po_socket_handler
{
public:
po_socket_handler(native_socket_type fd) : m_socket(fd) { }
virtual ~po_socket_handler() { }
// returns bool if either done or an error occured
virtual bool read_and_continue() = 0;
native_socket_type get_socket() const
{
return m_socket;
}
virtual bool is_doorman_of(actor_id) const { return false; }
protected:
native_socket_type m_socket;
};
typedef std::unique_ptr<po_socket_handler> po_socket_handler_ptr;
typedef std::vector<po_socket_handler_ptr> po_socket_handler_vector;
// represents a TCP connection to another peer // represents a TCP connection to another peer
class po_peer class po_peer : public po_socket_handler
{ {
enum state enum state
...@@ -133,61 +171,49 @@ class po_peer ...@@ -133,61 +171,49 @@ class po_peer
}; };
state m_state; state m_state;
// TCP socket to remote peer
native_socket_type m_socket;
// caches process_information::get() // caches process_information::get()
process_information_ptr m_pself; process_information_ptr m_pself;
// the process information or our remote peer // the process information of our remote peer
process_information_ptr m_peer; process_information_ptr m_peer;
// caches uniform_typeid<addressed_message>()
thread m_thread; uniform_type_info const* m_meta_msg;
// manages socket input
buffer<512, (16 * 1024 * 1024)> m_buf;
public: public:
po_peer(native_socket_type fd, process_information_ptr peer) po_peer(native_socket_type fd, process_information_ptr peer = nullptr)
: m_state((peer) ? wait_for_msg_size : wait_for_process_info) : po_socket_handler(fd)
, m_socket(fd) , m_state((peer) ? wait_for_msg_size : wait_for_process_info)
, m_pself(process_information::get()) , m_pself(process_information::get())
, m_peer(std::move(peer)) , m_peer(std::move(peer))
, m_meta_msg(uniform_typeid<addressed_message>())
{ {
m_buf.reset(m_state == wait_for_process_info
? sizeof(std::uint32_t) + process_information::node_id_size
: sizeof(std::uint32_t));
} }
~po_peer() ~po_peer()
{ {
closesocket(m_socket); closesocket(m_socket);
m_thread.join();
} }
inline native_socket_type get_socket() const { return m_socket; } inline native_socket_type get_socket() const { return m_socket; }
void start()
{
m_thread = thread{std::bind(&po_peer::operator(), this)};
}
// @returns false if an error occured; otherwise true // @returns false if an error occured; otherwise true
void operator()() bool read_and_continue()
{ {
auto nm = singleton_manager::get_network_manager();
auto meta_msg = uniform_typeid<addressed_message>();
auto guard = make_scope_guard([&]()
{
DEBUG("po_peer loop done");
send2po(atom("RM_PEER"), m_socket);
});
DEBUG("po_peer loop started");
buffer<512, (16 * 1024 * 1024)> m_buf;
m_buf.reset(m_state == wait_for_process_info ? sizeof(std::uint32_t) + process_information::node_id_size
: sizeof(std::uint32_t));
for (;;) for (;;)
{ {
while(m_buf.ready() == false) if (m_buf.append_from(m_socket) == false)
{
if (m_buf.append_from(m_socket, 0) == false)
{ {
DEBUG("cannot read from socket"); DEBUG("cannot read from socket");
return; return false;
} }
if (m_buf.ready() == false)
{
return true; // try again later
} }
switch (m_state) switch (m_state)
{ {
...@@ -200,13 +226,14 @@ class po_peer ...@@ -200,13 +226,14 @@ class po_peer
process_information::node_id_size); process_information::node_id_size);
m_peer.reset(new process_information(process_id, node_id)); m_peer.reset(new process_information(process_id, node_id));
// inform mailman about new peer // inform mailman about new peer
nm->send_to_mailman(make_any_tuple(m_socket, m_peer)); singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(m_socket, m_peer));
m_state = wait_for_msg_size; m_state = wait_for_msg_size;
m_buf.reset(sizeof(std::uint32_t));
DEBUG("pinfo read: " DEBUG("pinfo read: "
<< m_peer->process_id() << m_peer->process_id()
<< "@" << "@"
<< to_string(m_peer->node_id())); << to_string(m_peer->node_id()));
m_buf.reset(sizeof(std::uint32_t));
break; break;
} }
case wait_for_msg_size: case wait_for_msg_size:
...@@ -223,13 +250,13 @@ class po_peer ...@@ -223,13 +250,13 @@ class po_peer
binary_deserializer bd(m_buf.data(), m_buf.size()); binary_deserializer bd(m_buf.data(), m_buf.size());
try try
{ {
meta_msg->deserialize(&msg, &bd); m_meta_msg->deserialize(&msg, &bd);
} }
catch (std::exception& e) catch (std::exception& e)
{ {
// unable to deserialize message (format error) // unable to deserialize message (format error)
DEBUG(to_uniform_name(typeid(e)) << ": " << e.what()); DEBUG(to_uniform_name(typeid(e)) << ": " << e.what());
return; return false;
} }
auto& content = msg.content(); auto& content = msg.content();
DEBUG("<-- " << to_string(msg)); DEBUG("<-- " << to_string(msg));
...@@ -249,7 +276,8 @@ class po_peer ...@@ -249,7 +276,8 @@ class po_peer
receiver->attach_functor([=](std::uint32_t reason) receiver->attach_functor([=](std::uint32_t reason)
{ {
addressed_message kmsg{receiver, receiver, make_any_tuple(atom("KILL_PROXY"), reason)}; addressed_message kmsg{receiver, receiver, make_any_tuple(atom("KILL_PROXY"), reason)};
nm->send_to_mailman(make_any_tuple(m_peer, kmsg)); singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(m_peer, kmsg));
}); });
} }
else else
...@@ -299,124 +327,49 @@ class po_peer ...@@ -299,124 +327,49 @@ class po_peer
CPPA_CRITICAL("illegal state"); CPPA_CRITICAL("illegal state");
} }
} }
// try to read more (next iteration)
} }
} }
}; };
// accepts new connections to a published actor // accepts new connections to a published actor
class po_doorman class po_doorman : public po_socket_handler
{ {
actor_ptr published_actor; actor_id m_actor_id;
// caches process_information::get() // caches process_information::get()
process_information_ptr m_pself; process_information_ptr m_pself;
po_socket_handler_vector* new_handler;
int m_pipe_rd;
int m_pipe_wr;
thread m_thread;
struct socket_aid_pair
{
native_socket_type fd;
actor_id aid;
};
public: public:
po_doorman() : m_pself(process_information::get()) po_doorman(actor_id aid, native_socket_type fd, po_socket_handler_vector* v)
{ : po_socket_handler(fd)
int mpipe[2]; , m_actor_id(aid), m_pself(process_information::get())
if (pipe(mpipe) < 0) , new_handler(v)
{
CPPA_CRITICAL("cannot open pipe");
}
m_pipe_rd = mpipe[0];
m_pipe_wr = mpipe[1];
}
~po_doorman()
{
DEBUG(__PRETTY_FUNCTION__);
socket_aid_pair msg{-1, 0};
if (write(m_pipe_wr, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
{ {
CPPA_CRITICAL("cannot write to pipe");
}
m_thread.join();
close(m_pipe_rd);
close(m_pipe_wr);
} }
void start() bool is_doorman_of(actor_id aid) const
{ {
m_thread = thread{std::bind(&po_doorman::operator(), this)}; return m_actor_id == aid;
} }
void add(native_socket_type fd, actor_id aid) ~po_doorman()
{
DEBUG("add, aid = " << aid);
CPPA_REQUIRE(fd != -1);
CPPA_REQUIRE(aid != 0);
socket_aid_pair msg{fd, aid};
if (write(m_pipe_wr, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
{ {
CPPA_CRITICAL("cannot write to pipe"); closesocket(m_socket);
}
} }
void rm(actor_id aid) bool read_and_continue()
{ {
CPPA_REQUIRE(aid != 0); for (;;)
socket_aid_pair msg{-1, aid};
if (write(m_pipe_wr, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair))
{
CPPA_CRITICAL("cannot write to pipe");
}
}
void operator()()
{ {
sockaddr addr; sockaddr addr;
socklen_t addrlen; socklen_t addrlen;
std::vector<socket_aid_pair> pvec;
int maxfd = 0;
fd_set readset;
auto guard = make_scope_guard([&]()
{
DEBUG(__PRETTY_FUNCTION__);
for (auto& pm : pvec)
{
closesocket(pm.fd);
}
});
for (;;)
{
FD_ZERO(&readset);
FD_SET(m_pipe_rd, &readset);
maxfd = m_pipe_rd;
for (auto i = pvec.begin(); i != pvec.end(); ++i)
{
maxfd = std::max(maxfd, i->fd);
FD_SET(i->fd, &readset);
}
if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0)
{
// must not happen
perror("select()");
exit(3);
}
// iterate over sockets
{
auto i = pvec.begin();
while (i != pvec.end())
{
if (FD_ISSET(i->fd, &readset))
{
memset(&addr, 0, sizeof(addr)); memset(&addr, 0, sizeof(addr));
memset(&addrlen, 0, sizeof(addrlen)); memset(&addrlen, 0, sizeof(addrlen));
auto sfd = ::accept(i->fd, &addr, &addrlen); auto sfd = ::accept(m_socket, &addr, &addrlen);
if (sfd < 0) if (sfd < 0)
{ {
switch (errno) switch (errno)
...@@ -426,129 +379,162 @@ class po_doorman ...@@ -426,129 +379,162 @@ class po_doorman
case EWOULDBLOCK: case EWOULDBLOCK:
# endif # endif
// just try again // just try again
++i; return true;
break;
default: default:
// remove socket on failure perror("accept()");
i = pvec.erase(i); DEBUG("accept failed (actor unpublished?)");
break; return false;
} }
} }
else
{
int flags = 1; int flags = 1;
setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int)); setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
auto id = i->aid; flags = fcntl(sfd, F_GETFL, 0);
if (flags == -1)
{
throw network_error("unable to get socket flags");
}
if (fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0)
{
throw network_error("unable to set socket to nonblock");
}
auto id = m_actor_id;
std::uint32_t process_id = m_pself->process_id(); std::uint32_t process_id = m_pself->process_id();
::send(sfd, &id, sizeof(std::uint32_t), 0); ::send(sfd, &id, sizeof(std::uint32_t), 0);
::send(sfd, &process_id, sizeof(std::uint32_t), 0); ::send(sfd, &process_id, sizeof(std::uint32_t), 0);
::send(sfd, m_pself->node_id().data(), m_pself->node_id().size(), 0); ::send(sfd, m_pself->node_id().data(), m_pself->node_id().size(), 0);
send2po(atom("ADD_PEER"), sfd, process_information_ptr{}); new_handler->emplace_back(new po_peer(sfd));
DEBUG("socket accepted; published actor: " << id); DEBUG("socket accepted; published actor: " << id);
++i;
}
} }
} }
}
if (FD_ISSET(m_pipe_rd, &readset)) };
inline constexpr std::uint64_t valof(atom_value val)
{
return static_cast<std::uint64_t>(val);
}
void post_office_loop(int input_fd)
{
int maxfd = 0;
fd_set readset;
bool done = false;
po_socket_handler_vector handler;
po_socket_handler_vector new_handler;
do
{ {
DEBUG("po_doorman: read from pipe"); FD_ZERO(&readset);
socket_aid_pair msg; FD_SET(input_fd, &readset);
if (read(m_pipe_rd, &msg, sizeof(socket_aid_pair)) != sizeof(socket_aid_pair)) maxfd = input_fd;
for (auto& hptr : handler)
{ {
CPPA_CRITICAL("cannot read from pipe"); auto fd = hptr->get_socket();
maxfd = std::max(maxfd, fd);
FD_SET(fd, &readset);
} }
if (msg.fd == -1) if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0)
{
if (msg.aid == 0)
{ {
DEBUG("fd == -1 && aid == 0 [done]"); // must not happen
return; DEBUG("select failed!");
perror("select()");
exit(3);
} }
else // iterate over all handler and remove if needed
{ {
auto i = std::find_if(pvec.begin(), pvec.end(), [&](socket_aid_pair const& m) auto i = handler.begin();
while (i != handler.end())
{ {
return m.aid == msg.aid; if ( FD_ISSET((*i)->get_socket(), &readset)
}); && (*i)->read_and_continue() == false)
if (i != pvec.end())
{ {
DEBUG("removed socket of actor" << i->aid); DEBUG("handler erased");
closesocket(i->fd); i = handler.erase(i);
pvec.erase(i);
}
}
} }
else else
{ {
DEBUG("added socket for actor" << msg.aid); ++i;
pvec.push_back(msg);
} }
} }
} }
if (FD_ISSET(input_fd, &readset))
{
DEBUG("post_office: read from pipe");
po_message msg;
if (read(input_fd, &msg, sizeof(po_message)) != sizeof(po_message))
{
CPPA_CRITICAL("cannot read from pipe");
} }
switch (valof(msg.flag))
}; {
case valof(atom("ADD_PEER")):
void post_office_loop() {
{ receive
po_doorman doorman;
doorman.start();
bool done = false;
// list of all peers to which we established a connection via remote_actor()
std::list<po_peer> peers;
do_receive
( (
on(atom("ADD_PEER"), arg_match) >> [&](native_socket_type fd, on_arg_match >> [&](native_socket_type fd,
process_information_ptr piptr) process_information_ptr piptr)
{ {
DEBUG("post_office: add_peer"); DEBUG("post_office: add_peer");
peers.emplace_back(fd, std::move(piptr)); handler.emplace_back(new po_peer(fd, piptr));
peers.back().start(); }
}, );
on(atom("RM_PEER"), arg_match) >> [&](native_socket_type fd) break;
}
case valof(atom("RM_PEER")):
{ {
DEBUG("post_office: rm_peer"); DEBUG("post_office: rm_peer");
auto i = std::find_if(peers.begin(), peers.end(), [fd](po_peer& pp) auto i = std::find_if(handler.begin(), handler.end(),
[&](po_socket_handler_ptr const& hp)
{ {
return pp.get_socket() == fd; return hp->get_socket() == msg.fd;
}); });
if (i != peers.end()) peers.erase(i); if (i != handler.end()) handler.erase(i);
}, break;
on(atom("ADD_PROXY"), arg_match) >> [&](actor_proxy_ptr) }
{ case valof(atom("PUBLISH")):
DEBUG("post_office: add_proxy");
},
on(atom("RM_PROXY"), arg_match) >> [&](actor_proxy_ptr pptr)
{ {
DEBUG("post_office: rm_proxy"); receive
CPPA_REQUIRE(pptr.get() != nullptr); (
get_actor_proxy_cache().erase(pptr); on_arg_match >> [&](native_socket_type sockfd,
},
on(atom("PUBLISH"), arg_match) >> [&](native_socket_type sockfd,
actor_ptr whom) actor_ptr whom)
{ {
DEBUG("post_office: publish_actor"); DEBUG("post_office: publish_actor");
CPPA_REQUIRE(sockfd > 0);
CPPA_REQUIRE(whom.get() != nullptr); CPPA_REQUIRE(whom.get() != nullptr);
doorman.add(sockfd, whom->id()); handler.emplace_back(new po_doorman(whom->id(), sockfd, &new_handler));
}, }
on(atom("UNPUBLISH"), arg_match) >> [&](actor_id whom) );
break;
}
case valof(atom("UNPUBLISH")):
{ {
DEBUG("post_office: unpublish_actor"); DEBUG("post_office: unpublish_actor");
doorman.rm(whom); auto i = std::find_if(handler.begin(), handler.end(),
}, [&](po_socket_handler_ptr const& hp)
on(atom("DONE")) >> [&]() {
return hp->is_doorman_of(msg.aid);
});
if (i != handler.end()) handler.erase(i);
break;
}
case valof(atom("DONE")):
{ {
done = true; done = true;
}, break;
others() >> []() }
default:
{ {
std::string str = "unexpected message in post_office: "; CPPA_CRITICAL("illegal pipe message");
str += to_string(self->last_dequeued()); }
CPPA_CRITICAL(str.c_str()); }
}
if (new_handler.empty() == false)
{
std::move(new_handler.begin(), new_handler.end(),
std::back_inserter(handler));
new_handler.clear();
}
} }
) while (done == false);
.until(gref(done));
} }
/****************************************************************************** /******************************************************************************
...@@ -559,24 +545,27 @@ void post_office_loop() ...@@ -559,24 +545,27 @@ void post_office_loop()
void post_office_add_peer(native_socket_type a0, void post_office_add_peer(native_socket_type a0,
process_information_ptr const& a1) process_information_ptr const& a1)
{ {
DEBUG("post_office_add_peer(" << a0 << ", " << to_string(a1) << ")"); po_message msg{atom("ADD_PEER"), -1, 0};
send2po(atom("ADD_PEER"), a0, a1); send2po(msg, a0, a1);
} }
void post_office_publish(native_socket_type server_socket, void post_office_publish(native_socket_type server_socket,
actor_ptr const& published_actor) actor_ptr const& published_actor)
{ {
send2po(atom("PUBLISH"), server_socket, published_actor); po_message msg{atom("PUBLISH"), -1, 0};
send2po(msg, server_socket, published_actor);
} }
void post_office_unpublish(actor_id whom) void post_office_unpublish(actor_id whom)
{ {
send2po(atom("UNPUBLISH"), whom); po_message msg{atom("UNPUBLISH"), -1, whom};
send2po(msg);
} }
void post_office_close_socket(native_socket_type sfd) void post_office_close_socket(native_socket_type sfd)
{ {
send2po(atom("RM_PEER"), sfd); po_message msg{atom("RM_PEER"), sfd, 0};
send2po(msg);
} }
} } // namespace cppa::detail } } // namespace cppa::detail
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#include "cppa/detail/post_office_msg.hpp"
namespace cppa { namespace detail {
// Initializes an add_peer payload: stores the peer's socket, process
// information, and first proxy, and takes ownership of the observer
// (moved out of the caller's unique_ptr).
post_office_msg::add_peer::add_peer(native_socket_type peer_socket,
const process_information_ptr& peer_ptr,
const actor_proxy_ptr& peer_actor_ptr,
std::unique_ptr<attachable>&& peer_observer)
: sockfd(peer_socket)
, peer(peer_ptr)
, first_peer_actor(peer_actor_ptr)
, attachable_ptr(std::move(peer_observer))
{
}
// Initializes an add_server_socket payload: the listen socket and the
// local actor published on it.
post_office_msg::add_server_socket::add_server_socket(native_socket_type ssockfd,
const actor_ptr& whom)
: server_sockfd(ssockfd)
, published_actor(whom)
{
}
// Constructs an add_peer message: tags the union and activates the
// add_peer member via placement new (required for non-trivial union
// members; the matching destructor call lives in ~post_office_msg).
post_office_msg::post_office_msg(native_socket_type arg0,
const process_information_ptr& arg1,
const actor_proxy_ptr& arg2,
std::unique_ptr<attachable>&& arg3)
: next(nullptr)
, m_type(add_peer_type)
{
new (&m_add_peer_msg) add_peer(arg0, arg1, arg2, std::move(arg3));
}
// Constructs an add_server_socket message; activates the union member
// via placement new (destroyed by ~post_office_msg).
post_office_msg::post_office_msg(native_socket_type arg0, const actor_ptr& arg1)
: next(nullptr)
, m_type(add_server_socket_type)
{
new (&m_add_server_socket) add_server_socket(arg0, arg1);
}
// Constructs a proxy_exited message; activates the union member via
// placement new (destroyed by ~post_office_msg).
post_office_msg::post_office_msg(const actor_proxy_ptr& proxy_ptr)
: next(nullptr)
, m_type(proxy_exited_type)
{
new (&m_proxy_exited) proxy_exited(proxy_ptr);
}
// Destroys whichever union member is active. m_type records which member
// was constructed via placement new in the corresponding constructor;
// a default-constructed message (invalid_type) owns nothing, so there is
// nothing to destroy in that case.
post_office_msg::~post_office_msg()
{
    if (m_type == add_peer_type)
    {
        m_add_peer_msg.~add_peer();
    }
    else if (m_type == add_server_socket_type)
    {
        m_add_server_socket.~add_server_socket();
    }
    else if (m_type == proxy_exited_type)
    {
        m_proxy_exited.~proxy_exited();
    }
}
} } // namespace cppa::detail
...@@ -129,6 +129,14 @@ void publish(actor_ptr& whom, std::uint16_t port) ...@@ -129,6 +129,14 @@ void publish(actor_ptr& whom, std::uint16_t port)
serv_addr.sin_family = AF_INET; serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY; serv_addr.sin_addr.s_addr = INADDR_ANY;
serv_addr.sin_port = htons(port); serv_addr.sin_port = htons(port);
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
{
throw bind_failure(errno);
}
if (listen(sockfd, 10) != 0)
{
throw network_error("listen() failed");
}
int flags = fcntl(sockfd, F_GETFL, 0); int flags = fcntl(sockfd, F_GETFL, 0);
if (flags == -1) if (flags == -1)
{ {
...@@ -140,14 +148,6 @@ void publish(actor_ptr& whom, std::uint16_t port) ...@@ -140,14 +148,6 @@ void publish(actor_ptr& whom, std::uint16_t port)
} }
flags = 1; flags = 1;
setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int)); setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
{
throw bind_failure(errno);
}
if (listen(sockfd, 10) != 0)
{
throw network_error("listen() failed");
}
// ok, no exceptions // ok, no exceptions
sguard.release(); sguard.release();
detail::post_office_publish(sockfd, whom); detail::post_office_publish(sockfd, whom);
...@@ -195,6 +195,17 @@ actor_ptr remote_actor(const char* host, std::uint16_t port) ...@@ -195,6 +195,17 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
read_from_socket(sockfd, &remote_actor_id, sizeof(remote_actor_id)); read_from_socket(sockfd, &remote_actor_id, sizeof(remote_actor_id));
read_from_socket(sockfd, &peer_pid, sizeof(std::uint32_t)); read_from_socket(sockfd, &peer_pid, sizeof(std::uint32_t));
read_from_socket(sockfd, peer_node_id.data(), peer_node_id.size()); read_from_socket(sockfd, peer_node_id.data(), peer_node_id.size());
flags = fcntl(sockfd, F_GETFL, 0);
if (flags == -1)
{
throw network_error("unable to get socket flags");
}
if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) < 0)
{
throw network_error("unable to set socket to nonblock");
}
auto peer_pinf = new process_information(peer_pid, peer_node_id); auto peer_pinf = new process_information(peer_pid, peer_node_id);
process_information_ptr pinfptr(peer_pinf); process_information_ptr pinfptr(peer_pinf);
auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id()); auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment