Commit 303e1841 authored by Dominik Charousset

ipv4 networking bugfixes and improvements
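
- buffer: enforce MaxBufferSize in reset() and signal oversized messages
  via std::ios_base::failure
- ipv4_io_stream: switch sockets to nonblocking mode, treat EWOULDBLOCK
  like EAGAIN, and loop in write() until all bytes are sent
- mailman/post office: process plain message queues instead of running
  actor receive loops; worker bookkeeping moves into a post_office class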

parent de0c81ce
@@ -39,18 +39,17 @@
namespace cppa { namespace detail {
template<size_t ChunkSize, size_t MaxBufferSize, typename DataType = char>
template<size_t ChunkSize, size_t MaxBufferSize>
class buffer {
DataType* m_data;
char* m_data;
size_t m_written;
size_t m_allocated;
size_t m_final_size;
public:
buffer() : m_data(nullptr), m_written(0), m_allocated(0), m_final_size(0) {
}
buffer() : m_data(nullptr), m_written(0), m_allocated(0), m_final_size(0) {}
buffer(buffer&& other)
: m_data(other.m_data), m_written(other.m_written)
@@ -68,12 +67,17 @@ class buffer {
}
void reset(size_t new_final_size = 0) {
m_written = 0;
m_final_size = new_final_size;
if (new_final_size > m_allocated) {
if (new_final_size > MaxBufferSize) {
m_written = 0;
m_allocated = 0;
m_final_size = 0;
delete[] m_data;
m_data = nullptr;
throw std::ios_base::failure("maximum buffer size exceeded");
}
m_written = 0;
m_final_size = new_final_size;
if (new_final_size > m_allocated) {
auto remainder = (new_final_size % ChunkSize);
if (remainder == 0) {
m_allocated = new_final_size;
@@ -82,16 +86,12 @@ class buffer {
m_allocated = (new_final_size - remainder) + ChunkSize;
}
delete[] m_data;
m_data = new DataType[m_allocated];
m_data = new char[m_allocated];
}
}
bool ready() {
return m_written == m_final_size;
}
// pointer to the current write position
DataType* wr_ptr() {
char* wr_ptr() {
return m_data + m_written;
}
@@ -111,7 +111,7 @@ class buffer {
m_written += value;
}
DataType* data() {
char* data() {
return m_data;
}
@@ -119,13 +119,12 @@
return remaining() == 0;
}
bool append_from(util::input_stream* istream) {
void append_from(util::input_stream* istream) {
CPPA_REQUIRE(remaining() > 0);
auto num_bytes = istream->read_some(wr_ptr(), remaining());
if (num_bytes > 0) {
inc_written(num_bytes);
return true;
}
return false;
}
};
@@ -42,7 +42,7 @@ class ipv4_io_stream : public util::io_stream {
static util::io_stream_ptr connect_to(const char* host, std::uint16_t port);
ipv4_io_stream(native_socket_type fd);
static util::io_stream_ptr from_native_socket(native_socket_type fd);
native_socket_type read_file_handle() const;
@@ -58,6 +58,8 @@ class ipv4_io_stream : public util::io_stream {
private:
ipv4_io_stream(native_socket_type fd);
native_socket_type m_fd;
};
@@ -34,8 +34,13 @@
#include "cppa/any_tuple.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/process_information.hpp"
#include "cppa/detail/addressed_message.hpp"
#include "cppa/util/acceptor.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/singleton_manager.hpp"
#include "cppa/detail/addressed_message.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa { namespace detail {
@@ -63,7 +68,23 @@ struct mm_message {
}
};
void mailman_loop();
void mailman_loop(intrusive::single_reader_queue<mm_message>& q);
template<typename... Args>
inline void send2mm(Args&&... args) {
auto nm = singleton_manager::get_network_manager();
nm->send_to_mailman(mm_message::create(std::forward<Args>(args)...));
}
inline void mailman_enqueue(process_information_ptr peer,
addressed_message outgoing_message) {
send2mm(std::move(peer), std::move(outgoing_message));
}
inline void mailman_add_peer(util::io_stream_ptr_pair peer_streams,
process_information_ptr peer_ptr ) {
send2mm(std::move(peer_streams), std::move(peer_ptr));
}
}} // namespace cppa::detail
@@ -76,12 +76,14 @@ struct po_message {
}
};
void post_office_loop(int input_fd, intrusive::single_reader_queue<po_message>&);
typedef intrusive::single_reader_queue<po_message> po_message_queue;
void post_office_loop(int input_fd, po_message_queue&);
template<typename... Args>
inline void send2po(Args&&... args) {
auto nm = singleton_manager::get_network_manager();
nm->send_to_post_office(std::unique_ptr<po_message>(new po_message(std::forward<Args>(args)...)));
nm->send_to_post_office(po_message::create(std::forward<Args>(args)...));
}
inline void post_office_add_peer(util::io_stream_ptr_pair peer_streams,
@@ -97,7 +97,7 @@ bool accept_impl(util::io_stream_ptr_pair& result, native_socket_type fd, bool n
}
int flags = 1;
setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
util::io_stream_ptr ptr(new ipv4_io_stream(sfd));
util::io_stream_ptr ptr(ipv4_io_stream::from_native_socket(sfd));
result.first = ptr;
result.second = ptr;
return true;
@@ -53,10 +53,12 @@ namespace {
template<typename T>
void handle_syscall_result(T result, size_t num_bytes, bool nonblocking) {
if (result < 0) {
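// EAGAIN and EWOULDBLOCK may be distinct values on some platforms,
// so a "try again later" check must cover both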
if (!nonblocking || errno != EAGAIN) {
if (!nonblocking || (errno != EAGAIN && errno != EWOULDBLOCK)) {
// strerror() returns a pointer to a static buffer that must not be freed
std::string errmsg = strerror(errno);
errmsg += " [errno = ";
errmsg += std::to_string(errno);
errmsg += "]";
throw std::ios_base::failure(std::move(errmsg));
}
}
@@ -68,6 +70,23 @@ void handle_syscall_result(T result, size_t num_bytes, bool nonblocking) {
}
}
int rd_flags(native_socket_type fd) {
auto flags = fcntl(fd, F_GETFL, 0);
if (flags == -1) {
throw network_error("unable to read socket flags");
}
return flags;
}
void set_nonblocking(native_socket_type fd) {
auto flags = rd_flags(fd);
if ((flags & O_NONBLOCK) == 0) { // only modify flags if O_NONBLOCK is not already set
if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
throw network_error("unable to set socket to nonblock");
}
}
}
} // namespace <anonymous>
ipv4_io_stream::ipv4_io_stream(native_socket_type fd) : m_fd(fd) { }
@@ -81,25 +100,45 @@ native_socket_type ipv4_io_stream::write_file_handle() const {
}
void ipv4_io_stream::read(void* buf, size_t len) {
handle_syscall_result(::recv(m_fd, buf, len, 0), len, false);
handle_syscall_result(::recv(m_fd, buf, len, MSG_WAITALL), len, false);
}
size_t ipv4_io_stream::read_some(void* buf, size_t len) {
auto recv_result = ::recv(m_fd, buf, len, MSG_DONTWAIT);
auto recv_result = ::recv(m_fd, buf, len, 0);
handle_syscall_result(recv_result, len, true);
return static_cast<size_t>(recv_result);
return (recv_result > 0) ? static_cast<size_t>(recv_result) : 0;
}
void ipv4_io_stream::write(const void* buf, size_t len) {
handle_syscall_result(::send(m_fd, buf, len, 0), len, false);
void ipv4_io_stream::write(const void* vbuf, size_t len) {
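// send() on a nonblocking socket may write only part of the buffer;
// loop until all len bytes are written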
auto buf = reinterpret_cast<const char*>(vbuf);
size_t written = 0;
while (written < len) {
auto send_result = ::send(m_fd, buf + written, len - written, 0);
handle_syscall_result(send_result, len - written, true);
written += static_cast<size_t>(send_result);
if (written < len) {
// block until the socket is writable again
fd_set writeset;
FD_ZERO(&writeset);
FD_SET(m_fd, &writeset);
if (select(m_fd + 1, nullptr, &writeset, nullptr, nullptr) < 0) {
throw network_error("select() failed");
}
}
}
}
size_t ipv4_io_stream::write_some(const void* buf, size_t len) {
auto send_result = ::send(m_fd, buf, len, MSG_DONTWAIT);
auto send_result = ::send(m_fd, buf, len, 0);
handle_syscall_result(send_result, len, true);
return static_cast<size_t>(send_result);
}
util::io_stream_ptr ipv4_io_stream::from_native_socket(native_socket_type fd) {
set_nonblocking(fd);
return new ipv4_io_stream(fd);
}
util::io_stream_ptr ipv4_io_stream::connect_to(const char* host,
std::uint16_t port) {
native_socket_type sockfd;
@@ -124,6 +163,7 @@ util::io_stream_ptr ipv4_io_stream::connect_to(const char* host,
}
int flags = 1;
setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
set_nonblocking(sockfd);
return new ipv4_io_stream(sockfd);
}
@@ -103,26 +103,29 @@ mm_message::~mm_message() {
}
}
void mailman_loop() {
void mailman_loop(intrusive::single_reader_queue<mm_message>& q) {
bool done = false;
// serializes outgoing messages
binary_serializer bs;
// connected tcp peers
std::map<process_information, native_socket_type> peers;
do_receive (
on_arg_match >> [&](process_information_ptr target_peer, addressed_message msg) {
std::map<process_information, util::io_stream_ptr_pair> peers;
std::unique_ptr<mm_message> msg;
auto fetch_next = [&] { msg.reset(q.pop()); };
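// takes ownership of the next message; pop() blocks until one is available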
for (fetch_next(); !done; fetch_next()) {
switch (msg->type) {
case mm_message_type::outgoing_message: {
auto& target_peer = msg->out_msg.first;
auto& out_msg = msg->out_msg.second;
CPPA_REQUIRE(target_peer != nullptr);
auto i = peers.find(*target_peer);
if (i != peers.end()) {
bool disconnect_peer = false;
auto peer_fd = i->second;
try {
bs << msg;
DEBUG("--> " << to_string(msg));
auto sent = ::send(peer_fd, bs.sendable_data(), bs.sendable_size(), 0);
if (sent < 0 || static_cast<size_t>(sent) != bs.sendable_size()) {
disconnect_peer = true;
DEBUG("too few bytes written");
}
bs << out_msg;
DEBUG("--> " << to_string(out_msg));
DEBUG("outgoing message size: " << bs.size());
i->second.second->write(bs.sendable_data(),
bs.sendable_size());
}
// something went wrong; close connection to this peer
catch (std::exception& e) {
@@ -138,31 +141,30 @@ void mailman_loop() {
bs.reset();
}
else {
DEBUG("message to an unknown peer: " << to_string(msg));
DEBUG("message to an unknown peer: " << to_string(out_msg));
}
break;
}
},
on_arg_match >> [&](native_socket_type sockfd, process_information_ptr pinfo) {
case mm_message_type::add_peer: {
DEBUG("mailman: add_peer");
auto& iopair = msg->peer.first;
auto& pinfo = msg->peer.second;
auto i = peers.find(*pinfo);
if (i == peers.end()) {
//cout << "mailman added " << pjob.pinfo->process_id() << "@"
// << to_string(pjob.pinfo->node_id()) << endl;
peers.insert(std::make_pair(*pinfo, sockfd));
peers.insert(std::make_pair(*pinfo, iopair));
}
else {
DEBUG("add_peer_job failed: peer already known");
DEBUG("add_peer failed: peer already known");
}
break;
}
},
on(atom("DONE")) >> [&]() {
case mm_message_type::shutdown: {
done = true;
},
others() >> [&]() {
std::string str = "unexpected message in post_office: ";
str += to_string(self->last_dequeued());
CPPA_CRITICAL(str.c_str());
}
)
.until(gref(done));
}
}
}
} } // namespace cppa::detail
@@ -67,19 +67,14 @@ struct network_manager_impl : network_manager {
if (pipe(pipe_fd) != 0) {
CPPA_CRITICAL("cannot create pipe");
}
// create actors
//m_post_office.reset(new thread_mapped_actor);
//m_mailman.reset(new thread_mapped_actor);
// store some data in local variables for lambdas
// store the pipe's read handle in a local variable for the lambda expression
int pipe_fd0 = pipe_fd[0];
// start threads
m_post_office_thread = std::thread([this, pipe_fd0] {
//scoped_self_setter sss{po_ptr.get()};
post_office_loop(pipe_fd0, this->m_post_office_queue);
});
m_mailman_thread = std::thread([] {
//scoped_self_setter sss{mm_ptr.get()};
mailman_loop();
m_mailman_thread = std::thread([this] {
mailman_loop(this->m_mailman_queue);
});
}
@@ -72,9 +72,13 @@
#include "cppa/detail/addressed_message.hpp"
#define DEBUG(arg) \
std::cout << "[process id: " \
{ \
std::ostringstream oss; \
oss << "[process id: " \
<< cppa::process_information::get()->process_id() \
<< "] " << arg << std::endl
<< "] " << arg << std::endl; \
std::cout << oss.str(); \
} (void) 0
#undef DEBUG
#define DEBUG(unused) ((void) 0)
@@ -163,10 +167,14 @@ po_message::~po_message() {
}
}
class post_office;
class post_office_worker {
public:
post_office_worker(post_office* parent) : m_parent(parent) { }
virtual ~post_office_worker() { }
// returns false if either done or an error occurred
@@ -176,11 +184,68 @@ class post_office_worker {
virtual bool is_doorman_of(actor_id) const { return false; }
protected:
post_office* parent() { return m_parent; }
private:
post_office* m_parent;
};
typedef std::unique_ptr<post_office_worker> po_worker_ptr;
typedef std::vector<po_worker_ptr> po_worker_vector;
class post_office {
friend void post_office_loop(int, po_message_queue&);
public:
post_office() : m_done(false), m_pself(process_information::get()) {
DEBUG("started post office at "
<< m_pself->process_id() << "@" << to_string(m_pself->node_id()));
}
template<class Worker, typename... Args>
inline void add_worker(Args&&... args) {
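// workers created here are queued in m_new_workers and moved into
// m_workers at the end of the current loop iteration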
m_new_workers.emplace_back(new Worker(this, std::forward<Args>(args)...));
}
inline void close_socket(native_socket_type fd) {
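// sockets registered here get their workers erased at the end of
// the current loop iteration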
m_closed_sockets.push_back(fd);
}
inline void quit() {
m_done = true;
}
post_office_worker* doorman_of(actor_id whom) {
auto last = m_workers.end();
auto i = std::find_if(m_workers.begin(), last,
[whom](const po_worker_ptr& hp) {
return hp->is_doorman_of(whom);
});
return (i != last) ? i->get() : nullptr;
}
const process_information_ptr& pself() {
return m_pself;
}
private:
void operator()(int input_fd, po_message_queue& q);
bool m_done;
process_information_ptr m_pself;
po_worker_vector m_workers;
po_worker_vector m_new_workers;
std::vector<native_socket_type> m_closed_sockets;
};
// represents a TCP connection to another peer
class po_peer : public post_office_worker {
@@ -197,8 +262,6 @@ class po_peer : public post_office_worker {
util::output_stream_ptr m_ostream;
state m_state;
// caches process_information::get()
process_information_ptr m_pself;
// the process information of our remote peer
process_information_ptr m_peer;
// caches uniform_typeid<addressed_message>()
@@ -206,29 +269,20 @@
// manages socket input
buffer<512, (16 * 1024 * 1024)> m_buf;
void init() {
m_state = (m_peer) ? wait_for_msg_size : wait_for_process_info;
m_pself = process_information::get();
m_meta_msg = uniform_typeid<addressed_message>();
m_buf.reset(m_state == wait_for_process_info
? sizeof(std::uint32_t) + process_information::node_id_size
: sizeof(std::uint32_t));
}
public:
po_peer(util::io_stream_ptr ios, process_information_ptr peer = nullptr)
: m_istream(ios)
, m_ostream(ios)
, m_peer(std::move(peer)) {
init();
}
po_peer(util::io_stream_ptr_pair spair, process_information_ptr peer = nullptr)
: m_istream(std::move(spair.first))
po_peer(post_office* parent,
util::io_stream_ptr_pair spair,
process_information_ptr peer = nullptr)
: post_office_worker(parent)
, m_istream(std::move(spair.first))
, m_ostream(std::move(spair.second))
, m_peer(std::move(peer)) {
init();
, m_state((peer) ? wait_for_msg_size : wait_for_process_info)
, m_peer(peer)
, m_meta_msg(uniform_typeid<addressed_message>()) {
m_buf.reset(m_state == wait_for_process_info
? sizeof(std::uint32_t) + process_information::node_id_size
: sizeof(std::uint32_t));
}
~po_peer() {
@@ -257,15 +311,19 @@ class po_peer : public post_office_worker {
// @returns false if an error occurred; otherwise true
bool read_and_continue() {
for (;;) {
if (m_buf.append_from(m_istream.get()) == false) {
DEBUG("cannot read from socket");
try {
m_buf.append_from(m_istream.get());
}
catch (std::exception& e) {
DEBUG(e.what());
return false;
}
if (m_buf.ready() == false) {
if (!m_buf.full()) {
return true; // try again later
}
switch (m_state) {
case wait_for_process_info: {
DEBUG("po_peer: read_and_continue: wait_for_process_info");
std::uint32_t process_id;
process_information::node_id_type node_id;
memcpy(&process_id, m_buf.data(), sizeof(std::uint32_t));
@@ -273,11 +331,10 @@
process_information::node_id_size);
m_peer.reset(new process_information(process_id, node_id));
util::io_stream_ptr_pair iop(m_istream, m_ostream);
DEBUG("po_peer: send new peer to mailman");
// inform mailman about new peer
singleton_manager::get_network_manager()
->send_to_mailman(mm_message::create(iop, m_peer));
// forget the output stream (initialization done)
m_ostream.reset();
mailman_add_peer(iop, m_peer);
// initialization done
m_state = wait_for_msg_size;
m_buf.reset(sizeof(std::uint32_t));
DEBUG("pinfo read: "
@@ -287,13 +344,22 @@
break;
}
case wait_for_msg_size: {
DEBUG("po_peer: read_and_continue: wait_for_msg_size");
std::uint32_t msg_size;
memcpy(&msg_size, m_buf.data(), sizeof(std::uint32_t));
DEBUG("msg_size: " << msg_size);
try {
m_buf.reset(msg_size);
}
catch (std::exception& e) {
DEBUG(e.what());
return false;
}
m_state = read_message;
break;
}
case read_message: {
DEBUG("po_peer: read_and_continue: read_message");
addressed_message msg;
binary_deserializer bd(m_buf.data(), m_buf.size());
try {
@@ -314,11 +380,12 @@
DEBUG("empty receiver");
}
else if (receiver->parent_process() == *process_information::get()) {
auto mpeer = m_peer;
// this message was sent from a proxy
receiver->attach_functor([=](std::uint32_t reason) {
receiver->attach_functor([mpeer, receiver](std::uint32_t reason) {
addressed_message kmsg{receiver, receiver, make_any_tuple(atom("KILL_PROXY"), reason)};
singleton_manager::get_network_manager()
->send_to_mailman(mm_message::create(m_peer, kmsg));
->send_to_mailman(mm_message::create(mpeer, kmsg));
});
}
else {
@@ -384,18 +451,14 @@ class po_peer : public post_office_worker {
// accepts new connections to a published actor
class po_doorman : public post_office_worker {
std::unique_ptr<util::acceptor> m_acceptor;
actor_id m_actor_id;
// caches process_information::get()
process_information_ptr m_pself;
po_worker_vector* new_handler;
public:
po_doorman(actor_id aid, std::unique_ptr<util::acceptor>&& acceptor, po_worker_vector* v)
: m_acceptor(std::move(acceptor))
, m_actor_id(aid), m_pself(process_information::get())
, new_handler(v) {
po_doorman(post_office* parent,
actor_id aid,
std::unique_ptr<util::acceptor> acceptor)
: post_office_worker(parent)
, m_actor_id(aid)
, m_acceptor(std::move(acceptor)) {
}
bool is_doorman_of(actor_id aid) const {
@@ -407,20 +470,30 @@ class po_doorman : public post_office_worker {
}
bool read_and_continue() {
// accept as many connections as possible
for (;;) {
auto opt = m_acceptor->try_accept_connection();
if (opt) {
auto& pair = *opt;
std::uint32_t process_id = m_pself->process_id();
auto& pself = parent()->pself();
std::uint32_t process_id = pself->process_id();
pair.second->write(&m_actor_id, sizeof(actor_id));
pair.second->write(&process_id, sizeof(std::uint32_t));
pair.second->write(m_pself->node_id().data(), m_pself->node_id().size());
new_handler->emplace_back(new po_peer(pair));
DEBUG("socket accepted; published actor: " << id);
}
pair.second->write(pself->node_id().data(),
pself->node_id().size());
parent()->add_worker<po_peer>(pair);
DEBUG("connection accepted; published actor: " << m_actor_id);
}
else {
return true;
}
}
}
private:
actor_id m_actor_id;
std::unique_ptr<util::acceptor> m_acceptor;
};
@@ -428,17 +501,11 @@ class po_overseer : public post_office_worker {
public:
po_overseer(bool& done,
po_overseer(post_office* parent,
int pipe_fd,
po_worker_vector& handler,
po_worker_vector& new_handler,
std::vector<native_socket_type>& closed_sockets,
intrusive::single_reader_queue<po_message>& q )
: m_done(done)
po_message_queue& q)
: post_office_worker(parent)
, m_pipe_fd(pipe_fd)
, m_handler(handler)
, m_new_handler(new_handler)
, m_closed_sockets(closed_sockets)
, m_queue(q) { }
native_socket_type get_socket() const {
@@ -451,48 +518,44 @@ class po_overseer : public post_office_worker {
CPPA_CRITICAL("cannot read from pipe");
}
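// full fence: synchronize with the sender that enqueued the message
// before signaling the pipe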
std::atomic_thread_fence(std::memory_order_seq_cst);
std::unique_ptr<po_message> msg;
msg.reset(m_queue.pop());
std::unique_ptr<po_message> msg(m_queue.pop());
switch (msg->type) {
case po_message_type::add_peer: {
DEBUG("post_office: add_peer");
auto& new_peer = msg->new_peer;
m_new_handler.emplace_back(new po_peer(new_peer.first, new_peer.second));
parent()->add_worker<po_peer>(new_peer.first, new_peer.second);
break;
}
case po_message_type::rm_peer: {
DEBUG("post_office: rm_peer");
auto istream = msg->peer_streams.first;
if (istream) {
m_closed_sockets.emplace_back(istream->read_file_handle());
parent()->close_socket(istream->read_file_handle());
}
break;
}
case po_message_type::publish: {
DEBUG("post_office: publish");
auto& ptrs = msg->new_published_actor;
m_new_handler.emplace_back(new po_doorman(ptrs.second->id(),
std::move(ptrs.first),
&m_new_handler));
parent()->add_worker<po_doorman>(ptrs.second->id(),
std::move(ptrs.first));
break;
}
case po_message_type::unpublish: {
DEBUG("post_office: unpublish");
if (msg->published_actor) {
auto aid = msg->published_actor->id();
auto i = std::find_if(m_handler.begin(), m_handler.end(),
[aid](const po_worker_ptr& hp) {
return hp->is_doorman_of(aid);
});
if (i != m_handler.end()) {
m_closed_sockets.emplace_back((*i)->get_socket());
auto worker = parent()->doorman_of(aid);
if (worker) {
parent()->close_socket(worker->get_socket());
}
}
break;
}
case po_message_type::shutdown: {
DEBUG("post_office: shutdown");
m_done = true;
parent()->quit();
break;
}
}
return true;
@@ -500,49 +563,38 @@ class po_overseer : public post_office_worker {
private:
bool& m_done;
int m_pipe_fd;
po_worker_vector& m_handler;
po_worker_vector& m_new_handler;
std::vector<native_socket_type>& m_closed_sockets;
intrusive::single_reader_queue<po_message>& m_queue;
po_message_queue& m_queue;
};
inline constexpr std::uint64_t valof(atom_value val) {
return static_cast<std::uint64_t>(val);
}
void post_office_loop(int input_fd, intrusive::single_reader_queue<po_message>& q) {
void post_office::operator()(int input_fd, po_message_queue& q) {
int maxfd = 0;
fd_set readset;
bool done = false;
po_worker_vector handler;
po_worker_vector new_handler;
std::vector<native_socket_type> closed_sockets;
handler.emplace_back(new po_overseer(done, input_fd, handler,
new_handler, closed_sockets, q));
m_workers.emplace_back(new po_overseer(this, input_fd, q));
do {
FD_ZERO(&readset);
maxfd = 0;
for (auto& hptr : handler) {
auto fd = hptr->get_socket();
CPPA_REQUIRE(m_workers.size() > 0);
for (auto& worker : m_workers) {
auto fd = worker->get_socket();
maxfd = std::max(maxfd, fd);
FD_SET(fd, &readset);
}
CPPA_REQUIRE(maxfd > 0);
if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0) {
// must not happen
DEBUG("select failed!");
perror("select()");
exit(3);
}
{ // iterate over all handler and remove if needed
auto i = handler.begin();
while (i != handler.end()) {
{ // iterate over all workers and remove workers as needed
auto i = m_workers.begin();
while (i != m_workers.end()) {
if ( FD_ISSET((*i)->get_socket(), &readset)
&& (*i)->read_and_continue() == false) {
DEBUG("handler erased");
i = handler.erase(i);
DEBUG("erase worker (read_and_continue() returned false)");
i = m_workers.erase(i);
}
else {
++i;
@@ -550,23 +602,29 @@ void post_office_loop(int input_fd, intrusive::single_reader_queue<po_message>&
}
}
// erase all handlers with closed sockets
for (auto fd : closed_sockets) {
auto i = std::find_if(handler.begin(), handler.end(),
for (auto fd : m_closed_sockets) {
auto i = std::find_if(m_workers.begin(), m_workers.end(),
[fd](const po_worker_ptr& wptr) {
return wptr->get_socket() == fd;
});
if (i != handler.end()) {
handler.erase(i);
if (i != m_workers.end()) {
m_workers.erase(i);
}
}
// insert new handlers
if (new_handler.empty() == false) {
std::move(new_handler.begin(), new_handler.end(),
std::back_inserter(handler));
new_handler.clear();
if (m_new_workers.empty() == false) {
std::move(m_new_workers.begin(), m_new_workers.end(),
std::back_inserter(m_workers));
m_new_workers.clear();
}
}
while (done == false);
while (m_done == false);
DEBUG("post_office_loop: done");
}
void post_office_loop(int input_fd, po_message_queue& q) {
post_office po;
po(input_fd, q);
}
} } // namespace cppa::detail
@@ -65,142 +65,42 @@ using std::endl;
namespace cppa {
/*
namespace {
void read_from_socket(native_socket_type sfd, void* buf, size_t buf_size) {
char* cbuf = reinterpret_cast<char*>(buf);
size_t read_bytes = 0;
size_t left = buf_size;
int rres = 0;
size_t urres = 0;
do {
rres = ::recv(sfd, cbuf + read_bytes, left, 0);
if (rres <= 0) {
throw std::ios_base::failure("cannot read from closed socket");
}
urres = static_cast<size_t>(rres);
read_bytes += urres;
left -= urres;
}
while (urres < left);
}
} // namespace <anonymous>
struct socket_guard {
bool m_released;
native_socket_type m_socket;
public:
socket_guard(native_socket_type sfd) : m_released(false), m_socket(sfd) {
}
~socket_guard() {
if (!m_released) detail::closesocket(m_socket);
}
void release() {
m_released = true;
}
};
*/
void publish(actor_ptr whom, std::uint16_t port) {
if (!whom) return;
// throws on error
auto ptr = detail::ipv4_acceptor::create(port);
void publish(actor_ptr whom, std::unique_ptr<util::acceptor> acceptor) {
if (!whom || !acceptor) return; // do nothing unless both an actor and an acceptor are given
detail::singleton_manager::get_actor_registry()->put(whom->id(), whom);
detail::post_office_publish(std::move(ptr), whom);
/*
native_socket_type sockfd;
struct sockaddr_in serv_addr;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd == detail::invalid_socket) {
throw network_error("could not create server socket");
}
// sguard closes the socket if an exception occurs
socket_guard sguard(sockfd);
memset((char*) &serv_addr, 0, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY;
serv_addr.sin_port = htons(port);
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0) {
throw bind_failure(errno);
}
if (listen(sockfd, 10) != 0) {
throw network_error("listen() failed");
}
int flags = fcntl(sockfd, F_GETFL, 0);
if (flags == -1) {
throw network_error("unable to get socket flags");
}
if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) < 0) {
throw network_error("unable to set socket to nonblock");
}
flags = 1;
setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
// ok, no exceptions
sguard.release();
detail::post_office_publish(sockfd, whom);
*/
detail::post_office_publish(std::move(acceptor), whom);
}
actor_ptr remote_actor(const char* host, std::uint16_t port) {
/*
native_socket_type sockfd;
struct sockaddr_in serv_addr;
struct hostent* server;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd == detail::invalid_socket) {
throw network_error("socket creation failed");
}
server = gethostbyname(host);
if (!server) {
std::string errstr = "no such host: ";
errstr += host;
throw network_error(std::move(errstr));
}
memset(&serv_addr, 0, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
memmove(&serv_addr.sin_addr.s_addr, server->h_addr, server->h_length);
serv_addr.sin_port = htons(port);
if (connect(sockfd, (const sockaddr*) &serv_addr, sizeof(serv_addr)) != 0) {
throw network_error("could not connect to host");
}
*/
actor_ptr remote_actor(util::io_stream_ptr_pair peer) {
auto pinf = process_information::get();
std::uint32_t process_id = pinf->process_id();
/*int flags = 1;
setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof(int));
*/
// throws on error
util::io_stream_ptr peer = detail::ipv4_io_stream::connect_to(host, port);
peer->write(&process_id, sizeof(std::uint32_t));
peer->write(pinf->node_id().data(), pinf->node_id().size());
peer.second->write(&process_id, sizeof(std::uint32_t));
peer.second->write(pinf->node_id().data(), pinf->node_id().size());
std::uint32_t remote_actor_id;
std::uint32_t peer_pid;
process_information::node_id_type peer_node_id;
peer->read(&remote_actor_id, sizeof(remote_actor_id));
peer->read(&peer_pid, sizeof(std::uint32_t));
peer->read(peer_node_id.data(), peer_node_id.size());
auto peer_pinf = new process_information(peer_pid, peer_node_id);
process_information_ptr pinfptr(peer_pinf);
peer.first->read(&remote_actor_id, sizeof(remote_actor_id));
peer.first->read(&peer_pid, sizeof(std::uint32_t));
peer.first->read(peer_node_id.data(), peer_node_id.size());
process_information_ptr pinfptr(new process_information(peer_pid, peer_node_id));
//auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
util::io_stream_ptr_pair io_ptrs(peer, peer);
//detail::singleton_manager::get_network_manager()
//->send_to_mailman(make_any_tuple(util::io_stream_ptr_pair(peer, peer),
// pinfptr));
detail::post_office_add_peer(io_ptrs, pinfptr);
detail::mailman_add_peer(peer, pinfptr);
detail::post_office_add_peer(peer, pinfptr);
return detail::get_actor_proxy_cache().get(remote_actor_id,
pinfptr->process_id(),
pinfptr->node_id());
//auto ptr = get_scheduler()->register_hidden_context();
}
void publish(actor_ptr whom, std::uint16_t port) {
if (whom) publish(whom, detail::ipv4_acceptor::create(port));
}
actor_ptr remote_actor(const char* host, std::uint16_t port) {
// throws on error
util::io_stream_ptr peer = detail::ipv4_io_stream::connect_to(host, port);
util::io_stream_ptr_pair ptrpair(peer, peer);
return remote_actor(ptrpair);
}
} // namespace cppa