Commit 627cec59 authored by neverlord's avatar neverlord

fixed mailman implementation

parent 2583d715
......@@ -201,8 +201,6 @@ cppa/util/any_tuple_iterator.hpp
src/any_tuple_iterator.cpp
cppa/detail/boxed.hpp
cppa/detail/unboxed.hpp
cppa/detail/matcher_arguments.hpp
src/matcher_arguments.cpp
src/invoke_rules.cpp
src/abstract_tuple.cpp
cppa/util/duration.hpp
......
......@@ -172,6 +172,8 @@ class actor : public channel
*/
static intrusive_ptr<actor> by_id(std::uint32_t actor_id);
inline bool is_proxy() const { return m_is_proxy; }
};
/**
......
......@@ -32,8 +32,9 @@ class actor_proxy_cache
public:
// this callback is called if a new proxy instance is created
template<typename F>
void set_callback(F&& cb)
void set_new_proxy_callback(F&& cb)
{
m_new_cb = std::forward<F>(cb);
}
......
......@@ -17,6 +17,13 @@ class post_office_msg
public:
enum msg_type
{
add_peer_type,
add_server_socket_type,
proxy_exited_type
};
struct add_peer
{
......@@ -42,6 +49,12 @@ class post_office_msg
};
// payload of a proxy_exited_type message: carries the proxy whose
// remote actor terminated so the post office can drop its bookkeeping
struct proxy_exited
{
actor_proxy_ptr proxy_ptr;
inline proxy_exited(const actor_proxy_ptr& who) : proxy_ptr(who) { }
};
post_office_msg(native_socket_t arg0,
const process_information_ptr& arg1,
const actor_proxy_ptr& arg2,
......@@ -49,14 +62,21 @@ class post_office_msg
post_office_msg(native_socket_t arg0, const actor_ptr& arg1);
post_office_msg(const actor_proxy_ptr& proxy_ptr);
inline bool is_add_peer_msg() const
{
return m_is_add_peer_msg;
return m_type == add_peer_type;
}
inline bool is_add_server_socket_msg() const
{
return !m_is_add_peer_msg;
return m_type == add_server_socket_type;
}
// @returns true if this message carries a proxy_exited payload
inline bool is_proxy_exited_msg() const { return proxy_exited_type == m_type; }
inline add_peer& as_add_peer_msg()
......@@ -69,27 +89,32 @@ class post_office_msg
return m_add_server_socket;
}
// grants access to the proxy_exited union member;
// only valid if is_proxy_exited_msg() returns true
inline proxy_exited& as_proxy_exited_msg()
{
return m_proxy_exited;
}
~post_office_msg();
private:
post_office_msg* next;
bool m_is_add_peer_msg;
msg_type m_type;
union
{
add_peer m_add_peer_msg;
add_server_socket m_add_server_socket;
proxy_exited m_proxy_exited;
};
};
constexpr std::uint32_t rd_queue_event = 0x00;
constexpr std::uint32_t unpublish_actor_event = 0x01;
constexpr std::uint32_t dec_socket_ref_event = 0x02;
constexpr std::uint32_t close_socket_event = 0x03;
constexpr std::uint32_t shutdown_event = 0x04;
constexpr std::uint32_t close_socket_event = 0x02;
constexpr std::uint32_t shutdown_event = 0x03;
typedef std::uint32_t pipe_msg[2];
constexpr size_t pipe_msg_size = 2 * sizeof(std::uint32_t);
......
......@@ -38,22 +38,27 @@ void actor_proxy::enqueue(actor* sender, any_tuple&& msg)
// Enqueue for a proxy of a remote actor: a (":KillProxy", reason) tuple
// terminates the proxy locally via cleanup(); any other message is
// forwarded to the process that owns the real actor.
void actor_proxy::enqueue(actor* sender, const any_tuple& msg)
{
// NOTE(review): the commented-out region below is dead code kept from an
// earlier revision (it also contains diff-rendering artifacts); only the
// code after the closing */ is live
/*
if (msg.size() > 0 && msg.utype_info_at(0) == typeid(atom_value))
{
if (msg.size() == 2 && msg.utype_info_at(1) == typeid(actor_ptr))
{
switch (to_int(msg.get_as<atom_value>(0)))
{
// received via post_office
case to_int(atom(":Link")):
{
auto s = msg.get_as<actor_ptr>(1);
link_to(s);
(void) link_to_impl(s);
//link_to(s);
return;
}
// received via post_office
case to_int(atom(":Unlink")):
{
auto s = msg.get_as<actor_ptr>(1);
unlink_from(s);
(void) unlink_from_impl(s);
//unlink_from(s);
return;
}
default: break;
......@@ -67,6 +72,15 @@ void actor_proxy::enqueue(actor* sender, const any_tuple& msg)
return;
}
}
*/
// intercept (":KillProxy", std::uint32_t exit reason) and shut down locally
if ( msg.size() == 2
&& msg.utype_info_at(0) == typeid(atom_value)
&& msg.get_as<atom_value>(0) == atom(":KillProxy")
&& msg.utype_info_at(1) == typeid(std::uint32_t))
{
cleanup(msg.get_as<std::uint32_t>(1));
return;
}
// everything else goes over the wire to the owning process
forward_message(parent_process_ptr(), sender, msg);
}
......
......@@ -56,11 +56,7 @@ mailman_job::~mailman_job()
/*
// implemented in post_office.cpp
util::single_reader_queue<mailman_job>& mailman_queue()
{
return *s_queue;
//return *(s_mailman_manager.m_queue);
}
util::single_reader_queue<mailman_job>& mailman_queue();
*/
// known issues: send() should be asynchronous and select() should be used
......
......@@ -17,6 +17,7 @@
#include <sys/types.h>
// used cppa classes
#include "cppa/atom.hpp"
#include "cppa/to_string.hpp"
#include "cppa/deserializer.hpp"
#include "cppa/binary_deserializer.hpp"
......@@ -37,13 +38,19 @@
#include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/detail/addressed_message.hpp"
//#define DEBUG(arg) std::cout << arg << std::endl
/*
#define DEBUG(arg) \
std::cout << "[process id: " \
<< cppa::process_information::get()->process_id() \
<< "] " << arg << std::endl
*/
#define DEBUG(unused) ((void) 0)
using std::cerr;
using std::endl;
namespace cppa { namespace detail { namespace {
namespace {
// allocate in 1KB chunks (minimize reallocations)
constexpr size_t s_chunk_size = 1024;
......@@ -54,12 +61,12 @@ constexpr size_t s_max_buffer_size = (1024 * 1024);
static_assert((s_max_buffer_size % s_chunk_size) == 0,
"max_buffer_size is not a multiple of chunk_size");
static_assert(sizeof(native_socket_t) == sizeof(std::uint32_t),
static_assert(sizeof(cppa::detail::native_socket_t) == sizeof(std::uint32_t),
"sizeof(native_socket_t) != sizeof(std::uint32_t)");
constexpr int s_rdflag = MSG_DONTWAIT;
} } } // namespace cppa::detail::<anonymous>
} // namespace <anonymous>
namespace cppa { namespace detail {
......@@ -68,107 +75,12 @@ util::single_reader_queue<mailman_job>& mailman_queue()
return singleton_manager::get_network_manager()->mailman_queue();
}
} } // namespace cppa::detail
namespace cppa { namespace detail { namespace {
class po_doorman;
class post_office_worker
// represents a TCP connection to another peer
class po_peer
{
size_t m_rc;
native_socket_t m_parent;
post_office_worker(const post_office_worker&) = delete;
post_office_worker& operator=(const post_office_worker&) = delete;
protected:
native_socket_t m_socket;
// caches process_information::get()
process_information_ptr m_pself;
post_office_worker(native_socket_t fd, native_socket_t parent_fd = -1)
: m_rc((parent_fd != -1) ? 1 : 0)
, m_parent(parent_fd)
, m_socket(fd)
, m_pself(process_information::get())
{
}
post_office_worker(post_office_worker&& other)
: m_rc(other.m_rc)
, m_parent(other.m_parent)
, m_socket(other.m_socket)
, m_pself(process_information::get())
{
other.m_rc = 0;
other.m_socket = -1;
other.m_parent = -1;
}
public:
inline size_t ref_count() const
{
return m_rc;
}
inline void inc_ref_count()
{
++m_rc;
}
inline size_t dec_ref_count()
{
if (m_rc == 0)
{
throw std::underflow_error("post_office_worker::dec_ref_count()");
}
return --m_rc;
}
inline native_socket_t get_socket()
{
return m_socket;
}
inline bool has_parent() const
{
return m_parent != -1;
}
inline native_socket_t parent() const
{
return m_parent;
}
// @returns new reference count
size_t parent_exited(native_socket_t parent_socket)
{
if (has_parent() && parent() == parent_socket)
{
m_parent = -1;
return dec_ref_count();
}
return ref_count();
}
virtual ~post_office_worker()
{
if (m_socket != -1)
{
closesocket(m_socket);
}
}
};
class po_peer : public post_office_worker
{
typedef post_office_worker super;
enum state
{
// connection just established; waiting for process information
......@@ -180,6 +92,13 @@ class po_peer : public post_office_worker
};
state m_state;
// TCP socket to remote peer
native_socket_t m_socket;
// TCP socket identifying our parent (that accepted m_socket)
native_socket_t m_parent_socket;
// caches process_information::get()
process_information_ptr m_pself;
// the process information of our remote peer
process_information_ptr m_peer;
std::unique_ptr<attachable> m_observer;
buffer<s_chunk_size, s_max_buffer_size> m_rdbuf;
......@@ -190,60 +109,117 @@ class po_peer : public post_office_worker
public:
explicit po_peer(post_office_msg::add_peer& from)
: super(from.sockfd)
, m_state(wait_for_msg_size)
: m_state(wait_for_msg_size)
, m_socket(from.sockfd)
, m_parent_socket(-1)
, m_pself(process_information::get())
, m_peer(std::move(from.peer))
, m_observer(std::move(from.attachable_ptr))
, m_meta_msg(uniform_typeid<any_tuple>())
, m_meta_msg(uniform_typeid<detail::addressed_message>())
{
}
explicit po_peer(native_socket_t sockfd, native_socket_t parent_socket)
: super(sockfd, parent_socket)
, m_state(wait_for_process_info)
, m_meta_msg(uniform_typeid<any_tuple>())
: m_state(wait_for_process_info)
, m_socket(sockfd)
, m_parent_socket(parent_socket)
, m_pself(process_information::get())
, m_meta_msg(uniform_typeid<detail::addressed_message>())
{
m_rdbuf.reset( sizeof(std::uint32_t)
+ process_information::node_id_size);
}
po_peer(po_peer&& other)
: super(std::move(other))
, m_state(other.m_state)
: m_state(other.m_state)
, m_socket(other.m_socket)
, m_parent_socket(other.m_parent_socket)
, m_pself(process_information::get())
, m_peer(std::move(other.m_peer))
, m_observer(std::move(other.m_observer))
, m_rdbuf(std::move(other.m_rdbuf))
, m_children(std::move(other.m_children))
, m_meta_msg(uniform_typeid<addressed_message>())
, m_meta_msg(other.m_meta_msg)
{
other.m_socket = -1;
other.m_parent_socket = -1;
// other.m_children.clear();
}
// @returns the (non-owning view of the) TCP socket to the remote peer
native_socket_t get_socket() const { return m_socket; }
// returns true if @p pod is the parent of this
inline bool parent_exited(const po_doorman& pod);
void add_child(const actor_proxy_ptr& pptr)
{
m_children.push_back(pptr);
if (pptr) m_children.push_back(pptr);
else
{
DEBUG("po_peer::add_child(nullptr) called");
}
}
// @returns how many proxy children this peer currently owns
inline size_t children_count() const { return m_children.size(); }
// @returns true if a doorman (server socket) accepted this connection
inline bool has_parent() const { return -1 != m_parent_socket; }
// removes pptr from the list of children and returns
// a <bool, size_t> pair, whereas: first = true if pptr is a child of this
// second = number of remaining children
std::pair<bool, size_t> remove_child(const actor_proxy_ptr& pptr)
{
auto end = m_children.end();
auto i = std::find(m_children.begin(), end, pptr);
if (i != end)
{
m_children.erase(i);
return { true, m_children.size() };
}
return { false, m_children.size() };
}
~po_peer()
{
if (!m_children.empty())
{
auto msg = make_tuple(atom(":KillProxy"),
exit_reason::remote_link_unreachable);
for (actor_proxy_ptr& pptr : m_children)
{
pptr->enqueue(self(),
make_tuple(atom(":KillProxy"),
exit_reason::remote_link_unreachable));
pptr->enqueue(nullptr, msg);
}
}
if (m_socket != -1) closesocket(m_socket);
}
// @returns false if an error occured; otherwise true
bool read_and_continue()
{
static constexpr size_t wfp_size = sizeof(std::uint32_t)
+ process_information::node_id_size;
switch (m_state)
{
case wait_for_process_info:
{
if (!m_rdbuf.append_from(m_socket, s_rdflag)) return false;
if (m_rdbuf.final_size() != wfp_size)
{
m_rdbuf.reset(wfp_size);
}
if (m_rdbuf.append_from(m_socket, s_rdflag) == false)
{
return false;
}
if (m_rdbuf.ready() == false)
{
break;
......@@ -263,9 +239,9 @@ class po_peer : public post_office_worker
m_rdbuf.reset();
m_state = wait_for_msg_size;
DEBUG("pinfo read: "
<< m_peer->process_id
<< m_peer->process_id()
<< "@"
<< to_string(m_peer->node_id));
<< to_string(m_peer->node_id()));
// fall through and try to read more from socket
}
}
......@@ -315,24 +291,27 @@ class po_peer : public post_office_worker
return false;
}
auto& content = msg.content();
if ( content.size() == 2
DEBUG("<-- " << to_string(content));
if ( content.size() == 1
&& content.utype_info_at(0) == typeid(atom_value)
&& content.get_as<atom_value>(0) == atom(":Monitor")
&& content.utype_info_at(1) == typeid(actor_ptr))
&& content.get_as<atom_value>(0) == atom(":Monitor"))
{
/*
actor_ptr sender = content.get_as<actor_ptr>(1);
if (sender->parent_process() == *process_information::get())
auto receiver_ch = msg.receiver();
actor_ptr receiver = dynamic_cast<actor*>(receiver_ch.get());
if (receiver->parent_process() == *process_information::get())
{
//cout << pinfo << " ':Monitor'; actor id = "
// << sender->id() << endl;
// local actor?
// this message was send from a proxy
sender->attach_functor([=](std::uint32_t reason)
receiver->attach_functor([=](std::uint32_t reason)
{
any_tuple kmsg = make_tuple(atom(":KillProxy"),
reason);
auto mjob = new detail::mailman_job(m_peer, kmsg);
auto mjob = new detail::mailman_job(m_peer,
receiver,
receiver,
kmsg);
detail::mailman_queue().push_back(mjob);
});
}
......@@ -340,13 +319,9 @@ class po_peer : public post_office_worker
{
DEBUG(":Monitor received for an remote actor");
}
*/
cerr << "NOT IMPLEMENTED YET; post_office line "
<< __LINE__ << endl;
}
else
{
DEBUG("<-- " << to_string(content));
msg.receiver()->enqueue(msg.sender().get(),
std::move(msg.content()));
}
......@@ -364,31 +339,45 @@ class po_peer : public post_office_worker
};
class po_doorman : public post_office_worker
// accepts new connections to a published actor
class po_doorman
{
typedef post_office_worker super;
// server socket
native_socket_t m_socket;
actor_ptr published_actor;
std::list<po_peer>* m_peers;
// caches process_information::get()
process_information_ptr m_pself;
public:
explicit po_doorman(post_office_msg::add_server_socket& assm,
std::list<po_peer>* peers)
: super(assm.server_sockfd)
: m_socket(assm.server_sockfd)
, published_actor(assm.published_actor)
, m_peers(peers)
, m_pself(process_information::get())
{
}
// closes the server socket unless ownership was moved away (-1)
~po_doorman()
{
    if (m_socket != -1)
    {
        closesocket(m_socket);
    }
}
po_doorman(po_doorman&& other)
: super(std::move(other))
: m_socket(other.m_socket)
, published_actor(std::move(other.published_actor))
, m_peers(other.m_peers)
, m_pself(process_information::get())
{
other.m_socket = -1;
}
// @returns the listening (server) socket of this doorman
inline native_socket_t get_socket() const { return m_socket; }
// @returns false if an error occured; otherwise true
......@@ -426,6 +415,16 @@ class po_doorman : public post_office_worker
};
// checks whether @p pod is the doorman that accepted this connection;
// if so, the parent link is severed and true is returned
inline bool po_peer::parent_exited(const po_doorman& pod)
{
    const bool is_parent = (pod.get_socket() == m_parent_socket);
    if (is_parent) m_parent_socket = -1;
    return is_parent;
}
// starts and stops mailman_loop
struct mailman_worker
{
......@@ -440,10 +439,6 @@ struct mailman_worker
}
};
} } } // namespace cppa::detail::<anonymous>
namespace cppa { namespace detail {
void post_office_loop(int pipe_read_handle, int pipe_write_handle)
{
mailman_worker mworker;
......@@ -453,56 +448,44 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
std::list<po_peer> peers;
// readset for select()
fd_set readset;
// maximum number of all socket descriptors
// maximum number of all socket descriptors for select()
int maxfd = 0;
// initialize variables
// initialize variables for select()
FD_ZERO(&readset);
maxfd = pipe_read_handle;
FD_SET(pipe_read_handle, &readset);
// keeps track about what peer we are iterating at this time
// keeps track about what peer we are iterating at the moment
po_peer* selected_peer = nullptr;
// thread id of post_office
// our thread id
auto thread_id = this_thread::get_id();
// if an actor calls its quit() handler in this thread,
// we 'catch' the released socket here
std::vector<native_socket_t> released_socks;
// our event queue
auto& msg_queue = singleton_manager::get_network_manager()->post_office_queue();
// functor that releases a socket descriptor
// returns true if an element was removed from peers
auto release_socket = [&](native_socket_t sockfd)
{
auto end = peers.end();
auto i = std::find_if(peers.begin(), end, [sockfd](po_peer& pp) -> bool
{
return pp.get_socket() == sockfd;
});
if (i != end && i->dec_ref_count() == 0) peers.erase(i);
};
auto pself = process_information::get();
// needed for lookups in our proxy cache
actor_proxy_cache::key_tuple proxy_cache_key ( 0, // set on lookup
pself->process_id(),
pself->node_id() );
// initialize proxy cache
get_actor_proxy_cache().set_callback([&](actor_proxy_ptr& pptr)
get_actor_proxy_cache().set_new_proxy_callback([&](actor_proxy_ptr& pptr)
{
pptr->enqueue(nullptr, make_tuple(atom(":Monitor"), pptr));
DEBUG("new_proxy_callback, actor id = " << pptr->id());
// it's ok to access objects on the stack, since this callback
// is guaranteed to be executed in the same thread
if (selected_peer == nullptr)
{
if (!pptr) DEBUG("pptr == nullptr");
throw std::logic_error("selected_peer == nullptr");
}
pptr->enqueue(nullptr, make_tuple(atom(":Monitor")));
selected_peer->add_child(pptr);
selected_peer->inc_ref_count();
auto msock = selected_peer->get_socket();
pptr->attach_functor([msock, thread_id,
&released_socks,
pipe_write_handle] (std::uint32_t)
auto aid = pptr->id();
auto pptr_copy = pptr;
pptr->attach_functor([&msg_queue,aid,pipe_write_handle,pptr_copy] (std::uint32_t)
{
if (this_thread::get_id() == thread_id)
{
released_socks.push_back(msock);
}
else
{
pipe_msg msg = { dec_socket_ref_event,
static_cast<std::uint32_t>(msock) };
write(pipe_write_handle, msg, pipe_msg_size);
}
// this callback is not guaranteed to be executed in the same thread
msg_queue.push_back(new post_office_msg(pptr_copy));
pipe_msg msg = { rd_queue_event, 0 };
write(pipe_write_handle, msg, pipe_msg_size);
});
});
for (;;)
......@@ -513,7 +496,7 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
perror("select()");
exit(3);
}
// iterate over all peers and remove peers on error
// iterate over all peers and remove peers on errors
peers.remove_if([&](po_peer& peer) -> bool
{
if (FD_ISSET(peer.get_socket(), &readset))
......@@ -525,6 +508,7 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
});
selected_peer = nullptr;
// iterate over all doormen (accept new connections)
// and remove doormen on errors
for (auto& kvp : doormen)
{
// iterate over all doormen and remove doormen on error
......@@ -549,23 +533,21 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
post_office_msg* pom = msg_queue.pop();
if (pom->is_add_peer_msg())
{
//DEBUG("pom->is_add_peer_msg()");
DEBUG("add_peer_msg");
auto& apm = pom->as_add_peer_msg();
actor_proxy_ptr pptr = apm.first_peer_actor;
po_peer pd(apm);
selected_peer = &pd;
peers.push_back(po_peer(apm));
selected_peer = &(peers.back());
if (pptr)
{
DEBUG("proxy added via post_office_msg");
get_actor_proxy_cache().add(pptr);
}
selected_peer = nullptr;
peers.push_back(std::move(pd));
DEBUG("new peer (remote_actor)");
}
else
else if (pom->is_add_server_socket_msg())
{
//DEBUG("pom->is_add_peer_msg() == false");
DEBUG("add_server_socket_msg");
auto& assm = pom->as_add_server_socket_msg();
auto& pactor = assm.published_actor;
if (pactor)
......@@ -588,6 +570,46 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
DEBUG("nullptr published");
}
}
else if (pom->is_proxy_exited_msg())
{
DEBUG("proxy_exited_msg");
auto pptr = std::move(pom->as_proxy_exited_msg().proxy_ptr);
if (pptr)
{
// get parent of pptr
auto i = peers.begin();
auto end = peers.end();
DEBUG("search parent of exited proxy");
while (i != end)
{
auto result = i->remove_child(pptr);
if (result.first) // true if pptr is a child
{
DEBUG("found parent of proxy");
if (result.second == 0) // no more children?
{
// disconnect peer if we don't know any
// actor of it and if the published
// actor already exited
// (this is the case if the peer doesn't
// have a parent)
if (i->has_parent() == false)
{
DEBUG("removed peer");
peers.erase(i);
}
}
i = end; // done
}
else
{
DEBUG("... next iteration");
++i; // next iteration
}
}
}
else DEBUG("pptr == nullptr");
}
delete pom;
break;
}
......@@ -597,26 +619,26 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
auto kvp = doormen.find(pmsg[1]);
if (kvp != doormen.end())
{
// decrease ref count of all children of this doorman
DEBUG("erase doorman from map");
for (po_doorman& dm : kvp->second)
{
auto parent_fd = dm.get_socket();
peers.remove_if([parent_fd](po_peer& ppeer) -> bool
// remove peers with no children and no parent
// (that are peers that connected to an already
// exited actor and where we don't know any
// actor from)
peers.remove_if([&](po_peer& ppeer)
{
return ppeer.parent_exited(parent_fd) == 0;
return ppeer.parent_exited(dm)
&& ppeer.children_count() == 0;
});
}
doormen.erase(kvp);
}
break;
}
case dec_socket_ref_event:
{
release_socket(static_cast<native_socket_t>(pmsg[1]));
break;
}
case close_socket_event:
{
DEBUG("close_socket_event");
auto sockfd = static_cast<native_socket_t>(pmsg[1]);
auto end = peers.end();
auto i = std::find_if(peers.begin(), end,
......@@ -640,14 +662,6 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
}
}
}
if (released_socks.empty() == false)
{
for (native_socket_t sockfd : released_socks)
{
release_socket(sockfd);
}
released_socks.clear();
}
// recalculate readset
FD_ZERO(&readset);
FD_SET(pipe_read_handle, &readset);
......@@ -658,7 +672,7 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
if (fd > maxfd) maxfd = fd;
FD_SET(fd, &readset);
}
// iterate over key (actor id) value (doormen) pairs
// iterate over key-value (actor id / doormen) pairs
for (auto& kvp : doormen)
{
// iterate over values (doormen)
......@@ -672,6 +686,11 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
}
}
/******************************************************************************
* remaining implementation of post_office.hpp *
* (forward each function call to our queue) *
******************************************************************************/
void post_office_add_peer(native_socket_t a0,
const process_information_ptr& a1,
const actor_proxy_ptr& a2,
......
......@@ -25,27 +25,45 @@ post_office_msg::post_office_msg(native_socket_t arg0,
const actor_proxy_ptr& arg2,
std::unique_ptr<attachable>&& arg3)
: next(nullptr)
, m_is_add_peer_msg(true)
, m_type(add_peer_type)
{
new (&m_add_peer_msg) add_peer(arg0, arg1, arg2, std::move(arg3));
}
post_office_msg::post_office_msg(native_socket_t arg0, const actor_ptr& arg1)
: next(nullptr)
, m_is_add_peer_msg(false)
, m_type(add_server_socket_type)
{
new (&m_add_server_socket) add_server_socket(arg0, arg1);
}
// constructs a proxy_exited notification; the tag is set first, then
// placement-new initializes the matching union member
post_office_msg::post_office_msg(const actor_proxy_ptr& proxy_ptr)
: next(nullptr)
, m_type(proxy_exited_type)
{
new (&m_proxy_exited) proxy_exited(proxy_ptr);
}
post_office_msg::~post_office_msg()
{
if (m_is_add_peer_msg)
{
m_add_peer_msg.~add_peer();
}
else
switch (m_type)
{
m_add_server_socket.~add_server_socket();
case add_peer_type:
{
m_add_peer_msg.~add_peer();
break;
}
case add_server_socket_type:
{
m_add_server_socket.~add_server_socket();
break;
}
case proxy_exited_type:
{
m_proxy_exited.~proxy_exited();
break;
}
default: throw std::logic_error("invalid post_office_msg type");
}
}
......
......@@ -88,7 +88,7 @@ void publish(actor_ptr& whom, std::uint16_t port)
{
throw network_exception("could not create server socket");
}
// closes the socket if an exception occurs
// sguard closes the socket if an exception occurs
socket_guard sguard(sockfd);
memset((char*) &serv_addr, 0, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
......
......@@ -649,7 +649,7 @@ class uniform_type_info_map_helper
insert(d, new channel_ptr_tinfo, { raw_name<channel_ptr>() });
//insert(d, new message_tinfo, { raw_name<any_tuple>() });
insert(d, new atom_value_tinfo, { raw_name<atom_value>() });
insert(d, new addr_msg_tinfo, { raw_name<addr_msg_tinfo>() });
insert(d, new addr_msg_tinfo, {raw_name<detail::addressed_message>()});
insert<float>(d);
insert<cppa::util::void_type>(d);
if (sizeof(double) == sizeof(long double))
......
......@@ -42,7 +42,6 @@ void client_part(const std::map<std::string, std::string>& args)
size_t test__remote_actor(const char* app_path, bool is_client,
const std::map<std::string, std::string>& args)
{
return 0;
if (is_client)
{
client_part(args);
......@@ -70,7 +69,7 @@ size_t test__remote_actor(const char* app_path, bool is_client,
std::string cmd;
{
std::ostringstream oss;
oss << app_path << " run=remote_actor port=" << port << " &>/dev/null";
oss << app_path << " run=remote_actor port=" << port << " &>remote.txt" ;//" &>/dev/null";
cmd = oss.str();
}
// execute client_part() in a separate process,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment