Commit 8a3e7944 authored by Dominik Charousset

improved logging & fixed mm bug

this fixes a bug where the MM (middleman) accessed a peer object after it had been disposed
parent 7feab2ad
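The gist of the fix shows up in the middleman.cpp and peer.cpp hunks below: peer::dispose() now calls parent()->del_peer(this) before delete this, so the middleman drops its map entry while the pointer is still valid instead of finding a dangling one later. The following standalone sketch only mirrors that cleanup order; simple_middleman and simple_peer are illustrative stand-ins, not the actual libcppa classes.

#include <iostream>
#include <map>
#include <string>
#include <utility>

// Stand-in types; the real node_id/peer/middleman classes differ, this only
// mirrors the cleanup order established by the commit.
struct simple_peer;

struct simple_middleman {
    std::map<std::string, simple_peer*> peers; // node id -> peer (non-owning)
    void del_peer(simple_peer* ptr);           // counterpart of middleman::del_peer()
};

struct simple_peer {
    simple_middleman* parent;
    std::string node;
    simple_peer(simple_middleman* mm, std::string n)
        : parent(mm), node(std::move(n)) {
        parent->peers[node] = this;
    }
    // mirrors peer::dispose() after the fix: deregister first, delete second
    void dispose() {
        parent->del_peer(this);
        delete this;
    }
};

void simple_middleman::del_peer(simple_peer* ptr) {
    auto i = peers.find(ptr->node);
    // erase only if the map still points at exactly this instance
    if (i != peers.end() && i->second == ptr) peers.erase(i);
}

int main() {
    simple_middleman mm;
    auto* p = new simple_peer(&mm, "node0");
    p->dispose(); // no dangling pointer is left behind in mm.peers
    std::cout << "peers left: " << mm.peers.size() << '\n'; // prints 0
}
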
......@@ -186,15 +186,19 @@ class proper_actor : public proper_actor_base<Base,
// required by event_based_resume::mixin::resume
bool invoke_message(unique_mailbox_element_pointer& ptr) {
CPPA_LOG_TRACE("");
auto bhvr = this->bhvr_stack().back();
auto mid = this->bhvr_stack().back_id();
return this->invoke_policy().invoke_message(this, ptr, bhvr, mid);
}
bool invoke_message_from_cache() {
CPPA_LOG_TRACE("");
auto bhvr = this->bhvr_stack().back();
auto mid = this->bhvr_stack().back_id();
auto e = this->cache_end();
CPPA_LOG_DEBUG(std::distance(this->cache_begin(), e)
<< " elements in cache");
for (auto i = this->cache_begin(); i != e; ++i) {
if (this->invoke_policy().invoke_message(this, *i, bhvr, mid)) {
this->cache_erase(i);
......@@ -207,6 +211,7 @@ class proper_actor : public proper_actor_base<Base,
// implement pure virtual functions from behavior_stack_based
void become_waiting_for(behavior bhvr, message_id mf) override {
CPPA_LOG_TRACE(CPPA_MARG(mf, integer_value));
if (bhvr.timeout().valid()) {
if (bhvr.timeout().valid()) {
this->reset_timeout();
......@@ -218,6 +223,7 @@ class proper_actor : public proper_actor_base<Base,
}
void do_become(behavior bhvr, bool discard_old) override {
CPPA_LOG_TRACE(CPPA_ARG(discard_old));
//if (discard_old) m_bhvr_stack.pop_async_back();
//m_bhvr_stack.push_back(std::move(bhvr));
if (discard_old) this->m_bhvr_stack.pop_async_back();
......
......@@ -61,7 +61,7 @@ class middleman_event_handler;
typedef intrusive_ptr<input_stream> input_stream_ptr;
typedef intrusive_ptr<output_stream> output_stream_ptr;
/**
* @brief Multiplexes asynchronous IO.
* @note No member function except for @p run_later is safe to call from
......@@ -74,7 +74,7 @@ class middleman {
public:
virtual ~middleman();
/**
* @brief Runs @p fun in the event loop of the middleman.
* @note This member function is thread-safe.
......@@ -119,18 +119,24 @@ class middleman {
* @brief Registers a new peer, i.e., a new node in the network.
*/
virtual void register_peer(const node_id& node, peer* ptr) = 0;
/**
* @brief Returns the peer associated with given node id.
*/
virtual peer* get_peer(const node_id& node) = 0;
/**
* @brief This callback is used by peer_acceptor implementations to
* invoke cleanup code when disposed.
*/
virtual void del_acceptor(peer_acceptor* ptr) = 0;
/**
* @brief This callback is used by peer implementations to
* invoke cleanup code when disposed.
*/
virtual void del_peer(peer* ptr) = 0;
/**
* @brief Delivers a message to given node.
*/
......@@ -143,7 +149,7 @@ class middleman {
* and causes the middleman to disconnect from the node.
*/
virtual void last_proxy_exited(peer* ptr) = 0;
/**
*
*/
......@@ -157,21 +163,21 @@ class middleman {
* @note This member function is thread-safe.
*/
virtual void register_acceptor(const actor_addr& pa, peer_acceptor* ptr) = 0;
/**
* @brief Returns the namespace that contains all remote actors
* connected to this middleman.
*/
inline actor_namespace& get_namespace();
protected:
// creates a middleman instance
static middleman* create_singleton();
// destroys uninitialized instances
inline void dispose() { delete this; }
// destroys an initialized singleton
virtual void destroy() = 0;
......@@ -180,19 +186,18 @@ class middleman {
// each middleman defines its own namespace
actor_namespace m_namespace;
// the node id of this middleman
node_id_ptr m_node;
//
std::unique_ptr<middleman_event_handler> m_handler;
};
inline actor_namespace& middleman::get_namespace() {
return m_namespace;
}
} } // namespace cppa::io
#endif // MIDDLEMAN_HPP
......@@ -51,7 +51,7 @@
namespace cppa { namespace io {
class middleman_impl;
class peer : public extend<continuable>::with<buffered_writing> {
typedef combined_type super;
......@@ -107,6 +107,7 @@ class peer : public extend<continuable>::with<buffered_writing> {
default_message_queue_ptr m_queue;
inline default_message_queue& queue() {
CPPA_REQUIRE(m_queue != nullptr);
return *m_queue;
}
......
......@@ -48,7 +48,7 @@ class basic_memory_cache;
namespace cppa { namespace io {
class middleman;
class sync_request_info : public extend<memory_managed>::with<memory_cached> {
friend class detail::memory;
......@@ -101,7 +101,7 @@ class remote_actor_proxy : public actor_proxy {
void forward_msg(const message_header& hdr, any_tuple msg);
middleman* m_parent;
middleman* m_parent;
intrusive::single_reader_queue<sync_request_info, detail::disposer> m_pending_requests;
};
......
......@@ -94,20 +94,27 @@ class event_based_resume {
auto ptr = d->next_message();
if (ptr) {
CPPA_REQUIRE(!d->bhvr_stack().empty());
bool continue_from_cache = false;
if (d->invoke_message(ptr)) {
continue_from_cache = true;
if (actor_done() && done_cb()) {
CPPA_LOG_DEBUG("actor exited");
return resume_result::done;
}
// continue from cache if current message was
// handled, because the actor might have changed
// its behavior to match 'old' messages now
while (d->invoke_message_from_cache()) {
// rinse and repeat
if (actor_done() && done_cb()) {
CPPA_LOG_DEBUG("actor exited");
return resume_result::done;
}
}
}
// add ptr to cache if invoke_message
// did not reset it (i.e. skipped, but not dropped)
if (ptr) d->push_to_cache(std::move(ptr));
if (ptr) {
CPPA_LOG_DEBUG("add message to cache");
d->push_to_cache(std::move(ptr));
}
}
else {
CPPA_LOG_DEBUG("no more element in mailbox; going to block");
......
......@@ -67,7 +67,7 @@ class not_prioritizing {
}
inline cache_iterator cache_end() {
return m_cache.begin();
return m_cache.end();
}
inline void cache_erase(cache_iterator iter) {
......
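The two hunks above belong together: invoke_message_from_cache() re-runs messages that an earlier behavior skipped, and the not_prioritizing::cache_end() fix (it previously returned m_cache.begin(), so the retry loop never saw a single cached element) makes that scan actually cover the cache. A minimal sketch of the skip-and-retry idea, using a toy_actor stand-in rather than the real proper_actor machinery:

#include <deque>
#include <functional>
#include <iostream>
#include <string>
#include <utility>

// Toy mailbox: messages the current behavior cannot handle are cached
// ("skipped, but not dropped") and retried after the behavior changes.
struct toy_actor {
    std::function<bool(const std::string&)> behavior; // returns true if handled
    std::deque<std::string> cache;                    // skipped messages

    void deliver(const std::string& msg) {
        if (!behavior || !behavior(msg)) cache.push_back(msg);
    }

    void become(std::function<bool(const std::string&)> bhvr) {
        behavior = std::move(bhvr);
        // the new behavior might match 'old' messages now
        for (auto i = cache.begin(); i != cache.end();) {
            if (behavior(*i)) i = cache.erase(i);
            else ++i;
        }
    }
};

int main() {
    toy_actor a;
    a.become([](const std::string& m) { return m == "ping"; });
    a.deliver("pong"); // skipped: goes to the cache
    a.become([](const std::string& m) {
        if (m == "pong") { std::cout << "handled cached pong\n"; return true; }
        return false;
    }); // behavior change drains the matching cached message
}
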
......@@ -49,10 +49,7 @@
namespace cppa {
/**
* @ingroup ActorCreation
* @{
*/
namespace detail {
template<class Impl, spawn_options Options, typename BeforeLaunch, typename... Ts>
actor spawn_impl(BeforeLaunch before_launch_fun, Ts&&... args) {
......@@ -134,6 +131,8 @@ struct spawn_fwd<scoped_actor> {
static inline actor fwd(T& arg) { return arg; }
};
// forwards the arguments to spawn_impl, replacing pointers
// to actors with instances of 'actor'
template<class Impl, spawn_options Options, typename BeforeLaunch, typename... Ts>
actor spawn_fwd_args(BeforeLaunch before_launch_fun, Ts&&... args) {
return spawn_impl<Impl, Options>(
......@@ -142,6 +141,13 @@ actor spawn_fwd_args(BeforeLaunch before_launch_fun, Ts&&... args) {
std::forward<Ts>(args))...);
}
} // namespace detail
/**
* @ingroup ActorCreation
* @{
*/
/**
* @brief Spawns an actor of type @p Impl.
* @param args Constructor arguments.
......@@ -151,7 +157,7 @@ actor spawn_fwd_args(BeforeLaunch before_launch_fun, Ts&&... args) {
*/
template<class Impl, spawn_options Options, typename... Ts>
actor spawn(Ts&&... args) {
return spawn_fwd_args<Impl, Options>(
return detail::spawn_fwd_args<Impl, Options>(
[](local_actor*) { /* no-op as BeforeLaunch callback */ },
std::forward<Ts>(args)...);
}
......@@ -190,7 +196,7 @@ actor spawn_in_group(const group_ptr& grp, Ts&&... args) {
detail::functor_based_blocking_actor,
detail::functor_based_actor
>::type;
return spawn_fwd_args<base_class, Options>(
return detail::spawn_fwd_args<base_class, Options>(
[&](local_actor* ptr) { ptr->join(grp); },
std::forward<Ts>(args)...);
}
......@@ -205,7 +211,7 @@ actor spawn_in_group(const group_ptr& grp, Ts&&... args) {
*/
template<class Impl, spawn_options Options, typename... Ts>
actor spawn_in_group(const group_ptr& grp, Ts&&... args) {
return spawn_fwd_args<Impl, Options>(
return detail::spawn_fwd_args<Impl, Options>(
[&](local_actor* ptr) { ptr->join(grp); },
std::forward<Ts>(args)...);
}
......
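For context on the spawn.hpp changes above: everything funnels into detail::spawn_fwd_args, which forwards a BeforeLaunch callback that runs on the freshly created actor before it starts; plain spawn passes a no-op, while spawn_in_group passes a lambda that joins the group. A reduced sketch of that hook pattern, with hypothetical stand-ins (spawn_with_hook, toy_actor) in place of the real templates:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins: spawn_with_hook plays the role of spawn_fwd_args /
// spawn_impl, toy_actor the role of local_actor. Only the hook idea is real.
struct toy_actor {
    std::vector<std::string> groups;
    void join(const std::string& grp) { groups.push_back(grp); }
};

template<class BeforeLaunch>
toy_actor* spawn_with_hook(BeforeLaunch before_launch) {
    auto* ptr = new toy_actor;
    before_launch(ptr); // runs before the actor is "launched"
    // ... launching/scheduling would happen here ...
    return ptr;
}

toy_actor* spawn_plain() {
    return spawn_with_hook([](toy_actor*) { /* no-op, like plain spawn */ });
}

toy_actor* spawn_in_group(const std::string& grp) {
    return spawn_with_hook([&](toy_actor* ptr) { ptr->join(grp); });
}

int main() {
    toy_actor* a = spawn_in_group("workers");
    std::cout << "joined " << a->groups.size() << " group(s)\n"; // prints 1
    delete a;
    delete spawn_plain();
}
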
......@@ -177,7 +177,8 @@ actor_addr abstract_actor::address() const {
void abstract_actor::cleanup(std::uint32_t reason) {
// log as 'actor'
CPPA_LOGM_TRACE("cppa::actor", CPPA_ARG(m_id) << ", " << CPPA_ARG(reason));
CPPA_LOGM_TRACE("cppa::actor", CPPA_ARG(m_id) << ", " << CPPA_ARG(reason)
<< ", " << CPPA_ARG(m_is_proxy));
CPPA_REQUIRE(reason != exit_reason::not_exited);
// move everything out of the critical section before processing it
decltype(m_links) mlinks;
......@@ -207,7 +208,7 @@ void abstract_actor::cleanup(std::uint32_t reason) {
aptr->enqueue({address(), aptr, message_id{}.with_high_priority()}, msg);
}
CPPA_LOGM_DEBUG("cppa::actor", "run " << mattachables.size()
<< "attachables");
<< " attachables");
for (attachable_ptr& ptr : mattachables) {
ptr->actor_exited(reason);
}
......
......@@ -145,8 +145,9 @@ class middleman_impl : public middleman {
m_queue.enqueue(new middleman_event(move(fun)));
atomic_thread_fence(memory_order_seq_cst);
uint8_t dummy = 0;
auto res = ::write(m_pipe_write, &dummy, sizeof(dummy));
// ignore result; write error only means middleman already exited
static_cast<void>(::write(m_pipe_write, &dummy, sizeof(dummy)));
static_cast<void>(res);
}
void register_peer(const node_id& node, peer* ptr) override {
......@@ -172,6 +173,7 @@ class middleman_impl : public middleman {
CPPA_LOG_TRACE(CPPA_TARG(node, to_string));
auto i = m_peers.find(node);
if (i != m_peers.end()) {
CPPA_REQUIRE(i->second.impl != nullptr);
CPPA_LOG_DEBUG("result = " << i->second.impl);
return i->second.impl;
}
......@@ -210,23 +212,15 @@ class middleman_impl : public middleman {
void last_proxy_exited(peer* pptr) override {
CPPA_REQUIRE(pptr != nullptr);
CPPA_REQUIRE(pptr->m_queue != nullptr);
CPPA_LOG_TRACE(CPPA_ARG(pptr)
<< ", pptr->node() = " << to_string(pptr->node()));
if (pptr->erase_on_last_proxy_exited() && pptr->queue().empty()) {
stop_reader(pptr);
auto i = m_peers.find(pptr->node());
if (i != m_peers.end()) {
CPPA_LOG_DEBUG_IF(i->second.impl != pptr,
"node " << to_string(pptr->node())
<< " does not exist in m_peers");
if (i->second.impl == pptr) {
m_peers.erase(i);
}
}
del_peer(pptr);
}
}
void new_peer(const input_stream_ptr& in,
const output_stream_ptr& out,
const node_id_ptr& node = nullptr) override {
......@@ -236,6 +230,19 @@ class middleman_impl : public middleman {
if (node) register_peer(*node, ptr);
}
void del_peer(peer* pptr) override {
CPPA_LOG_TRACE(CPPA_ARG(pptr));
auto i = m_peers.find(pptr->node());
if (i != m_peers.end()) {
CPPA_LOG_DEBUG_IF(i->second.impl != pptr,
"node " << to_string(pptr->node())
<< " does not exist in m_peers");
if (i->second.impl == pptr) {
m_peers.erase(i);
}
}
}
void register_acceptor(const actor_addr& whom, peer_acceptor* ptr) override {
run_later([=] {
CPPA_LOGC_TRACE("cppa::io::middleman",
......
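The run_later() hunk near the top of this file keeps the usual self-pipe wake-up: enqueue the task, issue a fence, then write one byte to a pipe the event loop also polls; a failed write is ignored because it only means the middleman has already shut down. A rough, POSIX-only sketch of that wake-up step, independent of libcppa:

#include <unistd.h>

#include <atomic>
#include <cstdint>
#include <iostream>

int main() {
    int fds[2];
    if (pipe(fds) != 0) return 1;
    // producer side (run_later): publish the queued task, then wake the loop
    std::atomic_thread_fence(std::memory_order_seq_cst);
    std::uint8_t dummy = 0;
    // ignore the result: a failed write only means the event loop already exited
    static_cast<void>(write(fds[1], &dummy, sizeof(dummy)));
    // consumer side (event loop): poll()/epoll_wait() would return, then drain
    std::uint8_t received = 0;
    static_cast<void>(read(fds[0], &received, sizeof(received)));
    std::cout << "event loop woken up\n";
    close(fds[0]);
    close(fds[1]);
}
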
......@@ -257,9 +257,10 @@ void peer::kill_proxy(const actor_addr& sender,
send_as(proxy, proxy, atom("KILL_PROXY"), reason);
}
else {
CPPA_LOG_INFO("received KILL_PROXY message but "
"didn't found a matching instance "
"in proxy cache");
CPPA_LOG_INFO("received KILL_PROXY for " << aid
<< ":" << to_string(*node)
<< "but didn't found a matching instance "
<< "in proxy cache");
}
}
......@@ -378,7 +379,9 @@ void peer::enqueue(const message_header& hdr, const any_tuple& msg) {
}
void peer::dispose() {
CPPA_LOG_TRACE(CPPA_ARG(this));
parent()->get_namespace().erase(*m_node);
parent()->del_peer(this);
delete this;
}
......
......@@ -131,6 +131,7 @@ void remote_actor_proxy::forward_msg(const message_header& hdr, any_tuple msg) {
}
void remote_actor_proxy::enqueue(const message_header& hdr, any_tuple msg) {
CPPA_REQUIRE(m_parent != nullptr);
CPPA_LOG_TRACE(CPPA_TARG(hdr, to_string) << ", " << CPPA_TARG(msg, to_string));
auto& arr = detail::static_types_array<atom_value, uint32_t>::arr;
if ( msg.size() == 2
......
......@@ -59,8 +59,8 @@ void scoped_actor::init(bool hidden) {
m_self.reset(alloc());
if (!m_hidden) {
get_actor_registry()->inc_running();
m_prev = CPPA_SET_AID(m_self->id());
}
m_prev = CPPA_SET_AID(m_self->id());
}
......@@ -75,8 +75,8 @@ scoped_actor::scoped_actor(bool hidden) {
scoped_actor::~scoped_actor() {
if (!m_hidden) {
get_actor_registry()->dec_running();
CPPA_SET_AID(m_prev);
}
CPPA_SET_AID(m_prev);
}
} // namespace cppa
......@@ -45,7 +45,10 @@ void spawn5_server_impl(untyped_actor* self, actor client, group_ptr grp) {
if (vec.size() != 5) {
CPPA_PRINTERR("remote client did not spawn five reflectors!");
}
for (auto& a : vec) self->monitor(a);
for (auto& a : vec) {
CPPA_PRINT("monitor actor: " << to_string(a));
self->monitor(a);
}
},
others() >> [=] {
CPPA_UNEXPECTED_MSG();
......@@ -78,13 +81,13 @@ void spawn5_server_impl(untyped_actor* self, actor client, group_ptr grp) {
},
others() >> [=] {
CPPA_UNEXPECTED_MSG();
self->quit(exit_reason::unhandled_exception);
//self->quit(exit_reason::unhandled_exception);
},
after(chrono::seconds(2)) >> [=] {
CPPA_UNEXPECTED_TOUT();
CPPA_LOGF_ERROR("did only receive " << *downs
<< " down messages");
self->quit(exit_reason::unhandled_exception);
//self->quit(exit_reason::unhandled_exception);
}
);
}
......@@ -93,7 +96,7 @@ void spawn5_server_impl(untyped_actor* self, actor client, group_ptr grp) {
CPPA_UNEXPECTED_TOUT();
CPPA_LOGF_ERROR("did only receive " << *replies
<< " responses to 'Hello reflectors!'");
self->quit(exit_reason::unhandled_exception);
//self->quit(exit_reason::unhandled_exception);
}
);
});
......@@ -322,11 +325,17 @@ int main(int argc, char** argv) {
announce_tuple<atom_value, atom_value, int>();
string app_path = argv[0];
bool run_remote_actor = true;
bool run_as_server = false;
if (argc > 1) {
if (strcmp(argv[1], "run_remote_actor=false") == 0) {
CPPA_LOGF_INFO("don't run remote actor");
run_remote_actor = false;
}
else if (strcmp(argv[1], "run_as_server") == 0) {
CPPA_LOGF_INFO("don't run remote actor");
run_remote_actor = false;
run_as_server = true;
}
else {
run_client_part(get_kv_pairs(argc, argv), [](uint16_t port) {
scoped_actor self;
......@@ -351,8 +360,6 @@ int main(int argc, char** argv) {
return CPPA_TEST_RESULT();
}
}
CPPA_TEST(test_remote_actor);
thread child;
{ // lifetime scope of self
scoped_actor self;
auto serv = self->spawn<server, monitored>();
......@@ -370,34 +377,40 @@ int main(int argc, char** argv) {
}
}
while (!success);
ostringstream oss;
if (run_remote_actor) {
oss << app_path << " run=remote_actor port=" << port << " &>/dev/null";
// execute client_part() in a separate process,
// connected via localhost socket
child = thread([&oss]() {
CPPA_LOGC_TRACE("NONE", "main$thread_launcher", "");
string cmdstr = oss.str();
if (system(cmdstr.c_str()) != 0) {
CPPA_PRINTERR("FATAL: command \"" << cmdstr << "\" failed!");
abort();
do {
CPPA_TEST(test_remote_actor);
thread child;
ostringstream oss;
if (run_remote_actor) {
oss << app_path << " run=remote_actor port=" << port << " &>/dev/null";
// execute client_part() in a separate process,
// connected via localhost socket
child = thread([&oss]() {
CPPA_LOGC_TRACE("NONE", "main$thread_launcher", "");
string cmdstr = oss.str();
if (system(cmdstr.c_str()) != 0) {
CPPA_PRINTERR("FATAL: command \"" << cmdstr << "\" failed!");
abort();
}
});
}
});
else { CPPA_PRINT("actor published at port " << port); }
CPPA_CHECKPOINT();
self->receive (
on(atom("DOWN"), arg_match) >> [&](uint32_t rsn) {
CPPA_CHECK_EQUAL(self->last_sender(), serv);
CPPA_CHECK_EQUAL(rsn, exit_reason::normal);
}
);
// wait until separate process (in sep. thread) finished execution
CPPA_CHECKPOINT();
if (run_remote_actor) child.join();
CPPA_CHECKPOINT();
self->await_all_other_actors_done();
}
else { CPPA_PRINT("actor published at port " << port); }
CPPA_CHECKPOINT();
self->receive (
on(atom("DOWN"), arg_match) >> [&](uint32_t rsn) {
CPPA_CHECK_EQUAL(self->last_sender(), serv);
CPPA_CHECK_EQUAL(rsn, exit_reason::normal);
}
);
while (run_as_server);
} // lifetime scope of self
// wait until separate process (in sep. thread) finished execution
await_all_actors_done();
CPPA_CHECKPOINT();
if (run_remote_actor) child.join();
CPPA_CHECKPOINT();
shutdown();
return CPPA_TEST_RESULT();
}