Commit eb2b833a authored by Dominik Charousset

auto-reply EXITED to orphaned sync requests

this patch enables libcppa's runtime system to automatically reply with an
'EXITED' message to synchronous requests whose receiver has already
finished execution; this is essential for making timeout definitions
for synchronous receive operations optional
parent e149348b
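
For context, a minimal usage sketch (hypothetical 'worker' actor; the matching pattern is taken from the updated unit test at the bottom of this diff). After this patch, a synchronous request to an actor that has already finished execution is answered automatically, so no after(...) clause is required to detect a dead receiver:

    // hedged sketch, not part of the diff: 'worker' has already called quit(),
    // so the runtime bounces the request instead of staying silent
    sync_send(worker, atom("compute"), 42).await(
        on(atom("EXITED"), any_vals) >> [] {
            // request was orphaned; the tuple carries the receiver's exit reason
        },
        others() >> [] {
            // normal reply path
        }
    );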
......@@ -199,6 +199,7 @@ examples/type_system/announce_2.cpp
examples/type_system/announce_3.cpp
examples/type_system/announce_4.cpp
examples/type_system/announce_5.cpp
src/abstract_scheduled_actor.cpp
src/abstract_tuple.cpp
src/actor.cpp
src/actor_addressing.cpp
......
......@@ -35,6 +35,8 @@
#include "cppa/weak_intrusive_ptr.hpp"
#include "cppa/enable_weak_ptr_mixin.hpp"
#include "cppa/network/message_header.hpp"
namespace cppa {
class actor_proxy_cache;
......@@ -59,6 +61,17 @@ class actor_proxy : public enable_weak_ptr_mixin<actor> {
*/
virtual void local_unlink_from(const actor_ptr& other) = 0;
/**
* @brief Delivers given message via this proxy instance.
*
* This function is meant to give the proxy the opportunity to keep track
* of synchronous communication or perform other bookkeeping if needed.
* The member function is called by the protocol from inside the
* middleman's thread.
* @note This function is guaranteed to be called non-concurrently.
*/
virtual void deliver(const network::message_header& hdr, any_tuple msg) = 0;
protected:
actor_proxy(actor_id mid);
......
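
A hypothetical minimal override illustrating the contract above (the real bookkeeping variant is default_actor_proxy::deliver further down in this diff); a proxy with nothing to track can simply defer to the header's routing:

    // hypothetical subclass; message_header::deliver (added below) picks
    // enqueue vs. sync_enqueue based on the message id
    void my_proxy::deliver(const network::message_header& hdr, any_tuple msg) {
        hdr.deliver(std::move(msg));
    }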
......@@ -57,50 +57,41 @@ namespace cppa { class self_type; }
namespace cppa { namespace detail {
template<class Base, bool IsLocalActor>
class abstract_actor_base : public Base {
protected:
template<typename... Args>
abstract_actor_base(Args&&... args) : Base(std::forward<Args>(args)...) { }
inline void base_cleanup(std::uint32_t) { }
};
template<class Base>
class abstract_actor_base<Base, true> : public Base {
protected:
template<typename... Args>
abstract_actor_base(Args&&... args) : Base(std::forward<Args>(args)...) { }
typedef intrusive::single_reader_queue<recursive_queue_node,disposer>
default_mailbox_impl;
inline void base_cleanup(std::uint32_t) {
// leave groups
this->m_subscriptions.clear();
struct sync_request_bouncer {
actor* ptr;
std::uint32_t rsn;
inline sync_request_bouncer(actor* p, std::uint32_t r) : ptr(p), rsn(r) { }
inline void operator()(actor* sender, const message_id_t& mid) const {
CPPA_REQUIRE(rsn != exit_reason::not_exited);
if (mid.is_request() && sender != nullptr) {
sender->sync_enqueue(ptr, mid.response_id(),
make_cow_tuple(atom("EXITED"), rsn));
}
}
inline void operator()(const recursive_queue_node& e) const {
(*this)(e.sender.get(), e.mid);
}
};
typedef intrusive::single_reader_queue<recursive_queue_node,disposer>
default_mailbox_impl;
/*
* @brief Implements linking and monitoring for actors.
* @tparam Base Either {@link cppa::actor actor}
* or {@link cppa::local_actor local_actor}.
*/
template<class Base, class Mailbox = default_mailbox_impl>
class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_actor, Base>::value> {
class abstract_actor : public Base {
friend class ::cppa::self_type;
typedef abstract_actor_base<Base, std::is_base_of<local_actor, Base>::value> super;
typedef Base super;
typedef std::lock_guard<std::mutex> guard_type;
typedef std::unique_ptr<attachable> attachable_ptr;
static constexpr bool is_local_actor = std::is_base_of<local_actor,Base>::value;
public:
typedef Mailbox mailbox_type;
......@@ -184,7 +175,12 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac
}
~abstract_actor() {
m_mailbox.clear();
if (!m_mailbox.closed()) {
auto rsn = m_exit_reason.load();
if (rsn == exit_reason::not_exited) rsn = exit_reason::normal;
sync_request_bouncer f{this, rsn};
m_mailbox.close(f);
}
}
protected:
......@@ -204,9 +200,16 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac
: super(std::forward<Args>(args)...)
, m_exit_reason(exit_reason::not_exited){ }
inline void base_cleanup(std::true_type) {
this->m_subscriptions.clear();
}
inline void base_cleanup(std::false_type) { }
void cleanup(std::uint32_t reason) {
if (reason == exit_reason::not_exited) return;
this->base_cleanup(reason);
std::integral_constant<bool,is_local_actor> token;
base_cleanup(token);
decltype(m_links) mlinks;
decltype(m_attachables) mattachables;
{ // lifetime scope of guard
......@@ -229,6 +232,9 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac
for (attachable_ptr& ptr : mattachables) {
ptr->actor_exited(reason);
}
std::atomic_thread_fence(std::memory_order_seq_cst);
sync_request_bouncer f{this, reason};
m_mailbox.close(f);
}
bool link_to_impl(const intrusive_ptr<actor>& other) {
......@@ -261,13 +267,15 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac
return false;
}
private:
std::uint32_t exit_reason() const { return m_exit_reason.load(); }
// @pre m_mtx.locked()
bool exited() const {
return m_exit_reason.load() != exit_reason::not_exited;
}
private:
// holds the exit reason; != not_exited once the actor has finished execution
std::atomic<std::uint32_t> m_exit_reason;
// guards access to m_exit_reason, m_subscriptions, and m_links
......
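
Condensed, every termination path now ends in the same two steps (a sketch using the names from the hunk above): bounce what is still queued, then close the mailbox so later senders fail fast at their enqueue call sites:

    // applies f to each remaining queue node, then marks the queue closed;
    // f replies ('EXITED', reason) to every pending sync request
    sync_request_bouncer f{this, reason};
    m_mailbox.close(f);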
......@@ -56,31 +56,13 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
std::atomic<int> m_state;
bool has_pending_timeout() {
inline bool has_pending_timeout() const {
return m_has_pending_timeout_request;
}
void request_timeout(const util::duration& d) {
if (d.valid()) {
if (d.is_zero()) {
// immediately enqueue timeout
auto node = super::fetch_node(this,
make_any_tuple(atom("TIMEOUT"),
++m_active_timeout_id));
this->m_mailbox.enqueue(node);
}
else {
get_scheduler()->delayed_send(
this, d,
make_any_tuple(
atom("TIMEOUT"), ++m_active_timeout_id));
}
m_has_pending_timeout_request = true;
}
else m_has_pending_timeout_request = false;
}
void request_timeout(const util::duration& d);
void reset_timeout() {
inline void reset_timeout() {
if (m_has_pending_timeout_request) {
++m_active_timeout_id;
m_has_pending_timeout_request = false;
......@@ -117,76 +99,25 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
static constexpr int pending = 0x03;
static constexpr int about_to_block = 0x04;
abstract_scheduled_actor(int state = done)
: super(true), m_state(state)
, m_has_pending_timeout_request(false)
, m_active_timeout_id(0) {
}
abstract_scheduled_actor(int state = done);
bool chained_enqueue(actor* sender, any_tuple msg) {
return enqueue_node(super::fetch_node(sender, std::move(msg)), pending);
}
bool chained_enqueue(actor* sender, any_tuple msg);
bool chained_sync_enqueue(actor* sender, message_id_t id, any_tuple msg) {
return enqueue_node(super::fetch_node(sender, std::move(msg), id), pending);
}
bool chained_sync_enqueue(actor* sender, message_id_t id, any_tuple msg);
void quit(std::uint32_t reason = exit_reason::normal) {
this->cleanup(reason);
throw actor_exited(reason);
}
void quit(std::uint32_t reason = exit_reason::normal);
void enqueue(actor* sender, any_tuple msg) {
enqueue_node(super::fetch_node(sender, std::move(msg)));
}
void enqueue(actor* sender, any_tuple msg);
void sync_enqueue(actor* sender, message_id_t id, any_tuple msg) {
enqueue_node(super::fetch_node(sender, std::move(msg), id));
}
void sync_enqueue(actor* sender, message_id_t id, any_tuple msg);
int compare_exchange_state(int expected, int new_value) {
int e = expected;
do {
if (m_state.compare_exchange_weak(e, new_value)) {
return new_value;
}
}
while (e == expected);
return e;
}
int compare_exchange_state(int expected, int new_value);
private:
bool enqueue_node(typename super::mailbox_element* node,
int next_state = ready) {
CPPA_REQUIRE(next_state == ready || next_state == pending);
CPPA_REQUIRE(node->marked == false);
if (this->m_mailbox.enqueue(node) == intrusive::first_enqueued) {
int state = m_state.load();
for (;;) {
switch (state) {
case blocked: {
if (m_state.compare_exchange_weak(state, next_state)) {
CPPA_REQUIRE(this->m_scheduler != nullptr);
if (next_state == ready) {
this->m_scheduler->enqueue(this);
}
return true;
}
break;
}
case about_to_block: {
if (m_state.compare_exchange_weak(state, ready)) {
return false;
}
break;
}
default: return false;
}
}
}
return false;
}
bool enqueue_node(recursive_queue_node* node,
int next_state = ready,
bool* failed = nullptr);
};
......
......@@ -67,10 +67,15 @@ class blocking_single_reader_queue {
return (timed_wait_for_data(abs_time)) ? try_pop() : nullptr;
}
void push_back(pointer new_element) {
if (m_impl.enqueue(new_element) == first_enqueued) {
lock_type guard(m_mtx);
m_cv.notify_one();
bool push_back(pointer new_element) {
switch (m_impl.enqueue(new_element)) {
case first_enqueued: {
lock_type guard(m_mtx);
m_cv.notify_one();
return true;
}
case queue_closed: return false;
default: return true;
}
}
......@@ -87,6 +92,10 @@ class blocking_single_reader_queue {
m_impl.close(f);
}
inline bool closed() const {
return m_impl.closed();
}
private:
// locked on enqueue/dequeue operations to/from an empty list
......
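
push_back now reports whether the element was accepted; a false return means the owner has closed the queue. The intended caller-side pattern, used verbatim by thread_mapped_actor::sync_enqueue below:

    // a failed push_back means the receiver's mailbox is closed; reply
    // ('EXITED', reason) immediately instead of dropping the request
    if (!m_mailbox.push_back(fetch_node(sender, std::move(msg), id))) {
        detail::sync_request_bouncer f{this, exit_reason()};
        f(sender, id);
    }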
......@@ -61,6 +61,45 @@ class single_reader_queue {
return take_head();
}
template<class UnaryPredicate>
void remove_if(UnaryPredicate f) {
pointer head = m_head;
pointer last = nullptr;
pointer p = m_head;
auto loop = [&]() -> bool {
while (p) {
if (f(*p)) {
if (last == nullptr) m_head = p->next;
else last->next = p->next; // unlink p; 'last' keeps pointing at the predecessor
m_delete(p);
return true;
}
else {
last = p;
p = p->next;
}
}
return false;
};
if (!loop()) {
// last points to the tail now
auto old_tail = last;
m_head = nullptr; // fetch_new_data assumes cached list to be empty
if (fetch_new_data()) {
last = nullptr;
p = m_head; // let p point to the first newly fetched element
loop();
// restore cached list
if (head) {
old_tail->next = m_head;
m_head = head;
}
}
else m_head = head;
}
}
// returns first_enqueued if the queue was empty
enqueue_result enqueue(pointer new_element) {
pointer e = m_stack.load();
......@@ -96,15 +135,15 @@ class single_reader_queue {
*/
// closes this queue and deletes all remaining elements
inline void close() {
fetch_new_data(nullptr);
clear_cached_elements();
if (fetch_new_data(nullptr)) clear_cached_elements();
}
// closes this queue and applies f to all remaining elements before deleting
template<typename F>
inline void close(const F& f) {
fetch_new_data(nullptr);
clear_cached_elements(f);
if (fetch_new_data(nullptr)) clear_cached_elements(f);
}
inline single_reader_queue() : m_head(nullptr) {
......@@ -112,10 +151,14 @@ class single_reader_queue {
}
inline void clear() {
fetch_new_data();
clear_cached_elements();
if (!closed()) {
clear_cached_elements();
if (fetch_new_data()) clear_cached_elements();
}
}
inline ~single_reader_queue() { clear(); }
private:
// exposed to "outside" access
......@@ -127,8 +170,13 @@ class single_reader_queue {
// atomically sets m_stack back and enqueues all elements to the cache
bool fetch_new_data(pointer end_ptr) {
CPPA_REQUIRE(m_head == nullptr);
CPPA_REQUIRE(end_ptr == nullptr || end_ptr == stack_end());
pointer e = m_stack.load();
// it's enough to check this once, since only the owner is allowed
// to close the queue and only the owner is allowed to call this
// member function
if (e == nullptr) return false;
while (e != end_ptr) {
if (m_stack.compare_exchange_weak(e, end_ptr)) {
while (e != stack_end()) {
......@@ -144,7 +192,9 @@ class single_reader_queue {
return false;
}
inline bool fetch_new_data() { return fetch_new_data(stack_end()); }
inline bool fetch_new_data() {
return fetch_new_data(stack_end());
}
pointer take_head() {
if (m_head != nullptr || fetch_new_data()) {
......
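
remove_if first scans the reader-side cache; on a miss it drains the producer stack once, scans the newly fetched nodes, and splices the original cache back in front. Its use in this patch is dropping a pending-request record once the matching response arrives, as in default_actor_proxy::deliver below:

    // erase the bookkeeping entry whose request id matches the response;
    // 'req' is the request id recovered from the response header
    m_pending_requests.remove_if([&](const sync_request_info& e) -> bool {
        return e.mid == req;
    });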
......@@ -37,8 +37,43 @@
#include "cppa/detail/abstract_actor.hpp"
namespace cppa { namespace detail {
class memory;
class instance_wrapper;
template<typename>
class basic_memory_cache;
} } // namespace cppa::detail
namespace cppa { namespace network {
class sync_request_info : public memory_managed {
public:
template<typename>
friend class detail::basic_memory_cache;
friend class detail::memory;
typedef sync_request_info* pointer;
pointer next; // intrusive next pointer
actor_ptr sender; // points to the sender of the message
message_id_t mid;
private:
sync_request_info(actor_ptr sptr, message_id_t id);
~sync_request_info();
// intrusive outer memory pointer
detail::instance_wrapper* outer_memory;
};
class default_actor_proxy : public detail::abstract_actor<actor_proxy> {
typedef detail::abstract_actor<actor_proxy> super;
......@@ -65,6 +100,8 @@ class default_actor_proxy : public detail::abstract_actor<actor_proxy> {
void local_unlink_from(const actor_ptr& other);
void deliver(const network::message_header& hdr, any_tuple msg);
inline const process_information_ptr& process_info() const {
return m_pinf;
}
......@@ -81,6 +118,7 @@ class default_actor_proxy : public detail::abstract_actor<actor_proxy> {
default_protocol_ptr m_proto;
process_information_ptr m_pinf;
intrusive::single_reader_queue<sync_request_info,detail::disposer> m_pending_requests;
};
......
......@@ -31,7 +31,10 @@
#ifndef CPPA_MESSAGE_HEADER_HPP
#define CPPA_MESSAGE_HEADER_HPP
#include <utility>
#include "cppa/actor.hpp"
#include "cppa/any_tuple.hpp"
#include "cppa/message_id.hpp"
namespace cppa { namespace network {
......@@ -54,6 +57,17 @@ class message_header {
const actor_ptr& receiver,
message_id_t id = message_id_t::invalid);
inline void deliver(any_tuple msg) const {
if (receiver) {
if (id.valid()) {
receiver->sync_enqueue(sender.get(), id, std::move(msg));
}
else {
receiver->enqueue(sender.get(), std::move(msg));
}
}
}
};
inline bool operator==(const message_header& lhs, const message_header& rhs) {
......
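
A hypothetical call site for the new member function (sender, receiver, mid, and msg are placeholder variables): a valid id marks the tuple as part of a synchronous exchange and selects sync_enqueue, so the sender's response handler can match on it:

    // deliver is a no-op if the receiver is null
    message_header hdr{sender, receiver, mid};
    hdr.deliver(std::move(msg));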
......@@ -28,85 +28,68 @@
\******************************************************************************/
#include "cppa/cppa.hpp"
#include "cppa/config.hpp"
#include "cppa/to_string.hpp"
#include "cppa/exception.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/detail/types_array.hpp"
#include "cppa/detail/yield_interface.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
/*
namespace cppa { namespace detail {
namespace {
void dummy_enqueue(void*, abstract_scheduled_actor*) { }
types_array<atom_value, std::uint32_t> t_atom_ui32_types;
void abstract_scheduled_actor::request_timeout(const util::duration& d) {
if (d.valid()) {
if (d.is_zero()) {
// immediately enqueue timeout
auto node = super::fetch_node(this,
make_any_tuple(atom("TIMEOUT"),
++m_active_timeout_id));
this->m_mailbox.enqueue(node);
}
else {
get_scheduler()->delayed_send(
this, d,
make_any_tuple(
atom("TIMEOUT"), ++m_active_timeout_id));
}
m_has_pending_timeout_request = true;
}
else m_has_pending_timeout_request = false;
}
abstract_scheduled_actor::abstract_scheduled_actor(scheduler* sched)
: next(nullptr)
, m_state(ready)
, m_scheduler(sched)
, m_has_pending_timeout_request(false)
, m_active_timeout_id(0) {
CPPA_REQUIRE(sched != nullptr);
abstract_scheduled_actor::abstract_scheduled_actor(int state)
: super(true), m_state(state), m_has_pending_timeout_request(false)
, m_active_timeout_id(0) {
}
abstract_scheduled_actor::abstract_scheduled_actor(int state)
: next(nullptr)
, m_state(state)
, m_scheduler(nullptr)
, m_has_pending_timeout_request(false)
, m_active_timeout_id(0) {
bool abstract_scheduled_actor::chained_enqueue(actor* sender, any_tuple msg) {
return enqueue_node(super::fetch_node(sender, std::move(msg)), pending);
}
abstract_scheduled_actor::resume_callback::~resume_callback() {
bool abstract_scheduled_actor::chained_sync_enqueue(actor* sender, message_id_t id, any_tuple msg) {
bool failed;
bool result = enqueue_node(super::fetch_node(sender, std::move(msg), id), pending, &failed);
if (failed) {
sync_request_bouncer f{this, exit_reason()};
f(sender, id);
}
return result;
}
void abstract_scheduled_actor::quit(std::uint32_t reason) {
cleanup(reason);
this->cleanup(reason);
throw actor_exited(reason);
}
void abstract_scheduled_actor::enqueue_node(queue_node* node) {
if (m_mailbox._push_back(node)) {
for (;;) {
int state = m_state.load();
switch (state) {
case blocked: {
if (m_state.compare_exchange_weak(state, ready)) {
CPPA_REQUIRE(m_scheduler != nullptr);
m_scheduler->enqueue(this);
return;
}
break;
}
case about_to_block: {
if (m_state.compare_exchange_weak(state, ready)) {
return;
}
break;
}
default: return;
}
}
}
}
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple&& msg) {
enqueue_node(fetch_node(sender, std::move(msg)));
//enqueue_node(new queue_node(sender, std::move(msg)));
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple msg) {
enqueue_node(super::fetch_node(sender, std::move(msg)));
}
void abstract_scheduled_actor::enqueue(actor* sender, const any_tuple& msg) {
enqueue_node(fetch_node(sender, msg));
//enqueue_node(new queue_node(sender, msg));
void abstract_scheduled_actor::sync_enqueue(actor* sender, message_id_t id, any_tuple msg) {
bool failed;
enqueue_node(super::fetch_node(sender, std::move(msg), id), ready, &failed);
if (failed) {
sync_request_bouncer f{this, exit_reason()};
f(sender, id);
}
}
int abstract_scheduled_actor::compare_exchange_state(int expected,
int new_value) {
int abstract_scheduled_actor::compare_exchange_state(int expected, int new_value) {
int e = expected;
do {
if (m_state.compare_exchange_weak(e, new_value)) {
......@@ -117,114 +100,44 @@ int abstract_scheduled_actor::compare_exchange_state(int expected,
return e;
}
void abstract_scheduled_actor::request_timeout(const util::duration& d) {
    if (d.valid()) {
        future_send(this, d, atom(":Timeout"), ++m_active_timeout_id);
        m_has_pending_timeout_request = true;
    }
}

auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result {
    if (   msg.size() == 2
        && msg.type_at(0) == t_atom_ui32_types[0]
        && msg.type_at(1) == t_atom_ui32_types[1]) {
        auto v0 = *reinterpret_cast<const atom_value*>(msg.at(0));
        auto v1 = *reinterpret_cast<const std::uint32_t*>(msg.at(1));
        if (v0 == atom(":Exit")) {
            if (m_trap_exit == false) {
                if (v1 != exit_reason::normal) {
                    quit(v1);
                }
                return normal_exit_signal;
            }
        }
        else if (v0 == atom(":Timeout")) {
            return (v1 == m_active_timeout_id) ? timeout_message
                                               : expired_timeout_message;
        }
    }
    return ordinary_message;
}

auto abstract_scheduled_actor::dq(queue_node& node,
                                  partial_function& fun) -> dq_result {
    CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
    if (node.marked) return dq_indeterminate;
    switch (filter_msg(node.msg)) {
        case normal_exit_signal:
        case expired_timeout_message: {
            // skip message
            return dq_indeterminate;
        }
        case timeout_message: {
            // m_active_timeout_id is already invalid
            m_has_pending_timeout_request = false;
            return dq_timeout_occured;
        }
        default: break;
    }
    std::swap(m_last_dequeued, node.msg);
    std::swap(m_last_sender, node.sender);
    // make sure no timeout is handled incorrectly in a nested receive
    ++m_active_timeout_id;
    { // lifetime scope of qguard
        // make sure nested receives do not process this node again
        queue_node_guard qguard{&node};
        // try to invoke given function
        if (fun(m_last_dequeued)) {
            // client erases node later (keep it marked until it's removed)
            qguard.release();
            // these members are only valid during invocation
            m_last_dequeued.reset();
            m_last_sender.reset();
            // we definitely don't have a pending timeout now
            m_has_pending_timeout_request = false;
            return dq_done;
        }
    }
    // no match, restore members
    --m_active_timeout_id;
    std::swap(m_last_dequeued, node.msg);
    std::swap(m_last_sender, node.sender);
    return dq_indeterminate;
}

bool abstract_scheduled_actor::enqueue_node(recursive_queue_node* node,
                                            int next_state,
                                            bool* failed) {
    CPPA_REQUIRE(next_state == ready || next_state == pending);
    CPPA_REQUIRE(node->marked == false);
    switch (this->m_mailbox.enqueue(node)) {
        case intrusive::first_enqueued: {
            int state = m_state.load();
            for (;;) {
                switch (state) {
                    case blocked: {
                        if (m_state.compare_exchange_weak(state, next_state)) {
                            CPPA_REQUIRE(this->m_scheduler != nullptr);
                            if (next_state == ready) {
                                this->m_scheduler->enqueue(this);
                            }
                            return true;
                        }
                        break;
                    }
                    case about_to_block: {
                        if (m_state.compare_exchange_weak(state, ready)) {
                            return false;
                        }
                        break;
                    }
                    default: return false;
                }
            }
            break;
        }
        case intrusive::queue_closed: {
            if (failed) *failed = true;
            break;
        }
        default: break;
    }
    return false;
}
// dummy
void scheduled_actor_dummy::resume(util::fiber*, resume_callback*) {
}
void scheduled_actor_dummy::quit(std::uint32_t) {
}
void scheduled_actor_dummy::dequeue(behavior&) {
}
void scheduled_actor_dummy::dequeue(partial_function&) {
}
void scheduled_actor_dummy::link_to(intrusive_ptr<actor>&) {
}
void scheduled_actor_dummy::unlink_from(intrusive_ptr<actor>&) {
}
bool scheduled_actor_dummy::establish_backlink(intrusive_ptr<actor>&) {
return false;
}
bool scheduled_actor_dummy::remove_backlink(intrusive_ptr<actor>&) {
return false;
}
void scheduled_actor_dummy::detach(const attachable::token&) {
}
bool scheduled_actor_dummy::attach(attachable*) {
return false;
}
} } // namespace cppa::detail
*/
......@@ -34,12 +34,22 @@
#include "cppa/network/middleman.hpp"
#include "cppa/network/default_actor_proxy.hpp"
#include "cppa/detail/memory.hpp"
#include "cppa/detail/singleton_manager.hpp"
using namespace std;
namespace cppa { namespace network {
inline sync_request_info* new_req_info(actor_ptr sptr, message_id_t id) {
return detail::memory::create<sync_request_info>(std::move(sptr), id);
}
sync_request_info::sync_request_info(actor_ptr sptr, message_id_t id)
: next(nullptr), sender(std::move(sptr)), mid(id) { }
sync_request_info::~sync_request_info() { }
default_actor_proxy::default_actor_proxy(actor_id mid,
const process_information_ptr& pinfo,
const default_protocol_ptr& parent)
......@@ -64,8 +74,36 @@ default_actor_proxy::~default_actor_proxy() {
});
}
void default_actor_proxy::forward_msg(const actor_ptr& sender, any_tuple msg, message_id_t mid) {
void default_actor_proxy::deliver(const network::message_header& hdr, any_tuple msg) {
// this member function is called exclusively by default_peer from inside
// the middleman's thread; therefore we can safely access
// m_pending_requests here
if (hdr.id.is_response()) {
// remove this request from list of pending requests
auto req = hdr.id.request_id();
m_pending_requests.remove_if([&](const sync_request_info& e) -> bool {
return e.mid == req;
});
}
hdr.deliver(std::move(msg));
}
void default_actor_proxy::forward_msg(const actor_ptr& sender,
any_tuple msg,
message_id_t mid) {
CPPA_LOG_TRACE("");
if (sender && mid.is_request()) {
switch (m_pending_requests.enqueue(new_req_info(sender, mid))) {
case intrusive::queue_closed: {
    // the proxy already terminated: bounce the request immediately
    // (sender is guaranteed non-null by the enclosing check)
    detail::sync_request_bouncer f{this, exit_reason()};
    f(sender.get(), mid);
    return; // no need to forward the message
}
default: break;
}
}
message_header hdr{sender, this, mid};
auto node = m_pinf;
auto proto = m_proto;
......@@ -83,7 +121,17 @@ void default_actor_proxy::enqueue(actor* sender, any_tuple msg) {
&& msg.get_as<atom_value>(0) == atom("KILL_PROXY")
&& msg.type_at(1) == arr[1]) {
CPPA_LOG_DEBUG("received KILL_PROXY message");
cleanup(msg.get_as<uint32_t>(1));
intrusive_ptr<default_actor_proxy> _this{this};
auto reason = msg.get_as<uint32_t>(1);
m_proto->run_later([_this, reason] {
_this->cleanup(reason);
// make sure cleanup is done before closing requests queue
std::atomic_thread_fence(std::memory_order_seq_cst);
detail::sync_request_bouncer f{_this.get(), reason};
_this->m_pending_requests.close([&](const sync_request_info& e) {
f(e.sender.get(), e.mid);
});
});
return;
}
forward_msg(sender, move(msg));
......
......@@ -262,6 +262,11 @@ void default_peer::kill_proxy(const actor_ptr& sender,
void default_peer::deliver(const message_header& hdr, any_tuple msg) {
CPPA_LOG_TRACE("");
if (hdr.sender && hdr.sender->is_proxy()) {
hdr.sender.downcast<actor_proxy>()->deliver(hdr, std::move(msg));
}
else hdr.deliver(std::move(msg));
/*
auto receiver = hdr.receiver.get();
if (receiver) {
if (hdr.id.valid()) {
......@@ -278,9 +283,9 @@ void default_peer::deliver(const message_header& hdr, any_tuple msg) {
else {
CPPA_LOG_ERROR("received message with invalid receiver");
}
*/
}
void default_peer::link(const actor_ptr& sender, const actor_ptr& ptr) {
// this message is sent from default_actor_proxy in link_to and
// establish_backlink to cause the original actor (sender) to establish
......
......@@ -63,7 +63,11 @@ void thread_mapped_actor::enqueue(actor* sender, any_tuple msg) {
void thread_mapped_actor::sync_enqueue(actor* sender,
message_id_t id,
any_tuple msg ) {
m_mailbox.push_back(fetch_node(sender, std::move(msg), id));
if (!m_mailbox.push_back(fetch_node(sender, std::move(msg), id))) {
detail::sync_request_bouncer f{this, exit_reason()};
f(sender, id);
}
}
bool thread_mapped_actor::initialized() {
......
......@@ -537,12 +537,16 @@ int main() {
await_all_others_done();
CPPA_IF_VERBOSE(cout << "ok" << endl);
receive_response(sync_send(sync_testee, "!?")) (
others() >> [&]() {
CPPA_ERROR("'sync_testee' still alive?");
sync_send(sync_testee, "!?").await(
on(atom("EXITED"), any_vals) >> [&] {
CPPA_CHECK(true);
},
others() >> [&] {
CPPA_ERROR("'sync_testee' still alive?; received: "
<< to_string(self->last_dequeued()));
},
after(chrono::milliseconds(5)) >> [&] {
CPPA_CHECK(true);
CPPA_CHECK(false);
}
);
......