Commit 1eb9f07e authored by Dominik Charousset

more efficient message handling

parent 48ce31cd
@@ -132,7 +132,7 @@ struct ebaf_from_functor {
     static_assert(   std::is_same<arg_types, arg_types2>::value
                   || std::is_same<util::type_list<>, arg_types2>::value,
                   "Second functor must provide either the same signature "
-                  " as the first one or must take zero arguments");
+                  "as the first one or must take zero arguments");
     typedef typename util::tl_map<arg_types, std::remove_pointer>::type mems;
     typedef typename ebaf_from_type_list<Init, Cleanup, mems>::type type;
 };
...
@@ -43,12 +43,15 @@
 namespace cppa { namespace detail {
 
 enum receive_policy_flag {
-    // blocking message processing: thread-mapped & context-switching actors
+    // receives can be nested
     rp_nestable,
-    // callback-based message processing: event-based actors
-    rp_callback
+    // receives are guaranteed to be sequential
+    rp_sequential
 };
 
+template<receive_policy_flag X>
+struct rp_flag { typedef std::integral_constant<receive_policy_flag, X> type; };
+
 class receive_policy {
 
  public:
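The new rp_flag helper turns each enum value into a distinct empty type, which the policy uses to pick a handle_message overload at compile time (see the nestable/sequential typedefs and the Client::receive_flag token further down). The following is a standalone sketch of that tag-dispatch idea; fake_client, handle() and main() are illustrative names, not code from this commit.

#include <iostream>
#include <type_traits>

enum receive_policy_flag { rp_nestable, rp_sequential };

// Same helper as in the diff above: maps an enum value to a distinct tag type.
template<receive_policy_flag X>
struct rp_flag { typedef std::integral_constant<receive_policy_flag, X> type; };

typedef rp_flag<rp_nestable>::type nestable;
typedef rp_flag<rp_sequential>::type sequential;

// Stand-in for an actor type that advertises its policy, the way
// event_based_actor exposes receive_flag = rp_sequential further below.
struct fake_client {
    static const receive_policy_flag receive_flag = rp_sequential;
};

// Overload resolution picks the implementation at compile time; the tag
// object is empty and costs nothing at runtime.
void handle(nestable)   { std::cout << "nestable receive\n"; }
void handle(sequential) { std::cout << "sequential receive\n"; }

int main() {
    rp_flag<fake_client::receive_flag>::type token;
    handle(token);        // prints "sequential receive"
    handle(nestable());   // prints "nestable receive"
}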
@@ -67,7 +70,7 @@ class receive_policy {
         auto i = m_cache.begin();
         auto e = m_cache.end();
         while (i != e) {
-            switch (this->handle_message(client, *(*i), fun, token)) {
+            switch (this->handle_message(client, i->get(), fun, token)) {
                 case hm_msg_handled: {
                     client->release_node(i->release());
                     m_cache.erase(i);
@@ -92,9 +95,9 @@ class receive_policy {
     }
 
     template<class Client, class FunOrBehavior>
-    bool invoke(Client* client, recursive_queue_node* node, FunOrBehavior& fun) {
+    bool invoke(Client* client, recursive_queue_node* node, FunOrBehavior& fun){
         std::integral_constant<receive_policy_flag, Client::receive_flag> token;
-        switch (this->handle_message(client, *node, fun, token)) {
+        switch (this->handle_message(client, node, fun, token)) {
             case hm_msg_handled: {
                 client->release_node(node);
                 return true;
@@ -157,8 +160,8 @@ class receive_policy {
  private:

-    typedef std::integral_constant<receive_policy_flag, rp_nestable> nestable;
-    typedef std::integral_constant<receive_policy_flag, rp_callback> callback;
+    typedef typename rp_flag<rp_nestable>::type nestable;
+    typedef typename rp_flag<rp_sequential>::type sequential;
 
     std::list<std::unique_ptr<recursive_queue_node> > m_cache;
@@ -174,13 +177,13 @@ class receive_policy {
     template<class Client, class FunOrBehavior>
     handle_message_result handle_message(Client* client,
-                                         recursive_queue_node& node,
+                                         recursive_queue_node* node,
                                          FunOrBehavior& fun,
                                          nestable) {
-        if (node.marked) {
+        if (node->marked) {
             return hm_skip_msg;
         }
-        switch (client->filter_msg(node.msg)) {
+        switch (client->filter_msg(node->msg)) {
             case normal_exit_signal:
             case expired_timeout_message: {
                 return hm_drop_msg;
@@ -190,20 +193,18 @@ class receive_policy {
                 return hm_msg_handled;
             }
             case ordinary_message: {
-                std::swap(client->m_last_dequeued, node.msg);
-                std::swap(client->m_last_sender, node.sender);
+                auto previous_node = client->m_current_node;
+                client->m_current_node = node;
                 client->push_timeout();
-                node.marked = true;
-                if (fun(client->m_last_dequeued)) {
-                    client->m_last_dequeued.reset();
-                    client->m_last_sender.reset();
+                node->marked = true;
+                if (fun(node->msg)) {
+                    client->m_current_node = &(client->m_dummy_node);
                     return hm_msg_handled;
                 }
                 // no match (restore client members)
-                std::swap(client->m_last_dequeued, node.msg);
-                std::swap(client->m_last_sender, node.sender);
+                client->m_current_node = previous_node;
                 client->pop_timeout();
-                node.marked = false;
+                node->marked = false;
                 return hm_cache_msg;
             }
             default: CPPA_CRITICAL("illegal result of filter_msg");
@@ -212,11 +213,11 @@ class receive_policy {
     template<class Client, class FunOrBehavior>
     handle_message_result handle_message(Client* client,
-                                         recursive_queue_node& node,
+                                         recursive_queue_node* node,
                                          FunOrBehavior& fun,
-                                         callback) {
-        CPPA_REQUIRE(node.marked == false);
-        switch (client->filter_msg(node.msg)) {
+                                         sequential) {
+        CPPA_REQUIRE(node->marked == false);
+        switch (client->filter_msg(node->msg)) {
             case normal_exit_signal:
             case expired_timeout_message: {
                 return hm_drop_msg;
@@ -226,18 +227,16 @@ class receive_policy {
                 return hm_msg_handled;
             }
             case ordinary_message: {
-                std::swap(client->m_last_dequeued, node.msg);
-                std::swap(client->m_last_sender, node.sender);
-                if (fun(client->m_last_dequeued)) {
-                    client->m_last_dequeued.reset();
-                    client->m_last_sender.reset();
+                auto previous_node = client->m_current_node;
+                client->m_current_node = node;
+                if (fun(node->msg)) {
+                    client->m_current_node = &(client->m_dummy_node);
                     // we definitely don't have a pending timeout now
                     client->m_has_pending_timeout_request = false;
                     return hm_msg_handled;
                 }
                 // no match, restore members
-                std::swap(client->m_last_dequeued, node.msg);
-                std::swap(client->m_last_sender, node.sender);
+                client->m_current_node = previous_node;
                 return hm_cache_msg;
            }
            default: CPPA_CRITICAL("illegal result of filter_msg");
...
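Because handle_message now takes the node as a raw pointer, the cache loop above passes i->get() and only gives up ownership via release() for nodes that were actually handled. The following is a standalone sketch of that unique_ptr cache idiom, with a simplified node type and a stand-in handle() predicate (both illustrative, not library types).

#include <iostream>
#include <list>
#include <memory>

struct node { int value; };

// Pretend-handler: "handles" even values only.
bool handle(node* n) { return n->value % 2 == 0; }

int main() {
    std::list<std::unique_ptr<node>> cache;
    for (int i = 1; i <= 4; ++i) cache.emplace_back(new node{i});
    for (auto i = cache.begin(); i != cache.end(); ) {
        if (handle(i->get())) {          // pass a non-owning pointer
            delete i->release();         // take ownership back, then dispose
            i = cache.erase(i);          // drop the now-empty slot
        }
        else {
            ++i;                         // keep unhandled nodes cached
        }
    }
    std::cout << cache.size() << " node(s) still cached\n"; // prints 2
}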
@@ -37,13 +37,13 @@
 namespace cppa { namespace detail {
 
 struct recursive_queue_node {
     recursive_queue_node* next;   // intrusive next pointer
     bool marked;                  // denotes if this node is currently processed
     actor_ptr sender;
     any_tuple msg;
 
-    inline recursive_queue_node() : next(nullptr), marked(false) {
-    }
+    inline recursive_queue_node() : next(nullptr), marked(false) { }
 
     inline recursive_queue_node(actor* from, any_tuple&& content)
         : next(nullptr)
...
@@ -137,7 +137,7 @@ class event_based_actor : public detail::abstract_scheduled_actor {
     }
 
     // required by detail::nestable_receive_policy
-    static const detail::receive_policy_flag receive_flag = detail::rp_callback;
+    static const detail::receive_policy_flag receive_flag = detail::rp_sequential;
 
     inline void handle_timeout(behavior& bhvr) {
         CPPA_REQUIRE(bhvr.timeout().valid());
         m_has_pending_timeout_request = false;
...
@@ -37,16 +37,18 @@
 #include "cppa/match_expr.hpp"
 #include "cppa/exit_reason.hpp"
 #include "cppa/partial_function.hpp"
-#include "cppa/intrusive/single_reader_queue.hpp"
+#include "cppa/detail/recursive_queue_node.hpp"
 
 namespace cppa {
 
+// forward declarations
 class scheduler;
 class local_scheduler;
 
 struct discard_behavior_t { };
 struct keep_behavior_t { };
 
+// doxygen doesn't parse anonymous namespaces correctly
 #ifndef CPPA_DOCUMENTATION
 namespace {
 #endif // CPPA_DOCUMENTATION
@@ -162,19 +164,20 @@ class local_actor : public actor {
     /**
      * @brief Returns the last message that was dequeued
      *        from the actor's mailbox.
-     * @note Only set during callback invocation.
+     * @warning Only set during callback invocation.
      */
     inline any_tuple& last_dequeued() {
-        return m_last_dequeued;
+        return m_current_node->msg;
     }
 
     /**
      * @brief Returns the sender of the last dequeued message.
-     * @note Only set during callback invocation.
+     * @warning Only set during callback invocation.
      * @note Implicitly used by the function {@link cppa::reply}.
+     * @see cppa::reply()
      */
     inline actor_ptr& last_sender() {
-        return m_last_sender;
+        return m_current_node->sender;
     }
 
     /**
@@ -326,9 +329,9 @@ class local_actor : public actor {
     bool m_chaining;
     bool m_trap_exit;
     bool m_is_scheduled;
-    actor_ptr m_last_sender;
     actor_ptr m_chained_actor;
-    any_tuple m_last_dequeued;
+    detail::recursive_queue_node m_dummy_node;
+    detail::recursive_queue_node* m_current_node;
 
#   endif // CPPA_DOCUMENTATION
...
@@ -68,7 +68,8 @@ class down_observer : public attachable {
 } // namespace <anonymous>
 
 local_actor::local_actor(bool sflag)
-: m_chaining(sflag), m_trap_exit(false), m_is_scheduled(sflag) { }
+: m_chaining(sflag), m_trap_exit(false)
+, m_is_scheduled(sflag), m_dummy_node(), m_current_node(&m_dummy_node) { }
 
 void local_actor::monitor(actor_ptr whom) {
     if (whom) whom->attach(new down_observer(this, whom));
...
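The changes to local_actor and receive_policy replace the per-message swaps of m_last_dequeued/m_last_sender with a single pointer to the queue node currently being processed, parked on an empty dummy node whenever no callback is running. Below is a compact sketch of that bookkeeping with illustrative stand-in types (queue_node, mini_actor), not the library's actual classes.

#include <iostream>
#include <string>

// Stand-ins for recursive_queue_node's sender/msg members.
struct queue_node {
    std::string sender;
    std::string msg;
};

class mini_actor {
 public:
    mini_actor() : m_current_node(&m_dummy_node) { }

    // Mirrors the accessors in local_actor: read through the current node.
    std::string& last_dequeued() { return m_current_node->msg; }
    std::string& last_sender()   { return m_current_node->sender; }

    // Mirrors handle_message: point at the node for the duration of the
    // callback, then fall back to the dummy node (handled) or the previous
    // node (no match). No message or sender copies/swaps are needed.
    template<class Fun>
    bool invoke(queue_node* node, Fun fun) {
        auto previous_node = m_current_node;
        m_current_node = node;
        if (fun(node->msg)) {
            m_current_node = &m_dummy_node;
            return true;
        }
        m_current_node = previous_node;
        return false;
    }

 private:
    queue_node  m_dummy_node;      // empty fallback, never holds a real message
    queue_node* m_current_node;    // the node whose callback is running
};

int main() {
    mini_actor self;
    queue_node node{"alice", "hello"};
    self.invoke(&node, [&](std::string& msg) {
        std::cout << self.last_sender() << " sent: " << msg << "\n";
        return true; // message handled
    });
    std::cout << "after handling: '" << self.last_dequeued() << "'\n"; // empty
}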