Commit 1eb9f07e authored by Dominik Charousset

more efficient message handling

parent 48ce31cd
......@@ -132,7 +132,7 @@ struct ebaf_from_functor {
static_assert( std::is_same<arg_types, arg_types2>::value
|| std::is_same<util::type_list<>, arg_types2>::value,
"Second functor must provide either the same signature "
" as the first one or must take zero arguments");
"as the first one or must take zero arguments");
typedef typename util::tl_map<arg_types, std::remove_pointer>::type mems;
typedef typename ebaf_from_type_list<Init, Cleanup, mems>::type type;
};
......
......@@ -43,12 +43,15 @@
namespace cppa { namespace detail {
enum receive_policy_flag {
// blocking message processing: thread-mapped & context-switching actors
// receives can be nested
rp_nestable,
// callback-based message processing: event-based actors
rp_callback
// receives are guaranteed to be sequential
rp_sequential
};
template<receive_policy_flag X>
struct rp_flag { typedef std::integral_constant<receive_policy_flag, X> type; };
class receive_policy {
public:
......@@ -67,7 +70,7 @@ class receive_policy {
auto i = m_cache.begin();
auto e = m_cache.end();
while (i != e) {
switch (this->handle_message(client, *(*i), fun, token)) {
switch (this->handle_message(client, i->get(), fun, token)) {
case hm_msg_handled: {
client->release_node(i->release());
m_cache.erase(i);
......@@ -92,9 +95,9 @@ class receive_policy {
}
template<class Client, class FunOrBehavior>
bool invoke(Client* client, recursive_queue_node* node, FunOrBehavior& fun) {
bool invoke(Client* client, recursive_queue_node* node, FunOrBehavior& fun){
std::integral_constant<receive_policy_flag, Client::receive_flag> token;
switch (this->handle_message(client, *node, fun, token)) {
switch (this->handle_message(client, node, fun, token)) {
case hm_msg_handled: {
client->release_node(node);
return true;
......@@ -157,8 +160,8 @@ class receive_policy {
private:
typedef std::integral_constant<receive_policy_flag, rp_nestable> nestable;
typedef std::integral_constant<receive_policy_flag, rp_callback> callback;
typedef typename rp_flag<rp_nestable>::type nestable;
typedef typename rp_flag<rp_sequential>::type sequential;
std::list<std::unique_ptr<recursive_queue_node> > m_cache;
......@@ -174,13 +177,13 @@ class receive_policy {
template<class Client, class FunOrBehavior>
handle_message_result handle_message(Client* client,
recursive_queue_node& node,
recursive_queue_node* node,
FunOrBehavior& fun,
nestable) {
if (node.marked) {
if (node->marked) {
return hm_skip_msg;
}
switch (client->filter_msg(node.msg)) {
switch (client->filter_msg(node->msg)) {
case normal_exit_signal:
case expired_timeout_message: {
return hm_drop_msg;
......@@ -190,20 +193,18 @@ class receive_policy {
return hm_msg_handled;
}
case ordinary_message: {
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
auto previous_node = client->m_current_node;
client->m_current_node = node;
client->push_timeout();
node.marked = true;
if (fun(client->m_last_dequeued)) {
client->m_last_dequeued.reset();
client->m_last_sender.reset();
node->marked = true;
if (fun(node->msg)) {
client->m_current_node = &(client->m_dummy_node);
return hm_msg_handled;
}
// no match (restore client members)
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
client->m_current_node = previous_node;
client->pop_timeout();
node.marked = false;
node->marked = false;
return hm_cache_msg;
}
default: CPPA_CRITICAL("illegal result of filter_msg");
......@@ -212,11 +213,11 @@ class receive_policy {
template<class Client, class FunOrBehavior>
handle_message_result handle_message(Client* client,
recursive_queue_node& node,
recursive_queue_node* node,
FunOrBehavior& fun,
callback) {
CPPA_REQUIRE(node.marked == false);
switch (client->filter_msg(node.msg)) {
sequential) {
CPPA_REQUIRE(node->marked == false);
switch (client->filter_msg(node->msg)) {
case normal_exit_signal:
case expired_timeout_message: {
return hm_drop_msg;
......@@ -226,18 +227,16 @@ class receive_policy {
return hm_msg_handled;
}
case ordinary_message: {
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
if (fun(client->m_last_dequeued)) {
client->m_last_dequeued.reset();
client->m_last_sender.reset();
auto previous_node = client->m_current_node;
client->m_current_node = node;
if (fun(node->msg)) {
client->m_current_node = &(client->m_dummy_node);
// we definitely don't have a pending timeout now
client->m_has_pending_timeout_request = false;
return hm_msg_handled;
}
// no match, restore members
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
client->m_current_node = previous_node;
return hm_cache_msg;
}
default: CPPA_CRITICAL("illegal result of filter_msg");
......
......@@ -37,13 +37,13 @@
namespace cppa { namespace detail {
struct recursive_queue_node {
recursive_queue_node* next; // intrusive next pointer
bool marked; // denotes if this node is currently processed
actor_ptr sender;
any_tuple msg;
inline recursive_queue_node() : next(nullptr), marked(false) {
}
inline recursive_queue_node() : next(nullptr), marked(false) { }
inline recursive_queue_node(actor* from, any_tuple&& content)
: next(nullptr)
......
......@@ -137,7 +137,7 @@ class event_based_actor : public detail::abstract_scheduled_actor {
}
// required by detail::nestable_receive_policy
static const detail::receive_policy_flag receive_flag = detail::rp_callback;
static const detail::receive_policy_flag receive_flag = detail::rp_sequential;
inline void handle_timeout(behavior& bhvr) {
CPPA_REQUIRE(bhvr.timeout().valid());
m_has_pending_timeout_request = false;
......
......@@ -37,16 +37,18 @@
#include "cppa/match_expr.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/partial_function.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
namespace cppa {
// forward declarations
class scheduler;
class local_scheduler;
struct discard_behavior_t { };
struct keep_behavior_t { };
// doxygen doesn't parse anonymous namespaces correctly
#ifndef CPPA_DOCUMENTATION
namespace {
#endif // CPPA_DOCUMENTATION
......@@ -162,19 +164,20 @@ class local_actor : public actor {
/**
* @brief Returns the last message that was dequeued
* from the actor's mailbox.
* @note Only set during callback invocation.
* @warning Only set during callback invocation.
*/
inline any_tuple& last_dequeued() {
return m_last_dequeued;
return m_current_node->msg;
}
/**
* @brief Returns the sender of the last dequeued message.
* @note Only set during callback invocation.
* @warning Only set during callback invocation.
* @note Implicitly used by the function {@link cppa::reply}.
* @see cppa::reply()
*/
inline actor_ptr& last_sender() {
return m_last_sender;
return m_current_node->sender;
}
/**
......@@ -326,9 +329,9 @@ class local_actor : public actor {
bool m_chaining;
bool m_trap_exit;
bool m_is_scheduled;
actor_ptr m_last_sender;
actor_ptr m_chained_actor;
any_tuple m_last_dequeued;
detail::recursive_queue_node m_dummy_node;
detail::recursive_queue_node* m_current_node;
# endif // CPPA_DOCUMENTATION
......
......@@ -68,7 +68,8 @@ class down_observer : public attachable {
} // namespace <anonymous>
local_actor::local_actor(bool sflag)
: m_chaining(sflag), m_trap_exit(false), m_is_scheduled(sflag) { }
: m_chaining(sflag), m_trap_exit(false)
, m_is_scheduled(sflag), m_dummy_node(), m_current_node(&m_dummy_node) { }
void local_actor::monitor(actor_ptr whom) {
if (whom) whom->attach(new down_observer(this, whom));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment