Commit 3d7e4d06 authored by neverlord's avatar neverlord

policy based message handling

parent 08de1c38
...@@ -267,3 +267,4 @@ cppa/match_expr.hpp ...@@ -267,3 +267,4 @@ cppa/match_expr.hpp
cppa/detail/pseudo_tuple.hpp cppa/detail/pseudo_tuple.hpp
cppa/detail/recursive_queue_node.hpp cppa/detail/recursive_queue_node.hpp
cppa/detail/scheduled_actor_dummy.hpp cppa/detail/scheduled_actor_dummy.hpp
cppa/detail/nestable_invoke_policy.hpp
...@@ -56,8 +56,7 @@ namespace cppa { ...@@ -56,8 +56,7 @@ namespace cppa {
* @tparam Base Either {@link cppa::actor actor} * @tparam Base Either {@link cppa::actor actor}
* or {@link cppa::local_actor local_actor}. * or {@link cppa::local_actor local_actor}.
*/ */
template<class Base, template<class Base>
class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node> >
class abstract_actor : public Base class abstract_actor : public Base
{ {
...@@ -66,10 +65,8 @@ class abstract_actor : public Base ...@@ -66,10 +65,8 @@ class abstract_actor : public Base
public: public:
typedef MailboxType mailbox_type; typedef detail::recursive_queue_node mailbox_element;
typedef typename mailbox_type::value_type mailbox_element; typedef intrusive::single_reader_queue<mailbox_element> mailbox_type;
typedef typename mailbox_type::cache_type mailbox_cache_type;
typedef typename mailbox_cache_type::value_type mailbox_cache_element;
bool attach(attachable* ptr) // override bool attach(attachable* ptr) // override
{ {
......
...@@ -44,49 +44,13 @@ ...@@ -44,49 +44,13 @@
namespace cppa { namespace cppa {
// Functor used as the CacheAppend policy of single_reader_queue when the
// cache is a std::vector: moves a LIFO-ordered intrusive node chain into
// FIFO order at the end of `result`.
struct vec_append
{
    // Appends the node chain starting at `e` (linked via e->next, newest
    // first) to `result` in reversed (i.e. arrival) order.
    // Returns an iterator to the first newly appended element.
    // NOTE(review): assumes every node in the chain was heap-allocated and
    // ownership may be taken via unique_ptr — confirm at the enqueue site.
    inline std::vector<detail::recursive_queue_node>::iterator
    operator()(std::vector<detail::recursive_queue_node>& result,
               detail::recursive_queue_node* e) const
    {
        // Take temporary ownership of each node so it is freed after its
        // contents have been moved into the vector below.
        std::vector<std::unique_ptr<detail::recursive_queue_node> > tmp;
        while (e)
        {
            // Read the link first; moving/deleting e would invalidate it.
            auto next = e->next;
            tmp.emplace_back(e);
            e = next;
        }
        // Remember where the new elements begin; reverse-iterate tmp to
        // turn the LIFO chain into FIFO order.
        auto old_size = result.size();
        for (auto i = tmp.rbegin(); i != tmp.rend(); ++i)
        {
            result.emplace_back(std::move(*(*i)));
        }
        return result.begin() + old_size;
    }
};
/** /**
* @brief Base class for all event-based actor implementations. * @brief Base class for all event-based actor implementations.
*/ */
class abstract_event_based_actor class abstract_event_based_actor : public detail::abstract_scheduled_actor
: public detail::abstract_scheduled_actor<
intrusive::single_reader_queue<
detail::recursive_queue_node,
std::vector<detail::recursive_queue_node>,
vec_append
>
>
{ {
typedef detail::abstract_scheduled_actor< typedef detail::abstract_scheduled_actor super;
intrusive::single_reader_queue<
detail::recursive_queue_node,
std::vector<detail::recursive_queue_node>,
vec_append
>
>
super;
public: public:
...@@ -108,6 +72,17 @@ class abstract_event_based_actor ...@@ -108,6 +72,17 @@ class abstract_event_based_actor
protected: protected:
std::vector<std::unique_ptr<detail::recursive_queue_node> > m_cache;
enum handle_message_result
{
drop_msg,
msg_handled,
cache_msg
};
auto handle_message(mailbox_element& iter) -> handle_message_result;
abstract_event_based_actor(); abstract_event_based_actor();
// ownership flag + pointer // ownership flag + pointer
...@@ -157,10 +132,6 @@ class abstract_event_based_actor ...@@ -157,10 +132,6 @@ class abstract_event_based_actor
receive(std::forward<Args>(args)...); receive(std::forward<Args>(args)...);
} }
private:
bool handle_message(mailbox_element& iter);
}; };
} // namespace cppa } // namespace cppa
......
...@@ -47,11 +47,10 @@ ...@@ -47,11 +47,10 @@
namespace cppa { namespace detail { namespace cppa { namespace detail {
// A spawned, scheduled Actor. // A spawned, scheduled Actor.
template<class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node> > class abstract_scheduled_actor : public abstract_actor<scheduled_actor>
class abstract_scheduled_actor : public abstract_actor<scheduled_actor, MailboxType>
{ {
typedef abstract_actor<scheduled_actor, MailboxType> super; typedef abstract_actor<scheduled_actor> super;
protected: protected:
......
...@@ -49,6 +49,7 @@ ...@@ -49,6 +49,7 @@
#include "cppa/exit_reason.hpp" #include "cppa/exit_reason.hpp"
#include "cppa/abstract_actor.hpp" #include "cppa/abstract_actor.hpp"
#include "cppa/intrusive/singly_linked_list.hpp" #include "cppa/intrusive/singly_linked_list.hpp"
#include "cppa/detail/nestable_invoke_policy.hpp"
namespace cppa { namespace detail { namespace cppa { namespace detail {
...@@ -60,6 +61,36 @@ class converted_thread_context : public abstract_actor<local_actor> ...@@ -60,6 +61,36 @@ class converted_thread_context : public abstract_actor<local_actor>
typedef abstract_actor<local_actor> super; typedef abstract_actor<local_actor> super;
// Filter policy plugged into nestable_invoke_policy: decides whether a
// message must be dropped before it is offered to a partial function.
// Declared first and befriended so it may access the enclosing actor's
// private members (m_trap_exit, m_exit_msg_pattern).
struct filter_policy;
friend struct filter_policy;

struct filter_policy
{
    // Non-owning back pointer to the actor this policy filters for.
    converted_thread_context* m_parent;

    inline filter_policy(converted_thread_context* ptr) : m_parent(ptr)
    {
    }

    // Returns true if `msg` should be dropped (i.e., not delivered to the
    // behavior). An exit message is consumed here unless the actor traps
    // exits; a non-normal exit reason terminates the actor via quit().
    inline bool operator()(any_tuple const& msg)
    {
        if ( m_parent->m_trap_exit == false
            && matches(msg, m_parent->m_exit_msg_pattern))
        {
            // m_exit_msg_pattern is pattern<atom_value, std::uint32_t>,
            // so element 1 holds the exit reason.
            auto reason = msg.get_as<std::uint32_t>(1);
            if (reason != exit_reason::normal)
            {
                // NOTE(review): quit() presumably does not return for a
                // non-normal reason (throws/unwinds) — confirm; otherwise
                // execution falls through to `return true` below.
                m_parent->quit(reason);
            }
            // Normal exits are silently discarded when not trapped.
            return true;
        }
        return false;
    }
};
public: public:
converted_thread_context(); converted_thread_context();
...@@ -75,7 +106,7 @@ class converted_thread_context : public abstract_actor<local_actor> ...@@ -75,7 +106,7 @@ class converted_thread_context : public abstract_actor<local_actor>
void dequeue(behavior& rules); //override void dequeue(behavior& rules); //override
void dequeue(partial_function& rules) ; //override void dequeue(partial_function& rules); //override
inline decltype(m_mailbox)& mailbox() inline decltype(m_mailbox)& mailbox()
{ {
...@@ -84,20 +115,11 @@ class converted_thread_context : public abstract_actor<local_actor> ...@@ -84,20 +115,11 @@ class converted_thread_context : public abstract_actor<local_actor>
private: private:
//typedef intrusive::singly_linked_list<queue_node> queue_node_buffer; // a list is safe to use in a nested receive
typedef std::unique_ptr<recursive_queue_node> queue_node_ptr;
enum throw_on_exit_result
{
not_an_exit_signal,
normal_exit_signal
};
// returns true if node->msg was accepted by rules
bool dq(mailbox_element& node, partial_function& rules);
throw_on_exit_result throw_on_exit(any_tuple const& msg);
pattern<atom_value, std::uint32_t> m_exit_msg_pattern; pattern<atom_value, std::uint32_t> m_exit_msg_pattern;
nestable_invoke_policy<filter_policy> m_invoke;
}; };
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef NESTABLE_INVOKE_POLICY_HPP
#define NESTABLE_INVOKE_POLICY_HPP
#include <list>
#include <memory>
#include "cppa/behavior.hpp"
#include "cppa/local_actor.hpp"
#include "cppa/partial_function.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
namespace cppa { namespace detail {
/**
 * @brief Shared message-invocation logic for actors that support nested
 *        receives.
 *
 * Offers queued messages to a {@link partial_function}, keeps unmatched
 * messages in an internal cache for later re-examination, and drops
 * messages rejected by @p FilterPolicy. The policy is invoked as
 * <tt>m_filter_policy(node.msg, args...)</tt> and must return @c true for
 * messages that should be discarded.
 */
template<class FilterPolicy>
class nestable_invoke_policy
{

 public:

    // Owning pointer to a single mailbox node.
    typedef std::unique_ptr<recursive_queue_node> queue_node_ptr;

    /**
     * @param parent Actor whose last_dequeued()/last_sender() slots are
     *               (temporarily) populated during an invocation attempt;
     *               references to them are stored, so @p parent must
     *               outlive this object.
     * @param args   Forwarded to the FilterPolicy constructor.
     */
    template<typename... Args>
    nestable_invoke_policy(local_actor* parent, Args&&... args)
        : m_last_dequeued(parent->last_dequeued())
        , m_last_sender(parent->last_sender())
        , m_filter_policy(std::forward<Args>(args)...)
    {
    }

    /**
     * @brief Re-offers every cached message to @p fun.
     * @returns @c true as soon as one cached message was handled
     *          (that message is removed from the cache), otherwise
     *          @c false after scanning the whole cache.
     */
    template<typename... Args>
    bool invoke_from_cache(partial_function& fun, Args... args)
    {
        auto i = m_cache.begin();
        auto e = m_cache.end();
        while (i != e)
        {
            switch (handle_message(*(*i), fun, args...))
            {
                case hm_drop_msg:
                {
                    // Filter rejected it; remove permanently.
                    i = m_cache.erase(i);
                    break;
                }
                case hm_success:
                {
                    m_cache.erase(i);
                    return true;
                }
                case hm_skip_msg:
                case hm_cache_msg:
                {
                    // Unmatched (or marked by a nested receive): keep it
                    // cached and continue with the next element.
                    ++i;
                    break;
                }
                default: exit(7); // illegal state
            }
        }
        return false;
    }

    /**
     * @brief Offers a freshly dequeued message to @p fun.
     *
     * On no-match the node's ownership is transferred from @p ptr into the
     * internal cache.
     * @returns @c true if @p fun handled the message, otherwise @c false.
     */
    template<typename... Args>
    bool invoke(queue_node_ptr& ptr, partial_function& fun, Args... args)
    {
        switch (handle_message(*ptr, fun, args...))
        {
            case hm_drop_msg:
            {
                // Rejected by the filter; ptr's destructor frees the node.
                break;
            }
            case hm_success:
            {
                return true; // done
            }
            case hm_cache_msg:
            {
                // Unmatched: retain for a later invoke_from_cache() pass.
                m_cache.push_back(std::move(ptr));
                break;
            }
            case hm_skip_msg:
            // hm_skip_msg cannot occur for a freshly dequeued node
            // (node.marked is only set while an invocation is in flight).
            default:
            {
                exit(7); // illegal state
            }
        }
        return false;
    }

 private:

    // Outcome of a single invocation attempt. NOTE(review): hm_timeout_msg
    // is declared but never returned by handle_message() below — possibly
    // reserved for a policy extension; confirm before relying on it.
    enum handle_message_result
    {
        hm_timeout_msg,
        hm_skip_msg,
        hm_drop_msg,
        hm_cache_msg,
        hm_success
    };

    // Aliases into the parent actor (see constructor).
    any_tuple& m_last_dequeued;
    actor_ptr& m_last_sender;
    FilterPolicy m_filter_policy;
    // Messages that did not match yet; owned by the cache.
    std::list<queue_node_ptr> m_cache;

    // Runs the filter and, if the message passes, offers it to fun while
    // node.msg/node.sender are swapped into the actor's last_dequeued /
    // last_sender slots. The swap pairing is order-sensitive: on no-match
    // both swaps are undone so the node is left exactly as it arrived.
    template<typename... Args>
    handle_message_result handle_message(recursive_queue_node& node,
                                         partial_function& fun,
                                         Args... args)
    {
        if (node.marked)
        {
            // Already being processed by an enclosing (nested) receive.
            return hm_skip_msg;
        }
        if (m_filter_policy(node.msg, args...))
        {
            return hm_drop_msg;
        }
        // Expose message content to the actor for the duration of the call.
        std::swap(m_last_dequeued, node.msg);
        std::swap(m_last_sender, node.sender);
        {
            // guard marks the node so nested receives skip it; released
            // only on success (see recursive_queue_node::guard).
            typename recursive_queue_node::guard qguard{&node};
            if (fun(m_last_dequeued))
            {
                // client calls erase(iter)
                qguard.release();
                m_last_dequeued.reset();
                m_last_sender.reset();
                return hm_success;
            }
        }
        // no match (restore members)
        std::swap(m_last_dequeued, node.msg);
        std::swap(m_last_sender, node.sender);
        return hm_cache_msg;
    }

};
} }
#endif // NESTABLE_INVOKE_POLICY_HPP
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
namespace cppa { namespace detail { namespace cppa { namespace detail {
struct scheduled_actor_dummy : abstract_scheduled_actor<> struct scheduled_actor_dummy : abstract_scheduled_actor
{ {
void resume(util::fiber*, scheduler::callback*); void resume(util::fiber*, scheduler::callback*);
void quit(std::uint32_t); void quit(std::uint32_t);
......
...@@ -41,11 +41,12 @@ ...@@ -41,11 +41,12 @@
#include "cppa/pattern.hpp" #include "cppa/pattern.hpp"
#include "cppa/detail/yield_interface.hpp" #include "cppa/detail/yield_interface.hpp"
#include "cppa/detail/nestable_invoke_policy.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp" #include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa { namespace detail { namespace cppa { namespace detail {
class yielding_actor : public abstract_scheduled_actor<> class yielding_actor : public abstract_scheduled_actor
{ {
typedef abstract_scheduled_actor super; typedef abstract_scheduled_actor super;
...@@ -55,44 +56,70 @@ class yielding_actor : public abstract_scheduled_actor<> ...@@ -55,44 +56,70 @@ class yielding_actor : public abstract_scheduled_actor<>
static void run(void* _this); static void run(void* _this);
void exec_loop_stack();
void yield_until_not_empty(); void yield_until_not_empty();
public: struct filter_policy;
yielding_actor(std::function<void()> fun); friend struct filter_policy;
void dequeue(behavior& bhvr); //override struct filter_policy
{
void dequeue(partial_function& fun); //override yielding_actor* m_parent;
void resume(util::fiber* from, scheduler::callback* callback); //override inline filter_policy(yielding_actor* parent) : m_parent(parent) { }
private: inline bool operator()(any_tuple const& msg)
{
return m_parent->filter_msg(msg) != ordinary_message;
}
template<typename Fun> inline bool operator()(any_tuple const& msg,
void dequeue_impl(Fun rm_fun) behavior* bhvr,
bool* timeout_occured)
{
switch (m_parent->filter_msg(msg))
{
case normal_exit_signal:
{ {
auto& mbox_cache = m_mailbox.cache(); return m_parent->m_trap_exit == false;
auto mbox_end = mbox_cache.end(); }
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun); case timeout_message:
while (iter == mbox_end)
{ {
yield_until_not_empty(); bhvr->handle_timeout();
iter = std::find_if(m_mailbox.try_fetch_more(), mbox_end, rm_fun); *timeout_occured = true;
return true;
} }
mbox_cache.erase(iter); case expired_timeout_message:
{
return true;
} }
case ordinary_message:
enum dq_result
{ {
dq_done, return false;
dq_indeterminate, }
dq_timeout_occured default: exit(7); // illegal state
}
return false;
}
}; };
auto dq(mailbox_element& node, partial_function& rules) -> dq_result; public:
yielding_actor(std::function<void()> fun);
void dequeue(behavior& bhvr); //override
void dequeue(partial_function& fun); //override
void resume(util::fiber* from, scheduler::callback* callback); //override
private:
typedef std::unique_ptr<recursive_queue_node> queue_node_ptr;
nestable_invoke_policy<filter_policy> m_invoke;
}; };
......
...@@ -40,41 +40,12 @@ ...@@ -40,41 +40,12 @@
namespace cppa { namespace intrusive { namespace cppa { namespace intrusive {
template<typename List>
struct default_list_append
{
template<typename T>
typename List::iterator operator()(List& l, T* e)
{
CPPA_REQUIRE(e != nullptr);
// temporary list to convert LIFO to FIFO order
List tmp;
// public_tail (e) has LIFO order,
// but private_head requires FIFO order
while (e)
{
// next iteration element
T* next = e->next;
// insert e to private cache (convert to LIFO order)
tmp.emplace_front(e);
e = next;
}
CPPA_REQUIRE(tmp.empty() == false);
auto result = tmp.begin();
l.splice(l.end(), tmp);
return result;
}
};
/** /**
* @brief An intrusive, thread safe queue implementation. * @brief An intrusive, thread safe queue implementation.
* @note For implementation details see * @note For implementation details see
* http://libcppa.blogspot.com/2011/04/mailbox-part-1.html * http://libcppa.blogspot.com/2011/04/mailbox-part-1.html
*/ */
template<typename T, template<typename T>
class CacheType = std::list<std::unique_ptr<T> >,
class CacheAppend = default_list_append<std::list<std::unique_ptr<T> > > >
class single_reader_queue class single_reader_queue
{ {
...@@ -85,36 +56,30 @@ class single_reader_queue ...@@ -85,36 +56,30 @@ class single_reader_queue
typedef T value_type; typedef T value_type;
typedef value_type* pointer; typedef value_type* pointer;
typedef CacheType cache_type;
typedef typename cache_type::value_type cache_value_type;
typedef typename cache_type::iterator cache_iterator;
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
cache_value_type pop() pointer pop()
{ {
wait_for_data(); wait_for_data();
cache_value_type result; return take_head();
take_head(result);
return result;
} }
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
bool try_pop(cache_value_type& result) pointer try_pop()
{ {
return take_head(result); return take_head();
} }
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
template<typename TimePoint> template<typename TimePoint>
bool try_pop(cache_value_type& result, TimePoint const& abs_time) pointer try_pop(TimePoint const& abs_time)
{ {
return (timed_wait_for_data(abs_time)) ? take_head(result) : false; return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
} }
// returns true if the queue was empty // returns true if the queue was empty
...@@ -156,8 +121,6 @@ class single_reader_queue ...@@ -156,8 +121,6 @@ class single_reader_queue
} }
} }
inline cache_type& cache() { return m_cache; }
inline bool can_fetch_more() const inline bool can_fetch_more() const
{ {
return m_stack.load() != nullptr; return m_stack.load() != nullptr;
...@@ -168,7 +131,7 @@ class single_reader_queue ...@@ -168,7 +131,7 @@ class single_reader_queue
*/ */
inline bool empty() const inline bool empty() const
{ {
return m_cache.empty() && m_stack.load() == nullptr; return !m_head && m_stack.load() == nullptr;
} }
/** /**
...@@ -179,7 +142,7 @@ class single_reader_queue ...@@ -179,7 +142,7 @@ class single_reader_queue
return !empty(); return !empty();
} }
single_reader_queue() : m_stack(nullptr) single_reader_queue() : m_stack(nullptr), m_head(nullptr)
{ {
} }
...@@ -189,37 +152,13 @@ class single_reader_queue ...@@ -189,37 +152,13 @@ class single_reader_queue
(void) fetch_new_data(); (void) fetch_new_data();
} }
cache_iterator try_fetch_more()
{
cache_iterator result = m_cache.end();
fetch_new_data(&result);
return result;
}
template<typename TimePoint>
cache_iterator try_fetch_more(TimePoint const& abs_time)
{
cache_iterator result = m_cache.end();
if (timed_wait_for_data(abs_time)) fetch_new_data(&result);
return result;
}
cache_iterator fetch_more()
{
cache_iterator result = m_cache.end();
wait_for_data();
fetch_new_data(&result);
return result;
}
private: private:
// exposed to "outside" access // exposed to "outside" access
std::atomic<pointer> m_stack; std::atomic<pointer> m_stack;
// accessed only by the owner // accessed only by the owner
cache_type m_cache; pointer m_head;
CacheAppend m_append;
// locked on enqueue/dequeue operations to/from an empty list // locked on enqueue/dequeue operations to/from an empty list
detail::mutex m_mtx; detail::mutex m_mtx;
...@@ -228,7 +167,7 @@ class single_reader_queue ...@@ -228,7 +167,7 @@ class single_reader_queue
template<typename TimePoint> template<typename TimePoint>
bool timed_wait_for_data(TimePoint const& timeout) bool timed_wait_for_data(TimePoint const& timeout)
{ {
if (m_cache.empty() && !(m_stack.load())) if (empty())
{ {
lock_type guard(m_mtx); lock_type guard(m_mtx);
while (!(m_stack.load())) while (!(m_stack.load()))
...@@ -244,7 +183,7 @@ class single_reader_queue ...@@ -244,7 +183,7 @@ class single_reader_queue
void wait_for_data() void wait_for_data()
{ {
if (m_cache.empty() && !(m_stack.load())) if (empty())
{ {
lock_type guard(m_mtx); lock_type guard(m_mtx);
while (!(m_stack.load())) m_cv.wait(guard); while (!(m_stack.load())) m_cv.wait(guard);
...@@ -252,16 +191,21 @@ class single_reader_queue ...@@ -252,16 +191,21 @@ class single_reader_queue
} }
// atomically sets m_stack to nullptr and enqueues all elements to the cache // atomically sets m_stack to nullptr and enqueues all elements to the cache
bool fetch_new_data(cache_iterator* iter = nullptr) bool fetch_new_data()
{ {
CPPA_REQUIRE(m_head == nullptr);
pointer e = m_stack.load(); pointer e = m_stack.load();
while (e) while (e)
{ {
if (m_stack.compare_exchange_weak(e, 0)) if (m_stack.compare_exchange_weak(e, 0))
{ {
auto i = m_append(m_cache, e); while (e)
if (iter) *iter = i; {
return true; auto next = e->next;
e->next = m_head;
m_head = e;
e = next;
}
} }
// next iteration // next iteration
} }
...@@ -269,15 +213,15 @@ class single_reader_queue ...@@ -269,15 +213,15 @@ class single_reader_queue
return false; return false;
} }
bool take_head(cache_value_type& result) pointer take_head()
{ {
if (!m_cache.empty() || fetch_new_data()) if (m_head != nullptr || fetch_new_data())
{ {
result = std::move(m_cache.front()); auto result = m_head;
m_cache.pop_front(); m_head = m_head->next;
return true; return result;
} }
return false; return nullptr;
} }
}; };
......
...@@ -53,17 +53,15 @@ void abstract_event_based_actor::dequeue(partial_function&) ...@@ -53,17 +53,15 @@ void abstract_event_based_actor::dequeue(partial_function&)
quit(exit_reason::unallowed_function_call); quit(exit_reason::unallowed_function_call);
} }
bool abstract_event_based_actor::handle_message(mailbox_element& node) auto abstract_event_based_actor::handle_message(mailbox_element& node) -> handle_message_result
{ {
CPPA_REQUIRE(m_loop_stack.empty() == false); CPPA_REQUIRE(m_loop_stack.empty() == false);
if (node.marked) return false;
auto& bhvr = *(m_loop_stack.back()); auto& bhvr = *(m_loop_stack.back());
switch (filter_msg(node.msg)) switch (filter_msg(node.msg))
{ {
case normal_exit_signal: case normal_exit_signal:
case expired_timeout_message: case expired_timeout_message:
node.marked = true; return drop_msg;
return false;
case timeout_message: case timeout_message:
m_has_pending_timeout_request = false; m_has_pending_timeout_request = false;
...@@ -74,7 +72,7 @@ bool abstract_event_based_actor::handle_message(mailbox_element& node) ...@@ -74,7 +72,7 @@ bool abstract_event_based_actor::handle_message(mailbox_element& node)
auto& next_bhvr = *(m_loop_stack.back()); auto& next_bhvr = *(m_loop_stack.back());
request_timeout(next_bhvr.timeout()); request_timeout(next_bhvr.timeout());
} }
return true; return msg_handled;
default: default:
break; break;
...@@ -87,47 +85,29 @@ bool abstract_event_based_actor::handle_message(mailbox_element& node) ...@@ -87,47 +85,29 @@ bool abstract_event_based_actor::handle_message(mailbox_element& node)
++m_active_timeout_id; ++m_active_timeout_id;
if ((bhvr.get_partial_function())(m_last_dequeued)) if ((bhvr.get_partial_function())(m_last_dequeued))
{ {
node.marked = true;
m_last_dequeued.reset(); m_last_dequeued.reset();
m_last_sender.reset(); m_last_sender.reset();
// we definitely don't have a pending timeout now // we definitely don't have a pending timeout now
m_has_pending_timeout_request = false; m_has_pending_timeout_request = false;
return true; return msg_handled;
} }
// no match, restore members // no match, restore members
--m_active_timeout_id; --m_active_timeout_id;
std::swap(m_last_dequeued, node.msg); std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender); std::swap(m_last_sender, node.sender);
return false; return cache_msg;
} }
void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb) void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
{ {
self.set(this); self.set(this);
auto& mbox_cache = m_mailbox.cache();
auto pos = mbox_cache.end();
try try
{ {
for (;;) for (;;)
{ {
if (m_loop_stack.empty()) std::unique_ptr<detail::recursive_queue_node> e{m_mailbox.try_pop()};
if (!e)
{ {
cleanup(exit_reason::normal);
m_state.store(abstract_scheduled_actor::done);
m_loop_stack.clear();
on_exit();
cb->exec_done();
return;
}
while (pos == mbox_cache.end())
{
// try fetch more
if (m_mailbox.can_fetch_more() == false)
{
// sweep marked elements
auto new_end = std::remove_if(mbox_cache.begin(), mbox_cache.end(),
[](detail::recursive_queue_node const& n) { return n.marked; });
mbox_cache.resize(std::distance(mbox_cache.begin(), new_end));
m_state.store(abstract_scheduled_actor::about_to_block); m_state.store(abstract_scheduled_actor::about_to_block);
CPPA_MEMORY_BARRIER(); CPPA_MEMORY_BARRIER();
if (m_mailbox.can_fetch_more() == false) if (m_mailbox.can_fetch_more() == false)
...@@ -137,8 +117,6 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb) ...@@ -137,8 +117,6 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
{ {
case abstract_scheduled_actor::ready: case abstract_scheduled_actor::ready:
{ {
// someone preempt us, set position to new end()
pos = mbox_cache.end();
break; break;
} }
case abstract_scheduled_actor::blocked: case abstract_scheduled_actor::blocked:
...@@ -149,14 +127,49 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb) ...@@ -149,14 +127,49 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
}; };
} }
} }
pos = m_mailbox.try_fetch_more(); else
{
switch (handle_message(*e))
{
case drop_msg:
{
break; // nop
}
case msg_handled:
{
// try to match cached messages
auto i = m_cache.begin();
while (i != m_cache.end() && !m_loop_stack.empty())
{
switch (handle_message(*(*i)))
{
case drop_msg:
{
i = m_cache.erase(i);
break;
}
case msg_handled:
{
m_cache.erase(i);
i = m_cache.begin();
break;
}
case cache_msg:
{
++i;
break;
}
default: exit(7); // illegal state
}
}
} }
pos = std::find_if(pos, mbox_cache.end(), case cache_msg:
[&](mailbox_element& e) { return handle_message(e); });
if (pos != mbox_cache.end())
{ {
// handled a message, scan mailbox from start again m_cache.push_back(std::move(e));
pos = mbox_cache.begin(); break;
}
default: exit(7); // illegal state
}
} }
} }
} }
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <memory> #include <memory>
#include <iostream>
#include <algorithm> #include <algorithm>
#include "cppa/self.hpp" #include "cppa/self.hpp"
...@@ -40,7 +41,7 @@ ...@@ -40,7 +41,7 @@
namespace cppa { namespace detail { namespace cppa { namespace detail {
converted_thread_context::converted_thread_context() converted_thread_context::converted_thread_context()
: m_exit_msg_pattern(atom(":Exit")) : m_exit_msg_pattern(atom(":Exit")), m_invoke(this, this)
{ {
} }
...@@ -68,93 +69,38 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg) ...@@ -68,93 +69,38 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
m_mailbox.push_back(fetch_node(sender, msg)); m_mailbox.push_back(fetch_node(sender, msg));
} }
void converted_thread_context::dequeue(partial_function& rules) /*override*/ void converted_thread_context::dequeue(partial_function& fun) // override
{ {
auto rm_fun = [&](mailbox_cache_element& node) { return dq(*node, rules); }; if (m_invoke.invoke_from_cache(fun) == false)
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
while (iter == mbox_end)
{ {
iter = std::find_if(m_mailbox.fetch_more(), mbox_end, rm_fun); queue_node_ptr e{m_mailbox.pop()};
} while (m_invoke.invoke(e, fun) == false)
mbox_cache.erase(iter);
}
void converted_thread_context::dequeue(behavior& rules) /*override*/
{
if (rules.timeout().valid())
{
auto timeout = now();
timeout += rules.timeout();
auto rm_fun = [&](mailbox_cache_element& node)
{ {
return dq(*node, rules.get_partial_function()); e.reset(m_mailbox.pop());
};
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
while (iter == mbox_end)
{
auto next = m_mailbox.try_fetch_more(timeout);
if (next == mbox_end)
{
rules.handle_timeout();
return;
} }
iter = std::find_if(next, mbox_end, rm_fun);
}
mbox_cache.erase(iter);
}
else
{
converted_thread_context::dequeue(rules.get_partial_function());
} }
} }
converted_thread_context::throw_on_exit_result void converted_thread_context::dequeue(behavior& bhvr) // override
converted_thread_context::throw_on_exit(any_tuple const& msg)
{ {
if (matches(msg, m_exit_msg_pattern)) auto& fun = bhvr.get_partial_function();
if (bhvr.timeout().valid() == false)
{ {
auto reason = msg.get_as<std::uint32_t>(1); dequeue(fun);
if (reason != exit_reason::normal) return;
{
// throws
quit(reason);
}
else
{
return normal_exit_signal;
}
}
return not_an_exit_signal;
}
bool converted_thread_context::dq(mailbox_element& node, partial_function& rules)
{
if ( m_trap_exit == false
&& throw_on_exit(node.msg) == normal_exit_signal)
{
return false;
} }
std::swap(m_last_dequeued, node.msg); if (m_invoke.invoke_from_cache(fun) == false)
std::swap(m_last_sender, node.sender);
{ {
mailbox_element::guard qguard{&node}; auto timeout = now();
if (rules(m_last_dequeued)) timeout += bhvr.timeout();
queue_node_ptr e{m_mailbox.try_pop(timeout)};
while (e)
{ {
// client calls erase(iter) if (m_invoke.invoke(e, fun)) return;
qguard.release(); else e.reset(m_mailbox.try_pop(timeout));
m_last_dequeued.reset();
m_last_sender.reset();
return true;
} }
bhvr.handle_timeout();
} }
// no match (restore members)
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
return false;
} }
} } // namespace cppa::detail } } // namespace cppa::detail
...@@ -103,7 +103,7 @@ void mailman_loop() ...@@ -103,7 +103,7 @@ void mailman_loop()
std::map<process_information, native_socket_type> peers; std::map<process_information, native_socket_type> peers;
for (;;) for (;;)
{ {
job = mqueue.pop(); job.reset(mqueue.pop());
if (job->is_send_job()) if (job->is_send_job())
{ {
mailman_send_job& sjob = job->send_job(); mailman_send_job& sjob = job->send_job();
......
...@@ -100,7 +100,7 @@ struct scheduler_helper ...@@ -100,7 +100,7 @@ struct scheduler_helper
void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{ {
typedef abstract_actor<local_actor> impl_type; typedef abstract_actor<local_actor> impl_type;
typedef impl_type::mailbox_type::cache_value_type queue_node_ptr; typedef std::unique_ptr<detail::recursive_queue_node> queue_node_ptr;
// setup & local variables // setup & local variables
self.set(m_self.get()); self.set(m_self.get());
auto& queue = m_self->mailbox(); auto& queue = m_self->mailbox();
...@@ -142,7 +142,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -142,7 +142,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{ {
if (messages.empty()) if (messages.empty())
{ {
msg_ptr = queue.pop(); msg_ptr.reset(queue.pop());
} }
else else
{ {
...@@ -167,8 +167,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -167,8 +167,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
// wait for next message or next timeout // wait for next message or next timeout
if (it != messages.end()) if (it != messages.end())
{ {
msg_ptr.reset(); msg_ptr.reset(queue.try_pop(it->first));
queue.try_pop(msg_ptr, it->first);
} }
} }
} }
......
...@@ -42,6 +42,7 @@ namespace cppa { namespace detail { ...@@ -42,6 +42,7 @@ namespace cppa { namespace detail {
yielding_actor::yielding_actor(std::function<void()> fun) yielding_actor::yielding_actor(std::function<void()> fun)
: m_fiber(&yielding_actor::run, this) : m_fiber(&yielding_actor::run, this)
, m_behavior(fun) , m_behavior(fun)
, m_invoke(this, this)
{ {
} }
...@@ -88,37 +89,46 @@ void yielding_actor::yield_until_not_empty() ...@@ -88,37 +89,46 @@ void yielding_actor::yield_until_not_empty()
void yielding_actor::dequeue(partial_function& fun) void yielding_actor::dequeue(partial_function& fun)
{ {
auto rm_fun = [&](mailbox_cache_element& node) if (m_invoke.invoke_from_cache(fun) == false)
{ {
return dq(*node, fun) == dq_done; for (;;)
}; {
dequeue_impl(rm_fun); queue_node_ptr e{m_mailbox.try_pop()};
while (!e)
{
yield_until_not_empty();
e.reset(m_mailbox.try_pop());
}
if (m_invoke.invoke(e, fun)) return;
}
}
} }
void yielding_actor::dequeue(behavior& bhvr) void yielding_actor::dequeue(behavior& bhvr)
{ {
if (bhvr.timeout().valid()) auto& fun = bhvr.get_partial_function();
if (bhvr.timeout().valid() == false)
{ {
request_timeout(bhvr.timeout()); dequeue(bhvr.get_partial_function());
auto rm_fun = [&](mailbox_cache_element& node) -> bool return;
}
if (m_invoke.invoke_from_cache(fun) == false)
{ {
switch (dq(*node, bhvr.get_partial_function())) bool timeout_occured = false;
for (;;)
{ {
case dq_timeout_occured: queue_node_ptr e{m_mailbox.try_pop()};
bhvr.handle_timeout(); while (!e)
return true; {
case dq_done: yield_until_not_empty();
return true; e.reset(m_mailbox.try_pop());
default:
return false;
}
};
dequeue_impl(rm_fun);
} }
else if ( m_invoke.invoke(e, fun, &bhvr, &timeout_occured)
|| timeout_occured)
{ {
// suppress virtual function call return;
yielding_actor::dequeue(bhvr.get_partial_function()); }
}
} }
} }
...@@ -169,57 +179,6 @@ void yielding_actor::resume(util::fiber* from, scheduler::callback* callback) ...@@ -169,57 +179,6 @@ void yielding_actor::resume(util::fiber* from, scheduler::callback* callback)
} }
} }
auto yielding_actor::dq(mailbox_element& node,
partial_function& fun) -> dq_result
{
CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
if (node.marked) return dq_indeterminate;
switch (filter_msg(node.msg))
{
case normal_exit_signal:
case expired_timeout_message:
{
// skip message
return dq_indeterminate;
}
case timeout_message:
{
// m_active_timeout_id is already invalid
m_has_pending_timeout_request = false;
return dq_timeout_occured;
}
default: break;
}
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
//m_last_dequeued = node.msg;
//m_last_sender = node.sender;
// make sure no timeout is handled incorrectly in a nested receive
++m_active_timeout_id;
// lifetime scope of qguard
{
// make sure nested receives do not process this node again
mailbox_element::guard qguard{&node};
// try to invoke given function
if (fun(m_last_dequeued))
{
// client erases node later (keep it marked until it's removed)
qguard.release();
// this members are only valid during invocation
m_last_dequeued.reset();
m_last_sender.reset();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
return dq_done;
}
}
// no match, restore members
--m_active_timeout_id;
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
return dq_indeterminate;
}
} } // namespace cppa::detail } } // namespace cppa::detail
#else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING #else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment