Commit 08de1c38 authored by neverlord

refactoring of scheduled_actor and single_reader_queue

parent e50faf50
@@ -4,7 +4,6 @@ lib_LTLIBRARIES = libcppa.la
 libcppa_la_SOURCES = \
   src/abstract_event_based_actor.cpp \
-  src/abstract_scheduled_actor.cpp \
   src/abstract_tuple.cpp \
   src/actor.cpp \
   src/actor_count.cpp \
@@ -46,6 +45,7 @@ libcppa_la_SOURCES = \
   src/receive.cpp \
   src/ripemd_160.cpp \
   src/scheduled_actor.cpp \
+  src/scheduled_actor_dummy.cpp \
   src/scheduler.cpp \
   src/self.cpp \
   src/serializer.cpp \
@@ -123,7 +123,9 @@ nobase_library_include_HEADERS = \
   cppa/detail/pseudo_tuple.hpp \
   cppa/detail/ptype_to_type.hpp \
   cppa/detail/receive_loop_helper.hpp \
+  cppa/detail/recursive_queue_node.hpp \
   cppa/detail/ref_counted_impl.hpp \
+  cppa/detail/scheduled_actor_dummy.hpp \
   cppa/detail/serialize_tuple.hpp \
   cppa/detail/singleton_manager.hpp \
   cppa/detail/swap_bytes.hpp \
......
@@ -149,7 +149,7 @@ src/fiber.cpp
 cppa/detail/yield_interface.hpp
 src/yield_interface.cpp
 cppa/detail/abstract_scheduled_actor.hpp
-src/abstract_scheduled_actor.cpp
+src/scheduled_actor_dummy.cpp
 src/invokable.cpp
 cppa/detail/thread_pool_scheduler.hpp
 src/thread_pool_scheduler.cpp
@@ -265,3 +265,5 @@ cppa/detail/value_guard.hpp
 cppa/detail/tuple_iterator.hpp
 cppa/match_expr.hpp
 cppa/detail/pseudo_tuple.hpp
+cppa/detail/recursive_queue_node.hpp
+cppa/detail/scheduled_actor_dummy.hpp
@@ -45,6 +45,9 @@
 #include "cppa/attachable.hpp"
 #include "cppa/exit_reason.hpp"
 #include "cppa/detail/thread.hpp"
+#include "cppa/detail/recursive_queue_node.hpp"
+#include "cppa/intrusive/single_reader_queue.hpp"

 namespace cppa {

@@ -53,7 +56,8 @@ namespace cppa {
  * @tparam Base Either {@link cppa::actor actor}
  *              or {@link cppa::local_actor local_actor}.
  */
-template<class Base>
+template<class Base,
+         class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node> >
 class abstract_actor : public Base
 {

@@ -62,33 +66,12 @@ class abstract_actor : public Base
  public:

-    struct queue_node
-    {
-        queue_node* next;   // intrusive next pointer
-        bool marked;        // denotes if this node is currently processed
-        actor_ptr sender;
-        any_tuple msg;
-        queue_node() : next(nullptr), marked(false) { }
-        queue_node(actor* from, any_tuple content)
-            : next(nullptr), marked(false), sender(from), msg(std::move(content))
-        {
-        }
-    };
-
-    struct queue_node_guard
-    {
-        queue_node* m_node;
-        queue_node_guard(queue_node* ptr) : m_node(ptr) { ptr->marked = true; }
-        inline void release() { m_node = nullptr; }
-        ~queue_node_guard() { if (m_node) m_node->marked = false; }
-    };
-
-    typedef intrusive::single_reader_queue<queue_node> mailbox_type;
-    typedef std::unique_ptr<queue_node> queue_node_ptr;
-    typedef typename mailbox_type::cache_type mailbox_cache_type;
-    typedef typename mailbox_cache_type::iterator queue_node_iterator;
+    typedef MailboxType mailbox_type;
+    typedef typename mailbox_type::value_type mailbox_element;
+    typedef typename mailbox_type::cache_type mailbox_cache_type;
+    typedef typename mailbox_cache_type::value_type mailbox_cache_element;

-    bool attach(attachable* ptr) /*override*/
+    bool attach(attachable* ptr) // override
     {
         if (ptr == nullptr)
         {
@@ -114,7 +97,7 @@ class abstract_actor : public Base
         }
     }

-    void detach(attachable::token const& what) /*override*/
+    void detach(attachable::token const& what) // override
     {
         attachable_ptr uptr;
         // lifetime scope of guard
@@ -133,17 +116,17 @@ class abstract_actor : public Base
         // uptr will be destroyed here, without locked mutex
     }

-    void link_to(intrusive_ptr<actor>& other) /*override*/
+    void link_to(intrusive_ptr<actor>& other) // override
     {
         (void) link_to_impl(other);
     }

-    void unlink_from(intrusive_ptr<actor>& other) /*override*/
+    void unlink_from(intrusive_ptr<actor>& other) // override
     {
         (void) unlink_from_impl(other);
     }

-    bool remove_backlink(intrusive_ptr<actor>& other) /*override*/
+    bool remove_backlink(intrusive_ptr<actor>& other) // override
     {
         if (other && other != this)
         {
@@ -158,7 +141,7 @@ class abstract_actor : public Base
         return false;
     }

-    bool establish_backlink(intrusive_ptr<actor>& other) /*override*/
+    bool establish_backlink(intrusive_ptr<actor>& other) // override
     {
         std::uint32_t reason = exit_reason::not_exited;
         if (other && other != this)
@@ -188,9 +171,9 @@ class abstract_actor : public Base
     mailbox_type m_mailbox;

     template<typename T>
-    inline queue_node* fetch_node(actor* sender, T&& msg)
+    static inline mailbox_element* fetch_node(actor* sender, T&& msg)
     {
-        return new queue_node(sender, std::forward<T>(msg));
+        return new mailbox_element(sender, std::forward<T>(msg));
     }

     template<typename... Args>
......
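Note on the hunk above: abstract_actor no longer hard-codes its own queue_node and smart-pointer types; every mailbox-related type is now derived from the MailboxType parameter. A minimal sketch of the pattern outside libcppa (toy_queue and toy_actor are illustrative names, not library code):

    #include <list>
    #include <memory>

    // Stand-in for any queue type that exports its element and cache types.
    template<typename Node>
    struct toy_queue
    {
        typedef Node value_type;
        typedef std::list<std::unique_ptr<Node> > cache_type;
    };

    template<class Queue>
    struct toy_actor
    {
        // Everything below is derived from the queue, mirroring the new typedefs:
        typedef Queue mailbox_type;
        typedef typename mailbox_type::value_type mailbox_element;
        typedef typename mailbox_type::cache_type mailbox_cache_type;
        typedef typename mailbox_cache_type::value_type mailbox_cache_element;
        mailbox_type m_mailbox;
    };

Swapping the queue (e.g. a std::vector-backed cache, as the event-based actor does below) then changes all dependent types in one place.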
@@ -44,14 +44,49 @@
 namespace cppa {

+struct vec_append
+{
+    inline std::vector<detail::recursive_queue_node>::iterator
+    operator()(std::vector<detail::recursive_queue_node>& result,
+               detail::recursive_queue_node* e) const
+    {
+        std::vector<std::unique_ptr<detail::recursive_queue_node> > tmp;
+        while (e)
+        {
+            auto next = e->next;
+            tmp.emplace_back(e);
+            e = next;
+        }
+        auto old_size = result.size();
+        for (auto i = tmp.rbegin(); i != tmp.rend(); ++i)
+        {
+            result.emplace_back(std::move(*(*i)));
+        }
+        return result.begin() + old_size;
+    }
+};
+
 /**
  * @brief Base class for all event-based actor implementations.
  */
-class abstract_event_based_actor : public detail::abstract_scheduled_actor
+class abstract_event_based_actor
+    : public detail::abstract_scheduled_actor<
+          intrusive::single_reader_queue<
+              detail::recursive_queue_node,
+              std::vector<detail::recursive_queue_node>,
+              vec_append
+          >
+      >
 {

-    typedef detail::abstract_scheduled_actor super;
-    typedef super::queue_node queue_node;
+    typedef detail::abstract_scheduled_actor<
+                intrusive::single_reader_queue<
+                    detail::recursive_queue_node,
+                    std::vector<detail::recursive_queue_node>,
+                    vec_append
+                >
+            >
+            super;

  public:

@@ -59,7 +94,7 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
     void dequeue(partial_function&); //override

-    void resume(util::fiber*, resume_callback* callback); //override
+    void resume(util::fiber*, scheduler::callback* cb); //override

     /**
      * @brief Initializes the actor by defining an initial behavior.
@@ -71,14 +106,6 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
      */
     virtual void on_exit();

-    inline abstract_event_based_actor* attach_to_scheduler(scheduler* sched)
-    {
-        CPPA_REQUIRE(sched != nullptr);
-        m_scheduler = sched;
-        init();
-        return this;
-    }
-
  protected:

     abstract_event_based_actor();
@@ -89,9 +116,6 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
     std::vector<stack_element> m_loop_stack;

-    // current position in mailbox
-    mailbox_cache_type::iterator m_mailbox_pos;
-
     // provoke compiler errors for usage of receive() and related functions

     /**
@@ -135,8 +159,7 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
  private:

-    bool handle_message(queue_node& iter);
-    bool invoke_from_cache();
+    bool handle_message(mailbox_element& iter);

 };
......
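Note on vec_append above: single_reader_queue hands the consumer a raw chain of nodes in LIFO order (the reversed public stack). vec_append first takes ownership of each heap-allocated node, then moves the payloads into the std::vector cache in FIFO order and returns an iterator to the first newly appended element. The same idea in a self-contained sketch (node and append_chain are illustrative, not library code):

    #include <memory>
    #include <vector>

    struct node
    {
        node* next;
        int payload;
        explicit node(int p) : next(nullptr), payload(p) { }
    };

    // Take ownership of a LIFO chain, then append payloads in FIFO order
    // by walking the owning temporaries in reverse.
    std::vector<int>::iterator append_chain(std::vector<int>& result, node* e)
    {
        std::vector<std::unique_ptr<node> > tmp;
        while (e)
        {
            node* next = e->next;
            tmp.emplace_back(e);    // tmp now owns (and later deletes) the node
            e = next;
        }
        auto old_size = result.size();
        for (auto i = tmp.rbegin(); i != tmp.rend(); ++i)
        {
            result.push_back((*i)->payload);
        }
        return result.begin() + old_size; // first newly appended element
    }

Moving payloads into a contiguous vector (instead of keeping a list of unique_ptrs) is what lets resume() below scan and compact the cache with std::find_if/std::remove_if.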
@@ -33,6 +33,7 @@
 #include <tuple>
 #include <cstdint>
+#include <functional>
 #include <type_traits>

 #include "cppa/on.hpp"
@@ -474,7 +475,7 @@ void demonitor(actor_ptr& whom);
  */
 inline actor_ptr spawn(scheduled_actor* what)
 {
-    return get_scheduler()->spawn(what, scheduled);
+    return get_scheduler()->spawn(what);
 }

 /**
@@ -484,9 +485,9 @@ inline actor_ptr spawn(scheduled_actor* what)
  * @returns A pointer to the spawned {@link actor Actor}.
  */
 template<scheduling_hint Hint>
-inline actor_ptr spawn(scheduled_actor* what)
+inline actor_ptr spawn(std::function<void()> what)
 {
-    return get_scheduler()->spawn(what, Hint);
+    return get_scheduler()->spawn(std::move(what), Hint);
 }

 /**
@@ -494,19 +495,43 @@ inline actor_ptr spawn(scheduled_actor* what)
  * @brief Spawns a new event-based actor.
  * @returns A pointer to the spawned {@link actor Actor}.
  */
-inline actor_ptr spawn(abstract_event_based_actor* what)
+inline actor_ptr spawn(std::function<void()> what)
 {
-    return get_scheduler()->spawn(what);
+    return get_scheduler()->spawn(std::move(what), scheduled);
 }

-/**
+template<typename T>
+struct spawn_fwd_
+{
+    static inline T&& _(T&& arg) { return std::move(arg); }
+    static inline T& _(T& arg) { return arg; }
+    static inline T const& _(T const& arg) { return arg; }
+};
+
+template<>
+struct spawn_fwd_<self_type>
+{
+    static inline actor_ptr _(self_type const&) { return self; }
+};
+
+template<typename F, typename Arg0, typename... Args>
+inline actor_ptr spawn(F&& what, Arg0&& arg0, Args&&... args)
+{
+    return spawn(std::bind(std::move(what),
+                           spawn_fwd_<typename util::rm_ref<Arg0>::type>::_(arg0),
+                           spawn_fwd_<typename util::rm_ref<Args>::type>::_(args)...));
+}
+
+/*
+/ **
  * @ingroup ActorManagement
  * @brief Spawns a new actor that executes @p what with given arguments.
  * @tparam Hint Hint to the scheduler for the best scheduling strategy.
  * @param what Function or functor that the spawned Actor should execute.
  * @param args Arguments needed to invoke @p what.
  * @returns A pointer to the spawned {@link actor actor}.
- */
+ * /
 template<scheduling_hint Hint, typename F, typename... Args>
 auto //actor_ptr
 spawn(F&& what, Args const&... args)
@@ -522,10 +547,10 @@ spawn(F&& what, Args const&... args)
     return get_scheduler()->spawn(ptr, Hint);
 }

-/**
+/ **
  * @ingroup ActorManagement
  * @brief Alias for <tt>spawn<scheduled>(what, args...)</tt>.
- */
+ * /
 template<typename F, typename... Args>
 auto // actor_ptr
 spawn(F&& what, Args const&... args)
@@ -537,6 +562,7 @@ spawn(F&& what, Args const&... args)
 {
     return spawn<scheduled>(std::forward<F>(what), args...);
 }
+*/

 #ifdef CPPA_DOCUMENTATION
......
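Note on spawn_fwd_ above: the variadic spawn(F, Args...) binds its arguments via std::bind, and the self_type specialization converts self into a plain actor_ptr at bind time. A hypothetical call site (ping and its argument list are made up for illustration, not part of the diff):

    void ping(actor_ptr buddy, int count);
    // ...
    auto worker = spawn(ping, self, 10);
    // spawn_fwd_<self_type> evaluates `self` in the spawning actor's context,
    // so the bound actor_ptr refers to the spawner. Without the conversion,
    // the bound copy of `self` would be re-evaluated on the worker thread
    // that later runs the function, yielding the wrong actor.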
@@ -31,46 +31,31 @@
 #ifndef SCHEDULED_ACTOR_HPP
 #define SCHEDULED_ACTOR_HPP

+#include <atomic>
+
+#include "cppa/any_tuple.hpp"
 #include "cppa/scheduler.hpp"
 #include "cppa/local_actor.hpp"
 #include "cppa/abstract_actor.hpp"
 #include "cppa/scheduled_actor.hpp"

 #include "cppa/util/fiber.hpp"

+#include "cppa/detail/recursive_queue_node.hpp"
+
+#include "cppa/intrusive/singly_linked_list.hpp"
 #include "cppa/intrusive/single_reader_queue.hpp"

-namespace cppa { class scheduler; }
-
 namespace cppa { namespace detail {

 // A spawned, scheduled Actor.
-class abstract_scheduled_actor : public abstract_actor<local_actor>
+template<class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node> >
+class abstract_scheduled_actor : public abstract_actor<scheduled_actor, MailboxType>
 {

-    friend class intrusive::single_reader_queue<abstract_scheduled_actor>;
-
-    abstract_scheduled_actor* next;   // intrusive next pointer
-
-    void enqueue_node(queue_node* node);
+    typedef abstract_actor<scheduled_actor, MailboxType> super;

  protected:

     std::atomic<int> m_state;
-    scheduler* m_scheduler;
-
-    typedef abstract_actor super;
-    typedef super::queue_node_guard queue_node_guard;
-    typedef super::queue_node queue_node;
-    typedef super::queue_node_ptr queue_node_ptr;
-
-    enum dq_result
-    {
-        dq_done,
-        dq_indeterminate,
-        dq_timeout_occured
-    };

     enum filter_result
     {
@@ -80,16 +65,48 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
         ordinary_message
     };

-    filter_result filter_msg(any_tuple const& msg);
-
-    auto dq(queue_node& node, partial_function& rules) -> dq_result;
+    filter_result filter_msg(any_tuple const& msg)
+    {
+        auto& arr = detail::static_types_array<atom_value, std::uint32_t>::arr;
+        if (   msg.size() == 2
+            && msg.type_at(0) == arr[0]
+            && msg.type_at(1) == arr[1])
+        {
+            auto v0 = *reinterpret_cast<const atom_value*>(msg.at(0));
+            auto v1 = *reinterpret_cast<const std::uint32_t*>(msg.at(1));
+            if (v0 == atom(":Exit"))
+            {
+                if (this->m_trap_exit == false)
+                {
+                    if (v1 != exit_reason::normal)
+                    {
+                        quit(v1);
+                    }
+                    return normal_exit_signal;
+                }
+            }
+            else if (v0 == atom(":Timeout"))
+            {
+                return (v1 == m_active_timeout_id) ? timeout_message
+                                                   : expired_timeout_message;
+            }
+        }
+        return ordinary_message;
+    }

     bool has_pending_timeout()
     {
         return m_has_pending_timeout_request;
     }

-    void request_timeout(util::duration const& d);
+    void request_timeout(util::duration const& d)
+    {
+        if (d.valid())
+        {
+            get_scheduler()->future_send(this, d, atom(":Timeout"), ++m_active_timeout_id);
+            m_has_pending_timeout_request = true;
+        }
+    }

     void reset_timeout()
     {
@@ -100,8 +117,6 @@ class abstract_scheduled_actor
         }
     }

- private:
-
     bool m_has_pending_timeout_request;
     std::uint32_t m_active_timeout_id;
@@ -112,42 +127,78 @@ class abstract_scheduled_actor
     static constexpr int blocked = 0x02;
     static constexpr int about_to_block = 0x04;

-    abstract_scheduled_actor(int state = done);
-
-    abstract_scheduled_actor(scheduler* sched);
-
-    void quit(std::uint32_t reason);
-
-    void enqueue(actor* sender, any_tuple&& msg);
-
-    void enqueue(actor* sender, any_tuple const& msg);
-
-    int compare_exchange_state(int expected, int new_value);
-
-    struct resume_callback
-    {
-        virtual ~resume_callback();
-        // called if an actor finished execution
-        virtual void exec_done() = 0;
-    };
-
-    // from = calling worker
-    virtual void resume(util::fiber* from, resume_callback* callback) = 0;
-};
+    abstract_scheduled_actor(int state = done)
+        : m_state(state)
+        , m_has_pending_timeout_request(false)
+        , m_active_timeout_id(0)
+    {
+    }
+
+    void quit(std::uint32_t reason)
+    {
+        this->cleanup(reason);
+        throw actor_exited(reason);
+    }
+
+    void enqueue(actor* sender, any_tuple&& msg)
+    {
+        enqueue_node(super::fetch_node(sender, std::move(msg)));
+    }
+
+    void enqueue(actor* sender, any_tuple const& msg)
+    {
+        enqueue_node(super::fetch_node(sender, msg));
+    }
+
+    int compare_exchange_state(int expected, int new_value)
+    {
+        int e = expected;
+        do
+        {
+            if (m_state.compare_exchange_weak(e, new_value))
+            {
+                return new_value;
+            }
+        }
+        while (e == expected);
+        return e;
+    }
+
+ private:
+
+    void enqueue_node(typename super::mailbox_element* node)
+    {
+        if (this->m_mailbox._push_back(node))
+        {
+            for (;;)
+            {
+                int state = m_state.load();
+                switch (state)
+                {
+                    case blocked:
+                    {
+                        if (m_state.compare_exchange_weak(state, ready))
+                        {
+                            CPPA_REQUIRE(this->m_scheduler != nullptr);
+                            this->m_scheduler->enqueue(this);
+                            return;
+                        }
+                        break;
+                    }
+                    case about_to_block:
+                    {
+                        if (m_state.compare_exchange_weak(state, ready))
+                        {
+                            return;
+                        }
+                        break;
+                    }
+                    default: return;
+                }
+            }
+        }
+    }

-struct scheduled_actor_dummy : abstract_scheduled_actor
-{
-    void resume(util::fiber*, resume_callback*);
-    void quit(std::uint32_t);
-    void dequeue(behavior&);
-    void dequeue(partial_function&);
-    void link_to(intrusive_ptr<actor>&);
-    void unlink_from(intrusive_ptr<actor>&);
-    bool establish_backlink(intrusive_ptr<actor>&);
-    bool remove_backlink(intrusive_ptr<actor>&);
-    void detach(attachable::token const&);
-    bool attach(attachable*);
 };

 } } // namespace cppa::detail
......
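Note on enqueue_node above: the writer may only schedule the actor on the blocked -> ready transition; about_to_block -> ready merely cancels a block that the reader has announced (by storing about_to_block and re-checking the mailbox, see resume() further down) but not yet committed. A condensed, self-contained model of the writer side (wakeup and schedule are illustrative names, not library code):

    #include <atomic>

    enum : int { ready = 0x01, blocked = 0x02, about_to_block = 0x04, done = 0x08 };

    // Writer-side wakeup: only blocked -> ready schedules the actor;
    // about_to_block -> ready just aborts a pending block.
    template<typename ScheduleFn>
    void wakeup(std::atomic<int>& state, ScheduleFn schedule)
    {
        for (;;)
        {
            int s = state.load();
            if (s == blocked)
            {
                if (state.compare_exchange_weak(s, ready)) { schedule(); return; }
            }
            else if (s == about_to_block)
            {
                if (state.compare_exchange_weak(s, ready)) return;
            }
            else return; // ready or done: nothing to do
        }
    }

The compare_exchange_weak calls may fail spuriously, which is why both paths sit inside the retry loop.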
@@ -59,8 +59,6 @@ class converted_thread_context : public abstract_actor<local_actor>
 {

     typedef abstract_actor<local_actor> super;
-    typedef super::queue_node queue_node;
-    typedef super::queue_node_ptr queue_node_ptr;

  public:
@@ -86,7 +84,7 @@ class converted_thread_context : public abstract_actor<local_actor>
  private:

-    typedef intrusive::singly_linked_list<queue_node> queue_node_buffer;
+    //typedef intrusive::singly_linked_list<queue_node> queue_node_buffer;

     enum throw_on_exit_result
     {
@@ -95,7 +93,7 @@ class converted_thread_context : public abstract_actor<local_actor>
     };

     // returns true if node->msg was accepted by rules
-    bool dq(queue_node& node, partial_function& rules);
+    bool dq(mailbox_element& node, partial_function& rules);

     throw_on_exit_result throw_on_exit(any_tuple const& msg);
......
@@ -67,9 +67,6 @@ struct mailman_add_peer
 class mailman_job
 {

-    friend class intrusive::singly_linked_list<mailman_job>;
-    friend class intrusive::single_reader_queue<mailman_job>;
-
  public:

     enum job_type
@@ -123,9 +120,10 @@ class mailman_job
         return m_type == kill_type;
     }

+    mailman_job* next;
+
  private:

-    mailman_job* next;
     job_type m_type;
     // unrestricted union
     union
......
@@ -40,13 +40,13 @@ class mock_scheduler : public scheduler

  public:

-    actor_ptr spawn(abstract_event_based_actor* what);
+    actor_ptr spawn(scheduled_actor* what);

-    actor_ptr spawn(scheduled_actor*, scheduling_hint);
+    actor_ptr spawn(std::function<void()> what, scheduling_hint);

-    static actor_ptr spawn(scheduled_actor*);
+    static actor_ptr spawn(std::function<void()> what);

-    void enqueue(detail::abstract_scheduled_actor*);
+    void enqueue(scheduled_actor* what);

 };
......
@@ -43,9 +43,6 @@ namespace cppa { namespace detail {
 class post_office_msg
 {

-    friend class intrusive::singly_linked_list<post_office_msg>;
-    friend class intrusive::single_reader_queue<post_office_msg>;
-
  public:

     enum msg_type
@@ -130,10 +127,10 @@ class post_office_msg
     ~post_office_msg();

-private:
-
     post_office_msg* next;

+ private:
+
     msg_type m_type;

     union
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef RECURSIVE_QUEUE_NODE_HPP
#define RECURSIVE_QUEUE_NODE_HPP
#include "cppa/actor.hpp"
#include "cppa/any_tuple.hpp"
namespace cppa { namespace detail {
struct recursive_queue_node
{
recursive_queue_node* next; // intrusive next pointer
bool marked; // denotes if this node is currently processed
actor_ptr sender;
any_tuple msg;
inline recursive_queue_node()
: next(nullptr)
, marked(false)
{
}
inline recursive_queue_node(actor* from, any_tuple content)
: next(nullptr)
, marked(false)
, sender(from)
, msg(std::move(content))
{
}
inline recursive_queue_node(recursive_queue_node&& other)
: next(nullptr)
, marked(false)
, sender(std::move(other.sender))
, msg(std::move(other.msg))
{
}
struct guard
{
recursive_queue_node* m_node;
inline guard(recursive_queue_node* ptr) : m_node(ptr)
{
ptr->marked = true;
}
inline void release()
{
m_node = nullptr;
}
inline ~guard()
{
if (m_node) m_node->marked = false;
}
};
};
} } // namespace cppa::detail
#endif // RECURSIVE_QUEUE_NODE_HPP
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef SCHEDULED_ACTOR_DUMMY_HPP
#define SCHEDULED_ACTOR_DUMMY_HPP
#include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa { namespace detail {
struct scheduled_actor_dummy : abstract_scheduled_actor<>
{
void resume(util::fiber*, scheduler::callback*);
void quit(std::uint32_t);
void dequeue(behavior&);
void dequeue(partial_function&);
void link_to(intrusive_ptr<actor>&);
void unlink_from(intrusive_ptr<actor>&);
bool establish_backlink(intrusive_ptr<actor>&);
bool remove_backlink(intrusive_ptr<actor>&);
void detach(attachable::token const&);
bool attach(attachable*);
};
} } // namespace cppa::detail
#endif // SCHEDULED_ACTOR_DUMMY_HPP
@@ -34,6 +34,7 @@
 #include "cppa/scheduler.hpp"
 #include "cppa/detail/thread.hpp"
 #include "cppa/util/producer_consumer_list.hpp"
+#include "cppa/detail/scheduled_actor_dummy.hpp"
 #include "cppa/detail/abstract_scheduled_actor.hpp"

 namespace cppa { namespace detail {
@@ -51,26 +52,26 @@ class thread_pool_scheduler : public scheduler

     void stop() /*override*/;

-    void enqueue(abstract_scheduled_actor* what) /*override*/;
+    void enqueue(scheduled_actor* what) /*override*/;

-    actor_ptr spawn(abstract_event_based_actor* what);
+    actor_ptr spawn(scheduled_actor* what);

-    actor_ptr spawn(scheduled_actor* behavior, scheduling_hint hint);
+    actor_ptr spawn(std::function<void()> what, scheduling_hint hint);

  private:

     //typedef util::single_reader_queue<abstract_scheduled_actor> job_queue;
-    typedef util::producer_consumer_list<abstract_scheduled_actor> job_queue;
+    typedef util::producer_consumer_list<scheduled_actor> job_queue;

     job_queue m_queue;
     scheduled_actor_dummy m_dummy;
     thread m_supervisor;

-    actor_ptr spawn_impl(abstract_scheduled_actor* what,
+    actor_ptr spawn_impl(scheduled_actor* what,
                          bool push_to_queue = true);

     static void worker_loop(worker*);
-    static void supervisor_loop(job_queue*, abstract_scheduled_actor*);
+    static void supervisor_loop(job_queue*, scheduled_actor*);

 };
......
@@ -45,15 +45,13 @@
 namespace cppa { namespace detail {

-class yielding_actor : public abstract_scheduled_actor
+class yielding_actor : public abstract_scheduled_actor<>
 {

     typedef abstract_scheduled_actor super;
-    typedef super::queue_node queue_node;
-    typedef super::queue_node_ptr queue_node_ptr;

     util::fiber m_fiber;
-    scheduled_actor* m_behavior;
+    std::function<void()> m_behavior;

     static void run(void* _this);
@@ -63,15 +61,13 @@ class yielding_actor : public abstract_scheduled_actor

  public:

-    yielding_actor(scheduled_actor* behavior, scheduler* sched);
-
-    ~yielding_actor(); //override
+    yielding_actor(std::function<void()> fun);

     void dequeue(behavior& bhvr); //override

     void dequeue(partial_function& fun); //override

-    void resume(util::fiber* from, resume_callback* callback); //override
+    void resume(util::fiber* from, scheduler::callback* callback); //override

  private:
@@ -89,6 +85,15 @@ class yielding_actor : public abstract_scheduled_actor
         mbox_cache.erase(iter);
     }

+    enum dq_result
+    {
+        dq_done,
+        dq_indeterminate,
+        dq_timeout_occured
+    };
+
+    auto dq(mailbox_element& node, partial_function& rules) -> dq_result;
+
 };

 } } // namespace cppa::detail
......
@@ -35,16 +35,46 @@
 #include <atomic>
 #include <memory>

+#include "cppa/config.hpp"
 #include "cppa/detail/thread.hpp"

 namespace cppa { namespace intrusive {

+template<typename List>
+struct default_list_append
+{
+    template<typename T>
+    typename List::iterator operator()(List& l, T* e)
+    {
+        CPPA_REQUIRE(e != nullptr);
+        // temporary list to convert LIFO to FIFO order
+        List tmp;
+        // public_tail (e) has LIFO order,
+        // but private_head requires FIFO order
+        while (e)
+        {
+            // next iteration element
+            T* next = e->next;
+            // insert e to private cache (convert to LIFO order)
+            tmp.emplace_front(e);
+            e = next;
+        }
+        CPPA_REQUIRE(tmp.empty() == false);
+        auto result = tmp.begin();
+        l.splice(l.end(), tmp);
+        return result;
+    }
+};
+
 /**
  * @brief An intrusive, thread safe queue implementation.
  * @note For implementation details see
  *       http://libcppa.blogspot.com/2011/04/mailbox-part-1.html
  */
-template<typename T>
+template<typename T,
+         class CacheType = std::list<std::unique_ptr<T> >,
+         class CacheAppend = default_list_append<std::list<std::unique_ptr<T> > > >
 class single_reader_queue
 {
@@ -52,42 +82,39 @@ class single_reader_queue

  public:

     typedef T value_type;
-    typedef size_t size_type;
-    typedef ptrdiff_t difference_type;
-    typedef value_type& reference;
-    typedef value_type const& const_reference;
-    typedef value_type* pointer;
-    typedef value_type const* const_pointer;
+    typedef value_type* pointer;

-    typedef std::unique_ptr<value_type> unique_value_ptr;
-    typedef std::list<unique_value_ptr> cache_type;
+    typedef CacheType cache_type;
+    typedef typename cache_type::value_type cache_value_type;
     typedef typename cache_type::iterator cache_iterator;

     /**
      * @warning call only from the reader (owner)
      */
-    pointer pop()
+    cache_value_type pop()
     {
         wait_for_data();
-        return take_head();
+        cache_value_type result;
+        take_head(result);
+        return result;
     }

     /**
      * @warning call only from the reader (owner)
      */
-    pointer try_pop()
+    bool try_pop(cache_value_type& result)
     {
-        return take_head();
+        return take_head(result);
     }

     /**
      * @warning call only from the reader (owner)
      */
     template<typename TimePoint>
-    pointer try_pop(TimePoint const& abs_time)
+    bool try_pop(cache_value_type& result, TimePoint const& abs_time)
     {
-        return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
+        return (timed_wait_for_data(abs_time)) ? take_head(result) : false;
     }

     // returns true if the queue was empty
@@ -192,6 +219,7 @@ class single_reader_queue

     // accessed only by the owner
     cache_type m_cache;
+    CacheAppend m_append;

     // locked on enqueue/dequeue operations to/from an empty list
     detail::mutex m_mtx;
@@ -231,22 +259,8 @@ class single_reader_queue
         {
             if (m_stack.compare_exchange_weak(e, 0))
             {
-                // temporary list to convert LIFO to FIFO order
-                cache_type tmp;
-                // public_tail (e) has LIFO order,
-                // but private_head requires FIFO order
-                while (e)
-                {
-                    // next iteration element
-                    pointer next = e->next;
-                    // insert e to private cache (convert to LIFO order)
-                    tmp.push_front(unique_value_ptr{e});
-                    //m_cache.insert(iter, unique_value_ptr{e});
-                    // next iteration
-                    e = next;
-                }
-                if (iter) *iter = tmp.begin();
-                m_cache.splice(m_cache.end(), tmp);
+                auto i = m_append(m_cache, e);
+                if (iter) *iter = i;
                 return true;
             }
             // next iteration
@@ -255,16 +269,15 @@ class single_reader_queue
         return false;
     }

-    pointer take_head()
+    bool take_head(cache_value_type& result)
    {
         if (!m_cache.empty() || fetch_new_data())
         {
-            auto result = m_cache.front().release();
+            result = std::move(m_cache.front());
             m_cache.pop_front();
-            return result;
-            //return m_cache.take_after(m_cache.before_begin());
+            return true;
         }
-        return nullptr;
+        return false;
     }

 };
......
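Note on the new queue interface above: pop() now returns the cache's value type (std::unique_ptr<T> with the default CacheType) instead of a raw pointer, and try_pop() reports success via bool plus an out-parameter. A hypothetical consumer, assuming the default cache (my_node and some_time_point are illustrative):

    single_reader_queue<my_node> q;
    auto owned = q.pop();                  // blocks; returns std::unique_ptr<my_node>
    std::unique_ptr<my_node> maybe;
    if (q.try_pop(maybe)) { /* non-blocking; maybe now owns a node */ }
    if (q.try_pop(maybe, some_time_point)) { /* timed variant */ }

The out-parameter shape is what allows a vector-backed cache (whose value_type is the node itself, not a pointer) to share the same interface.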
@@ -31,8 +31,14 @@
 #ifndef ACTOR_BEHAVIOR_HPP
 #define ACTOR_BEHAVIOR_HPP

+#include "cppa/config.hpp"
+#include "cppa/scheduler.hpp"
+#include "cppa/local_actor.hpp"
+
 namespace cppa {

+namespace util { class fiber; }
+
 /**
  * @brief A base class for context-switching or thread-mapped actor
  *        implementations.
@@ -43,12 +49,14 @@ namespace cppa {
  *        blocking functions, or need to have your own thread for other reasons,
  *        this class can be used to define a class-based actor.
  */
-class scheduled_actor
+class scheduled_actor : public local_actor
 {

  public:

-    virtual ~scheduled_actor();
+    scheduled_actor();
+
+    scheduled_actor* next; // intrusive next pointer

     /**
      * @brief Can be overridden to perform cleanup code after an actor
@@ -59,10 +67,19 @@ class scheduled_actor
     virtual void on_exit();

     /**
-     * @brief Implements the behavior of a context-switching or thread-mapped
-     *        actor.
+     * @brief Can be overridden to initialize an actor before any
+     *        message is handled.
      */
-    virtual void act() = 0;
+    virtual void init();
+
+    // called from worker thread
+    virtual void resume(util::fiber* from, scheduler::callback* cb) = 0;
+
+    scheduled_actor* attach_to_scheduler(scheduler* sched);
+
+ protected:
+
+    scheduler* m_scheduler;

 };
......
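Note on the new scheduled_actor interface above: init() runs once from attach_to_scheduler() (see src/scheduled_actor.cpp below), and resume() is the scheduler's entry point for running the actor until it blocks or finishes. A hypothetical subclass, sketching the apparent contract (my_actor is not part of the diff):

    struct my_actor : cppa::scheduled_actor
    {
        void init()
        {
            // set up initial state/behavior; invoked by attach_to_scheduler()
        }
        void resume(cppa::util::fiber*, cppa::scheduler::callback* cb)
        {
            // run until blocked; call cb->exec_done() once execution finished
        }
    };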
@@ -34,6 +34,7 @@
 #include <chrono>
 #include <memory>
 #include <cstdint>
+#include <functional>

 #include "cppa/self.hpp"
 #include "cppa/atom.hpp"
@@ -50,10 +51,6 @@ namespace cppa {
 class scheduled_actor;
 class scheduler_helper;

-class abstract_event_based_actor;
-
-namespace detail { class abstract_scheduled_actor; }
-
 /**
  * @brief
  */
@@ -70,6 +67,13 @@ class scheduler

  public:

+    struct callback
+    {
+        virtual ~callback();
+        // called if an actor finished execution during resume()
+        virtual void exec_done() = 0;
+    };
+
     virtual ~scheduler();

     /**
@@ -82,19 +86,19 @@
      */
     virtual void stop();

-    virtual void enqueue(detail::abstract_scheduled_actor*) = 0;
+    virtual void enqueue(scheduled_actor*) = 0;

     /**
      * @brief Spawns a new actor that executes <code>behavior->act()</code>
      *        with the scheduling policy @p hint if possible.
      */
-    virtual actor_ptr spawn(scheduled_actor* behavior,
+    virtual actor_ptr spawn(std::function<void()> behavior,
                             scheduling_hint hint) = 0;

     /**
      * @brief Spawns a new event-based actor.
      */
-    virtual actor_ptr spawn(abstract_event_based_actor* what) = 0;
+    virtual actor_ptr spawn(scheduled_actor* what) = 0;

     /**
      * @brief Informs the scheduler about a converted context
......
@@ -38,8 +38,7 @@
 namespace cppa {

 abstract_event_based_actor::abstract_event_based_actor()
-    : super(abstract_event_based_actor::blocked)
-    , m_mailbox_pos(m_mailbox.cache().end())
+    : super(super::blocked)
 {
     //m_mailbox_pos = m_mailbox.cache().end();
 }
@@ -54,58 +53,59 @@ void abstract_event_based_actor::dequeue(partial_function&)
     quit(exit_reason::unallowed_function_call);
 }

-bool abstract_event_based_actor::handle_message(queue_node& node)
+bool abstract_event_based_actor::handle_message(mailbox_element& node)
 {
     CPPA_REQUIRE(m_loop_stack.empty() == false);
+    if (node.marked) return false;
     auto& bhvr = *(m_loop_stack.back());
-    if (bhvr.timeout().valid())
+    switch (filter_msg(node.msg))
     {
-        switch (dq(node, bhvr.get_partial_function()))
-        {
-            case dq_timeout_occured:
-            {
-                bhvr.handle_timeout();
-                // fall through
-            }
-            case dq_done:
-            {
-                // callback might have called become()/unbecome()
-                // request next timeout if needed
-                if (!m_loop_stack.empty())
-                {
-                    auto& next_bhvr = *(m_loop_stack.back());
-                    request_timeout(next_bhvr.timeout());
-                }
-                return true;
-            }
-            default: return false;
-        }
-    }
-    else
-    {
-        return dq(node, bhvr.get_partial_function()) == dq_done;
-    }
-}
+        case normal_exit_signal:
+        case expired_timeout_message:
+            node.marked = true;
+            return false;

-bool abstract_event_based_actor::invoke_from_cache()
-{
-    for (auto i = m_mailbox_pos; i != m_mailbox.cache().end(); ++i)
+        case timeout_message:
+            m_has_pending_timeout_request = false;
+            CPPA_REQUIRE(bhvr.timeout().valid());
+            bhvr.handle_timeout();
+            if (!m_loop_stack.empty())
+            {
+                auto& next_bhvr = *(m_loop_stack.back());
+                request_timeout(next_bhvr.timeout());
+            }
+            return true;
+
+        default:
+            break;
+    }
+    std::swap(m_last_dequeued, node.msg);
+    std::swap(m_last_sender, node.sender);
+    //m_last_dequeued = node.msg;
+    //m_last_sender = node.sender;
+    // make sure no timeout is handled incorrectly in a nested receive
+    ++m_active_timeout_id;
+    if ((bhvr.get_partial_function())(m_last_dequeued))
     {
-        auto& ptr = *i;
-        CPPA_REQUIRE(ptr.get() != nullptr);
-        if (handle_message(*ptr))
-        {
-            m_mailbox.cache().erase(i);
-            return true;
-        }
+        node.marked = true;
+        m_last_dequeued.reset();
+        m_last_sender.reset();
+        // we definitely don't have a pending timeout now
+        m_has_pending_timeout_request = false;
+        return true;
     }
+    // no match, restore members
+    --m_active_timeout_id;
+    std::swap(m_last_dequeued, node.msg);
+    std::swap(m_last_sender, node.sender);
     return false;
 }

-void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
+void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
 {
     self.set(this);
     auto& mbox_cache = m_mailbox.cache();
+    auto pos = mbox_cache.end();
     try
     {
         for (;;)
@@ -116,14 +116,18 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
             m_state.store(abstract_scheduled_actor::done);
             m_loop_stack.clear();
             on_exit();
-            callback->exec_done();
+            cb->exec_done();
             return;
         }
-        while (m_mailbox_pos == mbox_cache.end())
+        while (pos == mbox_cache.end())
         {
             // try fetch more
             if (m_mailbox.can_fetch_more() == false)
             {
+                // sweep marked elements
+                auto new_end = std::remove_if(mbox_cache.begin(), mbox_cache.end(),
+                                              [](detail::recursive_queue_node const& n) { return n.marked; });
+                mbox_cache.resize(std::distance(mbox_cache.begin(), new_end));
                 m_state.store(abstract_scheduled_actor::about_to_block);
                 CPPA_MEMORY_BARRIER();
                 if (m_mailbox.can_fetch_more() == false)
@@ -133,22 +137,27 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
                     {
                         case abstract_scheduled_actor::ready:
                         {
-                            // someone preempt us
+                            // someone preempt us, set position to new end()
+                            pos = mbox_cache.end();
                             break;
                         }
                         case abstract_scheduled_actor::blocked:
                        {
-                            // done
                             return;
                         }
                         default: exit(7); // illegal state
                     };
                 }
             }
-            m_mailbox_pos = m_mailbox.try_fetch_more();
+            pos = m_mailbox.try_fetch_more();
+        }
+        pos = std::find_if(pos, mbox_cache.end(),
+                           [&](mailbox_element& e) { return handle_message(e); });
+        if (pos != mbox_cache.end())
+        {
+            // handled a message, scan mailbox from start again
+            pos = mbox_cache.begin();
+        }
-        m_mailbox_pos = (invoke_from_cache()) ? mbox_cache.begin()
-                                              : mbox_cache.end();
         }
     }
     catch (actor_exited& what)
@@ -162,7 +171,7 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
     m_state.store(abstract_scheduled_actor::done);
     m_loop_stack.clear();
     on_exit();
-    callback->exec_done();
+    cb->exec_done();
 }

 void abstract_event_based_actor::on_exit()
......
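Note on the resume() rewrite above: handle_message() now marks nodes it consumed or filtered out instead of erasing them on the spot, and the sweep before blocking compacts the vector cache. The diff's resize(distance(...)) is the erase-remove idiom; an isolated sketch (elem and sweep are illustrative, not library code):

    #include <algorithm>
    #include <vector>

    struct elem { bool marked; /* payload ... */ };

    // Drop every marked element, preserving the relative order of the rest.
    void sweep(std::vector<elem>& cache)
    {
        auto new_end = std::remove_if(cache.begin(), cache.end(),
                                      [](elem const& e) { return e.marked; });
        cache.erase(new_end, cache.end()); // same effect as resize(distance(...))
    }

Deferring removal this way keeps iterators stable while nested receives are still scanning the cache.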
@@ -70,7 +70,7 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)

 void converted_thread_context::dequeue(partial_function& rules)  /*override*/
 {
-    auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, rules); };
+    auto rm_fun = [&](mailbox_cache_element& node) { return dq(*node, rules); };
     auto& mbox_cache = m_mailbox.cache();
     auto mbox_end = mbox_cache.end();
     auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
@@ -87,7 +87,7 @@ void converted_thread_context::dequeue(behavior& rules) /*override*/
 {
     auto timeout = now();
     timeout += rules.timeout();
-    auto rm_fun = [&](queue_node_ptr& node)
+    auto rm_fun = [&](mailbox_cache_element& node)
     {
         return dq(*node, rules.get_partial_function());
     };
@@ -131,7 +131,7 @@ converted_thread_context::throw_on_exit(any_tuple const& msg)
     return not_an_exit_signal;
 }

-bool converted_thread_context::dq(queue_node& node, partial_function& rules)
+bool converted_thread_context::dq(mailbox_element& node, partial_function& rules)
 {
     if (   m_trap_exit == false
         && throw_on_exit(node.msg) == normal_exit_signal)
@@ -141,7 +141,7 @@ bool converted_thread_context::dq(mailbox_element& node, partial_function& rules)
     std::swap(m_last_dequeued, node.msg);
     std::swap(m_last_sender, node.sender);
     {
-        queue_node_guard qguard{&node};
+        mailbox_element::guard qguard{&node};
         if (rules(m_last_dequeued))
         {
             // client calls erase(iter)
......
@@ -103,7 +103,7 @@ void mailman_loop()
     std::map<process_information, native_socket_type> peers;
     for (;;)
     {
-        job.reset(mqueue.pop());
+        job = mqueue.pop();
         if (job->is_send_job())
         {
             mailman_send_job& sjob = job->send_job();
......
@@ -54,18 +54,12 @@ using std::endl;
 namespace {

 void run_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
-               cppa::scheduled_actor* behavior)
+               std::function<void()> what)
 {
     cppa::self.set(m_self.get());
-    if (behavior)
-    {
-        try { behavior->act(); }
-        catch (...) { }
-        try { behavior->on_exit(); }
-        catch (...) { }
-        delete behavior;
-        cppa::self.set(nullptr);
-    }
+    try { what(); }
+    catch (...) { }
+    cppa::self.set(nullptr);
     cppa::detail::dec_actor_count();
 }
@@ -73,30 +67,30 @@ void run_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
 namespace cppa { namespace detail {

-actor_ptr mock_scheduler::spawn(scheduled_actor* behavior)
+actor_ptr mock_scheduler::spawn(std::function<void()> what)
 {
     inc_actor_count();
     CPPA_MEMORY_BARRIER();
     intrusive_ptr<local_actor> ctx(new detail::converted_thread_context);
-    thread(run_actor, ctx, behavior).detach();
+    thread(run_actor, ctx, std::move(what)).detach();
     return ctx;
 }

-actor_ptr mock_scheduler::spawn(abstract_event_based_actor* what)
+actor_ptr mock_scheduler::spawn(scheduled_actor*)
 {
-    // TODO: don't delete what :)
-    delete what;
+    cerr << "mock_scheduler::spawn(scheduled_actor*)" << endl;
+    abort();
     return nullptr;
 }

-actor_ptr mock_scheduler::spawn(scheduled_actor* behavior, scheduling_hint)
+actor_ptr mock_scheduler::spawn(std::function<void()> what, scheduling_hint)
 {
-    return spawn(behavior);
+    return spawn(std::move(what));
 }

-void mock_scheduler::enqueue(detail::abstract_scheduled_actor*)
+void mock_scheduler::enqueue(scheduled_actor*)
 {
-    cerr << "mock_scheduler::enqueue" << endl;
+    cerr << "mock_scheduler::enqueue(scheduled_actor)" << endl;
     abort();
 }
......
@@ -32,7 +32,7 @@
 namespace cppa {

-scheduled_actor::~scheduled_actor()
+scheduled_actor::scheduled_actor() : next(nullptr), m_scheduler(nullptr)
 {
 }
@@ -40,4 +40,17 @@ void scheduled_actor::on_exit()
 {
 }

+void scheduled_actor::init()
+{
+}
+
+scheduled_actor* scheduled_actor::attach_to_scheduler(scheduler* sched)
+{
+    CPPA_REQUIRE(sched != nullptr);
+    m_scheduler = sched;
+    init();
+    return this;
+}
+
 } // namespace cppa
@@ -28,202 +28,11 @@
 \******************************************************************************/

-#include "cppa/cppa.hpp"
-#include "cppa/config.hpp"
-#include "cppa/to_string.hpp"
-#include "cppa/exception.hpp"
-#include "cppa/scheduler.hpp"
-#include "cppa/detail/types_array.hpp"
-#include "cppa/detail/yield_interface.hpp"
-#include "cppa/detail/abstract_scheduled_actor.hpp"
+#include "cppa/detail/scheduled_actor_dummy.hpp"

 namespace cppa { namespace detail {

-namespace {
-void dummy_enqueue(void*, abstract_scheduled_actor*) { }
-types_array<atom_value, std::uint32_t> t_atom_ui32_types;
-}
-
-abstract_scheduled_actor::abstract_scheduled_actor(scheduler* sched)
-    : next(nullptr)
-    , m_state(ready)
-    , m_scheduler(sched)
-    , m_has_pending_timeout_request(false)
-    , m_active_timeout_id(0)
-{
-    CPPA_REQUIRE(sched != nullptr);
-}
-
-abstract_scheduled_actor::abstract_scheduled_actor(int state)
-    : next(nullptr)
-    , m_state(state)
-    , m_scheduler(nullptr)
-    , m_has_pending_timeout_request(false)
-    , m_active_timeout_id(0)
-{
-}
-
-abstract_scheduled_actor::resume_callback::~resume_callback()
-{
-}
-
-void abstract_scheduled_actor::quit(std::uint32_t reason)
-{
-    cleanup(reason);
-    throw actor_exited(reason);
-}
-
-void abstract_scheduled_actor::enqueue_node(queue_node* node)
-{
-    if (m_mailbox._push_back(node))
-    {
-        for (;;)
-        {
-            int state = m_state.load();
-            switch (state)
-            {
-                case blocked:
-                {
-                    if (m_state.compare_exchange_weak(state, ready))
-                    {
-                        CPPA_REQUIRE(m_scheduler != nullptr);
-                        m_scheduler->enqueue(this);
-                        return;
-                    }
-                    break;
-                }
-                case about_to_block:
-                {
-                    if (m_state.compare_exchange_weak(state, ready))
-                    {
-                        return;
-                    }
-                    break;
-                }
-                default: return;
-            }
-        }
-    }
-}
-
-void abstract_scheduled_actor::enqueue(actor* sender, any_tuple&& msg)
-{
-    enqueue_node(fetch_node(sender, std::move(msg)));
-    //enqueue_node(new queue_node(sender, std::move(msg)));
-}
-
-void abstract_scheduled_actor::enqueue(actor* sender, any_tuple const& msg)
-{
-    enqueue_node(fetch_node(sender, msg));
-    //enqueue_node(new queue_node(sender, msg));
-}
-
-int abstract_scheduled_actor::compare_exchange_state(int expected,
-                                                     int new_value)
-{
-    int e = expected;
-    do
-    {
-        if (m_state.compare_exchange_weak(e, new_value))
-        {
-            return new_value;
-        }
-    }
-    while (e == expected);
-    return e;
-}
-
-void abstract_scheduled_actor::request_timeout(util::duration const& d)
-{
-    if (d.valid())
-    {
-        future_send(this, d, atom(":Timeout"), ++m_active_timeout_id);
-        m_has_pending_timeout_request = true;
-    }
-}
-
-auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result
-{
-    if (   msg.size() == 2
-        && msg.type_at(0) == t_atom_ui32_types[0]
-        && msg.type_at(1) == t_atom_ui32_types[1])
-    {
-        auto v0 = *reinterpret_cast<const atom_value*>(msg.at(0));
-        auto v1 = *reinterpret_cast<const std::uint32_t*>(msg.at(1));
-        if (v0 == atom(":Exit"))
-        {
-            if (m_trap_exit == false)
-            {
-                if (v1 != exit_reason::normal)
-                {
-                    quit(v1);
-                }
-                return normal_exit_signal;
-            }
-        }
-        else if (v0 == atom(":Timeout"))
-        {
-            return (v1 == m_active_timeout_id) ? timeout_message
-                                               : expired_timeout_message;
-        }
-    }
-    return ordinary_message;
-}
-
-auto abstract_scheduled_actor::dq(queue_node& node,
-                                  partial_function& fun) -> dq_result
-{
-    CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
-    if (node.marked) return dq_indeterminate;
-    switch (filter_msg(node.msg))
-    {
-        case normal_exit_signal:
-        case expired_timeout_message:
-        {
-            // skip message
-            return dq_indeterminate;
-        }
-        case timeout_message:
-        {
-            // m_active_timeout_id is already invalid
-            m_has_pending_timeout_request = false;
-            return dq_timeout_occured;
-        }
-        default: break;
-    }
-    std::swap(m_last_dequeued, node.msg);
-    std::swap(m_last_sender, node.sender);
-    //m_last_dequeued = node.msg;
-    //m_last_sender = node.sender;
-    // make sure no timeout is handled incorrectly in a nested receive
-    ++m_active_timeout_id;
-    // lifetime scope of qguard
-    {
-        // make sure nested receives do not process this node again
-        queue_node_guard qguard{&node};
-        // try to invoke given function
-        if (fun(m_last_dequeued))
-        {
-            // client erases node later (keep it marked until it's removed)
-            qguard.release();
-            // this members are only valid during invocation
-            m_last_dequeued.reset();
-            m_last_sender.reset();
-            // we definitely don't have a pending timeout now
-            m_has_pending_timeout_request = false;
-            return dq_done;
-        }
-    }
-    // no match, restore members
-    --m_active_timeout_id;
-    std::swap(m_last_dequeued, node.msg);
-    std::swap(m_last_sender, node.sender);
-    return dq_indeterminate;
-}
-
-// dummy
-void scheduled_actor_dummy::resume(util::fiber*, resume_callback*)
+void scheduled_actor_dummy::resume(util::fiber*, scheduler::callback*)
 {
 }
@@ -99,7 +99,8 @@ struct scheduler_helper

 void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
 {
-    typedef abstract_actor<local_actor>::queue_node_ptr queue_node_ptr;
+    typedef abstract_actor<local_actor> impl_type;
+    typedef impl_type::mailbox_type::cache_value_type queue_node_ptr;
     // setup & local variables
     self.set(m_self.get());
     auto& queue = m_self->mailbox();
@@ -141,7 +142,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
     {
         if (messages.empty())
         {
-            msg_ptr.reset(queue.pop());
+            msg_ptr = queue.pop();
         }
         else
         {
@@ -150,8 +151,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
             auto it = messages.begin();
             while (it != messages.end() && (it->first) <= now)
             {
-                abstract_actor<local_actor>::queue_node_ptr ptr(std::move(it->second));
-                //auto ptr = it->second;
+                queue_node_ptr ptr{std::move(it->second)};
                 auto whom = const_cast<actor_ptr*>(
                                 reinterpret_cast<actor_ptr const*>(
                                     ptr->msg.at(1)));
@@ -163,17 +163,16 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
                 }
                 messages.erase(it);
                 it = messages.begin();
-                //delete ptr;
             }
             // wait for next message or next timeout
             if (it != messages.end())
             {
-                msg_ptr.reset(queue.try_pop(it->first));
+                msg_ptr.reset();
+                queue.try_pop(msg_ptr, it->first);
             }
         }
     }
     handle_msg(msg_ptr->msg);
-    //delete msg_ptr;
 }
 }
@@ -243,4 +242,6 @@ scheduler* get_scheduler()
     return result;
 }

+scheduler::callback::~callback() { }
+
 } // namespace cppa
@@ -56,7 +56,7 @@ typedef intrusive::single_reader_queue<thread_pool_scheduler::worker> worker_queue;

 struct thread_pool_scheduler::worker
 {
-    typedef abstract_scheduled_actor* job_ptr;
+    typedef scheduled_actor* job_ptr;

     job_queue* m_job_queue;
     job_ptr m_dummy;
@@ -134,11 +134,10 @@ struct thread_pool_scheduler::worker
     void operator()()
     {
         util::fiber fself;
-        struct handler : abstract_scheduled_actor::resume_callback
+        struct handler : scheduler::callback
         {
-            abstract_scheduled_actor* job;
+            scheduled_actor* job;
             handler() : job(nullptr) { }
-            bool still_ready() { return true; }
             void exec_done()
             {
                 if (!job->deref()) delete job;
@@ -179,7 +178,7 @@ void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w)
 }

 void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
-                                            abstract_scheduled_actor* dummy)
+                                            scheduled_actor* dummy)
 {
     std::vector<worker_ptr> workers;
     size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 8);
@@ -210,46 +209,48 @@ void thread_pool_scheduler::stop()
     super::stop();
 }

-void thread_pool_scheduler::enqueue(abstract_scheduled_actor* what)
+void thread_pool_scheduler::enqueue(scheduled_actor* what)
 {
     m_queue.push_back(what);
 }

-actor_ptr thread_pool_scheduler::spawn_impl(abstract_scheduled_actor* what,
+actor_ptr thread_pool_scheduler::spawn_impl(scheduled_actor* what,
                                             bool push_to_queue)
 {
     inc_actor_count();
     CPPA_MEMORY_BARRIER();
-    intrusive_ptr<abstract_scheduled_actor> ctx(what);
+    intrusive_ptr<scheduled_actor> ctx(what);
     ctx->ref();
     if (push_to_queue) m_queue.push_back(ctx.get());
     return std::move(ctx);
 }

-actor_ptr thread_pool_scheduler::spawn(abstract_event_based_actor* what)
+actor_ptr thread_pool_scheduler::spawn(scheduled_actor* what)
 {
     // do NOT push event-based actors to the queue on startup
     return spawn_impl(what->attach_to_scheduler(this), false);
 }

 #ifndef CPPA_DISABLE_CONTEXT_SWITCHING
-actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr,
+actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
                                        scheduling_hint hint)
 {
     if (hint == detached)
     {
-        return mock_scheduler::spawn(bhvr);
+        return mock_scheduler::spawn(std::move(what));
     }
     else
     {
-        return spawn_impl(new yielding_actor(bhvr, this));
+        auto new_actor = new yielding_actor(std::move(what));
+        return spawn_impl(new_actor->attach_to_scheduler(this));
     }
 }
 #else
-actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr, scheduling_hint)
+actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
+                                       scheduling_hint)
 {
-    return mock_scheduler::spawn(bhvr);
+    return mock_scheduler::spawn(what);
 }
 #endif
......
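With these signatures, a context-switching actor is just a void() callable: spawn wraps it in a yielding_actor, or hands it to mock_scheduler when the detached hint is given. A hypothetical caller-side sketch; the `scheduled` hint name comes from libcppa's scheduling_hint enum rather than from this hunk:

#include <functional>

// Hypothetical usage of the refactored overload
//   actor_ptr thread_pool_scheduler::spawn(std::function<void()>, scheduling_hint)
void spawn_examples(thread_pool_scheduler& sched)
{
    std::function<void()> body = []
    {
        // runs inside a fiber; may block in receive() without
        // blocking its worker thread
    };
    auto fiber_actor    = sched.spawn(body, scheduled); // -> yielding_actor
    auto detached_actor = sched.spawn(body, detached);  // -> own thread
}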
...
@@ -39,40 +39,30 @@
 namespace cppa { namespace detail {
-yielding_actor::yielding_actor(scheduled_actor* behavior, scheduler* sched)
-    : super(sched)
-    , m_fiber(&yielding_actor::run, this)
-    , m_behavior(behavior)
+yielding_actor::yielding_actor(std::function<void()> fun)
+    : m_fiber(&yielding_actor::run, this)
+    , m_behavior(fun)
 {
 }
-yielding_actor::~yielding_actor()
-{
-    delete m_behavior;
-}
 void yielding_actor::run(void* ptr_arg)
 {
     auto this_ptr = reinterpret_cast<yielding_actor*>(ptr_arg);
-    auto behavior_ptr = this_ptr->m_behavior;
-    if (behavior_ptr)
+    CPPA_REQUIRE(static_cast<bool>(this_ptr->m_behavior));
+    bool cleanup_called = false;
+    try { this_ptr->m_behavior(); }
+    catch (actor_exited&)
     {
-        bool cleanup_called = false;
-        try { behavior_ptr->act(); }
-        catch (actor_exited&)
-        {
-            // cleanup already called by scheduled_actor::quit
-            cleanup_called = true;
-        }
-        catch (...)
-        {
-            this_ptr->cleanup(exit_reason::unhandled_exception);
-            cleanup_called = true;
-        }
-        if (!cleanup_called) this_ptr->cleanup(exit_reason::normal);
-        try { behavior_ptr->on_exit(); }
-        catch (...) { }
+        // cleanup already called by scheduled_actor::quit
+        cleanup_called = true;
+    }
+    catch (...)
+    {
+        this_ptr->cleanup(exit_reason::unhandled_exception);
+        cleanup_called = true;
     }
+    if (!cleanup_called) this_ptr->cleanup(exit_reason::normal);
+    this_ptr->on_exit();
     yield(yield_state::done);
 }
@@ -98,7 +88,10 @@ void yielding_actor::yield_until_not_empty()
 void yielding_actor::dequeue(partial_function& fun)
 {
-    auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, fun) == dq_done; };
+    auto rm_fun = [&](mailbox_cache_element& node)
+    {
+        return dq(*node, fun) == dq_done;
+    };
     dequeue_impl(rm_fun);
 }
@@ -107,7 +100,7 @@ void yielding_actor::dequeue(behavior& bhvr)
     if (bhvr.timeout().valid())
     {
         request_timeout(bhvr.timeout());
-        auto rm_fun = [&](queue_node_ptr& node) -> bool
+        auto rm_fun = [&](mailbox_cache_element& node) -> bool
         {
             switch (dq(*node, bhvr.get_partial_function()))
             {
@@ -129,7 +122,7 @@ void yielding_actor::dequeue(behavior& bhvr)
         }
     }
-void yielding_actor::resume(util::fiber* from, resume_callback* callback)
+void yielding_actor::resume(util::fiber* from, scheduler::callback* callback)
 {
     self.set(this);
     for (;;)
@@ -176,6 +169,57 @@ void yielding_actor::resume(util::fiber* from, resume_callback* callback)
     }
 }
+auto yielding_actor::dq(mailbox_element& node,
+                        partial_function& fun) -> dq_result
+{
+    CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
+    if (node.marked) return dq_indeterminate;
+    switch (filter_msg(node.msg))
+    {
+        case normal_exit_signal:
+        case expired_timeout_message:
+        {
+            // skip message
+            return dq_indeterminate;
+        }
+        case timeout_message:
+        {
+            // m_active_timeout_id is already invalid
+            m_has_pending_timeout_request = false;
+            return dq_timeout_occured;
+        }
+        default: break;
+    }
+    std::swap(m_last_dequeued, node.msg);
+    std::swap(m_last_sender, node.sender);
+    //m_last_dequeued = node.msg;
+    //m_last_sender = node.sender;
+    // make sure no timeout is handled incorrectly in a nested receive
+    ++m_active_timeout_id;
+    // lifetime scope of qguard
+    {
+        // make sure nested receives do not process this node again
+        mailbox_element::guard qguard{&node};
+        // try to invoke given function
+        if (fun(m_last_dequeued))
+        {
+            // client erases node later (keep it marked until it's removed)
+            qguard.release();
+            // these members are only valid during invocation
+            m_last_dequeued.reset();
+            m_last_sender.reset();
+            // we definitely don't have a pending timeout now
+            m_has_pending_timeout_request = false;
+            return dq_done;
+        }
+    }
+    // no match, restore members
+    --m_active_timeout_id;
+    std::swap(m_last_dequeued, node.msg);
+    std::swap(m_last_sender, node.sender);
+    return dq_indeterminate;
+}
 } } // namespace cppa::detail
 #else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING
...
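The mailbox_element::guard used by dq above implements a small mark/unmark protocol: mark the node before invoking the handler so a nested receive skips it, and unmark on scope exit unless release() was called, since a matched node stays marked until its owner erases it. Only the marked flag is visible in this diff; the guard body below is a reconstruction from those call sites, not the real header:

// Standalone sketch of the assumed RAII guard around mailbox nodes.
struct mailbox_element
{
    bool marked = false;

    struct guard
    {
        mailbox_element* m_node;
        explicit guard(mailbox_element* node) : m_node(node)
        {
            node->marked = true; // nested receives now skip this node
        }
        // keep the node marked; the caller erases it from the cache later
        void release() { m_node = nullptr; }
        ~guard() { if (m_node != nullptr) m_node->marked = false; }
    };
};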
...
@@ -195,7 +195,7 @@ struct chopstick : public fsm_actor<chopstick>
 };
-class testee_actor : public scheduled_actor
+class testee_actor
 {
     void wait4string()
@@ -235,7 +235,7 @@ class testee_actor : public scheduled_actor
  public:
-    void act()
+    void operator()()
     {
         receive_loop
         (
@@ -295,11 +295,10 @@ void testee3(actor_ptr parent)
 }
 template<class Testee>
-std::string behavior_test()
+std::string behavior_test(actor_ptr et)
 {
     std::string result;
     std::string testee_name = detail::to_uniform_name(typeid(Testee));
-    auto et = spawn(new Testee);
     send(et, 1);
     send(et, 2);
     send(et, 3);
@@ -368,8 +367,8 @@ size_t test__spawn()
     await_all_others_done();
     CPPA_IF_VERBOSE(cout << "ok" << endl);
-    CPPA_CHECK_EQUAL(behavior_test<testee_actor>(), "wait4int");
-    CPPA_CHECK_EQUAL(behavior_test<event_testee>(), "wait4int");
+    CPPA_CHECK_EQUAL(behavior_test<testee_actor>(spawn(testee_actor{})), "wait4int");
+    CPPA_CHECK_EQUAL(behavior_test<event_testee>(spawn(new event_testee)), "wait4int");
     // create 20,000 actors linked to one single actor
     // and kill them all through killing the link
...
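The two updated checks exercise both spawn paths from the scheduler hunk above: a copyable functor such as testee_actor{} ends up in the std::function<void()> overload and thus in a yielding_actor, while `new event_testee` takes the scheduled_actor* overload that skips the initial enqueue. The resolution sketched below is an inference from the signatures in this commit:

// Assumed overload resolution for the two checks (sketch, not from the sources):
void spawn_paths()
{
    auto a1 = spawn(testee_actor{});   // functor -> std::function<void()> overload
    auto a2 = spawn(new event_testee); // scheduled_actor* -> spawn_impl(..., false)
    behavior_test<testee_actor>(a1);   // expected to return "wait4int"
    behavior_test<event_testee>(a2);   // expected to return "wait4int"
}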