Commit 08de1c38 authored by neverlord

refactoring of scheduled_actor and single_reader_queue

parent e50faf50
......@@ -4,7 +4,6 @@ lib_LTLIBRARIES = libcppa.la
libcppa_la_SOURCES = \
src/abstract_event_based_actor.cpp \
src/abstract_scheduled_actor.cpp \
src/abstract_tuple.cpp \
src/actor.cpp \
src/actor_count.cpp \
......@@ -46,6 +45,7 @@ libcppa_la_SOURCES = \
src/receive.cpp \
src/ripemd_160.cpp \
src/scheduled_actor.cpp \
src/scheduled_actor_dummy.cpp \
src/scheduler.cpp \
src/self.cpp \
src/serializer.cpp \
......@@ -123,7 +123,9 @@ nobase_library_include_HEADERS = \
cppa/detail/pseudo_tuple.hpp \
cppa/detail/ptype_to_type.hpp \
cppa/detail/receive_loop_helper.hpp \
cppa/detail/recursive_queue_node.hpp \
cppa/detail/ref_counted_impl.hpp \
cppa/detail/scheduled_actor_dummy.hpp \
cppa/detail/serialize_tuple.hpp \
cppa/detail/singleton_manager.hpp \
cppa/detail/swap_bytes.hpp \
......
......@@ -149,7 +149,7 @@ src/fiber.cpp
cppa/detail/yield_interface.hpp
src/yield_interface.cpp
cppa/detail/abstract_scheduled_actor.hpp
src/abstract_scheduled_actor.cpp
src/scheduled_actor_dummy.cpp
src/invokable.cpp
cppa/detail/thread_pool_scheduler.hpp
src/thread_pool_scheduler.cpp
......@@ -265,3 +265,5 @@ cppa/detail/value_guard.hpp
cppa/detail/tuple_iterator.hpp
cppa/match_expr.hpp
cppa/detail/pseudo_tuple.hpp
cppa/detail/recursive_queue_node.hpp
cppa/detail/scheduled_actor_dummy.hpp
......@@ -45,6 +45,9 @@
#include "cppa/attachable.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa {
......@@ -53,7 +56,8 @@ namespace cppa {
* @tparam Base Either {@link cppa::actor actor}
* or {@link cppa::local_actor local_actor}.
*/
template<class Base>
template<class Base,
class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node> >
class abstract_actor : public Base
{
......@@ -62,33 +66,12 @@ class abstract_actor : public Base
public:
struct queue_node
{
queue_node* next; // intrusive next pointer
bool marked; // denotes if this node is currently processed
actor_ptr sender;
any_tuple msg;
queue_node() : next(nullptr), marked(false) { }
queue_node(actor* from, any_tuple content)
: next(nullptr), marked(false), sender(from), msg(std::move(content))
{
}
};
struct queue_node_guard
{
queue_node* m_node;
queue_node_guard(queue_node* ptr) : m_node(ptr) { ptr->marked = true; }
inline void release() { m_node = nullptr; }
~queue_node_guard() { if (m_node) m_node->marked = false; }
};
typedef intrusive::single_reader_queue<queue_node> mailbox_type;
typedef std::unique_ptr<queue_node> queue_node_ptr;
typedef typename mailbox_type::cache_type mailbox_cache_type;
typedef typename mailbox_cache_type::iterator queue_node_iterator;
typedef MailboxType mailbox_type;
typedef typename mailbox_type::value_type mailbox_element;
typedef typename mailbox_type::cache_type mailbox_cache_type;
typedef typename mailbox_cache_type::value_type mailbox_cache_element;
bool attach(attachable* ptr) /*override*/
bool attach(attachable* ptr) // override
{
if (ptr == nullptr)
{
......@@ -114,7 +97,7 @@ class abstract_actor : public Base
}
}
void detach(attachable::token const& what) /*override*/
void detach(attachable::token const& what) // override
{
attachable_ptr uptr;
// lifetime scope of guard
......@@ -133,17 +116,17 @@ class abstract_actor : public Base
// uptr will be destroyed here, without locked mutex
}
void link_to(intrusive_ptr<actor>& other) /*override*/
void link_to(intrusive_ptr<actor>& other) // override
{
(void) link_to_impl(other);
}
void unlink_from(intrusive_ptr<actor>& other) /*override*/
void unlink_from(intrusive_ptr<actor>& other) // override
{
(void) unlink_from_impl(other);
}
bool remove_backlink(intrusive_ptr<actor>& other) /*override*/
bool remove_backlink(intrusive_ptr<actor>& other) // override
{
if (other && other != this)
{
......@@ -158,7 +141,7 @@ class abstract_actor : public Base
return false;
}
bool establish_backlink(intrusive_ptr<actor>& other) /*override*/
bool establish_backlink(intrusive_ptr<actor>& other) // override
{
std::uint32_t reason = exit_reason::not_exited;
if (other && other != this)
......@@ -188,9 +171,9 @@ class abstract_actor : public Base
mailbox_type m_mailbox;
template<typename T>
inline queue_node* fetch_node(actor* sender, T&& msg)
static inline mailbox_element* fetch_node(actor* sender, T&& msg)
{
return new queue_node(sender, std::forward<T>(msg));
return new mailbox_element(sender, std::forward<T>(msg));
}
template<typename... Args>
......
......@@ -44,14 +44,49 @@
namespace cppa {
// Functor that appends a singly linked chain of mailbox nodes
// (delivered in LIFO order by single_reader_queue) to a std::vector
// cache in FIFO order; returns an iterator to the first appended element.
struct vec_append
{
    inline std::vector<detail::recursive_queue_node>::iterator
    operator()(std::vector<detail::recursive_queue_node>& result,
               detail::recursive_queue_node* e) const
    {
        // take ownership of each heap-allocated node so the node itself
        // is freed after its content has been moved into result
        std::vector<std::unique_ptr<detail::recursive_queue_node> > owned;
        for (auto ptr = e; ptr != nullptr; )
        {
            auto successor = ptr->next;
            owned.emplace_back(ptr);
            ptr = successor;
        }
        auto insertion_point = result.size();
        // traverse the owned nodes in reverse to turn LIFO into FIFO order
        for (auto r = owned.rbegin(); r != owned.rend(); ++r)
        {
            result.emplace_back(std::move(*(*r)));
        }
        return result.begin() + insertion_point;
    }
};
/**
* @brief Base class for all event-based actor implementations.
*/
class abstract_event_based_actor : public detail::abstract_scheduled_actor
class abstract_event_based_actor
: public detail::abstract_scheduled_actor<
intrusive::single_reader_queue<
detail::recursive_queue_node,
std::vector<detail::recursive_queue_node>,
vec_append
>
>
{
typedef detail::abstract_scheduled_actor super;
typedef super::queue_node queue_node;
typedef detail::abstract_scheduled_actor<
intrusive::single_reader_queue<
detail::recursive_queue_node,
std::vector<detail::recursive_queue_node>,
vec_append
>
>
super;
public:
......@@ -59,7 +94,7 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
void dequeue(partial_function&); //override
void resume(util::fiber*, resume_callback* callback); //override
void resume(util::fiber*, scheduler::callback* cb); //override
/**
* @brief Initializes the actor by defining an initial behavior.
......@@ -71,14 +106,6 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
*/
virtual void on_exit();
inline abstract_event_based_actor* attach_to_scheduler(scheduler* sched)
{
CPPA_REQUIRE(sched != nullptr);
m_scheduler = sched;
init();
return this;
}
protected:
abstract_event_based_actor();
......@@ -89,9 +116,6 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
std::vector<stack_element> m_loop_stack;
// current position in mailbox
mailbox_cache_type::iterator m_mailbox_pos;
// provoke compiler errors for usage of receive() and related functions
/**
......@@ -135,8 +159,7 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
private:
bool handle_message(queue_node& iter);
bool invoke_from_cache();
bool handle_message(mailbox_element& iter);
};
......
......@@ -33,6 +33,7 @@
#include <tuple>
#include <cstdint>
#include <functional>
#include <type_traits>
#include "cppa/on.hpp"
......@@ -474,7 +475,7 @@ void demonitor(actor_ptr& whom);
*/
inline actor_ptr spawn(scheduled_actor* what)
{
return get_scheduler()->spawn(what, scheduled);
return get_scheduler()->spawn(what);
}
/**
......@@ -484,9 +485,9 @@ inline actor_ptr spawn(scheduled_actor* what)
* @returns A pointer to the spawned {@link actor Actor}.
*/
template<scheduling_hint Hint>
inline actor_ptr spawn(scheduled_actor* what)
inline actor_ptr spawn(std::function<void()> what)
{
return get_scheduler()->spawn(what, Hint);
return get_scheduler()->spawn(std::move(what), Hint);
}
/**
......@@ -494,19 +495,43 @@ inline actor_ptr spawn(scheduled_actor* what)
* @brief Spans a new event-based actor.
* @returns A pointer to the spawned {@link actor Actor}.
*/
inline actor_ptr spawn(abstract_event_based_actor* what)
inline actor_ptr spawn(std::function<void()> what)
{
return get_scheduler()->spawn(what);
return get_scheduler()->spawn(std::move(what), scheduled);
}
/**
// Helper that forwards spawn() arguments into the std::bind expression:
// rvalues are moved, lvalues are passed through unchanged.
template<typename T>
struct spawn_fwd_
{
static inline T&& _(T&& arg) { return std::move(arg); }
static inline T& _(T& arg) { return arg; }
static inline T const& _(T const& arg) { return arg; }
};
// Specialization for the 'self' placeholder: binds a concrete actor_ptr
// instead (NOTE(review): presumably so the bound argument refers to the
// spawning actor rather than being re-evaluated inside the new actor —
// confirm against the self_type implementation).
template<>
struct spawn_fwd_<self_type>
{
static inline actor_ptr _(self_type const&) { return self; }
};
// Spawns a new actor executing @p what bound to the given arguments;
// a 'self' argument is replaced by an actor_ptr via spawn_fwd_.
// @param what Function or functor the spawned actor executes.
// @param arg0, args Arguments bound to @p what.
// @returns A pointer to the spawned actor.
template<typename F, typename Arg0, typename... Args>
inline actor_ptr spawn(F&& what, Arg0&& arg0, Args&&... args)
{
    // use std::forward, not std::move: @p what is a forwarding reference,
    // and moving from an lvalue functor would silently invalidate the
    // caller's object
    return spawn(std::bind(std::forward<F>(what),
                 spawn_fwd_<typename util::rm_ref<Arg0>::type>::_(arg0),
                 spawn_fwd_<typename util::rm_ref<Args>::type>::_(args)...));
}
/*
/ **
* @ingroup ActorManagement
* @brief Spawns a new actor that executes @p what with given arguments.
* @tparam Hint Hint to the scheduler for the best scheduling strategy.
* @param what Function or functor that the spawned Actor should execute.
* @param args Arguments needed to invoke @p what.
* @returns A pointer to the spawned {@link actor actor}.
*/
* /
template<scheduling_hint Hint, typename F, typename... Args>
auto //actor_ptr
spawn(F&& what, Args const&... args)
......@@ -522,10 +547,10 @@ spawn(F&& what, Args const&... args)
return get_scheduler()->spawn(ptr, Hint);
}
/**
/ **
* @ingroup ActorManagement
* @brief Alias for <tt>spawn<scheduled>(what, args...)</tt>.
*/
* /
template<typename F, typename... Args>
auto // actor_ptr
spawn(F&& what, Args const&... args)
......@@ -537,6 +562,7 @@ spawn(F&& what, Args const&... args)
{
return spawn<scheduled>(std::forward<F>(what), args...);
}
*/
#ifdef CPPA_DOCUMENTATION
......
......@@ -31,46 +31,31 @@
#ifndef SCHEDULED_ACTOR_HPP
#define SCHEDULED_ACTOR_HPP
#include <atomic>
#include "cppa/any_tuple.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/local_actor.hpp"
#include "cppa/abstract_actor.hpp"
#include "cppa/scheduled_actor.hpp"
#include "cppa/util/fiber.hpp"
#include "cppa/intrusive/singly_linked_list.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa { class scheduler; }
namespace cppa { namespace detail {
// A spawned, scheduled Actor.
class abstract_scheduled_actor : public abstract_actor<local_actor>
template<class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node> >
class abstract_scheduled_actor : public abstract_actor<scheduled_actor, MailboxType>
{
friend class intrusive::single_reader_queue<abstract_scheduled_actor>;
abstract_scheduled_actor* next; // intrusive next pointer
void enqueue_node(queue_node* node);
typedef abstract_actor<scheduled_actor, MailboxType> super;
protected:
std::atomic<int> m_state;
scheduler* m_scheduler;
typedef abstract_actor super;
typedef super::queue_node_guard queue_node_guard;
typedef super::queue_node queue_node;
typedef super::queue_node_ptr queue_node_ptr;
enum dq_result
{
dq_done,
dq_indeterminate,
dq_timeout_occured
};
enum filter_result
{
......@@ -80,16 +65,48 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
ordinary_message
};
filter_result filter_msg(any_tuple const& msg);
auto dq(queue_node& node, partial_function& rules) -> dq_result;
// Classifies a message before it reaches the actor's behavior:
// recognizes (:Exit, reason) and (:Timeout, id) system messages and
// returns ordinary_message for everything else.
filter_result filter_msg(any_tuple const& msg)
{
// cached type info for the pattern (atom_value, std::uint32_t)
auto& arr = detail::static_types_array<atom_value, std::uint32_t>::arr;
if ( msg.size() == 2
&& msg.type_at(0) == arr[0]
&& msg.type_at(1) == arr[1])
{
auto v0 = *reinterpret_cast<const atom_value*>(msg.at(0));
auto v1 = *reinterpret_cast<const std::uint32_t*>(msg.at(1));
if (v0 == atom(":Exit"))
{
// unless the actor traps exit signals, a non-normal exit
// reason terminates this actor as well
if (this->m_trap_exit == false)
{
if (v1 != exit_reason::normal)
{
quit(v1);
}
return normal_exit_signal;
}
}
else if (v0 == atom(":Timeout"))
{
// only the most recently requested timeout id is valid;
// older ids belong to timeouts that were superseded
return (v1 == m_active_timeout_id) ? timeout_message
: expired_timeout_message;
}
}
return ordinary_message;
}
// Returns whether a :Timeout message was requested but not yet handled.
bool has_pending_timeout()
{
return m_has_pending_timeout_request;
}
void request_timeout(util::duration const& d);
// Requests a delayed :Timeout message carrying a fresh timeout id;
// invalid durations are silently ignored.
void request_timeout(util::duration const& d)
{
    if (d.valid() == false) return;
    // incrementing m_active_timeout_id invalidates any :Timeout
    // message that is still in flight
    get_scheduler()->future_send(this, d, atom(":Timeout"), ++m_active_timeout_id);
    m_has_pending_timeout_request = true;
}
void reset_timeout()
{
......@@ -100,8 +117,6 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
}
}
private:
bool m_has_pending_timeout_request;
std::uint32_t m_active_timeout_id;
......@@ -112,42 +127,78 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
static constexpr int blocked = 0x02;
static constexpr int about_to_block = 0x04;
abstract_scheduled_actor(int state = done);
abstract_scheduled_actor(scheduler* sched);
void quit(std::uint32_t reason);
// Constructs the actor in the given scheduling state (default: done);
// no timeout is pending initially.
abstract_scheduled_actor(int state = done)
: m_state(state)
, m_has_pending_timeout_request(false)
, m_active_timeout_id(0)
{
}
void enqueue(actor* sender, any_tuple&& msg);
// Terminates this actor: runs cleanup with the given exit reason, then
// leaves the current execution context by throwing actor_exited.
void quit(std::uint32_t reason)
{
this->cleanup(reason);
throw actor_exited(reason);
}
void enqueue(actor* sender, any_tuple const& msg);
// Enqueues a message by moving its content into a new mailbox node.
void enqueue(actor* sender, any_tuple&& msg)
{
enqueue_node(super::fetch_node(sender, std::move(msg)));
}
int compare_exchange_state(int expected, int new_value);
// Enqueues a message by copying its content into a new mailbox node.
void enqueue(actor* sender, any_tuple const& msg)
{
enqueue_node(super::fetch_node(sender, msg));
}
struct resume_callback
int compare_exchange_state(int expected, int new_value)
{
virtual ~resume_callback();
// called if an actor finished execution
virtual void exec_done() = 0;
};
int e = expected;
do
{
if (m_state.compare_exchange_weak(e, new_value))
{
return new_value;
}
}
while (e == expected);
return e;
}
// from = calling worker
virtual void resume(util::fiber* from, resume_callback* callback) = 0;
private:
};
// Pushes a node to the mailbox; if the push hit an empty mailbox, the
// actor may be (about to go) sleeping, so transition its state and
// hand it back to the scheduler if needed.
void enqueue_node(typename super::mailbox_element* node)
{
// _push_back returns true if the queue was empty before the push
if (this->m_mailbox._push_back(node))
{
for (;;)
{
int state = m_state.load();
switch (state)
{
case blocked:
{
// wake the actor up by rescheduling it
if (m_state.compare_exchange_weak(state, ready))
{
CPPA_REQUIRE(this->m_scheduler != nullptr);
this->m_scheduler->enqueue(this);
return;
}
// CAS failed: re-read state and retry
break;
}
case about_to_block:
{
// actor is about to sleep; mark it ready so it sees the
// new message before actually blocking
if (m_state.compare_exchange_weak(state, ready))
{
return;
}
break;
}
// already ready/done: nothing to do
default: return;
}
}
}
}
struct scheduled_actor_dummy : abstract_scheduled_actor
{
void resume(util::fiber*, resume_callback*);
void quit(std::uint32_t);
void dequeue(behavior&);
void dequeue(partial_function&);
void link_to(intrusive_ptr<actor>&);
void unlink_from(intrusive_ptr<actor>&);
bool establish_backlink(intrusive_ptr<actor>&);
bool remove_backlink(intrusive_ptr<actor>&);
void detach(attachable::token const&);
bool attach(attachable*);
};
} } // namespace cppa::detail
......
......@@ -59,8 +59,6 @@ class converted_thread_context : public abstract_actor<local_actor>
{
typedef abstract_actor<local_actor> super;
typedef super::queue_node queue_node;
typedef super::queue_node_ptr queue_node_ptr;
public:
......@@ -86,7 +84,7 @@ class converted_thread_context : public abstract_actor<local_actor>
private:
typedef intrusive::singly_linked_list<queue_node> queue_node_buffer;
//typedef intrusive::singly_linked_list<queue_node> queue_node_buffer;
enum throw_on_exit_result
{
......@@ -95,7 +93,7 @@ class converted_thread_context : public abstract_actor<local_actor>
};
// returns true if node->msg was accepted by rules
bool dq(queue_node& node, partial_function& rules);
bool dq(mailbox_element& node, partial_function& rules);
throw_on_exit_result throw_on_exit(any_tuple const& msg);
......
......@@ -67,9 +67,6 @@ struct mailman_add_peer
class mailman_job
{
friend class intrusive::singly_linked_list<mailman_job>;
friend class intrusive::single_reader_queue<mailman_job>;
public:
enum job_type
......@@ -123,9 +120,10 @@ class mailman_job
return m_type == kill_type;
}
mailman_job* next;
private:
mailman_job* next;
job_type m_type;
// unrestricted union
union
......
......@@ -40,13 +40,13 @@ class mock_scheduler : public scheduler
public:
actor_ptr spawn(abstract_event_based_actor* what);
actor_ptr spawn(scheduled_actor* what);
actor_ptr spawn(scheduled_actor*, scheduling_hint);
actor_ptr spawn(std::function<void()> what, scheduling_hint);
static actor_ptr spawn(scheduled_actor*);
static actor_ptr spawn(std::function<void()> what);
void enqueue(detail::abstract_scheduled_actor*);
void enqueue(scheduled_actor* what);
};
......
......@@ -43,9 +43,6 @@ namespace cppa { namespace detail {
class post_office_msg
{
friend class intrusive::singly_linked_list<post_office_msg>;
friend class intrusive::single_reader_queue<post_office_msg>;
public:
enum msg_type
......@@ -130,10 +127,10 @@ class post_office_msg
~post_office_msg();
private:
post_office_msg* next;
private:
msg_type m_type;
union
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef RECURSIVE_QUEUE_NODE_HPP
#define RECURSIVE_QUEUE_NODE_HPP
#include "cppa/actor.hpp"
#include "cppa/any_tuple.hpp"
namespace cppa { namespace detail {
// A single mailbox element: an intrusively linked node storing the
// sender and the content of one message.
struct recursive_queue_node
{
recursive_queue_node* next; // intrusive next pointer
bool marked; // denotes if this node is currently processed
actor_ptr sender;
any_tuple msg;
// creates an empty, unlinked node
inline recursive_queue_node()
: next(nullptr)
, marked(false)
{
}
// creates an unlinked node holding a message from @p from
inline recursive_queue_node(actor* from, any_tuple content)
: next(nullptr)
, marked(false)
, sender(from)
, msg(std::move(content))
{
}
// move constructor; the new node starts out unlinked and unmarked
inline recursive_queue_node(recursive_queue_node&& other)
: next(nullptr)
, marked(false)
, sender(std::move(other.sender))
, msg(std::move(other.msg))
{
}
// RAII helper that marks a node as in-process for the guard's
// lifetime; call release() to keep the node marked permanently
struct guard
{
recursive_queue_node* m_node;
inline guard(recursive_queue_node* ptr) : m_node(ptr)
{
ptr->marked = true;
}
// disowns the node: the destructor becomes a no-op and the
// node stays marked
inline void release()
{
m_node = nullptr;
}
inline ~guard()
{
if (m_node) m_node->marked = false;
}
};
};
} } // namespace cppa::detail
#endif // RECURSIVE_QUEUE_NODE_HPP
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef SCHEDULED_ACTOR_DUMMY_HPP
#define SCHEDULED_ACTOR_DUMMY_HPP
#include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa { namespace detail {
// Dummy implementation of abstract_scheduled_actor, used as a
// placeholder by thread_pool_scheduler (member m_dummy); all overrides
// are implemented out-of-line in scheduled_actor_dummy.cpp.
struct scheduled_actor_dummy : abstract_scheduled_actor<>
{
void resume(util::fiber*, scheduler::callback*);
void quit(std::uint32_t);
void dequeue(behavior&);
void dequeue(partial_function&);
void link_to(intrusive_ptr<actor>&);
void unlink_from(intrusive_ptr<actor>&);
bool establish_backlink(intrusive_ptr<actor>&);
bool remove_backlink(intrusive_ptr<actor>&);
void detach(attachable::token const&);
bool attach(attachable*);
};
} } // namespace cppa::detail
#endif // SCHEDULED_ACTOR_DUMMY_HPP
......@@ -34,6 +34,7 @@
#include "cppa/scheduler.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/util/producer_consumer_list.hpp"
#include "cppa/detail/scheduled_actor_dummy.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa { namespace detail {
......@@ -51,26 +52,26 @@ class thread_pool_scheduler : public scheduler
void stop() /*override*/;
void enqueue(abstract_scheduled_actor* what) /*override*/;
void enqueue(scheduled_actor* what) /*override*/;
actor_ptr spawn(abstract_event_based_actor* what);
actor_ptr spawn(scheduled_actor* what);
actor_ptr spawn(scheduled_actor* behavior, scheduling_hint hint);
actor_ptr spawn(std::function<void()> what, scheduling_hint hint);
private:
//typedef util::single_reader_queue<abstract_scheduled_actor> job_queue;
typedef util::producer_consumer_list<abstract_scheduled_actor> job_queue;
typedef util::producer_consumer_list<scheduled_actor> job_queue;
job_queue m_queue;
scheduled_actor_dummy m_dummy;
thread m_supervisor;
actor_ptr spawn_impl(abstract_scheduled_actor* what,
actor_ptr spawn_impl(scheduled_actor* what,
bool push_to_queue = true);
static void worker_loop(worker*);
static void supervisor_loop(job_queue*, abstract_scheduled_actor*);
static void supervisor_loop(job_queue*, scheduled_actor*);
};
......
......@@ -45,15 +45,13 @@
namespace cppa { namespace detail {
class yielding_actor : public abstract_scheduled_actor
class yielding_actor : public abstract_scheduled_actor<>
{
typedef abstract_scheduled_actor super;
typedef super::queue_node queue_node;
typedef super::queue_node_ptr queue_node_ptr;
util::fiber m_fiber;
scheduled_actor* m_behavior;
std::function<void()> m_behavior;
static void run(void* _this);
......@@ -63,15 +61,13 @@ class yielding_actor : public abstract_scheduled_actor
public:
yielding_actor(scheduled_actor* behavior, scheduler* sched);
~yielding_actor(); //override
yielding_actor(std::function<void()> fun);
void dequeue(behavior& bhvr); //override
void dequeue(partial_function& fun); //override
void resume(util::fiber* from, resume_callback* callback); //override
void resume(util::fiber* from, scheduler::callback* callback); //override
private:
......@@ -89,6 +85,15 @@ class yielding_actor : public abstract_scheduled_actor
mbox_cache.erase(iter);
}
enum dq_result
{
dq_done,
dq_indeterminate,
dq_timeout_occured
};
auto dq(mailbox_element& node, partial_function& rules) -> dq_result;
};
} } // namespace cppa::detail
......
......@@ -35,16 +35,46 @@
#include <atomic>
#include <memory>
#include "cppa/config.hpp"
#include "cppa/detail/thread.hpp"
namespace cppa { namespace intrusive {
template<typename List>
struct default_list_append
{
template<typename T>
typename List::iterator operator()(List& l, T* e)
{
CPPA_REQUIRE(e != nullptr);
// temporary list to convert LIFO to FIFO order
List tmp;
// public_tail (e) has LIFO order,
// but private_head requires FIFO order
while (e)
{
// next iteration element
T* next = e->next;
// insert e to private cache (convert to LIFO order)
tmp.emplace_front(e);
e = next;
}
CPPA_REQUIRE(tmp.empty() == false);
auto result = tmp.begin();
l.splice(l.end(), tmp);
return result;
}
};
/**
* @brief An intrusive, thread safe queue implementation.
* @note For implementation details see
* http://libcppa.blogspot.com/2011/04/mailbox-part-1.html
*/
template<typename T>
template<typename T,
class CacheType = std::list<std::unique_ptr<T> >,
class CacheAppend = default_list_append<std::list<std::unique_ptr<T> > > >
class single_reader_queue
{
......@@ -52,42 +82,39 @@ class single_reader_queue
public:
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
typedef T value_type;
typedef value_type* pointer;
typedef std::unique_ptr<value_type> unique_value_ptr;
typedef std::list<unique_value_ptr> cache_type;
typedef typename cache_type::iterator cache_iterator;
typedef CacheType cache_type;
typedef typename cache_type::value_type cache_value_type;
typedef typename cache_type::iterator cache_iterator;
/**
* @warning call only from the reader (owner)
*/
pointer pop()
cache_value_type pop()
{
wait_for_data();
return take_head();
cache_value_type result;
take_head(result);
return result;
}
/**
* @warning call only from the reader (owner)
*/
pointer try_pop()
bool try_pop(cache_value_type& result)
{
return take_head();
return take_head(result);
}
/**
* @warning call only from the reader (owner)
*/
template<typename TimePoint>
pointer try_pop(TimePoint const& abs_time)
bool try_pop(cache_value_type& result, TimePoint const& abs_time)
{
return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
return (timed_wait_for_data(abs_time)) ? take_head(result) : false;
}
// returns true if the queue was empty
......@@ -192,6 +219,7 @@ class single_reader_queue
// accessed only by the owner
cache_type m_cache;
CacheAppend m_append;
// locked on enqueue/dequeue operations to/from an empty list
detail::mutex m_mtx;
......@@ -231,22 +259,8 @@ class single_reader_queue
{
if (m_stack.compare_exchange_weak(e, 0))
{
// temporary list to convert LIFO to FIFO order
cache_type tmp;
// public_tail (e) has LIFO order,
// but private_head requires FIFO order
while (e)
{
// next iteration element
pointer next = e->next;
// insert e to private cache (convert to LIFO order)
tmp.push_front(unique_value_ptr{e});
//m_cache.insert(iter, unique_value_ptr{e});
// next iteration
e = next;
}
if (iter) *iter = tmp.begin();
m_cache.splice(m_cache.end(), tmp);
auto i = m_append(m_cache, e);
if (iter) *iter = i;
return true;
}
// next iteration
......@@ -255,16 +269,15 @@ class single_reader_queue
return false;
}
pointer take_head()
bool take_head(cache_value_type& result)
{
if (!m_cache.empty() || fetch_new_data())
{
auto result = m_cache.front().release();
result = std::move(m_cache.front());
m_cache.pop_front();
return result;
//return m_cache.take_after(m_cache.before_begin());
return true;
}
return nullptr;
return false;
}
};
......
......@@ -31,8 +31,14 @@
#ifndef ACTOR_BEHAVIOR_HPP
#define ACTOR_BEHAVIOR_HPP
#include "cppa/config.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/local_actor.hpp"
namespace cppa {
namespace util { class fiber; }
/**
* @brief A base class for context-switching or thread-mapped actor
* implementations.
......@@ -43,12 +49,14 @@ namespace cppa {
* blocking functions, or need to have your own thread for other reasons,
* this class can be used to define a class-based actor.
*/
class scheduled_actor
class scheduled_actor : public local_actor
{
public:
virtual ~scheduled_actor();
scheduled_actor();
scheduled_actor* next; // intrusive next pointer
/**
* @brief Can be overridden to perform cleanup code after an actor
......@@ -59,10 +67,19 @@ class scheduled_actor
virtual void on_exit();
/**
* @brief Implements the behavior of a context-switching or thread-mapped
* actor.
* @brief Can be overridden to initialize and actor before any
* message is handled.
*/
virtual void act() = 0;
virtual void init();
// called from worker thread
virtual void resume(util::fiber* from, scheduler::callback* cb) = 0;
scheduled_actor* attach_to_scheduler(scheduler* sched);
protected:
scheduler* m_scheduler;
};
......
......@@ -34,6 +34,7 @@
#include <chrono>
#include <memory>
#include <cstdint>
#include <functional>
#include "cppa/self.hpp"
#include "cppa/atom.hpp"
......@@ -50,10 +51,6 @@ namespace cppa {
class scheduled_actor;
class scheduler_helper;
class abstract_event_based_actor;
namespace detail { class abstract_scheduled_actor; }
/**
* @brief
*/
......@@ -70,6 +67,13 @@ class scheduler
public:
struct callback
{
virtual ~callback();
// called if an actor finished execution during resume()
virtual void exec_done() = 0;
};
virtual ~scheduler();
/**
......@@ -82,19 +86,19 @@ class scheduler
*/
virtual void stop();
virtual void enqueue(detail::abstract_scheduled_actor*) = 0;
virtual void enqueue(scheduled_actor*) = 0;
/**
* @brief Spawns a new actor that executes <code>behavior->act()</code>
* with the scheduling policy @p hint if possible.
*/
virtual actor_ptr spawn(scheduled_actor* behavior,
virtual actor_ptr spawn(std::function<void()> behavior,
scheduling_hint hint) = 0;
/**
* @brief Spawns a new event-based actor.
*/
virtual actor_ptr spawn(abstract_event_based_actor* what) = 0;
virtual actor_ptr spawn(scheduled_actor* what) = 0;
/**
* @brief Informs the scheduler about a converted context
......
......@@ -38,8 +38,7 @@
namespace cppa {
abstract_event_based_actor::abstract_event_based_actor()
: super(abstract_event_based_actor::blocked)
, m_mailbox_pos(m_mailbox.cache().end())
: super(super::blocked)
{
//m_mailbox_pos = m_mailbox.cache().end();
}
......@@ -54,58 +53,59 @@ void abstract_event_based_actor::dequeue(partial_function&)
quit(exit_reason::unallowed_function_call);
}
bool abstract_event_based_actor::handle_message(queue_node& node)
bool abstract_event_based_actor::handle_message(mailbox_element& node)
{
CPPA_REQUIRE(m_loop_stack.empty() == false);
if (node.marked) return false;
auto& bhvr = *(m_loop_stack.back());
if (bhvr.timeout().valid())
switch (filter_msg(node.msg))
{
switch (dq(node, bhvr.get_partial_function()))
{
case dq_timeout_occured:
{
bhvr.handle_timeout();
// fall through
}
case dq_done:
case normal_exit_signal:
case expired_timeout_message:
node.marked = true;
return false;
case timeout_message:
m_has_pending_timeout_request = false;
CPPA_REQUIRE(bhvr.timeout().valid());
bhvr.handle_timeout();
if (!m_loop_stack.empty())
{
// callback might have called become()/unbecome()
// request next timeout if needed
if (!m_loop_stack.empty())
{
auto& next_bhvr = *(m_loop_stack.back());
request_timeout(next_bhvr.timeout());
}
return true;
auto& next_bhvr = *(m_loop_stack.back());
request_timeout(next_bhvr.timeout());
}
default: return false;
}
}
else
{
return dq(node, bhvr.get_partial_function()) == dq_done;
}
}
return true;
bool abstract_event_based_actor::invoke_from_cache()
{
for (auto i = m_mailbox_pos; i != m_mailbox.cache().end(); ++i)
default:
break;
}
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
//m_last_dequeued = node.msg;
//m_last_sender = node.sender;
// make sure no timeout is handled incorrectly in a nested receive
++m_active_timeout_id;
if ((bhvr.get_partial_function())(m_last_dequeued))
{
auto& ptr = *i;
CPPA_REQUIRE(ptr.get() != nullptr);
if (handle_message(*ptr))
{
m_mailbox.cache().erase(i);
return true;
}
node.marked = true;
m_last_dequeued.reset();
m_last_sender.reset();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
return true;
}
// no match, restore members
--m_active_timeout_id;
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
return false;
}
void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
{
self.set(this);
auto& mbox_cache = m_mailbox.cache();
auto pos = mbox_cache.end();
try
{
for (;;)
......@@ -116,14 +116,18 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
m_state.store(abstract_scheduled_actor::done);
m_loop_stack.clear();
on_exit();
callback->exec_done();
cb->exec_done();
return;
}
while (m_mailbox_pos == mbox_cache.end())
while (pos == mbox_cache.end())
{
// try fetch more
if (m_mailbox.can_fetch_more() == false)
{
// sweep marked elements
auto new_end = std::remove_if(mbox_cache.begin(), mbox_cache.end(),
[](detail::recursive_queue_node const& n) { return n.marked; });
mbox_cache.resize(std::distance(mbox_cache.begin(), new_end));
m_state.store(abstract_scheduled_actor::about_to_block);
CPPA_MEMORY_BARRIER();
if (m_mailbox.can_fetch_more() == false)
......@@ -133,22 +137,27 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
{
case abstract_scheduled_actor::ready:
{
// someone preempt us
// someone preempt us, set position to new end()
pos = mbox_cache.end();
break;
}
case abstract_scheduled_actor::blocked:
{
// done
return;
}
default: exit(7); // illegal state
};
}
}
m_mailbox_pos = m_mailbox.try_fetch_more();
pos = m_mailbox.try_fetch_more();
}
pos = std::find_if(pos, mbox_cache.end(),
[&](mailbox_element& e) { return handle_message(e); });
if (pos != mbox_cache.end())
{
// handled a message, scan mailbox from start again
pos = mbox_cache.begin();
}
m_mailbox_pos = (invoke_from_cache()) ? mbox_cache.begin()
: mbox_cache.end();
}
}
catch (actor_exited& what)
......@@ -162,7 +171,7 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
m_state.store(abstract_scheduled_actor::done);
m_loop_stack.clear();
on_exit();
callback->exec_done();
cb->exec_done();
}
void abstract_event_based_actor::on_exit()
......
......@@ -70,7 +70,7 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
void converted_thread_context::dequeue(partial_function& rules) /*override*/
{
auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, rules); };
auto rm_fun = [&](mailbox_cache_element& node) { return dq(*node, rules); };
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
......@@ -87,7 +87,7 @@ void converted_thread_context::dequeue(behavior& rules) /*override*/
{
auto timeout = now();
timeout += rules.timeout();
auto rm_fun = [&](queue_node_ptr& node)
auto rm_fun = [&](mailbox_cache_element& node)
{
return dq(*node, rules.get_partial_function());
};
......@@ -131,7 +131,7 @@ converted_thread_context::throw_on_exit(any_tuple const& msg)
return not_an_exit_signal;
}
bool converted_thread_context::dq(queue_node& node, partial_function& rules)
bool converted_thread_context::dq(mailbox_element& node, partial_function& rules)
{
if ( m_trap_exit == false
&& throw_on_exit(node.msg) == normal_exit_signal)
......@@ -141,7 +141,7 @@ bool converted_thread_context::dq(queue_node& node, partial_function& rules)
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
{
queue_node_guard qguard{&node};
mailbox_element::guard qguard{&node};
if (rules(m_last_dequeued))
{
// client calls erase(iter)
......
......@@ -103,7 +103,7 @@ void mailman_loop()
std::map<process_information, native_socket_type> peers;
for (;;)
{
job.reset(mqueue.pop());
job = mqueue.pop();
if (job->is_send_job())
{
mailman_send_job& sjob = job->send_job();
......
......@@ -54,18 +54,12 @@ using std::endl;
namespace {
void run_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
cppa::scheduled_actor* behavior)
std::function<void()> what)
{
cppa::self.set(m_self.get());
if (behavior)
{
try { behavior->act(); }
catch (...) { }
try { behavior->on_exit(); }
catch (...) { }
delete behavior;
cppa::self.set(nullptr);
}
try { what(); }
catch (...) { }
cppa::self.set(nullptr);
cppa::detail::dec_actor_count();
}
......@@ -73,30 +67,30 @@ void run_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
namespace cppa { namespace detail {
actor_ptr mock_scheduler::spawn(scheduled_actor* behavior)
actor_ptr mock_scheduler::spawn(std::function<void()> what)
{
inc_actor_count();
CPPA_MEMORY_BARRIER();
intrusive_ptr<local_actor> ctx(new detail::converted_thread_context);
thread(run_actor, ctx, behavior).detach();
thread(run_actor, ctx, std::move(what)).detach();
return ctx;
}
actor_ptr mock_scheduler::spawn(abstract_event_based_actor* what)
actor_ptr mock_scheduler::spawn(scheduled_actor*)
{
// TODO: don't delete what :)
delete what;
cerr << "mock_scheduler::spawn(scheduled_actor*)" << endl;
abort();
return nullptr;
}
actor_ptr mock_scheduler::spawn(scheduled_actor* behavior, scheduling_hint)
actor_ptr mock_scheduler::spawn(std::function<void()> what, scheduling_hint)
{
return spawn(behavior);
return spawn(std::move(what));
}
void mock_scheduler::enqueue(detail::abstract_scheduled_actor*)
void mock_scheduler::enqueue(scheduled_actor*)
{
cerr << "mock_scheduler::enqueue" << endl;
cerr << "mock_scheduler::enqueue(scheduled_actor)" << endl;
abort();
}
......
......@@ -32,7 +32,7 @@
namespace cppa {
scheduled_actor::~scheduled_actor()
scheduled_actor::scheduled_actor() : next(nullptr), m_scheduler(nullptr)
{
}
......@@ -40,4 +40,17 @@ void scheduled_actor::on_exit()
{
}
void scheduled_actor::init()
{
}
scheduled_actor* scheduled_actor::attach_to_scheduler(scheduler* sched)
{
CPPA_REQUIRE(sched != nullptr);
m_scheduler = sched;
init();
return this;
}
} // namespace cppa
......@@ -28,202 +28,11 @@
\******************************************************************************/
#include "cppa/cppa.hpp"
#include "cppa/config.hpp"
#include "cppa/to_string.hpp"
#include "cppa/exception.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/detail/types_array.hpp"
#include "cppa/detail/yield_interface.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
#include "cppa/detail/scheduled_actor_dummy.hpp"
namespace cppa { namespace detail {
namespace {
void dummy_enqueue(void*, abstract_scheduled_actor*) { }
types_array<atom_value, std::uint32_t> t_atom_ui32_types;
}
abstract_scheduled_actor::abstract_scheduled_actor(scheduler* sched)
: next(nullptr)
, m_state(ready)
, m_scheduler(sched)
, m_has_pending_timeout_request(false)
, m_active_timeout_id(0)
{
CPPA_REQUIRE(sched != nullptr);
}
abstract_scheduled_actor::abstract_scheduled_actor(int state)
: next(nullptr)
, m_state(state)
, m_scheduler(nullptr)
, m_has_pending_timeout_request(false)
, m_active_timeout_id(0)
{
}
abstract_scheduled_actor::resume_callback::~resume_callback()
{
}
void abstract_scheduled_actor::quit(std::uint32_t reason)
{
cleanup(reason);
throw actor_exited(reason);
}
void abstract_scheduled_actor::enqueue_node(queue_node* node)
{
if (m_mailbox._push_back(node))
{
for (;;)
{
int state = m_state.load();
switch (state)
{
case blocked:
{
if (m_state.compare_exchange_weak(state, ready))
{
CPPA_REQUIRE(m_scheduler != nullptr);
m_scheduler->enqueue(this);
return;
}
break;
}
case about_to_block:
{
if (m_state.compare_exchange_weak(state, ready))
{
return;
}
break;
}
default: return;
}
}
}
}
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple&& msg)
{
enqueue_node(fetch_node(sender, std::move(msg)));
//enqueue_node(new queue_node(sender, std::move(msg)));
}
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple const& msg)
{
enqueue_node(fetch_node(sender, msg));
//enqueue_node(new queue_node(sender, msg));
}
int abstract_scheduled_actor::compare_exchange_state(int expected,
int new_value)
{
int e = expected;
do
{
if (m_state.compare_exchange_weak(e, new_value))
{
return new_value;
}
}
while (e == expected);
return e;
}
void abstract_scheduled_actor::request_timeout(util::duration const& d)
{
if (d.valid())
{
future_send(this, d, atom(":Timeout"), ++m_active_timeout_id);
m_has_pending_timeout_request = true;
}
}
auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result
{
if ( msg.size() == 2
&& msg.type_at(0) == t_atom_ui32_types[0]
&& msg.type_at(1) == t_atom_ui32_types[1])
{
auto v0 = *reinterpret_cast<const atom_value*>(msg.at(0));
auto v1 = *reinterpret_cast<const std::uint32_t*>(msg.at(1));
if (v0 == atom(":Exit"))
{
if (m_trap_exit == false)
{
if (v1 != exit_reason::normal)
{
quit(v1);
}
return normal_exit_signal;
}
}
else if (v0 == atom(":Timeout"))
{
return (v1 == m_active_timeout_id) ? timeout_message
: expired_timeout_message;
}
}
return ordinary_message;
}
auto abstract_scheduled_actor::dq(queue_node& node,
partial_function& fun) -> dq_result
{
CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
if (node.marked) return dq_indeterminate;
switch (filter_msg(node.msg))
{
case normal_exit_signal:
case expired_timeout_message:
{
// skip message
return dq_indeterminate;
}
case timeout_message:
{
// m_active_timeout_id is already invalid
m_has_pending_timeout_request = false;
return dq_timeout_occured;
}
default: break;
}
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
//m_last_dequeued = node.msg;
//m_last_sender = node.sender;
// make sure no timeout is handled incorrectly in a nested receive
++m_active_timeout_id;
// lifetime scope of qguard
{
// make sure nested receives do not process this node again
queue_node_guard qguard{&node};
// try to invoke given function
if (fun(m_last_dequeued))
{
// client erases node later (keep it marked until it's removed)
qguard.release();
// this members are only valid during invocation
m_last_dequeued.reset();
m_last_sender.reset();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
return dq_done;
}
}
// no match, restore members
--m_active_timeout_id;
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
return dq_indeterminate;
}
// dummy
void scheduled_actor_dummy::resume(util::fiber*, resume_callback*)
void scheduled_actor_dummy::resume(util::fiber*, scheduler::callback*)
{
}
......
......@@ -99,7 +99,8 @@ struct scheduler_helper
void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{
typedef abstract_actor<local_actor>::queue_node_ptr queue_node_ptr;
typedef abstract_actor<local_actor> impl_type;
typedef impl_type::mailbox_type::cache_value_type queue_node_ptr;
// setup & local variables
self.set(m_self.get());
auto& queue = m_self->mailbox();
......@@ -141,7 +142,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{
if (messages.empty())
{
msg_ptr.reset(queue.pop());
msg_ptr = queue.pop();
}
else
{
......@@ -150,8 +151,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
auto it = messages.begin();
while (it != messages.end() && (it->first) <= now)
{
abstract_actor<local_actor>::queue_node_ptr ptr(std::move(it->second));
//auto ptr = it->second;
queue_node_ptr ptr{std::move(it->second)};
auto whom = const_cast<actor_ptr*>(
reinterpret_cast<actor_ptr const*>(
ptr->msg.at(1)));
......@@ -163,17 +163,16 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
}
messages.erase(it);
it = messages.begin();
//delete ptr;
}
// wait for next message or next timeout
if (it != messages.end())
{
msg_ptr.reset(queue.try_pop(it->first));
msg_ptr.reset();
queue.try_pop(msg_ptr, it->first);
}
}
}
handle_msg(msg_ptr->msg);
//delete msg_ptr;
}
}
......@@ -243,4 +242,6 @@ scheduler* get_scheduler()
return result;
}
scheduler::callback::~callback() { }
} // namespace cppa
......@@ -56,7 +56,7 @@ typedef intrusive::single_reader_queue<thread_pool_scheduler::worker> worker_que
struct thread_pool_scheduler::worker
{
typedef abstract_scheduled_actor* job_ptr;
typedef scheduled_actor* job_ptr;
job_queue* m_job_queue;
job_ptr m_dummy;
......@@ -134,11 +134,10 @@ struct thread_pool_scheduler::worker
void operator()()
{
util::fiber fself;
struct handler : abstract_scheduled_actor::resume_callback
struct handler : scheduler::callback
{
abstract_scheduled_actor* job;
scheduled_actor* job;
handler() : job(nullptr) { }
bool still_ready() { return true; }
void exec_done()
{
if (!job->deref()) delete job;
......@@ -179,7 +178,7 @@ void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w)
}
void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
abstract_scheduled_actor* dummy)
scheduled_actor* dummy)
{
std::vector<worker_ptr> workers;
size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 8);
......@@ -210,46 +209,48 @@ void thread_pool_scheduler::stop()
super::stop();
}
void thread_pool_scheduler::enqueue(abstract_scheduled_actor* what)
void thread_pool_scheduler::enqueue(scheduled_actor* what)
{
m_queue.push_back(what);
}
actor_ptr thread_pool_scheduler::spawn_impl(abstract_scheduled_actor* what,
actor_ptr thread_pool_scheduler::spawn_impl(scheduled_actor* what,
bool push_to_queue)
{
inc_actor_count();
CPPA_MEMORY_BARRIER();
intrusive_ptr<abstract_scheduled_actor> ctx(what);
intrusive_ptr<scheduled_actor> ctx(what);
ctx->ref();
if (push_to_queue) m_queue.push_back(ctx.get());
return std::move(ctx);
}
actor_ptr thread_pool_scheduler::spawn(abstract_event_based_actor* what)
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* what)
{
// do NOT push event-based actors to the queue on startup
return spawn_impl(what->attach_to_scheduler(this), false);
}
#ifndef CPPA_DISABLE_CONTEXT_SWITCHING
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr,
actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
scheduling_hint hint)
{
if (hint == detached)
{
return mock_scheduler::spawn(bhvr);
return mock_scheduler::spawn(std::move(what));
}
else
{
return spawn_impl(new yielding_actor(bhvr, this));
auto new_actor = new yielding_actor(std::move(what));
return spawn_impl(new_actor->attach_to_scheduler(this));
}
}
#else
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr, scheduling_hint)
actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
scheduling_hint)
{
return mock_scheduler::spawn(bhvr);
return mock_scheduler::spawn(what);
}
#endif
......
......@@ -39,40 +39,30 @@
namespace cppa { namespace detail {
yielding_actor::yielding_actor(scheduled_actor* behavior, scheduler* sched)
: super(sched)
, m_fiber(&yielding_actor::run, this)
, m_behavior(behavior)
yielding_actor::yielding_actor(std::function<void()> fun)
: m_fiber(&yielding_actor::run, this)
, m_behavior(fun)
{
}
yielding_actor::~yielding_actor()
{
delete m_behavior;
}
void yielding_actor::run(void* ptr_arg)
{
auto this_ptr = reinterpret_cast<yielding_actor*>(ptr_arg);
auto behavior_ptr = this_ptr->m_behavior;
if (behavior_ptr)
CPPA_REQUIRE(static_cast<bool>(this_ptr->m_behavior));
bool cleanup_called = false;
try { this_ptr->m_behavior(); }
catch (actor_exited&)
{
bool cleanup_called = false;
try { behavior_ptr->act(); }
catch (actor_exited&)
{
// cleanup already called by scheduled_actor::quit
cleanup_called = true;
}
catch (...)
{
this_ptr->cleanup(exit_reason::unhandled_exception);
cleanup_called = true;
}
if (!cleanup_called) this_ptr->cleanup(exit_reason::normal);
try { behavior_ptr->on_exit(); }
catch (...) { }
// cleanup already called by scheduled_actor::quit
cleanup_called = true;
}
catch (...)
{
this_ptr->cleanup(exit_reason::unhandled_exception);
cleanup_called = true;
}
if (!cleanup_called) this_ptr->cleanup(exit_reason::normal);
this_ptr->on_exit();
yield(yield_state::done);
}
......@@ -98,7 +88,10 @@ void yielding_actor::yield_until_not_empty()
void yielding_actor::dequeue(partial_function& fun)
{
auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, fun) == dq_done; };
auto rm_fun = [&](mailbox_cache_element& node)
{
return dq(*node, fun) == dq_done;
};
dequeue_impl(rm_fun);
}
......@@ -107,7 +100,7 @@ void yielding_actor::dequeue(behavior& bhvr)
if (bhvr.timeout().valid())
{
request_timeout(bhvr.timeout());
auto rm_fun = [&](queue_node_ptr& node) -> bool
auto rm_fun = [&](mailbox_cache_element& node) -> bool
{
switch (dq(*node, bhvr.get_partial_function()))
{
......@@ -129,7 +122,7 @@ void yielding_actor::dequeue(behavior& bhvr)
}
}
void yielding_actor::resume(util::fiber* from, resume_callback* callback)
void yielding_actor::resume(util::fiber* from, scheduler::callback* callback)
{
self.set(this);
for (;;)
......@@ -176,6 +169,57 @@ void yielding_actor::resume(util::fiber* from, resume_callback* callback)
}
}
auto yielding_actor::dq(mailbox_element& node,
partial_function& fun) -> dq_result
{
CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
if (node.marked) return dq_indeterminate;
switch (filter_msg(node.msg))
{
case normal_exit_signal:
case expired_timeout_message:
{
// skip message
return dq_indeterminate;
}
case timeout_message:
{
// m_active_timeout_id is already invalid
m_has_pending_timeout_request = false;
return dq_timeout_occured;
}
default: break;
}
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
//m_last_dequeued = node.msg;
//m_last_sender = node.sender;
// make sure no timeout is handled incorrectly in a nested receive
++m_active_timeout_id;
// lifetime scope of qguard
{
// make sure nested receives do not process this node again
mailbox_element::guard qguard{&node};
// try to invoke given function
if (fun(m_last_dequeued))
{
// client erases node later (keep it marked until it's removed)
qguard.release();
// this members are only valid during invocation
m_last_dequeued.reset();
m_last_sender.reset();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
return dq_done;
}
}
// no match, restore members
--m_active_timeout_id;
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
return dq_indeterminate;
}
} } // namespace cppa::detail
#else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING
......
......@@ -195,7 +195,7 @@ struct chopstick : public fsm_actor<chopstick>
};
class testee_actor : public scheduled_actor
class testee_actor
{
void wait4string()
......@@ -235,7 +235,7 @@ class testee_actor : public scheduled_actor
public:
void act()
void operator()()
{
receive_loop
(
......@@ -295,11 +295,10 @@ void testee3(actor_ptr parent)
}
template<class Testee>
std::string behavior_test()
std::string behavior_test(actor_ptr et)
{
std::string result;
std::string testee_name = detail::to_uniform_name(typeid(Testee));
auto et = spawn(new Testee);
send(et, 1);
send(et, 2);
send(et, 3);
......@@ -368,8 +367,8 @@ size_t test__spawn()
await_all_others_done();
CPPA_IF_VERBOSE(cout << "ok" << endl);
CPPA_CHECK_EQUAL(behavior_test<testee_actor>(), "wait4int");
CPPA_CHECK_EQUAL(behavior_test<event_testee>(), "wait4int");
CPPA_CHECK_EQUAL(behavior_test<testee_actor>(spawn(testee_actor{})), "wait4int");
CPPA_CHECK_EQUAL(behavior_test<event_testee>(spawn(new event_testee)), "wait4int");
// create 20,000 actors linked to one single actor
// and kill them all through killing the link
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment