Commit 7d07ea2f authored by Dominik Charousset

implemented new work-stealing scheduler

parent fdef7f32
@@ -171,6 +171,7 @@ set(LIBCPPA_SRC
     src/empty_tuple.cpp
     src/event_based_actor.cpp
    src/exception.cpp
+    src/execution_unit.cpp
     src/exit_reason.cpp
     src/fd_util.cpp
     src/functor_based_actor.cpp
@@ -208,7 +209,6 @@ set(LIBCPPA_SRC
     src/singleton_manager.cpp
     src/string_serialization.cpp
     src/sync_request_bouncer.cpp
-    src/thread_pool_scheduler.cpp
     src/to_uniform_name.cpp
     src/type_lookup_table.cpp
     src/unicast_network.cpp
......
@@ -67,7 +67,6 @@ cppa/detail/singleton_mixin.hpp
 cppa/detail/swap_bytes.hpp
 cppa/detail/sync_request_bouncer.hpp
 cppa/detail/tdata.hpp
-cppa/detail/thread_pool_scheduler.hpp
 cppa/detail/to_uniform_name.hpp
 cppa/detail/tuple_cast_impl.hpp
 cppa/detail/tuple_iterator.hpp
@@ -83,6 +82,7 @@ cppa/detail/yield_interface.hpp
 cppa/enable_weak_ptr.hpp
 cppa/event_based_actor.hpp
 cppa/exception.hpp
+cppa/execution_unit.hpp
 cppa/exit_reason.hpp
 cppa/extend.hpp
 cppa/from_string.hpp
@@ -275,6 +275,7 @@ src/empty_tuple.cpp
 src/event_based_actor.cpp
 src/exception.cpp
 src/execinfo_windows.cpp
+src/execution_unit.cpp
 src/exit_reason.cpp
 src/factory.cpp
 src/fd_util.cpp
@@ -322,7 +323,6 @@ src/shared_spinlock.cpp
 src/singleton_manager.cpp
 src/string_serialization.cpp
 src/sync_request_bouncer.cpp
-src/thread_pool_scheduler.cpp
 src/to_uniform_name.cpp
 src/type_lookup_table.cpp
 src/unicast_network.cpp
......
@@ -55,6 +55,7 @@ namespace cppa {
 class actor_addr;
 class serializer;
 class deserializer;
+class execution_unit;

 /**
  * @brief A unique actor ID.
@@ -241,6 +242,9 @@ class abstract_actor : public abstract_channel {
     // identifies the node this actor is running on
     node_id_ptr m_node;

+    // identifies the execution unit this actor is currently executed by
+    execution_unit* m_host;
+
 };

 /******************************************************************************
......
@@ -31,14 +31,11 @@
 #ifndef CPPA_ABSTRACT_CHANNEL_HPP
 #define CPPA_ABSTRACT_CHANNEL_HPP

+#include "cppa/cppa_fwd.hpp"
 #include "cppa/ref_counted.hpp"

 namespace cppa {

-// forward declarations
-class any_tuple;
-class message_header;
-
 /**
  * @brief Interface for all message receivers.
  *
@@ -50,9 +47,18 @@ class abstract_channel : public ref_counted {
 public:

     /**
-     * @brief Enqueues @p msg to the list of received messages.
+     * @brief Enqueues a new message to the channel.
+     * @param header Contains meta information about this message
+     *               such as the address of the sender and the
+     *               ID of the message if it is a synchronous message.
+     * @param content The content encapsulated in a copy-on-write tuple.
+     * @param host Pointer to the {@link execution_unit execution unit} the
+     *             caller is executed by or @p nullptr if the caller
+     *             is not a scheduled actor.
      */
-    virtual void enqueue(const message_header& hdr, any_tuple msg) = 0;
+    virtual void enqueue(msg_hdr_cref header,
+                         any_tuple content,
+                         execution_unit* host) = 0;

 protected:
......
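The three-argument enqueue is the pivotal interface change of this commit: every channel now learns which execution unit, if any, the sender runs on. A minimal self-contained sketch of that contract, using simplified stand-in types (the one-field message_header, the string-based any_tuple, and print_channel are hypothetical placeholders, not the real cppa classes):

    // Simplified model of the new enqueue interface: a channel receives
    // the sender's execution unit so implementations can keep scheduled
    // actors on the same worker when possible.
    #include <iostream>
    #include <string>

    struct execution_unit;                 // stand-in forward declaration

    struct message_header { std::string sender; };
    using msg_hdr_cref = const message_header&;
    using any_tuple = std::string;         // stand-in for the CoW tuple

    struct abstract_channel {
        virtual ~abstract_channel() = default;
        virtual void enqueue(msg_hdr_cref header,
                             any_tuple content,
                             execution_unit* host) = 0;
    };

    // hypothetical example subclass
    struct print_channel : abstract_channel {
        void enqueue(msg_hdr_cref hdr, any_tuple content,
                     execution_unit* host) override {
            std::cout << hdr.sender << " -> " << content
                      << (host ? " (from scheduled actor)"
                               : " (from non-scheduled caller)")
                      << std::endl;
        }
    };

    int main() {
        print_channel ch;
        ch.enqueue({"actor42"}, "hello", nullptr);
    }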
@@ -76,7 +76,7 @@ class actor_companion : public extend<local_actor, actor_companion>::
      */
     void on_enqueue(enqueue_handler handler);

-    void enqueue(const message_header& hdr, any_tuple msg) override;
+    void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit* eu) override;

 private:
......
@@ -72,7 +72,7 @@ class actor_proxy : public extend<abstract_actor>::with<enable_weak_ptr> {
      *          middleman's thread.
      * @note This function is guaranteed to be called non-concurrently.
      */
-    virtual void deliver(const message_header& hdr, any_tuple msg) = 0;
+    virtual void deliver(msg_hdr_cref hdr, any_tuple msg) = 0;

 protected:
......
@@ -438,7 +438,7 @@ namespace cppa {
  * @brief Sends @p to a message under the identity of @p from.
  */
 inline void send_tuple_as(const actor& from, const channel& to, any_tuple msg) {
-    if (to) to->enqueue({from.address(), to}, std::move(msg));
+    if (to) to->enqueue({from.address(), to}, std::move(msg), nullptr);
 }

 /**
......
@@ -41,10 +41,12 @@ class group;
 class channel;
 class node_id;
 class behavior;
+class resumable;
 class any_tuple;
 class actor_addr;
 class actor_proxy;
 class scoped_actor;
+class execution_unit;
 class abstract_actor;
 class abstract_group;
 class blocking_actor;
@@ -74,6 +76,9 @@ typedef intrusive_ptr<node_id> node_id_ptr;
 // weak intrusive pointer typedefs
 typedef weak_intrusive_ptr<actor_proxy> weak_actor_proxy_ptr;

+// convenience typedefs
+typedef const message_header& msg_hdr_cref;
+
 } // namespace cppa

 #endif // CPPA_FWD_HPP
@@ -11,12 +11,12 @@
 #include "cppa/util/duration.hpp"

-#include "cppa/detail/resumable.hpp"
-
 namespace cppa {
 namespace detail {

-// 'imports' all member functions from policies to the actor
+// 'imports' all member functions from policies to the actor,
+// the resume mixin also adds the m_hidden member which *must* be
+// initialized to @p true
 template<class Base,
          class Derived,
          class Policies>
@@ -27,7 +27,9 @@ class proper_actor_base : public Policies::resume_policy::template mixin<Base, D
 public:

     template <typename... Ts>
-    proper_actor_base(Ts&&... args) : super(std::forward<Ts>(args)...) { }
+    proper_actor_base(Ts&&... args) : super(std::forward<Ts>(args)...) {
+        CPPA_REQUIRE(this->m_hidden == true);
+    }

     // grant access to the actor's mailbox
     typename Base::mailbox_type& mailbox() {
@@ -38,11 +40,11 @@ class proper_actor_base : public Policies::resume_policy::template mixin<Base, D
     typedef typename Policies::scheduling_policy::timeout_type timeout_type;

-    void enqueue(const message_header& hdr, any_tuple msg) override {
+    void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit* eu) override {
         CPPA_PUSH_AID(dptr()->id());
         CPPA_LOG_DEBUG(CPPA_TARG(hdr, to_string)
                        << ", " << CPPA_TARG(msg, to_string));
-        scheduling_policy().enqueue(dptr(), hdr, msg);
+        scheduling_policy().enqueue(dptr(), hdr, msg, eu);
     }

     // NOTE: scheduling_policy::launch is 'imported' in proper_actor
@@ -117,6 +119,17 @@ class proper_actor_base : public Policies::resume_policy::template mixin<Base, D
                                           awaited_response);
     }

+    inline bool hidden() const {
+        return this->m_hidden;
+    }
+
+    void cleanup(std::uint32_t reason) override {
+        CPPA_LOG_TRACE(CPPA_ARG(reason));
+        if (!hidden()) get_actor_registry()->dec_running();
+        super::cleanup(reason);
+    }
+
 protected:

     inline typename Policies::scheduling_policy& scheduling_policy() {
@@ -139,6 +152,13 @@ class proper_actor_base : public Policies::resume_policy::template mixin<Base, D
         return static_cast<Derived*>(this);
     }

+    inline void hidden(bool value) {
+        if (this->m_hidden == value) return;
+        if (value) get_actor_registry()->dec_running();
+        else get_actor_registry()->inc_running();
+        this->m_hidden = value;
+    }
+
 private:

     Policies m_policies;
@@ -170,12 +190,13 @@ class proper_actor : public proper_actor_base<Base,
     inline void launch(bool is_hidden) {
         CPPA_LOG_TRACE("");
+        this->hidden(is_hidden);
         auto bhvr = this->make_behavior();
         if (bhvr) this->become(std::move(bhvr));
         CPPA_LOG_WARNING_IF(this->bhvr_stack().empty(),
                             "actor did not set a behavior");
         if (!this->bhvr_stack().empty()) {
-            this->scheduling_policy().launch(this, is_hidden);
+            this->scheduling_policy().launch(this);
         }
     }

@@ -236,7 +257,8 @@ class proper_actor<Base, Policies, true> : public proper_actor_base<Base,
     }

     inline void launch(bool is_hidden) {
-        this->scheduling_policy().launch(this, is_hidden);
+        this->hidden(is_hidden);
+        this->scheduling_policy().launch(this);
     }

     // implement blocking_actor::dequeue_response
@@ -298,7 +320,8 @@ class proper_actor<Base, Policies, true> : public proper_actor_base<Base,
         auto msg = make_any_tuple(timeout_msg{tid});
         if (d.is_zero()) {
             // immediately enqueue timeout message if duration == 0s
-            this->enqueue({this->address(), this}, std::move(msg));
+            this->enqueue({this->address(), this},
+                          std::move(msg), this->m_host);
             //auto e = this->new_mailbox_element(this, std::move(msg));
             //this->m_mailbox.enqueue(e);
         }
......
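The m_hidden flag introduced above feeds the actor registry's running counter, which await_all_actors_done() observes: actors start hidden, launch() may unhide them, and cleanup() decrements the counter once for every non-hidden actor. A toy model of that bookkeeping, assuming a simplified registry with only inc_running/dec_running (the real actor_registry additionally synchronizes waiters):

    // Simplified model of the hidden-flag bookkeeping in proper_actor_base.
    #include <atomic>
    #include <cassert>

    struct registry {
        std::atomic<std::size_t> running{0};
        void inc_running() { ++running; }
        void dec_running() { --running; }
    };

    registry g_registry; // stand-in for get_actor_registry()

    struct counted_actor {
        bool m_hidden = true; // *must* start as true (see CPPA_REQUIRE)

        void hidden(bool value) {
            if (m_hidden == value) return;
            if (value) g_registry.dec_running();
            else g_registry.inc_running();
            m_hidden = value;
        }

        void launch(bool is_hidden) { hidden(is_hidden); }

        void cleanup() {
            if (!m_hidden) g_registry.dec_running();
            m_hidden = true; // not in the original; avoids double
                             // decrement in this toy, where cleanup()
                             // could be called more than once
        }
    };

    int main() {
        counted_actor a;
        a.launch(/*is_hidden=*/false);
        assert(g_registry.running == 1);
        a.cleanup();
        assert(g_registry.running == 0);
    }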
@@ -33,12 +33,9 @@
 #include <atomic>

-namespace cppa {
-class logging;
-class scheduler;
-} // namespace cppa
+namespace cppa { class logging; }
+namespace cppa { namespace scheduler { class coordinator; } }

 namespace cppa { namespace io { class middleman; } }

@@ -62,9 +59,7 @@ class singleton_manager {

     static logging* get_logger();

-    static scheduler* get_scheduler();
-
-    static bool set_scheduler(scheduler*);
+    static scheduler::coordinator* get_scheduling_coordinator();

     static group_manager* get_group_manager();
......
@@ -28,58 +28,31 @@
 \******************************************************************************/

-#ifndef CPPA_THREAD_POOL_SCHEDULER_HPP
-#define CPPA_THREAD_POOL_SCHEDULER_HPP
-
-#include <thread>
-
-#include "cppa/scheduler.hpp"
-#include "cppa/util/producer_consumer_list.hpp"
-
-#include "cppa/detail/resumable.hpp"
-
-namespace cppa { namespace detail {
-
-struct cs_thread;
-
-class thread_pool_scheduler : public scheduler {
-
-    typedef scheduler super;
+#ifndef CPPA_DETAIL_EXECUTION_UNIT_HPP
+#define CPPA_DETAIL_EXECUTION_UNIT_HPP
+
+namespace cppa {
+
+class resumable;
+
+/*
+ * @brief Identifies an execution unit, e.g., a worker thread of the scheduler.
+ */
+class execution_unit {

 public:

-    struct dummy : resumable {
-        resume_result resume(detail::cs_thread*) override;
-    };
-
-    struct worker;
-
-    thread_pool_scheduler();
-
-    thread_pool_scheduler(size_t num_worker_threads);
-
-    void initialize();
-
-    void destroy();
-
-    void enqueue(resumable* what) override;
-
-private:
-
-    //typedef util::single_reader_queue<abstract_scheduled_actor> job_queue;
-    typedef util::producer_consumer_list<resumable> job_queue;
-
-    size_t m_num_threads;
-    job_queue m_queue;
-    dummy m_dummy;
-    std::thread m_supervisor;
-
-    static void worker_loop(worker*);
-    static void supervisor_loop(job_queue*, resumable*, size_t);
+    virtual ~execution_unit();
+
+    /*
+     * @brief Enqueues @p ptr to the job list of the execution unit.
+     * @warning Must only be called from a {@link resumable} currently
+     *          executed by this execution unit.
+     */
+    virtual void exec_later(resumable* ptr) = 0;

 };

-} } // namespace cppa::detail
+} // namespace cppa

-#endif // CPPA_THREAD_POOL_SCHEDULER_HPP
+#endif // CPPA_DETAIL_EXECUTION_UNIT_HPP
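execution_unit deliberately exposes a single virtual, exec_later, so any job source (worker thread, test fixture, event loop) can act as a unit. A minimal conforming implementation, sketched with a plain std::deque instead of the lock-free producer_consumer_list the real scheduler uses; resumable is reduced to a run() stub for illustration:

    // Toy execution unit: a single consumer draining a FIFO of jobs.
    // This illustrates the interface contract only, not the libcppa worker.
    #include <deque>
    #include <iostream>

    struct resumable {
        virtual ~resumable() = default;
        virtual void run() = 0; // simplified stand-in for resume()
    };

    struct execution_unit {
        virtual ~execution_unit() = default;
        virtual void exec_later(resumable* ptr) = 0;
    };

    struct toy_unit : execution_unit {
        std::deque<resumable*> jobs;
        void exec_later(resumable* ptr) override { jobs.push_back(ptr); }
        void drain() {
            while (!jobs.empty()) {
                auto job = jobs.front();
                jobs.pop_front();
                job->run();
            }
        }
    };

    struct hello_job : resumable {
        void run() override { std::cout << "job executed" << std::endl; }
    };

    int main() {
        toy_unit eu;
        hello_job job;
        eu.exec_later(&job);
        eu.drain();
    }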
@@ -90,7 +90,7 @@ class broker : public extend<local_actor>::

     enum policy_flag { at_least, at_most, exactly };

-    void enqueue(const message_header& hdr, any_tuple msg);
+    void enqueue(msg_hdr_cref, any_tuple, execution_unit*) override;

     bool initialized() const;

@@ -172,7 +172,7 @@ class broker : public extend<local_actor>::
                             input_stream_ptr in,
                             output_stream_ptr out);

-    void invoke_message(const message_header& hdr, any_tuple msg);
+    void invoke_message(msg_hdr_cref hdr, any_tuple msg);

     bool invoke_message_from_cache();
......
@@ -143,7 +143,7 @@ class middleman {
      * @brief Delivers a message to given node.
      */
     virtual void deliver(const node_id& node,
-                         const message_header& hdr,
+                         msg_hdr_cref hdr,
                          any_tuple msg) = 0;

     /**
@@ -207,6 +207,7 @@ inline actor_namespace& middleman::get_namespace() {
 }

 const node_id_ptr& middleman::node() const {
+    CPPA_REQUIRE(m_node != nullptr);
     return m_node;
 }
......
@@ -73,7 +73,7 @@ class peer : public extend<continuable>::with<buffered_writing> {

     void io_failed(event_bitmask mask) override;

-    void enqueue(const message_header& hdr, const any_tuple& msg);
+    void enqueue(msg_hdr_cref hdr, const any_tuple& msg);

     inline bool stop_on_last_proxy_exited() const {
         return m_stop_on_last_proxy_exited;
@@ -132,13 +132,13 @@ class peer : public extend<continuable>::with<buffered_writing> {

     void unlink(const actor_addr& sender, const actor_addr& ptr);

-    void deliver(const message_header& hdr, any_tuple msg);
+    void deliver(msg_hdr_cref hdr, any_tuple msg);

     inline void enqueue(const any_tuple& msg) {
         enqueue({invalid_actor_addr, nullptr}, msg);
     }

-    void enqueue_impl(const message_header& hdr, const any_tuple& msg);
+    void enqueue_impl(msg_hdr_cref hdr, const any_tuple& msg);

     void add_type_if_needed(const std::string& tname);
......
@@ -77,7 +77,7 @@ class remote_actor_proxy : public actor_proxy {
                       node_id_ptr pinfo,
                       middleman* parent);

-    void enqueue(const message_header& hdr, any_tuple msg) override;
+    void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit*) override;

     void link_to(const actor_addr& other) override;

@@ -91,7 +91,7 @@ class remote_actor_proxy : public actor_proxy {

     void local_unlink_from(const actor_addr& other) override;

-    void deliver(const message_header& hdr, any_tuple msg) override;
+    void deliver(msg_hdr_cref hdr, any_tuple msg) override;

 protected:

@@ -99,7 +99,7 @@ class remote_actor_proxy : public actor_proxy {

 private:

-    void forward_msg(const message_header& hdr, any_tuple msg);
+    void forward_msg(msg_hdr_cref hdr, any_tuple msg);

     middleman* m_parent;
     intrusive::single_reader_queue<sync_request_info, detail::disposer> m_pending_requests;
......
@@ -52,6 +52,7 @@
 #include "cppa/message_header.hpp"
 #include "cppa/abstract_actor.hpp"
 #include "cppa/abstract_group.hpp"
+#include "cppa/execution_unit.hpp"
 #include "cppa/mailbox_element.hpp"
 #include "cppa/response_promise.hpp"
 #include "cppa/message_priority.hpp"
@@ -67,8 +68,7 @@
 namespace cppa {

 // forward declarations
-class scheduler;
-class local_scheduler;
+class execution_unit;
 class sync_handle_helper;

 /**
@@ -557,7 +557,7 @@ class local_actor : public extend<abstract_actor>::with<memory_cached> {
         return m_state;
     }

-    void cleanup(std::uint32_t reason);
+    void cleanup(std::uint32_t reason) override;

     mailbox_element* dummy_node() {
         return &m_dummy_node;
......
@@ -194,7 +194,7 @@ oss_wr operator<<(oss_wr&& lhs, T rhs) {
         ::cppa::get_logger()->set_aid(aid_arg)
 #endif

-#define CPPA_CLASS_NAME ::cppa::detail::demangle(typeid(*this)).c_str()
+#define CPPA_CLASS_NAME cppa::detail::demangle(typeid(decltype(*this))).c_str()

 #define CPPA_PRINT0(lvlname, classname, funname, msg) \
     CPPA_LOG_IMPL(lvlname, classname, funname, msg)
......
@@ -65,7 +65,7 @@ class mailbox_element : public extend<memory_managed>::with<memory_cached> {
     mailbox_element& operator=(const mailbox_element&) = delete;

     template<typename T>
-    inline static mailbox_element* create(const message_header& hdr, T&& data) {
+    inline static mailbox_element* create(msg_hdr_cref hdr, T&& data) {
         return detail::memory::create<mailbox_element>(hdr, std::forward<T>(data));
     }

@@ -73,7 +73,7 @@ class mailbox_element : public extend<memory_managed>::with<memory_cached> {

     mailbox_element() = default;

-    mailbox_element(const message_header& hdr, any_tuple data);
+    mailbox_element(msg_hdr_cref hdr, any_tuple data);

 };
......
@@ -69,9 +69,14 @@ class message_header {

 };

-bool operator==(const message_header& lhs, const message_header& rhs);
+/**
+ * @brief Convenience typedef.
+ */
+typedef const message_header& msg_hdr_cref;

-bool operator!=(const message_header& lhs, const message_header& rhs);
+bool operator==(msg_hdr_cref lhs, msg_hdr_cref rhs);
+
+bool operator!=(msg_hdr_cref lhs, msg_hdr_cref rhs);

 } // namespace cppa
......
@@ -118,7 +118,7 @@ class actor_facade<Ret(Args...)> : public abstract_actor {
         };
     }

-    void enqueue(const message_header& hdr, any_tuple msg) override {
+    void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit*) override {
         CPPA_LOG_TRACE("");
         typename util::il_indices<util::type_list<Args...>>::type indices;
         enqueue_impl(hdr.sender, std::move(msg), hdr.id, indices);
@@ -130,10 +130,10 @@ class actor_facade<Ret(Args...)> : public abstract_actor {
     using args_vec = std::vector<mem_ptr>;

     actor_facade(const program& prog, kernel_ptr kernel,
                  const dim_vec& global_dimensions,
                  const dim_vec& global_offsets,
                  const dim_vec& local_dimensions,
                  arg_mapping map_args, result_mapping map_result,
                  size_t result_size)
     : m_kernel(kernel), m_program(prog.m_program)
     , m_context(prog.m_context), m_queue(prog.m_queue)
@@ -155,10 +155,10 @@ class actor_facade<Ret(Args...)> : public abstract_actor {
         response_promise handle{this->address(), sender, id.response_id()};
         evnt_vec events;
         args_vec arguments;
         add_arguments_to_kernel<Ret>(events, arguments, m_result_size,
                                      get_ref<Is>(*opt)...);
         auto cmd = make_counted<command<actor_facade, Ret>>(
             handle, this,
             std::move(events), std::move(arguments),
             m_result_size, *opt
         );
@@ -194,7 +194,7 @@ class actor_facade<Ret(Args...)> : public abstract_actor {
     }

     template<typename T0, typename... Ts>
     void add_arguments_to_kernel_rec(evnt_vec& events, args_vec& arguments,
                                      T0& arg0, Ts&... args) {
         cl_int err{0};
         size_t buffer_size = sizeof(typename T0::value_type) * arg0.size();
......
@@ -31,11 +31,12 @@
 #define CPPA_CONTEXT_SWITCHING_ACTOR_HPP

 #include "cppa/config.hpp"
+#include "cppa/logging.hpp"
+#include "cppa/resumable.hpp"
 #include "cppa/actor_state.hpp"
 #include "cppa/mailbox_element.hpp"

 #include "cppa/detail/cs_thread.hpp"
-#include "cppa/detail/resumable.hpp"

 #include "cppa/policy/resume_policy.hpp"

@@ -61,7 +62,7 @@ class context_switching_resume {

     // Base must be a mailbox-based actor
     template<class Base, class Derived>
-    struct mixin : Base, detail::resumable {
+    struct mixin : Base, resumable {

         template<typename... Ts>
         mixin(Ts&&... args)
@@ -69,9 +70,11 @@ class context_switching_resume {
             , m_cs_thread(context_switching_resume::trampoline,
                           static_cast<blocking_actor*>(this)) { }

-        detail::resumable::resume_result resume(detail::cs_thread* from) override {
+        resumable::resume_result resume(detail::cs_thread* from,
+                                        execution_unit* host) override {
             CPPA_REQUIRE(from != nullptr);
             CPPA_PUSH_AID(this->id());
+            this->m_host = host;
             using namespace detail;
             for (;;) {
                 switch (call(&m_cs_thread, from)) {
@@ -105,6 +108,7 @@ class context_switching_resume {
     template<class Actor>
     void await_ready(Actor* self) {
+        CPPA_LOG_TRACE("");
         while (!self->has_next_message()) {
             self->set_state(actor_state::about_to_block);
             // double-check before going to block
......
@@ -34,6 +34,8 @@
 #include <atomic>

 #include "cppa/any_tuple.hpp"
+#include "cppa/scheduler.hpp"
+#include "cppa/singletons.hpp"
 #include "cppa/actor_state.hpp"
 #include "cppa/message_header.hpp"
@@ -80,12 +82,15 @@ class cooperative_scheduling {
     }

     template<class Actor>
-    inline void launch(Actor*) {
-        static_cast<void>(m_hidden);
+    inline void launch(Actor* self) {
+        get_scheduling_coordinator()->enqueue(self);
     }

     template<class Actor>
-    void enqueue(Actor* self, const message_header& hdr, any_tuple& msg) {
+    void enqueue(Actor* self,
+                 msg_hdr_cref hdr,
+                 any_tuple& msg,
+                 execution_unit* host) {
         auto e = self->new_mailbox_element(hdr, std::move(msg));
         switch (self->mailbox().enqueue(e)) {
             case intrusive::first_enqueued: {
@@ -98,7 +103,8 @@ class cooperative_scheduling {
                 switch (state) {
                     case actor_state::blocked: {
                         if (set_ready()) {
-                            //m_scheduler->enqueue(this);
+                            if (host) host->exec_later(self);
+                            else get_scheduling_coordinator()->enqueue(self);
                             return;
                         }
                         break;
@@ -116,9 +122,8 @@ class cooperative_scheduling {
             }
             case intrusive::queue_closed: {
                 if (hdr.id.is_request()) {
-                    //FIXME
-                    //detail::sync_request_bouncer f{exit_reason()};
-                    //f(hdr.sender, hdr.id);
+                    detail::sync_request_bouncer f{self->exit_reason()};
+                    f(hdr.sender, hdr.id);
                 }
                 break;
             }
@@ -126,11 +131,6 @@ class cooperative_scheduling {
         }
     }

-private:
-
-    // denotes whether this actor is ignored by await_all_actors_done()
-    bool m_hidden;
-
 };

 } } // namespace cppa::policy
......
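The interesting branch above is actor_state::blocked: once the consumer is marked ready again, it is pushed to the producer's own worker via exec_later when the producer is a scheduled actor, and to the central coordinator otherwise. That decision, condensed into a stand-alone sketch with stand-in types (schedule_ready_actor and coordinator_t are hypothetical names for illustration):

    // Condensed model of cooperative_scheduling::enqueue's scheduling
    // decision: prefer the caller's own worker over the central queue.
    #include <iostream>

    struct resumable { };

    struct execution_unit {
        virtual ~execution_unit() = default;
        virtual void exec_later(resumable*) = 0;
    };

    struct coordinator_t {
        void enqueue(resumable*) { std::cout << "central queue\n"; }
    };

    coordinator_t g_coordinator; // stand-in for get_scheduling_coordinator()

    // called after a blocked actor was marked ready again
    void schedule_ready_actor(resumable* self, execution_unit* host) {
        if (host) host->exec_later(self); // stay on the caller's worker
        else g_coordinator.enqueue(self); // caller is not a scheduled actor
    }

    struct worker : execution_unit {
        void exec_later(resumable*) override { std::cout << "local queue\n"; }
    };

    int main() {
        resumable actor;
        worker w;
        schedule_ready_actor(&actor, &w);      // prints "local queue"
        schedule_ready_actor(&actor, nullptr); // prints "central queue"
    }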
@@ -40,6 +40,7 @@
 #include "cppa/config.hpp"
 #include "cppa/extend.hpp"
 #include "cppa/behavior.hpp"
+#include "cppa/scheduler.hpp"
 #include "cppa/actor_state.hpp"

 #include "cppa/policy/resume_policy.hpp"
@@ -54,7 +55,7 @@ class event_based_resume {

     // Base must be a mailbox-based actor
     template<class Base, class Derived>
-    struct mixin : Base, detail::resumable {
+    struct mixin : Base, resumable {

         template<typename... Ts>
         mixin(Ts&&... args) : Base(std::forward<Ts>(args)...) { }

@@ -63,8 +64,10 @@ class event_based_resume {
             return static_cast<Derived*>(this);
         }

-        resumable::resume_result resume(detail::cs_thread*) override {
+        resumable::resume_result resume(detail::cs_thread*,
+                                        execution_unit* host) override {
             auto d = dptr();
+            d->m_host = host;
             CPPA_LOG_TRACE("id = " << d->id()
                            << ", state = " << static_cast<int>(d->state()));
             CPPA_REQUIRE(   d->state() == actor_state::ready
@@ -171,7 +174,7 @@ class event_based_resume {
                 }
             }
             done_cb();
-            return detail::resumable::done;
+            return resumable::done;
         }

     };
......
@@ -55,7 +55,7 @@ class middleman_scheduling {

         typedef intrusive_ptr<Actor> pointer;

-        continuation(pointer ptr, const message_header& hdr, any_tuple&& msg)
+        continuation(pointer ptr, msg_hdr_cref hdr, any_tuple&& msg)
         : m_self(std::move(ptr)), m_hdr(hdr), m_data(std::move(msg)) { }

         inline void operator()() const {
@@ -91,7 +91,7 @@ class middleman_scheduling {
     }

     template<class Actor>
-    void enqueue(Actor* self, const message_header& hdr, any_tuple& msg) {
+    void enqueue(Actor* self, msg_hdr_cref hdr, any_tuple& msg) {
         get_middleman()->run_later(continuation<Actor>{self, hdr, std::move(msg)});
     }
......
@@ -27,9 +27,12 @@ class no_resume {
     struct mixin : Base {

         template<typename... Ts>
-        mixin(Ts&&... args) : Base(std::forward<Ts>(args)...) { }
+        mixin(Ts&&... args)
+        : Base(std::forward<Ts>(args)...)
+        , m_hidden(true) { }

-        inline detail::resumable::resume_result resume(detail::cs_thread*) {
+        inline resumable::resume_result resume(detail::cs_thread*,
+                                               execution_unit*) {
             auto done_cb = [=](std::uint32_t reason) {
                 this->planned_exit_reason(reason);
                 this->on_exit();
@@ -45,8 +48,11 @@ class no_resume {
             catch (...) {
                 done_cb(exit_reason::unhandled_exception);
             }
-            return detail::resumable::done;
+            return resumable::done;
         }

+        bool m_hidden;
+
     };

     template<class Actor>
......
@@ -83,7 +83,8 @@ class no_scheduling {
     }

     template<class Actor>
-    void enqueue(Actor* self, const message_header& hdr, any_tuple& msg) {
+    void enqueue(Actor* self, msg_hdr_cref hdr,
+                 any_tuple& msg, execution_unit*) {
         auto ptr = self->new_mailbox_element(hdr, std::move(msg));
         switch (self->mailbox().enqueue(ptr)) {
             default:
@@ -104,22 +105,18 @@ class no_scheduling {
     }

     template<class Actor>
-    void launch(Actor* self, bool is_hidden) {
+    void launch(Actor* self) {
         CPPA_PUSH_AID(self->id());
-        CPPA_LOG_TRACE(CPPA_ARG(self) << ", " << CPPA_ARG(is_hidden));
+        CPPA_LOG_TRACE(CPPA_ARG(self));
         CPPA_REQUIRE(self != nullptr);
-        if (!is_hidden) get_actor_registry()->inc_running();
         intrusive_ptr<Actor> mself{self};
         std::thread([=] {
             CPPA_PUSH_AID(mself->id());
             CPPA_LOG_TRACE("");
-            auto guard = util::make_scope_guard([is_hidden] {
-                if (!is_hidden) get_actor_registry()->dec_running();
-            });
             detail::cs_thread fself;
             for (;;) {
                 mself->set_state(actor_state::ready);
-                if (mself->resume(&fself) == detail::resumable::done) {
+                if (mself->resume(&fself, nullptr) == resumable::done) {
                     return;
                 }
                 // await new data before resuming actor
......
@@ -31,21 +31,17 @@
 #ifndef CPPA_RESUME_POLICY_HPP
 #define CPPA_RESUME_POLICY_HPP

-#include "cppa/detail/resumable.hpp"
+#include "cppa/resumable.hpp"

 // this header consists all type definitions needed to
 // implement the resume_policy trait

 namespace cppa {
-namespace util {
-class duration;
-} // namespace util
-namespace detail {
-struct cs_thread;
-} // namespace detail
-} // namespace cppa

-namespace cppa {
+class execution_unit;
+namespace util { class duration; }
+namespace detail { struct cs_thread; }
+
 namespace policy {

 /**
@@ -64,8 +60,9 @@ class resume_policy {
      *          actor finishes execution.
      */
     template<class Actor>
-    detail::resumable::resume_result resume(Actor* self,
-                                            detail::cs_thread* from);
+    resumable::resume_result resume(Actor* self,
+                                    detail::cs_thread* from,
+                                    execution_unit*);

     /**
      * @brief Waits unconditionally until the actor is ready to resume.
......
@@ -31,11 +31,15 @@
 #ifndef CPPA_SCHEDULING_POLICY_HPP
 #define CPPA_SCHEDULING_POLICY_HPP

-namespace cppa { class message_header; class any_tuple; }
-namespace cppa { namespace util { class duration; } }
+namespace cppa {

-namespace cppa { namespace policy {
+class any_tuple;
+class execution_unit;
+class message_header;
+
+namespace util { class duration; }
+
+namespace policy {

 enum class timed_fetch_result {
     no_message,
@@ -100,7 +104,10 @@ class scheduling_policy {
      *        steps to resume the actor if it's currently blocked.
      */
     template<class Actor>
-    void enqueue(Actor* self, const message_header& hdr, any_tuple& msg);
+    void enqueue(Actor* self,
+                 msg_hdr_cref hdr,
+                 any_tuple& msg,
+                 execution_unit* host);

     /**
      * @brief Starts the given actor either by launching a thread or enqueuing
......
@@ -32,9 +32,10 @@
 #define CPPA_RESUMABLE_HPP

 namespace cppa {
-namespace detail {

-struct cs_thread;
+class execution_unit;
+
+namespace detail { struct cs_thread; }

 class resumable {

@@ -49,13 +50,18 @@ class resumable {
     // 'resumable' with 'single_reader_queue'
     resumable* next;

+    resumable();
+
     virtual ~resumable();

-    virtual resume_result resume(detail::cs_thread*) = 0;
+    virtual resume_result resume(detail::cs_thread*, execution_unit*) = 0;
+
+protected:
+
+    bool m_hidden;

 };

-} // namespace detail
 } // namespace cppa

 #endif // CPPA_RESUMABLE_HPP
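resumable is now the sole currency between actors and execution units: a unit calls resume() with itself as host and keeps re-enqueuing the job until it reports completion. A toy dispatch loop illustrating that contract (the cs_thread parameter is omitted and the resume_result values are simplified to done/resume_later; the real enum lives inside cppa::resumable):

    // Toy dispatch loop built on the resumable contract: run a job and
    // requeue it unless it reports completion.
    #include <deque>
    #include <iostream>

    struct execution_unit;

    struct resumable {
        enum resume_result { done, resume_later };
        virtual ~resumable() = default;
        virtual resume_result resume(execution_unit* host) = 0;
    };

    struct execution_unit {
        std::deque<resumable*> jobs;
        void run_all() {
            while (!jobs.empty()) {
                auto job = jobs.front();
                jobs.pop_front();
                if (job->resume(this) == resumable::resume_later) {
                    jobs.push_back(job); // not done yet, run again later
                }
            }
        }
    };

    struct countdown : resumable {
        int steps;
        explicit countdown(int n) : steps(n) { }
        resume_result resume(execution_unit*) override {
            std::cout << "step " << steps << std::endl;
            return --steps > 0 ? resume_later : done;
        }
    };

    int main() {
        execution_unit eu;
        countdown job{3};
        eu.jobs.push_back(&job);
        eu.run_all(); // prints step 3, step 2, step 1
    }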
@@ -33,6 +33,7 @@
 #include <chrono>
 #include <memory>
+#include <thread>
 #include <cstdint>
 #include <functional>
 #include <type_traits>
@@ -43,53 +44,44 @@
 #include "cppa/any_tuple.hpp"
 #include "cppa/cow_tuple.hpp"
 #include "cppa/attachable.hpp"
+#include "cppa/scoped_actor.hpp"
 #include "cppa/spawn_options.hpp"
+#include "cppa/execution_unit.hpp"
 #include "cppa/message_header.hpp"

 #include "cppa/util/duration.hpp"
+#include "cppa/util/producer_consumer_list.hpp"

 namespace cppa {

-class event_based_actor;
-class scheduled_actor;
-class scheduler_helper;
-typedef intrusive_ptr<scheduled_actor> scheduled_actor_ptr;
-
-namespace detail {
 class resumable;
-class singleton_manager;
-} // namespace detail
+
+namespace detail { class singleton_manager; }
+
+namespace scheduler {
+
+class worker;

 /**
- * @brief This abstract class allows to create (spawn) new actors
- *        and offers delayed sends.
+ * @brief Central scheduling interface.
  */
-class scheduler {
+class coordinator {

     friend class detail::singleton_manager;

-protected:
-
-    scheduler();
-
-    virtual ~scheduler();
+public:
+
+    class shutdown_helper;

     /**
-     * @warning Always call scheduler::initialize on overriding.
+     * @brief Returns a handle to the central printing actor.
      */
-    virtual void initialize();
+    actor printer() const;

     /**
-     * @warning Always call scheduler::destroy on overriding.
+     * @brief Puts @p what into the queue of a randomly chosen worker.
      */
-    virtual void destroy();
-
-public:
-
-    actor printer() const;
-
-    virtual void enqueue(detail::resumable*) = 0;
+    void enqueue(resumable* what);

     template<typename Duration, typename... Data>
     void delayed_send(message_header hdr,
@@ -99,7 +91,7 @@ class scheduler {
                                     util::duration{rel_time},
                                     std::move(hdr),
                                     std::move(data));
-        delayed_send_helper()->enqueue(message_header{}, std::move(tup));
+        m_timer->enqueue(message_header{}, std::move(tup), nullptr);
     }

     template<typename Duration, typename... Data>
@@ -111,34 +103,124 @@ class scheduler {
                                     util::duration{rel_time},
                                     std::move(hdr),
                                     std::move(data));
-        delayed_send_helper()->enqueue(message_header{}, std::move(tup));
+        m_timer->enqueue(message_header{}, std::move(tup), nullptr);
     }

+    inline size_t num_workers() const {
+        return static_cast<unsigned>(m_workers.size());
+    }
+
+    inline worker* worker_by_id(size_t id) const {
+        return m_workers[id].get();
+    }
+
 private:

-    static scheduler* create_singleton();
+    static coordinator* create_singleton();
+
+    coordinator();

     inline void dispose() { delete this; }

-    actor delayed_send_helper();
+    void initialize();
+
+    void destroy();
+
+    intrusive_ptr<blocking_actor> m_timer;
+    scoped_actor m_printer;
+
+    std::thread m_timer_thread;
+    std::thread m_printer_thread;

-    scheduler_helper* m_helper;
+    // ID of the worker receiving the next enqueue
+    std::atomic<size_t> m_next_worker;
+
+    // vector of size std::thread::hardware_concurrency()
+    std::vector<std::unique_ptr<worker>> m_workers;

 };

 /**
- * @brief Sets the scheduler to @p sched.
- * @param sched A user-defined scheduler implementation.
- * @pre <tt>sched != nullptr</tt>.
- * @throws std::runtime_error if there's already a scheduler defined.
+ * @brief A work-stealing scheduling worker.
+ *
+ * The work-stealing implementation of libcppa minimizes access to the
+ * synchronized queue. The reasoning behind this design decision is that
+ * it has been shown that stealing actually is very rare for workloads [1].
+ * Hence, implementations should focus on the performance in
+ * the non-stealing case. For this reason, each worker has an exposed
+ * job queue that can be accessed by the central scheduler instance as
+ * well as other workers, but it also has a private job list it is
+ * currently working on. To account for the load balancing aspect, each
+ * worker makes sure that at least one job is left in its exposed queue
+ * to allow other workers to steal it.
+ *
+ * [1] http://dl.acm.org/citation.cfm?doid=2398857.2384639
  */
-void set_scheduler(scheduler* sched);
-
-/**
- * @brief Sets a thread pool scheduler with @p num_threads worker threads.
- * @throws std::runtime_error if there's already a scheduler defined.
- */
-void set_default_scheduler(size_t num_threads);
+class worker : public execution_unit {
+
+    friend class coordinator;
+
+    friend class coordinator::shutdown_helper;
+
+public:
+
+    typedef resumable* job_ptr;
+    typedef util::producer_consumer_list<resumable> job_queue;
+
+    worker(size_t id, coordinator* parent);
+
+    /**
+     * @brief Attempt to steal an element from the exposed job queue.
+     */
+    job_ptr try_steal();
+
+    /**
+     * @brief Enqueues a new job to the worker's queue from an external
+     *        source, i.e., from any other thread.
+     */
+    void external_enqueue(job_ptr);
+
+    /**
+     * @brief Enqueues a new job to the worker's queue from an internal
+     *        source, i.e., a job that is currently executed by
+     *        this worker.
+     * @warning Must not be called from other threads.
+     */
+    void exec_later(job_ptr) override;
+
+private:
+
+    void start(); // called from the scheduler
+
+    void run(); // work loop
+
+    job_ptr raid(); // go on a raid in quest for a new shiny job
+
+    bool m_running;
+
+    // this queue is exposed to others, i.e., other workers
+    // may attempt to steal jobs from it and the central scheduling
+    // unit can push new jobs to the queue
+    job_queue m_exposed_queue;
+
+    // internal job stack
+    std::vector<job_ptr> m_job_list;
+
+    // the worker's thread
+    std::thread m_this_thread;
+
+    // the worker's ID received from scheduler
+    size_t m_id;
+
+    // the ID of the last victim we stole from
+    size_t m_last_victim;
+
+    coordinator* m_parent;
+
+};
+
+} // namespace scheduler

 } // namespace cppa
......
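A compact model of the stealing rules described in the worker's doc comment above: drain the private list first, fall back to the own exposed queue, and raid a random victim only when both are empty, while internal enqueues keep at least one job visible to thieves. This is a single-threaded illustration with stand-in types and no synchronization; the real worker uses the lock-free producer_consumer_list and runs these phases concurrently, and the "keep one exposed" rule in exec_later is one plausible reading of the comment, not the commit's exact code:

    // Illustrative model of the work-stealing rules, not the real worker.
    #include <cstdlib>
    #include <deque>
    #include <vector>

    struct job { /* stand-in for resumable */ };

    struct worker {
        std::deque<job*> exposed_queue; // others may steal from here
        std::deque<job*> private_list;  // current batch, never stolen
        size_t last_victim = 0;

        // victim side of a steal: hand out one exposed job, if any
        job* try_steal() {
            if (exposed_queue.empty()) return nullptr;
            auto j = exposed_queue.front();
            exposed_queue.pop_front();
            return j;
        }

        // internal enqueue: keep at least one job exposed for thieves
        void exec_later(job* j) {
            if (exposed_queue.empty()) exposed_queue.push_back(j);
            else private_list.push_back(j);
        }

        job* next_job(std::vector<worker>& all, size_t my_id) {
            // 1) private list first: no synchronization needed
            if (!private_list.empty()) {
                auto j = private_list.back();
                private_list.pop_back();
                return j;
            }
            // 2) own exposed queue next
            if (auto j = try_steal()) return j;
            // 3) raid: try to steal from randomly chosen victims
            for (size_t i = 0; i < all.size(); ++i) {
                last_victim = std::rand() % all.size();
                if (last_victim == my_id) continue; // never steal from self
                if (auto j = all[last_victim].try_steal()) return j;
            }
            return nullptr; // caller backs off and retries
        }
    };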
@@ -62,7 +62,8 @@ class single_timeout : public Base {
         auto msg = make_any_tuple(timeout_msg{tid});
         if (d.is_zero()) {
             // immediately enqueue timeout message if duration == 0s
-            this->enqueue({this->address(), this}, std::move(msg));
+            this->enqueue({this->address(), this}, std::move(msg),
+                          this->m_host);
             //auto e = this->new_mailbox_element(this, std::move(msg));
             //this->m_mailbox.enqueue(e);
         }
......
@@ -39,8 +39,8 @@ inline logging* get_logger() {
     return detail::singleton_manager::get_logger();
 }

-inline scheduler* get_scheduler() {
-    return detail::singleton_manager::get_scheduler();
+inline scheduler::coordinator* get_scheduling_coordinator() {
+    return detail::singleton_manager::get_scheduling_coordinator();
 }

 inline detail::group_manager* get_group_manager() {
......
@@ -72,14 +72,11 @@ intrusive_ptr<C> spawn_impl(BeforeLaunch before_launch_fun, Ts&&... args) {
         return spawn_impl<C, Os + detached>(before_launch_fun,
                                             std::forward<Ts>(args)...);
     }
-    /*
     using scheduling_policy = typename std::conditional<
                                   has_detach_flag(Os),
                                   policy::no_scheduling,
                                   policy::cooperative_scheduling
                               >::type;
-    */
-    using scheduling_policy = policy::no_scheduling;
     using priority_policy = typename std::conditional<
                                 has_priority_aware_flag(Os),
                                 policy::prioritizing,
......
@@ -65,7 +65,7 @@ inline std::string to_string(const any_tuple& what) {
     return detail::to_string_impl(what);
 }

-inline std::string to_string(const message_header& what) {
+inline std::string to_string(msg_hdr_cref what) {
     return detail::to_string_impl(what);
 }
......
@@ -175,6 +175,13 @@ class producer_consumer_list {
         }
     }

+    bool empty() const {
+        // this seems to be a non-thread-safe implementation,
+        // however, any 'race condition' that might occur
+        // only means we cannot assume an empty list
+        return m_first == m_last;
+    }
+
 };

 } } // namespace cppa::util
......
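The deliberately unsynchronized empty() above works because callers treat it as a hint only; the authoritative operation remains the synchronized try_pop(). A sketch of that usage pattern with a hypothetical hint_queue standing in for producer_consumer_list (the unguarded read in empty() mirrors the original's relaxed check):

    // Why a racy empty() is acceptable here: a stale answer only costs
    // the caller an extra try_pop() that returns nullptr.
    #include <deque>
    #include <mutex>

    template<typename T>
    class hint_queue { // stand-in for util::producer_consumer_list<T>
        std::deque<T*> m_items;
        mutable std::mutex m_mtx;
    public:
        void push_back(T* x) {
            std::lock_guard<std::mutex> guard{m_mtx};
            m_items.push_back(x);
        }
        T* try_pop() {
            std::lock_guard<std::mutex> guard{m_mtx};
            if (m_items.empty()) return nullptr;
            auto x = m_items.front();
            m_items.pop_front();
            return x;
        }
        // hint only: may be stale by the time the caller acts on it
        bool empty() const { return m_items.empty(); }
    };

    template<typename T>
    T* steal_from(hint_queue<T>& victim) {
        if (victim.empty()) return nullptr; // cheap pre-check, may be wrong
        return victim.try_pop();            // synchronized, authoritative
    }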
@@ -62,11 +62,12 @@ namespace { typedef std::unique_lock<std::mutex> guard_type; }
 // by std::atomic<> constructor

 abstract_actor::abstract_actor(actor_id aid)
-: m_id(aid), m_is_proxy(true), m_exit_reason(exit_reason::not_exited) { }
+: m_id(aid), m_is_proxy(true)
+, m_exit_reason(exit_reason::not_exited), m_host(nullptr) { }

 abstract_actor::abstract_actor()
 : m_id(get_actor_registry()->next_id()), m_is_proxy(false)
 , m_exit_reason(exit_reason::not_exited) {
     m_node = get_middleman()->node();
 }
@@ -77,7 +78,8 @@ bool abstract_actor::link_to_impl(const actor_addr& other) {
         // send exit message if already exited
         if (exited()) {
             ptr->enqueue({address(), ptr},
-                         make_any_tuple(exit_msg{address(), exit_reason()}));
+                         make_any_tuple(exit_msg{address(), exit_reason()}),
+                         m_host);
         }
         // add link if not already linked to other
         // (checked by establish_backlink)
@@ -160,7 +162,8 @@ bool abstract_actor::establish_backlink(const actor_addr& other) {
     if (reason != exit_reason::not_exited) {
         auto ptr = detail::raw_access::unsafe_cast(other);
         ptr->enqueue({address(), ptr},
-                     make_any_tuple(exit_msg{address(), exit_reason()}));
+                     make_any_tuple(exit_msg{address(), exit_reason()}),
+                     m_host);
     }
     return false;
 }
@@ -213,7 +216,8 @@ void abstract_actor::cleanup(std::uint32_t reason) {
     auto msg = make_any_tuple(exit_msg{address(), reason});
     CPPA_LOGM_DEBUG("cppa::actor", "send EXIT to " << mlinks.size() << " links");
     for (auto& aptr : mlinks) {
-        aptr->enqueue({address(), aptr, message_id{}.with_high_priority()}, msg);
+        aptr->enqueue({address(), aptr, message_id{}.with_high_priority()},
+                      msg, m_host);
     }
     CPPA_LOGM_DEBUG("cppa::actor", "run " << mattachables.size()
                     << " attachables");
......
@@ -89,7 +89,9 @@ void publish_local_groups(std::uint16_t port, const char* addr) {
         publish(gn, port, addr);
     }
     catch (std::exception&) {
-        gn->enqueue({invalid_actor_addr, nullptr}, make_any_tuple(atom("SHUTDOWN")));
+        gn->enqueue({invalid_actor_addr, nullptr},
+                    make_any_tuple(atom("SHUTDOWN")),
+                    nullptr);
         throw;
     }
 }
......
@@ -46,9 +46,9 @@ void actor_companion::on_enqueue(enqueue_handler handler) {
     m_on_enqueue = std::move(handler);
 }

-void actor_companion::enqueue(const message_header& hdr, any_tuple msg) {
+void actor_companion::enqueue(msg_hdr_cref hdr, any_tuple ct, execution_unit*) {
     message_pointer ptr;
-    ptr.reset(detail::memory::create<mailbox_element>(hdr, std::move(msg)));
+    ptr.reset(detail::memory::create<mailbox_element>(hdr, std::move(ct)));
     util::shared_lock_guard<lock_type> guard(m_lock);
     if (!m_on_enqueue) return;
     m_on_enqueue(std::move(ptr));
......
@@ -37,7 +37,7 @@
 namespace cppa {

 actor_ostream::actor_ostream(local_actor* self) : m_self(self) {
-    m_printer = get_scheduler()->printer();
+    m_printer = get_scheduling_coordinator()->printer();
 }

 actor_ostream& actor_ostream::write(std::string arg) {
......
@@ -73,7 +73,8 @@ behavior default_broker::make_behavior() {
     CPPA_PUSH_AID(id());
     CPPA_LOG_TRACE("");
     enqueue({invalid_actor_addr, channel{this}},
-            make_any_tuple(atom("INITMSG")));
+            make_any_tuple(atom("INITMSG")),
+            nullptr);
     return (
         on(atom("INITMSG")) >> [=] {
             unbecome();
@@ -86,7 +87,7 @@ class broker::continuation {

 public:

-    continuation(broker_ptr ptr, const message_header& hdr, any_tuple&& msg)
+    continuation(broker_ptr ptr, msg_hdr_cref hdr, any_tuple&& msg)
     : m_self(move(ptr)), m_hdr(hdr), m_data(move(msg)) { }

     inline void operator()() {
@@ -287,7 +288,7 @@ class broker::doorman : public broker::servant {

 };

-void broker::invoke_message(const message_header& hdr, any_tuple msg) {
+void broker::invoke_message(msg_hdr_cref hdr, any_tuple msg) {
     CPPA_LOG_TRACE(CPPA_TARG(msg, to_string));
     if (planned_exit_reason() != exit_reason::not_exited || bhvr_stack().empty()) {
         CPPA_LOG_DEBUG("actor already finished execution"
@@ -372,7 +373,7 @@ bool broker::invoke_message_from_cache() {
     return false;
 }

-void broker::enqueue(const message_header& hdr, any_tuple msg) {
+void broker::enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit*) {
     get_middleman()->run_later(continuation{this, hdr, move(msg)});
 }
......
@@ -40,6 +40,7 @@ namespace cppa {
 namespace policy {

 void context_switching_resume::trampoline(void* this_ptr) {
+    CPPA_LOGF_TRACE(CPPA_ARG(this_ptr));
     auto self = reinterpret_cast<blocking_actor*>(this_ptr);
     auto shut_actor_down = [self](std::uint32_t reason) {
         if (self->planned_exit_reason() == exit_reason::not_exited) {
@@ -59,6 +60,7 @@ void context_switching_resume::trampoline(void* this_ptr) {
         shut_actor_down(exit_reason::unhandled_exception);
     }
     std::atomic_thread_fence(std::memory_order_seq_cst);
+    CPPA_LOGF_DEBUG("done, yield() back to execution unit");
     detail::yield(detail::yield_state::done);
 }
......
...@@ -28,153 +28,10 @@ ...@@ -28,153 +28,10 @@
\******************************************************************************/ \******************************************************************************/
#include <mutex> #include "cppa/execution_unit.hpp"
#include <thread>
#include <cstdint>
#include <cstddef>
#include <iostream>
#include "cppa/on.hpp" namespace cppa {
#include "cppa/logging.hpp"
#include "cppa/detail/cs_thread.hpp" execution_unit::~execution_unit() { }
#include "cppa/detail/actor_registry.hpp" } // namespace cppa
#include "cppa/detail/thread_pool_scheduler.hpp"
using std::cout;
using std::endl;
namespace cppa { namespace detail {
resumable::resume_result thread_pool_scheduler::dummy::resume(detail::cs_thread*) {
throw std::logic_error("thread_pool_scheduler::dummy::resume");
}
struct thread_pool_scheduler::worker {
typedef resumable* job_ptr;
job_queue* m_job_queue;
job_ptr m_dummy;
std::thread m_thread;
worker(job_queue* jq, job_ptr dummy) : m_job_queue(jq), m_dummy(dummy) { }
void start() {
m_thread = std::thread(&thread_pool_scheduler::worker_loop, this);
}
worker(const worker&) = delete;
worker& operator=(const worker&) = delete;
bool aggressive(job_ptr& result) {
for (int i = 0; i < 100; ++i) {
result = m_job_queue->try_pop();
if (result) return true;
std::this_thread::yield();
}
return false;
}
bool moderate(job_ptr& result) {
for (int i = 0; i < 550; ++i) {
result = m_job_queue->try_pop();
if (result) return true;
std::this_thread::sleep_for(std::chrono::microseconds(50));
}
return false;
}
bool relaxed(job_ptr& result) {
for (;;) {
result = m_job_queue->try_pop();
if (result) return true;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
void operator()() {
CPPA_LOG_TRACE("");
detail::cs_thread fself;
job_ptr job = nullptr;
for (;;) {
aggressive(job) || moderate(job) || relaxed(job);
CPPA_LOG_DEBUG("dequeued new job");
if (job == m_dummy) {
CPPA_LOG_DEBUG("received dummy (quit)");
// dummy of doom received ...
m_job_queue->push_back(job); // kill the next guy
return; // and say goodbye
}
if (job->resume(&fself) == resumable::done) {
CPPA_LOG_DEBUG("actor is done");
/*FIXME bool hidden = job->is_hidden();
job->deref();
if (!hidden)*/ get_actor_registry()->dec_running();
}
job = nullptr;
}
}
};
void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w) {
(*w)();
}
thread_pool_scheduler::thread_pool_scheduler() {
m_num_threads = std::max<size_t>(std::thread::hardware_concurrency(), 4);
}
thread_pool_scheduler::thread_pool_scheduler(size_t num_worker_threads) {
m_num_threads = num_worker_threads;
}
void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
resumable* dummy,
size_t num_threads) {
std::vector<std::unique_ptr<thread_pool_scheduler::worker> > workers;
for (size_t i = 0; i < num_threads; ++i) {
workers.emplace_back(new worker(jqueue, dummy));
workers.back()->start();
}
// wait for workers
for (auto& w : workers) {
w->m_thread.join();
}
}
void thread_pool_scheduler::initialize() {
m_supervisor = std::thread(&thread_pool_scheduler::supervisor_loop,
&m_queue, &m_dummy, m_num_threads);
super::initialize();
}
void thread_pool_scheduler::destroy() {
CPPA_LOG_TRACE("");
m_queue.push_back(&m_dummy);
CPPA_LOG_DEBUG("join supervisor");
m_supervisor.join();
// make sure job queue is empty, because destructor of m_queue would
// otherwise delete elements it shouldn't
CPPA_LOG_DEBUG("flush queue");
auto ptr = m_queue.try_pop();
while (ptr != nullptr) {
if (ptr != &m_dummy) {
/*FIXME bool hidden = ptr->is_hidden();
ptr->deref();
std::atomic_thread_fence(std::memory_order_seq_cst);
if (!hidden)*/ get_actor_registry()->dec_running();
}
ptr = m_queue.try_pop();
}
super::destroy();
}
void thread_pool_scheduler::enqueue(resumable* what) {
m_queue.push_back(what);
}
} } // namespace cppa::detail
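The 150-line thread_pool_scheduler translation unit shrinks to the execution_unit abstraction above, whose .cpp only anchors the virtual destructor. The header itself is not part of this hunk; a plausible shape, inferred from worker::exec_later further down and from shutdown_helper's dynamic_cast of execution_unit* to worker*, would be roughly:

// assumed shape of cppa/execution_unit.hpp -- not shown in this diff
namespace cppa {

class resumable;

class execution_unit {
 public:
    // the real destructor is defined out-of-line in src/execution_unit.cpp
    virtual ~execution_unit() { }
    // assumption: exec_later is the hook workers implement to re-schedule
    // a job on this unit without going through the central coordinator
    virtual void exec_later(resumable* job) = 0;
};

} // namespace cppa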
...@@ -70,26 +70,27 @@ class local_group : public abstract_group { ...@@ -70,26 +70,27 @@ class local_group : public abstract_group {
public: public:
void send_all_subscribers(const message_header& hdr, const any_tuple& msg) { void send_all_subscribers(msg_hdr_cref hdr, const any_tuple& msg,
execution_unit* eu) {
CPPA_LOG_TRACE(CPPA_TARG(hdr.sender, to_string) << ", " CPPA_LOG_TRACE(CPPA_TARG(hdr.sender, to_string) << ", "
<< CPPA_TARG(msg, to_string)); << CPPA_TARG(msg, to_string));
shared_guard guard(m_mtx); shared_guard guard(m_mtx);
for (auto& s : m_subscribers) { for (auto& s : m_subscribers) {
s->enqueue(hdr, msg); s->enqueue(hdr, msg, eu);
} }
} }
void enqueue(const message_header& hdr, any_tuple msg) override { void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit* eu) override {
CPPA_LOG_TRACE(CPPA_TARG(hdr, to_string) << ", " CPPA_LOG_TRACE(CPPA_TARG(hdr, to_string) << ", "
<< CPPA_TARG(msg, to_string)); << CPPA_TARG(msg, to_string));
send_all_subscribers(hdr, msg); send_all_subscribers(hdr, msg, eu);
m_broker->enqueue(hdr, msg); m_broker->enqueue(hdr, msg, eu);
} }
pair<bool, size_t> add_subscriber(const channel& who) { pair<bool, size_t> add_subscriber(const channel& who) {
CPPA_LOG_TRACE(CPPA_TARG(who, to_string)); CPPA_LOG_TRACE(CPPA_TARG(who, to_string));
exclusive_guard guard(m_mtx); exclusive_guard guard(m_mtx);
if (m_subscribers.insert(who).second) { if (who && m_subscribers.insert(who).second) {
return {true, m_subscribers.size()}; return {true, m_subscribers.size()};
} }
return {false, m_subscribers.size()}; return {false, m_subscribers.size()};
...@@ -160,7 +161,7 @@ class local_broker : public event_based_actor { ...@@ -160,7 +161,7 @@ class local_broker : public event_based_actor {
CPPA_TARG(what, to_string)); CPPA_TARG(what, to_string));
// local forwarding // local forwarding
message_header hdr{last_sender(), nullptr}; message_header hdr{last_sender(), nullptr};
m_group->send_all_subscribers(hdr, what); m_group->send_all_subscribers(hdr, what, m_host);
// forward to all acquaintances // forward to all acquaintances
send_to_acquaintances(what); send_to_acquaintances(what);
}, },
...@@ -195,7 +196,7 @@ class local_broker : public event_based_actor { ...@@ -195,7 +196,7 @@ class local_broker : public event_based_actor {
<< " acquaintances; " << CPPA_TSARG(sender) << " acquaintances; " << CPPA_TSARG(sender)
<< ", " << CPPA_TSARG(what)); << ", " << CPPA_TSARG(what));
for (auto& acquaintance : m_acquaintances) { for (auto& acquaintance : m_acquaintances) {
acquaintance->enqueue({sender, acquaintance}, what); acquaintance->enqueue({sender, acquaintance}, what, m_host);
} }
} }
...@@ -249,9 +250,9 @@ class local_group_proxy : public local_group { ...@@ -249,9 +250,9 @@ class local_group_proxy : public local_group {
} }
} }
void enqueue(const message_header& hdr, any_tuple msg) override { void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit* eu) override {
// forward message to the broker // forward message to the broker
m_broker->enqueue(hdr, make_any_tuple(atom("FORWARD"), move(msg))); m_broker->enqueue(hdr, make_any_tuple(atom("FORWARD"), move(msg)), eu);
} }
private: private:
...@@ -272,7 +273,7 @@ class proxy_broker : public event_based_actor { ...@@ -272,7 +273,7 @@ class proxy_broker : public event_based_actor {
return ( return (
others() >> [=] { others() >> [=] {
message_header hdr{last_sender(), nullptr}; message_header hdr{last_sender(), nullptr};
m_group->send_all_subscribers(hdr, last_dequeued()); m_group->send_all_subscribers(hdr, last_dequeued(), m_host);
} }
); );
} }
...@@ -378,18 +379,18 @@ class remote_group : public abstract_group { ...@@ -378,18 +379,18 @@ class remote_group : public abstract_group {
CPPA_LOG_ERROR("should never be called"); CPPA_LOG_ERROR("should never be called");
} }
void enqueue(const message_header& hdr, any_tuple msg) override { void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit* eu) override {
CPPA_LOG_TRACE(""); CPPA_LOG_TRACE("");
m_decorated->enqueue(hdr, std::move(msg)); m_decorated->enqueue(hdr, std::move(msg), eu);
} }
void serialize(serializer* sink); void serialize(serializer* sink);
void group_down() { void group_down() {
CPPA_LOG_TRACE(""); CPPA_LOG_TRACE("");
group this_group{this}; group_down_msg gdm{group{this}};
m_decorated->send_all_subscribers({invalid_actor_addr, nullptr}, m_decorated->send_all_subscribers({invalid_actor_addr, nullptr},
make_any_tuple(group_down_msg{this_group})); make_any_tuple(gdm), nullptr);
} }
private: private:
......
...@@ -73,8 +73,7 @@ class down_observer : public attachable { ...@@ -73,8 +73,7 @@ class down_observer : public attachable {
} // namespace <anonymous> } // namespace <anonymous>
local_actor::local_actor() local_actor::local_actor()
: m_trap_exit(false) : m_trap_exit(false), m_dummy_node(), m_current_node(&m_dummy_node)
, m_dummy_node(), m_current_node(&m_dummy_node)
, m_planned_exit_reason(exit_reason::not_exited) , m_planned_exit_reason(exit_reason::not_exited)
, m_state(actor_state::ready) { , m_state(actor_state::ready) {
m_node = get_middleman()->node(); m_node = get_middleman()->node();
...@@ -126,17 +125,19 @@ void local_actor::reply_message(any_tuple&& what) { ...@@ -126,17 +125,19 @@ void local_actor::reply_message(any_tuple&& what) {
} }
else if (!id.is_answered()) { else if (!id.is_answered()) {
auto ptr = detail::raw_access::get(whom); auto ptr = detail::raw_access::get(whom);
ptr->enqueue({address(), ptr, id.response_id()}, std::move(what)); ptr->enqueue({address(), ptr, id.response_id()},
std::move(what), m_host);
id.mark_as_answered(); id.mark_as_answered();
} }
} }
void local_actor::forward_message(const actor& dest, message_priority p) { void local_actor::forward_message(const actor& dest, message_priority prio) {
if (!dest) return; if (!dest) return;
auto id = (p == message_priority::high) auto id = (prio == message_priority::high)
? m_current_node->mid.with_high_priority() ? m_current_node->mid.with_high_priority()
: m_current_node->mid.with_normal_priority(); : m_current_node->mid.with_normal_priority();
detail::raw_access::get(dest)->enqueue({m_current_node->sender, detail::raw_access::get(dest), id}, m_current_node->msg); auto p = detail::raw_access::get(dest);
p->enqueue({m_current_node->sender, p, id}, m_current_node->msg, m_host);
// treat this message as asynchronous message from now on // treat this message as asynchronous message from now on
m_current_node->mid = message_id::invalid; m_current_node->mid = message_id::invalid;
} }
...@@ -145,7 +146,7 @@ void local_actor::send_tuple(message_priority prio, const channel& dest, any_tup ...@@ -145,7 +146,7 @@ void local_actor::send_tuple(message_priority prio, const channel& dest, any_tup
if (!dest) return; if (!dest) return;
message_id id; message_id id;
if (prio == message_priority::high) id = id.with_high_priority(); if (prio == message_priority::high) id = id.with_high_priority();
dest->enqueue({address(), dest, id}, std::move(what)); dest->enqueue({address(), dest, id}, std::move(what), m_host);
} }
void local_actor::send_exit(const actor_addr& whom, std::uint32_t reason) { void local_actor::send_exit(const actor_addr& whom, std::uint32_t reason) {
...@@ -158,7 +159,7 @@ void local_actor::delayed_send_tuple(message_priority prio, ...@@ -158,7 +159,7 @@ void local_actor::delayed_send_tuple(message_priority prio,
cppa::any_tuple msg) { cppa::any_tuple msg) {
message_id mid; message_id mid;
if (prio == message_priority::high) mid = mid.with_high_priority(); if (prio == message_priority::high) mid = mid.with_high_priority();
get_scheduler()->delayed_send({address(), dest, mid}, get_scheduling_coordinator()->delayed_send({address(), dest, mid},
rel_time, std::move(msg)); rel_time, std::move(msg));
} }
...@@ -199,9 +200,9 @@ message_id local_actor::timed_sync_send_tuple_impl(message_priority mp, ...@@ -199,9 +200,9 @@ message_id local_actor::timed_sync_send_tuple_impl(message_priority mp,
any_tuple&& what) { any_tuple&& what) {
auto nri = new_request_id(); auto nri = new_request_id();
if (mp == message_priority::high) nri = nri.with_high_priority(); if (mp == message_priority::high) nri = nri.with_high_priority();
dest->enqueue({address(), dest, nri}, std::move(what)); dest->enqueue({address(), dest, nri}, std::move(what), m_host);
auto rri = nri.response_id(); auto rri = nri.response_id();
get_scheduler()->delayed_send({address(), this, rri}, rtime, get_scheduling_coordinator()->delayed_send({address(), this, rri}, rtime,
make_any_tuple(sync_timeout_msg{})); make_any_tuple(sync_timeout_msg{}));
return rri; return rri;
} }
...@@ -211,14 +212,14 @@ message_id local_actor::sync_send_tuple_impl(message_priority mp, ...@@ -211,14 +212,14 @@ message_id local_actor::sync_send_tuple_impl(message_priority mp,
any_tuple&& what) { any_tuple&& what) {
auto nri = new_request_id(); auto nri = new_request_id();
if (mp == message_priority::high) nri = nri.with_high_priority(); if (mp == message_priority::high) nri = nri.with_high_priority();
dest->enqueue({address(), dest, nri}, std::move(what)); dest->enqueue({address(), dest, nri}, std::move(what), m_host);
return nri.response_id(); return nri.response_id();
} }
void anon_send_exit(const actor_addr& whom, std::uint32_t reason) { void anon_send_exit(const actor_addr& whom, std::uint32_t reason) {
auto ptr = detail::raw_access::get(whom); auto ptr = detail::raw_access::get(whom);
ptr->enqueue({invalid_actor_addr, ptr, message_id{}.with_high_priority()}, ptr->enqueue({invalid_actor_addr, ptr, message_id{}.with_high_priority()},
make_any_tuple(exit_msg{invalid_actor_addr, reason})); make_any_tuple(exit_msg{invalid_actor_addr, reason}), nullptr);
} }
} // namespace cppa } // namespace cppa
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
namespace cppa { namespace cppa {
mailbox_element::mailbox_element(const message_header& hdr, any_tuple data) mailbox_element::mailbox_element(msg_hdr_cref hdr, any_tuple data)
: next(nullptr), marked(false), sender(hdr.sender), msg(std::move(data)), mid(hdr.id) { } : next(nullptr), marked(false), sender(hdr.sender), msg(std::move(data)), mid(hdr.id) { }
} // namespace cppa } // namespace cppa
...@@ -49,7 +49,7 @@ bool operator!=(const message_header& lhs, const message_header& rhs) { ...@@ -49,7 +49,7 @@ bool operator!=(const message_header& lhs, const message_header& rhs) {
} }
void message_header::deliver(any_tuple msg) const { void message_header::deliver(any_tuple msg) const {
if (receiver) receiver->enqueue(*this, std::move(msg)); if (receiver) receiver->enqueue(*this, std::move(msg), nullptr);
} }
} // namespace cppa::network } // namespace cppa::network
...@@ -227,7 +227,7 @@ class middleman_impl : public middleman { ...@@ -227,7 +227,7 @@ class middleman_impl : public middleman {
} }
void deliver(const node_id& node, void deliver(const node_id& node,
const message_header& hdr, msg_hdr_cref hdr,
any_tuple msg ) override { any_tuple msg ) override {
auto& entry = m_peers[node]; auto& entry = m_peers[node];
if (entry.impl) { if (entry.impl) {
......
...@@ -268,7 +268,7 @@ void peer::kill_proxy(const actor_addr& sender, ...@@ -268,7 +268,7 @@ void peer::kill_proxy(const actor_addr& sender,
} }
} }
void peer::deliver(const message_header& hdr, any_tuple msg) { void peer::deliver(msg_hdr_cref hdr, any_tuple msg) {
CPPA_LOG_TRACE(""); CPPA_LOG_TRACE("");
if (hdr.sender && hdr.sender.is_remote()) { if (hdr.sender && hdr.sender.is_remote()) {
// is_remote() is guaranteed to return true if and only if // is_remote() is guaranteed to return true if and only if
...@@ -361,7 +361,7 @@ void peer::add_type_if_needed(const std::string& tname) { ...@@ -361,7 +361,7 @@ void peer::add_type_if_needed(const std::string& tname) {
} }
} }
void peer::enqueue_impl(const message_header& hdr, const any_tuple& msg) { void peer::enqueue_impl(msg_hdr_cref hdr, const any_tuple& msg) {
CPPA_LOG_TRACE(""); CPPA_LOG_TRACE("");
auto tname = msg.tuple_type_names(); auto tname = msg.tuple_type_names();
add_type_if_needed((tname) ? *tname : detail::get_tuple_type_names(*msg.vals())); add_type_if_needed((tname) ? *tname : detail::get_tuple_type_names(*msg.vals()));
...@@ -384,7 +384,7 @@ void peer::enqueue_impl(const message_header& hdr, const any_tuple& msg) { ...@@ -384,7 +384,7 @@ void peer::enqueue_impl(const message_header& hdr, const any_tuple& msg) {
memcpy(wbuf.offset_data(before), &size, sizeof(std::uint32_t)); memcpy(wbuf.offset_data(before), &size, sizeof(std::uint32_t));
} }
void peer::enqueue(const message_header& hdr, const any_tuple& msg) { void peer::enqueue(msg_hdr_cref hdr, const any_tuple& msg) {
enqueue_impl(hdr, msg); enqueue_impl(hdr, msg);
register_for_writing(); register_for_writing();
} }
......
...@@ -81,7 +81,7 @@ remote_actor_proxy::~remote_actor_proxy() { ...@@ -81,7 +81,7 @@ remote_actor_proxy::~remote_actor_proxy() {
}); });
} }
void remote_actor_proxy::deliver(const message_header& hdr, any_tuple msg) { void remote_actor_proxy::deliver(msg_hdr_cref hdr, any_tuple msg) {
// this member function is exclusively called from default_peer from inside // this member function is exclusively called from default_peer from inside
// the middleman's thread, therefore we can safely access // the middleman's thread, therefore we can safely access
// m_pending_requests here // m_pending_requests here
...@@ -95,7 +95,7 @@ void remote_actor_proxy::deliver(const message_header& hdr, any_tuple msg) { ...@@ -95,7 +95,7 @@ void remote_actor_proxy::deliver(const message_header& hdr, any_tuple msg) {
hdr.deliver(std::move(msg)); hdr.deliver(std::move(msg));
} }
void remote_actor_proxy::forward_msg(const message_header& hdr, any_tuple msg) { void remote_actor_proxy::forward_msg(msg_hdr_cref hdr, any_tuple msg) {
CPPA_LOG_TRACE(CPPA_ARG(m_id) << ", " << CPPA_TSARG(hdr) CPPA_LOG_TRACE(CPPA_ARG(m_id) << ", " << CPPA_TSARG(hdr)
<< ", " << CPPA_TSARG(msg)); << ", " << CPPA_TSARG(msg));
if (hdr.receiver != this) { if (hdr.receiver != this) {
...@@ -130,9 +130,11 @@ void remote_actor_proxy::forward_msg(const message_header& hdr, any_tuple msg) { ...@@ -130,9 +130,11 @@ void remote_actor_proxy::forward_msg(const message_header& hdr, any_tuple msg) {
}); });
} }
void remote_actor_proxy::enqueue(const message_header& hdr, any_tuple msg) { void remote_actor_proxy::enqueue(msg_hdr_cref hdr, any_tuple msg,
execution_unit*) {
CPPA_REQUIRE(m_parent != nullptr); CPPA_REQUIRE(m_parent != nullptr);
CPPA_LOG_TRACE(CPPA_TARG(hdr, to_string) << ", " << CPPA_TARG(msg, to_string)); CPPA_LOG_TRACE(CPPA_TARG(hdr, to_string)
<< ", " << CPPA_TARG(msg, to_string));
auto& arr = detail::static_types_array<atom_value, uint32_t>::arr; auto& arr = detail::static_types_array<atom_value, uint32_t>::arr;
if ( msg.size() == 2 if ( msg.size() == 2
&& msg.type_at(0) == arr[0] && msg.type_at(0) == arr[0]
......
...@@ -53,7 +53,8 @@ response_promise::operator bool() const { ...@@ -53,7 +53,8 @@ response_promise::operator bool() const {
void response_promise::deliver(any_tuple msg) { void response_promise::deliver(any_tuple msg) {
if (m_to) { if (m_to) {
auto ptr = detail::raw_access::get(m_to); auto ptr = detail::raw_access::get(m_to);
ptr->enqueue({m_from, ptr, m_id}, move(msg)); // TODO: breaks out of the execution unit
ptr->enqueue({m_from, ptr, m_id}, move(msg), nullptr);
m_to = invalid_actor_addr; m_to = invalid_actor_addr;
} }
} }
......
...@@ -28,12 +28,12 @@ ...@@ -28,12 +28,12 @@
\******************************************************************************/ \******************************************************************************/
#include "cppa/detail/resumable.hpp" #include "cppa/resumable.hpp"
namespace cppa { namespace cppa {
namespace detail {
resumable::resumable() : next(nullptr), m_hidden(true) { }
resumable::~resumable() { } resumable::~resumable() { }
} // namespace detail
} // namespace cppa } // namespace cppa
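resumable moves from cppa::detail into the top-level namespace, and its resume member now receives the executing unit alongside the cs_thread context (see shutdown_helper::resume further down). The contract is cooperative: a job runs until it blocks or finishes and reports which case occurred via resume_result. A toy single-threaded illustration of that contract, simplified by dropping the cs_thread* argument:

#include <deque>
#include <iostream>

class execution_unit;                 // forward declaration, as in libcppa

class resumable {
 public:
    enum resume_result { done, resume_later };
    virtual resume_result resume(execution_unit* host) = 0;
    virtual ~resumable() { }
};

class countdown : public resumable {
    int m_steps;
 public:
    explicit countdown(int steps) : m_steps(steps) { }
    resume_result resume(execution_unit*) override {
        std::cout << "steps left: " << m_steps << '\n';
        return (--m_steps > 0) ? resume_later : done;
    }
};

int main() {
    std::deque<resumable*> queue;     // trivial stand-in for a worker's queue
    countdown job{3};
    queue.push_back(&job);
    while (!queue.empty()) {
        auto* j = queue.front(); queue.pop_front();
        if (j->resume(nullptr) == resumable::resume_later) queue.push_back(j);
    }
}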
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <atomic> #include <atomic>
#include <chrono> #include <chrono>
#include <iostream> #include <iostream>
#include <condition_variable>
#include "cppa/on.hpp" #include "cppa/on.hpp"
#include "cppa/policy.hpp" #include "cppa/policy.hpp"
...@@ -46,11 +47,18 @@ ...@@ -46,11 +47,18 @@
#include "cppa/detail/proper_actor.hpp" #include "cppa/detail/proper_actor.hpp"
#include "cppa/detail/actor_registry.hpp" #include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/singleton_manager.hpp" #include "cppa/detail/singleton_manager.hpp"
#include "cppa/detail/thread_pool_scheduler.hpp"
using std::move; using std::move;
namespace cppa { namespace { namespace cppa {
namespace scheduler {
/******************************************************************************
* utility and implementation details *
******************************************************************************/
namespace {
typedef std::uint32_t ui32; typedef std::uint32_t ui32;
...@@ -130,7 +138,7 @@ class timer_actor final : public detail::proper_actor<blocking_actor, ...@@ -130,7 +138,7 @@ class timer_actor final : public detail::proper_actor<blocking_actor,
}, },
others() >> [&]() { others() >> [&]() {
# ifdef CPPA_DEBUG_MODE # ifdef CPPA_DEBUG_MODE
std::cerr << "scheduler_helper::timer_loop: UNKNOWN MESSAGE: " std::cerr << "coordinator::timer_loop: UNKNOWN MESSAGE: "
<< to_string(msg_ptr->msg) << to_string(msg_ptr->msg)
<< std::endl; << std::endl;
# endif # endif
...@@ -162,47 +170,7 @@ class timer_actor final : public detail::proper_actor<blocking_actor, ...@@ -162,47 +170,7 @@ class timer_actor final : public detail::proper_actor<blocking_actor,
}; };
} // namespace <anonymous> void printer_loop(blocking_actor* self) {
class scheduler_helper {
public:
scheduler_helper() : m_timer(new timer_actor), m_printer(true) { }
void start() {
// launch threads
m_timer_thread = std::thread{&scheduler_helper::timer_loop, m_timer.get()};
m_printer_thread = std::thread{&scheduler_helper::printer_loop, m_printer.get()};
}
void stop() {
auto msg = make_any_tuple(atom("DIE"));
m_timer->enqueue({invalid_actor_addr, nullptr}, msg);
m_printer->enqueue({invalid_actor_addr, nullptr}, msg);
m_timer_thread.join();
m_printer_thread.join();
}
intrusive_ptr<timer_actor> m_timer;
std::thread m_timer_thread;
scoped_actor m_printer;
std::thread m_printer_thread;
private:
static void timer_loop(timer_actor* self);
static void printer_loop(blocking_actor* self);
};
void scheduler_helper::timer_loop(timer_actor* self) {
self->act();
}
void scheduler_helper::printer_loop(blocking_actor* self) {
std::map<actor_addr, std::string> out; std::map<actor_addr, std::string> out;
auto flush_output = [&out](const actor_addr& s) { auto flush_output = [&out](const actor_addr& s) {
auto i = out.find(s); auto i = out.find(s);
...@@ -257,43 +225,197 @@ void scheduler_helper::printer_loop(blocking_actor* self) { ...@@ -257,43 +225,197 @@ void scheduler_helper::printer_loop(blocking_actor* self) {
); );
} }
scheduler::scheduler() : m_helper(nullptr) { } } // namespace <anonymous>
/******************************************************************************
* implementation of coordinator *
******************************************************************************/
class coordinator::shutdown_helper : public resumable {
public:
resumable::resume_result resume(detail::cs_thread*, execution_unit* ptr) {
auto w = dynamic_cast<worker*>(ptr);
CPPA_REQUIRE(w != nullptr);
w->m_running = false;
std::unique_lock<std::mutex> guard(mtx);
last_worker = w;
cv.notify_all();
return resumable::resume_later;
}
shutdown_helper() : last_worker(nullptr) { }
void scheduler::initialize() { std::mutex mtx;
m_helper = new scheduler_helper; std::condition_variable cv;
m_helper->start(); worker* last_worker;
};
void coordinator::initialize() {
// launch threads of utility actors
auto ptr = m_timer.get();
m_timer_thread = std::thread{[ptr] {
ptr->act();
}};
m_printer_thread = std::thread{printer_loop, m_printer.get()};
// launch workers
size_t hc = std::max<size_t>(std::thread::hardware_concurrency(), 4);
for (size_t i = 0; i < hc; ++i) {
m_workers.emplace_back(new worker(i, this));
m_workers.back()->start();
}
} }
void scheduler::destroy() { void coordinator::destroy() {
CPPA_LOG_TRACE(""); // shutdown workers
m_helper->stop(); shutdown_helper sh;
std::vector<worker*> alive_workers;
for (auto& w : m_workers) alive_workers.push_back(w.get());
while (!alive_workers.empty()) {
alive_workers.back()->external_enqueue(&sh);
// since jobs can be stolen, we cannot assume that we have
// actually shut down the worker we've enqueued sh to
{ // lifetime scope of guard
std::unique_lock<std::mutex> guard(sh.mtx);
sh.cv.wait(guard, [&]{ return sh.last_worker != nullptr; });
}
auto first = alive_workers.begin();
auto last = alive_workers.end();
auto i = std::find(first, last, sh.last_worker);
sh.last_worker = nullptr;
alive_workers.erase(i);
}
// shutdown utility actors
auto msg = make_any_tuple(atom("DIE"));
m_timer->enqueue({invalid_actor_addr, nullptr}, msg, nullptr);
m_printer->enqueue({invalid_actor_addr, nullptr}, msg, nullptr);
m_timer_thread.join();
m_printer_thread.join();
// join each worker thread for good manners
for (auto& w : m_workers) w->m_this_thread.join();
// cleanup
delete this; delete this;
} }
scheduler::~scheduler() { coordinator::coordinator() : m_timer(new timer_actor), m_printer(true) {
delete m_helper; // NOP
}
coordinator* coordinator::create_singleton() {
return new coordinator;
} }
actor scheduler::delayed_send_helper() { actor coordinator::printer() const {
return m_helper->m_timer.get(); return m_printer.get();
}
void coordinator::enqueue(resumable* what) {
size_t nw = m_next_worker++;
m_workers[nw % m_workers.size()]->external_enqueue(what);
}
/******************************************************************************
* implementation of worker *
******************************************************************************/
worker::worker(size_t id, coordinator* parent)
: m_running(false), m_id(id), m_last_victim(id), m_parent(parent) { }
void worker::start() {
auto this_worker = this;
m_this_thread = std::thread{[this_worker] {
this_worker->run();
}};
} }
void set_scheduler(scheduler* sched) { void worker::run() {
if (detail::singleton_manager::set_scheduler(sched) == false) { CPPA_LOG_TRACE(CPPA_ARG(m_id));
throw std::runtime_error("scheduler already set"); // local variables
detail::cs_thread fself;
job_ptr job = nullptr;
// some utility functions
auto local_poll = [&]() -> bool {
if (!m_job_list.empty()) {
job = m_job_list.back();
m_job_list.pop_back();
return true;
}
return false;
};
auto aggressive_poll = [&]() -> bool {
for (int i = 1; i < 101; ++i) {
job = m_exposed_queue.try_pop();
if (job) return true;
// try to steal every 10 poll attempts
if ((i % 10) == 0) {
job = raid();
if (job) return true;
}
std::this_thread::yield();
}
return false;
};
auto moderate_poll = [&]() -> bool {
for (int i = 1; i < 550; ++i) {
job = m_exposed_queue.try_pop();
if (job) return true;
// try to steal every 5 poll attempts
if ((i % 5) == 0) {
job = raid();
if (job) return true;
}
std::this_thread::sleep_for(std::chrono::microseconds(50));
}
return false;
};
auto relaxed_poll = [&]() -> bool {
for (;;) {
job = m_exposed_queue.try_pop();
if (job) return true;
// always try to steal at this stage
job = raid();
if (job) return true;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
};
// scheduling loop
m_running = true;
while (m_running) {
local_poll() || aggressive_poll() || moderate_poll() || relaxed_poll();
CPPA_LOG_DEBUG("dequeued new job");
job->resume(&fself, this);
job = nullptr;
} }
} }
void set_default_scheduler(size_t num_threads) { worker::job_ptr worker::try_steal() {
set_scheduler(new detail::thread_pool_scheduler(num_threads)); return m_exposed_queue.try_pop();
} }
scheduler* scheduler::create_singleton() { worker::job_ptr worker::raid() {
return new detail::thread_pool_scheduler; // try once to steal from anyone
auto n = m_parent->num_workers();
for (size_t i = 0; i < n; ++i) {
m_last_victim = (m_last_victim + 1) % n;
if (m_last_victim != m_id) {
auto job = m_parent->worker_by_id(m_last_victim)->try_steal();
if (job) return job;
}
}
return nullptr;
} }
actor scheduler::printer() const { void worker::external_enqueue(job_ptr ptr) {
return m_helper->m_printer.get(); m_exposed_queue.push_back(ptr);
} }
void worker::exec_later(job_ptr ptr) {
m_job_list.push_back(ptr);
}
} // namespace scheduler
} // namespace cppa } // namespace cppa
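The new scheduler in one picture: each worker first drains its private job list, then polls its own stealable queue with escalating backoff (spinning, 50 µs sleeps, 10 ms sleeps), interleaving steal attempts (raid) against round-robin victims. A condensed, runnable sketch of that strategy under simplifying assumptions: a mutex-guarded deque stands in for the real lock-free exposed queue, and jobs are plain ints rather than resumables:

#include <chrono>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

using job = int;  // stand-in for resumable*

struct worker {
    std::deque<job> exposed;     // other workers may steal from here
    std::mutex mtx;
    size_t id = 0, last_victim = 0;
    std::vector<worker>* all = nullptr;

    bool try_pop(job& out) {
        std::lock_guard<std::mutex> guard(mtx);
        if (exposed.empty()) return false;
        out = exposed.front(); exposed.pop_front();
        return true;
    }
    bool raid(job& out) {        // try each other worker once, round-robin
        for (size_t i = 0; i < all->size(); ++i) {
            last_victim = (last_victim + 1) % all->size();
            if (last_victim != id && (*all)[last_victim].try_pop(out))
                return true;
        }
        return false;
    }
    bool dequeue(job& out) {     // aggressive -> moderate -> relaxed backoff
        for (int i = 1; i <= 100; ++i) {  // spin; steal every 10th attempt
            if (try_pop(out) || (i % 10 == 0 && raid(out))) return true;
            std::this_thread::yield();
        }
        for (int i = 1; i <= 550; ++i) {  // short sleeps; steal every 5th
            if (try_pop(out) || (i % 5 == 0 && raid(out))) return true;
            std::this_thread::sleep_for(std::chrono::microseconds(50));
        }
        for (;;) {                        // long sleeps; always try stealing
            if (try_pop(out) || raid(out)) return true;
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }
};

int main() {
    std::vector<worker> workers(2);
    for (size_t i = 0; i < workers.size(); ++i) {
        workers[i].id = workers[i].last_victim = i;
        workers[i].all = &workers;
    }
    workers[0].exposed.push_back(42);  // enqueued on worker 0 (single-threaded here)
    job j;
    workers[1].dequeue(j);             // ... and stolen by worker 1 via raid()
    std::cout << "stolen job: " << j << '\n';
}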
...@@ -43,7 +43,6 @@ ...@@ -43,7 +43,6 @@
#include "cppa/detail/group_manager.hpp" #include "cppa/detail/group_manager.hpp"
#include "cppa/detail/actor_registry.hpp" #include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/singleton_manager.hpp" #include "cppa/detail/singleton_manager.hpp"
#include "cppa/detail/thread_pool_scheduler.hpp"
#include "cppa/detail/uniform_type_info_map.hpp" #include "cppa/detail/uniform_type_info_map.hpp"
#ifdef CPPA_OPENCL #ifdef CPPA_OPENCL
...@@ -68,14 +67,14 @@ std::atomic<io::middleman*> s_middleman; ...@@ -68,14 +67,14 @@ std::atomic<io::middleman*> s_middleman;
std::atomic<actor_registry*> s_actor_registry; std::atomic<actor_registry*> s_actor_registry;
std::atomic<group_manager*> s_group_manager; std::atomic<group_manager*> s_group_manager;
std::atomic<empty_tuple*> s_empty_tuple; std::atomic<empty_tuple*> s_empty_tuple;
std::atomic<scheduler*> s_scheduler; std::atomic<scheduler::coordinator*> s_scheduling_coordinator;
std::atomic<logging*> s_logger; std::atomic<logging*> s_logger;
} // namespace <anonymous> } // namespace <anonymous>
void singleton_manager::shutdown() { void singleton_manager::shutdown() {
CPPA_LOGF_DEBUG("shutdown scheduler"); CPPA_LOGF_DEBUG("shutdown scheduler");
destroy(s_scheduler); destroy(s_scheduling_coordinator);
CPPA_LOGF_DEBUG("shutdown middleman"); CPPA_LOGF_DEBUG("shutdown middleman");
destroy(s_middleman); destroy(s_middleman);
std::atomic_thread_fence(std::memory_order_seq_cst); std::atomic_thread_fence(std::memory_order_seq_cst);
...@@ -115,26 +114,14 @@ group_manager* singleton_manager::get_group_manager() { ...@@ -115,26 +114,14 @@ group_manager* singleton_manager::get_group_manager() {
return lazy_get(s_group_manager); return lazy_get(s_group_manager);
} }
scheduler* singleton_manager::get_scheduler() { scheduler::coordinator* singleton_manager::get_scheduling_coordinator() {
return lazy_get(s_scheduler); return lazy_get(s_scheduling_coordinator);
} }
logging* singleton_manager::get_logger() { logging* singleton_manager::get_logger() {
return lazy_get(s_logger); return lazy_get(s_logger);
} }
bool singleton_manager::set_scheduler(scheduler* ptr) {
scheduler* expected = nullptr;
if (s_scheduler.compare_exchange_weak(expected, ptr)) {
ptr->initialize();
return true;
}
else {
ptr->dispose();
return false;
}
}
io::middleman* singleton_manager::get_middleman() { io::middleman* singleton_manager::get_middleman() {
return lazy_get(s_middleman); return lazy_get(s_middleman);
} }
......
...@@ -49,7 +49,9 @@ void sync_request_bouncer::operator()(const actor_addr& sender, ...@@ -49,7 +49,9 @@ void sync_request_bouncer::operator()(const actor_addr& sender,
if (sender && mid.is_request()) { if (sender && mid.is_request()) {
auto ptr = detail::raw_access::get(sender); auto ptr = detail::raw_access::get(sender);
ptr->enqueue({invalid_actor_addr, ptr, mid.response_id()}, ptr->enqueue({invalid_actor_addr, ptr, mid.response_id()},
make_any_tuple(sync_exited_msg{sender, rsn})); make_any_tuple(sync_exited_msg{sender, rsn}),
// TODO: this breaks out of the execution unit
nullptr);
} }
} }
......
...@@ -82,7 +82,7 @@ std::uint32_t type_lookup_table::id_of(pointer uti) const { ...@@ -82,7 +82,7 @@ std::uint32_t type_lookup_table::id_of(pointer uti) const {
void type_lookup_table::emplace(std::uint32_t id, pointer instance) { void type_lookup_table::emplace(std::uint32_t id, pointer instance) {
CPPA_REQUIRE(instance); CPPA_REQUIRE(instance != nullptr);
value_type kvp{id, instance}; value_type kvp{id, instance};
auto i = find(id); auto i = find(id);
if (i == m_data.end()) m_data.push_back(std::move(kvp)); if (i == m_data.end()) m_data.push_back(std::move(kvp));
......
...@@ -312,7 +312,7 @@ void deserialize_impl(any_tuple& atref, deserializer* source) { ...@@ -312,7 +312,7 @@ void deserialize_impl(any_tuple& atref, deserializer* source) {
atref = uti->as_any_tuple(ptr); atref = uti->as_any_tuple(ptr);
} }
void serialize_impl(const message_header& hdr, serializer* sink) { void serialize_impl(msg_hdr_cref hdr, serializer* sink) {
serialize_impl(hdr.sender, sink); serialize_impl(hdr.sender, sink);
serialize_impl(hdr.receiver, sink); serialize_impl(hdr.receiver, sink);
sink->write_value(hdr.id.integer_value()); sink->write_value(hdr.id.integer_value());
......
...@@ -32,7 +32,6 @@ constexpr size_t reduce_global_size = reduce_buffer_size; ...@@ -32,7 +32,6 @@ constexpr size_t reduce_global_size = reduce_buffer_size;
constexpr size_t reduce_result_size = reduce_work_groups; constexpr size_t reduce_result_size = reduce_work_groups;
constexpr const char* kernel_name = "matrix_square"; constexpr const char* kernel_name = "matrix_square";
constexpr const char* kernel_name_result_size = "result_size";
constexpr const char* kernel_name_compiler_flag = "compiler_flag"; constexpr const char* kernel_name_compiler_flag = "compiler_flag";
constexpr const char* kernel_name_reduce = "reduce"; constexpr const char* kernel_name_reduce = "reduce";
constexpr const char* kernel_name_const = "const_mod"; constexpr const char* kernel_name_const = "const_mod";
...@@ -159,6 +158,8 @@ class square_matrix { ...@@ -159,6 +158,8 @@ class square_matrix {
const ivec& data() const { return m_data; } const ivec& data() const { return m_data; }
void data(ivec new_data) { m_data = std::move(new_data); }
private: private:
ivec m_data; ivec m_data;
...@@ -311,7 +312,7 @@ void test_opencl() { ...@@ -311,7 +312,7 @@ void test_opencl() {
} }
int main() { int main() {
CPPA_TEST(test_opencl); CPPA_TEST(test_opencl);
announce<ivec>(); announce<ivec>();
matrix_type::announce(); matrix_type::announce();
......
...@@ -61,8 +61,6 @@ using namespace cppa::util; ...@@ -61,8 +61,6 @@ using namespace cppa::util;
using cppa::detail::type_to_ptype; using cppa::detail::type_to_ptype;
using cppa::detail::ptype_to_type; using cppa::detail::ptype_to_type;
namespace { const size_t ui32size = sizeof(uint32_t); }
struct struct_a { struct struct_a {
int x; int x;
int y; int y;
......