Commit e5299044 authored by Dominik Charousset's avatar Dominik Charousset

got rid of actor_state

this patch removes the state variable in scheduled actors and
uses a dummy value on the mailbox instead for detecting when
to re-schedule an actor
parent c63d96b4
......@@ -7,7 +7,6 @@ cppa/actor_companion.hpp
cppa/actor_namespace.hpp
cppa/actor_ostream.hpp
cppa/actor_proxy.hpp
cppa/actor_state.hpp
cppa/announce.hpp
cppa/any_tuple.hpp
cppa/anything.hpp
......@@ -89,7 +88,6 @@ cppa/from_string.hpp
cppa/get.hpp
cppa/group.hpp
cppa/guard_expr.hpp
cppa/intrusive/blocking_single_reader_queue.hpp
cppa/intrusive/single_reader_queue.hpp
cppa/intrusive_ptr.hpp
cppa/io/accept_handle.hpp
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011-2013 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef CPPA_ACTOR_STATE_HPP
#define CPPA_ACTOR_STATE_HPP
namespace cppa {
/**
 * @brief Denotes the state of a cooperatively scheduled actor.
 * @note Stored in a std::atomic<actor_state> and updated via CAS
 *       (see local_actor::cas_state).
 */
enum class actor_state : int {
/**
 * @brief Indicates that the actor is either waiting to be executed
 * or currently running.
 */
ready,
/**
 * @brief Indicates that the actor finished execution.
 */
done,
/**
 * @brief Indicates that the actor awaits a new message.
 */
blocked,
/**
 * @brief Indicates that the actor is about to change its state to
 * {@link blocked}, but still can be interrupted by an
 * incoming message.
 */
about_to_block
};
} // namespace cppa
#endif // CPPA_ACTOR_STATE_HPP
......@@ -48,7 +48,7 @@ class functor_based_actor : public event_based_actor {
typedef std::function<void(event_based_actor*)> void_fun;
template<typename F, typename... Ts>
functor_based_actor(F f, Ts&&... vs) : m_void_impl(false) {
functor_based_actor(F f, Ts&&... vs) {
typedef typename util::get_callable_trait<F>::type trait;
typedef typename trait::arg_types arg_types;
typedef typename trait::result_type result_type;
......@@ -80,7 +80,6 @@ class functor_based_actor : public event_based_actor {
template<typename F>
void set(std::false_type, std::true_type, F fun) {
// void (pointer)
m_void_impl = true;
m_make_behavior = [fun](pointer ptr) {
fun(ptr);
return behavior{};
......@@ -96,7 +95,6 @@ class functor_based_actor : public event_based_actor {
template<typename F>
void set(std::false_type, std::false_type, F fun) {
// void (void)
m_void_impl = true;
m_make_behavior = [fun](pointer) {
fun();
return behavior{};
......@@ -118,7 +116,6 @@ class functor_based_actor : public event_based_actor {
std::forward<Ts>(args)...));
}
bool m_void_impl;
make_behavior_fun m_make_behavior;
};
......
......@@ -47,7 +47,11 @@ class proper_actor_base : public Policies::resume_policy::template mixin<Base, D
scheduling_policy().enqueue(dptr(), hdr, msg, eu);
}
// NOTE: scheduling_policy::launch is 'imported' in proper_actor
/**
 * @brief Stores the hidden flag and delegates the actual launch to
 *        the scheduling policy.
 * @param is_hidden Forwarded to hidden(); presumably excludes the actor
 *        from shutdown bookkeeping — confirm against scheduling policy.
 * @param host Execution unit handed through to scheduling_policy().launch().
 */
inline void launch(bool is_hidden, execution_unit* host) {
CPPA_LOG_TRACE("");
this->hidden(is_hidden);
this->scheduling_policy().launch(this, host);
}
template<typename F>
bool fetch_messages(F cb) {
......@@ -188,19 +192,6 @@ class proper_actor : public proper_actor_base<Base,
template <typename... Ts>
proper_actor(Ts&&... args) : super(std::forward<Ts>(args)...) { }
inline void launch(bool is_hidden, execution_unit* host) {
CPPA_LOG_TRACE("");
this->hidden(is_hidden);
this->m_host = host; // may be accessed during make_behavior call
auto bhvr = this->make_behavior();
if (bhvr) this->become(std::move(bhvr));
CPPA_LOG_WARNING_IF(this->bhvr_stack().empty(),
"actor did not set a behavior");
if (!this->bhvr_stack().empty()) {
this->scheduling_policy().launch(this, host);
}
}
// required by event_based_resume::mixin::resume
bool invoke_message(unique_mailbox_element_pointer& ptr) {
......@@ -233,7 +224,7 @@ class proper_actor : public proper_actor_base<Base,
// for blocking actors, there's one more member function to implement
template <class Base, class Policies>
class proper_actor<Base, Policies,true> : public proper_actor_base<Base,
class proper_actor<Base, Policies, true> : public proper_actor_base<Base,
proper_actor<Base,
Policies,
true>,
......@@ -257,11 +248,6 @@ class proper_actor<Base, Policies,true> : public proper_actor_base<Base,
this->resume_policy().await_ready(this);
}
inline void launch(bool is_hidden, execution_unit* host) {
this->hidden(is_hidden);
this->scheduling_policy().launch(this, host);
}
// implement blocking_actor::dequeue_response
void dequeue_response(behavior& bhvr, message_id mid) override {
......
......@@ -76,6 +76,8 @@ class event_based_actor : public extend<local_actor, event_based_actor>::
*/
void forward_to(const actor& whom);
bool m_initialized;
};
} // namespace cppa
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011-2013 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation; either version 2.1 of the License, *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef CPPA_BLOCKING_SINGLE_READER_QUEUE_HPP
#define CPPA_BLOCKING_SINGLE_READER_QUEUE_HPP
#include <mutex>
#include <thread>
#include <condition_variable>
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa { namespace intrusive {
/**
 * @brief Wraps a single_reader_queue with a mutex/condition-variable
 *        pair so the (single) reader can block until data arrives.
 *
 * Writers may call push_back() concurrently; all pop/wait operations
 * must be called from the reader (owner) only.
 */
template<typename T, class Delete = std::default_delete<T> >
class blocking_single_reader_queue {
typedef std::unique_lock<std::mutex> lock_type;
public:
typedef single_reader_queue<T, Delete> impl_type;
typedef typename impl_type::value_type value_type;
typedef typename impl_type::pointer pointer;
/**
 * @brief Blocks until an element is available and dequeues it.
 * @warning call only from the reader (owner)
 */
pointer pop() {
wait_for_data();
return m_impl.try_pop();
}
/**
 * @brief Dequeues an element if one is available; returns
 *        @p nullptr otherwise. Does not block.
 * @warning call only from the reader (owner)
 */
inline pointer try_pop() {
return m_impl.try_pop();
}
/**
 * @brief Waits until @p abs_time for an element to arrive, then
 *        dequeues it; returns @p nullptr on timeout.
 * @warning call only from the reader (owner)
 */
template<typename TimePoint>
pointer try_pop(const TimePoint& abs_time) {
return (timed_wait_for_data(abs_time)) ? try_pop() : nullptr;
}
/**
 * @brief Enqueues @p new_element, waking the reader if it was the
 *        first element. Safe to call from any thread.
 * @returns @p true if the element was enqueued, @p false if the
 *          queue has been closed by the reader.
 */
bool push_back(pointer new_element) {
switch (m_impl.enqueue(new_element)) {
case first_enqueued: {
// queue was empty: take the lock before notifying so the
// reader cannot miss the wakeup between its empty-check
// and its wait
lock_type guard(m_mtx);
m_cv.notify_one();
return true;
}
case enqueued:
// enqueued message to a running actor's mailbox
return true;
case queue_closed:
// actor no longer alive
return false;
}
// should be unreachable
CPPA_CRITICAL("invalid result of enqueue()");
}
inline void clear() {
m_impl.clear();
}
inline void close() {
m_impl.close();
}
// closes the queue and invokes f for each pending element
template<typename F>
inline void close(const F& f) {
m_impl.close(f);
}
inline bool closed() const {
return m_impl.closed();
}
private:
// locked on enqueue/dequeue operations to/from an empty list
std::mutex m_mtx;
std::condition_variable m_cv;
impl_type m_impl;
// blocks until data is available or the timeout expires;
// returns false on timeout
template<typename TimePoint>
bool timed_wait_for_data(const TimePoint& timeout) {
CPPA_REQUIRE(!m_impl.closed());
if (m_impl.empty()) {
lock_type guard(m_mtx);
// loop guards against spurious wakeups
while (m_impl.empty()) {
if (m_cv.wait_until(guard, timeout) == std::cv_status::timeout) {
return false;
}
}
}
return true;
}
// blocks until data is available
void wait_for_data() {
if (m_impl.empty()) {
lock_type guard(m_mtx);
while (m_impl.empty()) m_cv.wait(guard);
}
}
};
} } // namespace cppa::intrusive
#endif // CPPA_BLOCKING_SINGLE_READER_QUEUE_HPP
......@@ -32,6 +32,7 @@
#define CPPA_SINGLE_READER_QUEUE_HPP
#include <list>
#include <mutex>
#include <atomic>
#include <memory>
......@@ -39,7 +40,30 @@
namespace cppa { namespace intrusive {
enum enqueue_result { enqueued, first_enqueued, queue_closed };
/**
 * @brief Denotes in which state queue and reader are after an enqueue.
 */
enum class enqueue_result {
/**
 * @brief Indicates that the enqueue operation succeeded and
 * the reader is ready to receive the data.
 */
success,
/**
 * @brief Indicates that the enqueue operation succeeded and
 * the reader is currently blocked, i.e., needs to be re-scheduled.
 */
unblocked_reader,
/**
 * @brief Indicates that the enqueue operation failed because the
 * queue has been closed by the reader.
 */
queue_closed
};
/**
* @brief An intrusive, thread-safe queue implementation.
......@@ -97,39 +121,73 @@ class single_reader_queue {
}
else m_head = head;
}
}
// returns true if the queue was empty
enqueue_result enqueue(pointer new_element) {
pointer e = m_stack.load();
for (;;) {
if (e == nullptr) {
if (!e) {
// if tail is nullptr, the queue has been closed
m_delete(new_element);
return queue_closed; // queue is closed
return enqueue_result::queue_closed;
}
new_element->next = e;
new_element->next = is_dummy(e) ? nullptr : e;
if (m_stack.compare_exchange_weak(e, new_element)) {
return (e == stack_end()) ? first_enqueued : enqueued;
return (e == reader_blocked_dummy())
? enqueue_result::unblocked_reader
: enqueue_result::success;
}
}
}
inline bool can_fetch_more() const {
return m_stack.load() != stack_end();
/**
* @brief Queries whether there is new data to read.
* @pre m_stack.load() != reader_blocked_dummy()
*/
inline bool can_fetch_more() {
auto ptr = m_stack.load();
//CPPA_REQUIRE(ptr != reader_blocked_dummy());
CPPA_REQUIRE(ptr != nullptr);
return !is_dummy(ptr);
}
/**
* @warning call only from the reader (owner)
*/
inline bool empty() const {
return closed() || (m_head == nullptr && m_stack.load() == stack_end());
inline bool empty() {
CPPA_REQUIRE(m_stack.load() != nullptr);
return (!m_head && is_dummy(m_stack.load()));
}
inline bool closed() const {
inline bool closed() {
return m_stack.load() == nullptr;
}
inline bool blocked() {
return m_stack == reader_blocked_dummy();
}
/**
 * @brief Tries to set this queue from state @p empty to state @p blocked.
 * @returns @p true if the state change was successful, otherwise @p false.
 * @note This function never fails spuriously
 *       (uses compare_exchange_strong).
 */
inline bool try_block() {
auto e = stack_empty_dummy();
return m_stack.compare_exchange_strong(e, reader_blocked_dummy());
}
/**
 * @brief Tries to set this queue from state @p blocked to state @p empty.
 * @returns @p true if the state change was successful, otherwise @p false.
 * @note This function never fails spuriously
 *       (uses compare_exchange_strong).
 */
inline bool try_unblock() {
auto e = reader_blocked_dummy();
return m_stack.compare_exchange_strong(e, stack_empty_dummy());
}
/**
* @warning call only from the reader (owner)
*/
......@@ -147,7 +205,7 @@ class single_reader_queue {
}
inline single_reader_queue() : m_head(nullptr) {
m_stack = stack_end();
m_stack = stack_empty_dummy();
}
inline void clear() {
......@@ -157,7 +215,68 @@ class single_reader_queue {
}
}
inline ~single_reader_queue() { clear(); }
~single_reader_queue() {
clear();
}
/**************************************************************************
* support for synchronized access *
**************************************************************************/
/**
 * @brief Enqueues @p new_element and wakes up a blocked reader if needed.
 *
 * Combines enqueue() with the condition-variable handshake used by
 * synchronized_await(): when the enqueue unblocked the reader, @p cv
 * is notified while holding @p mtx so the wakeup cannot be lost.
 *
 * @returns @p true if the element was enqueued; @p false if the queue
 *          has been closed (enqueue() deletes the element in that case).
 */
template<class Mutex, class CondVar>
bool synchronized_enqueue(Mutex& mtx, CondVar& cv, pointer new_element) {
switch (enqueue(new_element)) {
case enqueue_result::unblocked_reader: {
std::unique_lock<Mutex> guard(mtx);
cv.notify_one();
return true;
}
case enqueue_result::success:
// enqueued message to a running actor's mailbox
return true;
case enqueue_result::queue_closed:
// actor no longer alive
return false;
}
// should be unreachable
CPPA_CRITICAL("invalid result of enqueue()");
}
/**
 * @brief Waits for data until @p abs_time, then attempts to dequeue.
 * @returns The dequeued element, or @p nullptr if the timeout expired
 *          while the queue was still empty.
 * @warning call only from the reader (owner)
 */
template<class Mutex, class CondVar, class TimePoint>
pointer synchronized_try_pop(Mutex& mtx, CondVar& cv, const TimePoint& abs_time) {
    if (!synchronized_await(mtx, cv, abs_time)) {
        // timed out without any element arriving
        return nullptr;
    }
    return try_pop();
}
/**
 * @brief Blocks until data is available, then dequeues one element
 *        (combines synchronized_await() and try_pop()).
 * @warning call only from the reader (owner)
 */
template<class Mutex, class CondVar>
pointer synchronized_pop(Mutex& mtx, CondVar& cv) {
synchronized_await(mtx, cv);
return try_pop();
}
/**
 * @brief Blocks the caller until the queue is non-empty.
 *
 * Tries to transition the queue from @p empty to @p blocked via
 * try_block(); on success, waits on @p cv until a writer notifies
 * (see synchronized_enqueue). If try_block() fails, data is already
 * available and no wait is required.
 * @pre !closed()
 * @warning call only from the reader (owner)
 */
template<class Mutex, class CondVar>
void synchronized_await(Mutex& mtx, CondVar& cv) {
CPPA_REQUIRE(!closed());
if (try_block()) {
std::unique_lock<Mutex> guard(mtx);
while (blocked()) cv.wait(guard);
}
}
/**
 * @brief Blocks the caller until the queue is non-empty or @p timeout
 *        is reached.
 * @returns @p false if the timeout expired while the queue was still
 *          empty; otherwise @p true.
 * @pre !closed()
 * @warning call only from the reader (owner)
 */
template<class Mutex, class CondVar, class TimePoint>
bool synchronized_await(Mutex& mtx, CondVar& cv, const TimePoint& timeout) {
CPPA_REQUIRE(!closed());
if (try_block()) {
std::unique_lock<Mutex> guard(mtx);
while (blocked()) {
if (cv.wait_until(guard, timeout) == std::cv_status::timeout) {
// if we're unable to set the queue from blocked to empty,
// then there's a new element in the list
return !try_unblock();
}
}
}
return true;
}
private:
......@@ -171,15 +290,22 @@ class single_reader_queue {
// atomically sets m_stack back and enqueues all elements to the cache
bool fetch_new_data(pointer end_ptr) {
CPPA_REQUIRE(m_head == nullptr);
CPPA_REQUIRE(end_ptr == nullptr || end_ptr == stack_end());
CPPA_REQUIRE(!end_ptr || end_ptr == stack_empty_dummy());
pointer e = m_stack.load();
// must not be called on a closed queue
CPPA_REQUIRE(e != nullptr);
// it's enough to check this once, since only the owner is allowed
// to close the queue and only the owner is allowed to call this
// member function
if (e == nullptr) return false;
while (e != end_ptr) {
if (m_stack.compare_exchange_weak(e, end_ptr)) {
while (e != stack_end()) {
if (is_dummy(e)) {
// only use-case for this is closing a queue
CPPA_REQUIRE(end_ptr == nullptr);
return false;
}
while (e) {
CPPA_REQUIRE(!is_dummy(e));
auto next = e->next;
e->next = m_head;
m_head = e;
......@@ -193,7 +319,7 @@ class single_reader_queue {
}
inline bool fetch_new_data() {
return fetch_new_data(stack_end());
return fetch_new_data(stack_empty_dummy());
}
pointer take_head() {
......@@ -215,7 +341,7 @@ class single_reader_queue {
template<typename F>
void clear_cached_elements(const F& f) {
while (m_head != nullptr) {
while (m_head) {
auto next = m_head->next;
f(*m_head);
m_delete(m_head);
......@@ -223,10 +349,19 @@ class single_reader_queue {
}
}
pointer stack_end() const {
inline pointer stack_empty_dummy() {
// we are *never* going to dereference the returned pointer;
// it is only used as an indicator of whether this queue is closed or not
return reinterpret_cast<pointer>(const_cast<single_reader_queue*>(this));
return reinterpret_cast<pointer>(this);
}
/**
 * @brief Sentinel stored in m_stack while the reader is blocked.
 *
 * Computed as 'this + sizeof(void*)' so it is distinct from
 * stack_empty_dummy() (which is 'this') and from nullptr.
 */
inline pointer reader_blocked_dummy() {
// we are not going to dereference this pointer either
return reinterpret_cast<pointer>(reinterpret_cast<std::intptr_t>(this) + sizeof(void*));
}
/**
 * @brief Checks whether @p ptr is one of the two sentinel values
 *        (empty-stack dummy or blocked-reader dummy) rather than
 *        a real queue element.
 */
inline bool is_dummy(pointer ptr) {
    if (ptr == stack_empty_dummy()) {
        return true;
    }
    return ptr == reader_blocked_dummy();
}
};
......
......@@ -44,7 +44,6 @@
#include "cppa/spawn_fwd.hpp"
#include "cppa/message_id.hpp"
#include "cppa/match_expr.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/typed_actor.hpp"
#include "cppa/spawn_options.hpp"
......@@ -424,8 +423,6 @@ class local_actor : public extend<abstract_actor>::with<memory_cached> {
/**
* @brief Can be overridden to perform cleanup code after an actor
* finished execution.
* @warning Must not call any function manipulating the actor's state such
* as join, leave, link, or monitor.
*/
virtual void on_exit();
......@@ -550,21 +547,6 @@ class local_actor : public extend<abstract_actor>::with<memory_cached> {
inline void planned_exit_reason(std::uint32_t value);
actor_state cas_state(actor_state expected, actor_state desired) {
auto e = expected;
do { if (m_state.compare_exchange_weak(e, desired)) return desired; }
while (e == expected);
return e;
}
inline void set_state(actor_state new_value) {
m_state.store(new_value);
}
inline actor_state state() const {
return m_state;
}
void cleanup(std::uint32_t reason) override;
mailbox_element* dummy_node() {
......@@ -602,9 +584,6 @@ class local_actor : public extend<abstract_actor>::with<memory_cached> {
// set by quit
std::uint32_t m_planned_exit_reason;
// the state of the (possibly cooperatively scheduled) actor
std::atomic<actor_state> m_state;
/** @endcond */
private:
......
......@@ -33,7 +33,6 @@
#include "cppa/config.hpp"
#include "cppa/logging.hpp"
#include "cppa/resumable.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/mailbox_element.hpp"
#include "cppa/detail/cs_thread.hpp"
......@@ -70,10 +69,6 @@ class context_switching_resume {
, m_cs_thread(context_switching_resume::trampoline,
static_cast<blocking_actor*>(this)) { }
inline bool exec_on_spawn() const {
return true;
}
void attach_to_scheduler() override {
this->ref();
}
......@@ -84,7 +79,7 @@ class context_switching_resume {
resumable::resume_result resume(detail::cs_thread* from,
execution_unit* host) override {
CPPA_REQUIRE(from != nullptr && host != nullptr);
CPPA_REQUIRE(from != nullptr);
CPPA_LOG_TRACE("");
this->m_host = host;
using namespace detail;
......@@ -93,29 +88,17 @@ class context_switching_resume {
case yield_state::done: {
return resumable::done;
}
case yield_state::ready: { break; }
case yield_state::blocked: {
switch (this->cas_state(actor_state::about_to_block,
actor_state::blocked)) {
case actor_state::ready: {
// restore variables
case yield_state::ready: {
// should not happen, since it is simply a waste
// of time (switching back-and-forth for no reason)
CPPA_LOG_WARNING("context-switching actor returned "
"with yield_state::ready");
break;
}
case actor_state::blocked: {
// wait until someone re-schedules that actor
case yield_state::blocked: {
if (static_cast<Derived*>(this)->mailbox().try_block()) {
return resumable::resume_later;
}
case actor_state::about_to_block:
CPPA_CRITICAL("attempt to set state from "
"about_to_block to blocked "
"failed: state is still set "
"to about_to_block");
case actor_state::done:
CPPA_CRITICAL("attempt to set state from "
"about_to_block to blocked "
"failed: state is set "
"to done");
}
break;
}
case yield_state::invalid: {
......@@ -134,14 +117,8 @@ class context_switching_resume {
void await_ready(Actor* self) {
CPPA_LOG_TRACE("");
while (!self->has_next_message()) {
self->set_state(actor_state::about_to_block);
// double-check before going to block
if (self->has_next_message()) {
// someone preempt us => continue
self->set_state(actor_state::ready);
}
// wait until actor becomes rescheduled
else detail::yield(detail::yield_state::blocked);
// will call mailbox().try_block() in resume()
detail::yield(detail::yield_state::blocked);
}
}
......
......@@ -36,7 +36,6 @@
#include "cppa/any_tuple.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/singletons.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/message_header.hpp"
#include "cppa/detail/yield_interface.hpp"
......@@ -51,93 +50,34 @@ class cooperative_scheduling {
using timeout_type = int;
// this does return nullptr
template<class Actor, typename F>
void fetch_messages(Actor* self, F cb) {
auto e = self->mailbox().try_pop();
while (e == nullptr) {
if (self->mailbox().can_fetch_more() == false) {
self->set_state(actor_state::about_to_block);
// make sure mailbox is empty
if (self->mailbox().can_fetch_more()) {
// someone preempt us => continue
self->set_state(actor_state::ready);
}
// wait until actor becomes rescheduled
else detail::yield(detail::yield_state::blocked);
}
}
// ok, we have at least one message
while (e) {
cb(e);
e = self->mailbox().try_pop();
}
}
template<class Actor, typename F>
inline void fetch_messages(Actor* self, F cb, timeout_type) {
// a call to this call is always preceded by init_timeout,
// which will trigger a timeout message
fetch_messages(self, cb);
}
/**
 * @brief Attaches @p self to the scheduler and, if the actor wants to
 *        be executed right after spawning, enqueues it for execution.
 * @param host If non-null, the actor is scheduled on this execution
 *        unit; otherwise it is handed to the global coordinator.
 */
template<class Actor>
inline void launch(Actor* self, execution_unit* host) {
// detached in scheduler::worker::run
self->attach_to_scheduler();
if (self->exec_on_spawn()) {
if (host) host->exec_later(self);
else get_scheduling_coordinator()->enqueue(self);
}
}
template<class Actor>
void enqueue(Actor* self,
msg_hdr_cref hdr,
any_tuple& msg,
execution_unit* host) {
void enqueue(Actor* self, msg_hdr_cref hdr,
any_tuple& msg, execution_unit* host) {
auto e = self->new_mailbox_element(hdr, std::move(msg));
switch (self->mailbox().enqueue(e)) {
case intrusive::first_enqueued: {
auto state = self->state();
auto set_ready = [&]() -> bool {
state = self->cas_state(state, actor_state::ready);
return state == actor_state::ready;
};
for (;;) {
switch (state) {
case actor_state::blocked: {
if (set_ready()) {
case intrusive::enqueue_result::unblocked_reader: {
// re-schedule actor
if (host) host->exec_later(self);
else get_scheduling_coordinator()->enqueue(self);
return;
}
break;
}
case actor_state::about_to_block: {
if (set_ready()) {
// actor is still running
return;
}
break;
}
case actor_state::ready:
case actor_state::done:
return;
}
}
break;
}
case intrusive::queue_closed: {
case intrusive::enqueue_result::queue_closed: {
if (hdr.id.is_request()) {
detail::sync_request_bouncer f{self->exit_reason()};
f(hdr.sender, hdr.id);
}
break;
}
case intrusive::enqueued:
// enqueued to an running actors' mailbox; nothing to do
case intrusive::enqueue_result::success:
// enqueued to a running actor's mailbox; nothing to do
break;
}
}
......
......@@ -42,7 +42,6 @@
#include "cppa/logging.hpp"
#include "cppa/behavior.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/policy/resume_policy.hpp"
......@@ -62,10 +61,6 @@ class event_based_resume {
template<typename... Ts>
mixin(Ts&&... args) : Base(std::forward<Ts>(args)...) { }
inline Derived* dptr() {
return static_cast<Derived*>(this);
}
void attach_to_scheduler() override {
this->ref();
}
......@@ -74,44 +69,49 @@ class event_based_resume {
this->deref();
}
inline bool exec_on_spawn() const {
return false;
}
resumable::resume_result resume(detail::cs_thread*,
execution_unit* host) override {
CPPA_REQUIRE(host != nullptr);
auto d = dptr();
auto d = static_cast<Derived*>(this);
d->m_host = host;
CPPA_LOG_TRACE("id = " << d->id()
<< ", state = " << static_cast<int>(d->state()));
CPPA_REQUIRE(d->state() == actor_state::ready);
auto done_cb = [&]() -> bool {
CPPA_LOG_TRACE("");
d->bhvr_stack().clear();
d->bhvr_stack().cleanup();
if (d->planned_exit_reason() == exit_reason::not_exited) {
d->planned_exit_reason(exit_reason::normal);
}
d->on_exit();
if (!d->bhvr_stack().empty()) {
CPPA_LOG_DEBUG("on_exit did set a new behavior in done_cb");
CPPA_LOG_DEBUG("on_exit did set a new behavior in on_exit");
d->planned_exit_reason(exit_reason::not_exited);
return false; // on_exit did set a new behavior
}
d->set_state(actor_state::done);
d->cleanup(d->planned_exit_reason());
auto rsn = d->planned_exit_reason();
if (rsn == exit_reason::not_exited) {
rsn = exit_reason::normal;
d->planned_exit_reason(rsn);
}
d->cleanup(rsn);
return true;
};
auto actor_done = [&] {
return d->bhvr_stack().empty()
|| d->planned_exit_reason() != exit_reason::not_exited;
};
// actors without behavior or that have already defined
// an exit reason must not be resumed
CPPA_REQUIRE(!d->m_initialized || !actor_done());
if (!d->m_initialized) {
d->m_initialized = true;
auto bhvr = d->make_behavior();
if (bhvr) d->become(std::move(bhvr));
// else: make_behavior() might have just called become()
if (actor_done() && done_cb()) return resume_result::done;
// else: enter resume loop
}
try {
for (;;) {
auto ptr = d->next_message();
if (ptr) {
CPPA_REQUIRE(!d->bhvr_stack().empty());
if (d->invoke_message(ptr)) {
if (actor_done() && done_cb()) {
CPPA_LOG_DEBUG("actor exited");
......@@ -137,40 +137,10 @@ class event_based_resume {
else {
CPPA_LOG_DEBUG("no more element in mailbox; "
"going to block");
d->set_state(actor_state::about_to_block);
std::atomic_thread_fence(std::memory_order_seq_cst);
if (!d->has_next_message()) {
switch (d->cas_state(actor_state::about_to_block,
actor_state::blocked)) {
case actor_state::ready:
// interrupted by arriving message
// restore members
CPPA_LOG_DEBUG("switched back to ready: "
"interrupted by "
"arriving message");
break;
case actor_state::blocked:
CPPA_LOG_DEBUG("set state successfully "
"to blocked");
// done setting actor to blocked
if (d->mailbox().try_block()) {
return resumable::resume_later;
case actor_state::about_to_block:
CPPA_CRITICAL("attempt to set state from "
"about_to_block to blocked "
"failed: state is still set "
"to about_to_block");
case actor_state::done:
CPPA_CRITICAL("attempt to set state from "
"about_to_block to blocked "
"failed: state is set "
"to done");
};
}
else {
CPPA_LOG_DEBUG("switched back to ready: "
"mailbox can fetch more");
d->set_state(actor_state::ready);
}
// else: try again
}
}
}
......
......@@ -38,7 +38,6 @@
#include "cppa/logging.hpp"
#include "cppa/singletons.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/detail/cs_thread.hpp"
......@@ -62,47 +61,15 @@ class no_scheduling {
typedef std::chrono::high_resolution_clock::time_point timeout_type;
template<class Actor, typename F>
bool fetch_messages(Actor* self, F cb) {
await_data(self);
return fetch_messages_impl(self, cb);
}
template<class Actor, typename F>
bool try_fetch_messages(Actor* self, F cb) {
return fetch_messages_impl(self, cb);
}
template<class Actor, typename F>
timed_fetch_result fetch_messages(Actor* self, F cb, timeout_type abs_time) {
if (!await_data(self, abs_time)) {
return timed_fetch_result::no_message;
}
if (fetch_messages_impl(self, cb)) return timed_fetch_result::success;
return timed_fetch_result::no_message;
}
template<class Actor>
void enqueue(Actor* self, msg_hdr_cref hdr,
any_tuple& msg, execution_unit*) {
auto ptr = self->new_mailbox_element(hdr, std::move(msg));
switch (self->mailbox().enqueue(ptr)) {
case intrusive::first_enqueued: {
lock_type guard(m_mtx);
self->set_state(actor_state::ready);
m_cv.notify_one();
break;
}
case intrusive::queue_closed: {
if (hdr.id.valid()) {
detail::sync_request_bouncer f{self->exit_reason()};
f(hdr.sender, hdr.id);
}
break;
}
case intrusive::enqueued: {
// enqueued to a running actor's mailbox; nothing to do
break;
// returns false if mailbox has been closed
if (!self->mailbox().synchronized_enqueue(m_mtx, m_cv, ptr)) {
if (hdr.id.is_request()) {
detail::sync_request_bouncer srb{self->exit_reason()};
srb(hdr.sender, hdr.id);
}
}
}
......@@ -113,59 +80,40 @@ class no_scheduling {
CPPA_LOG_TRACE(CPPA_ARG(self));
CPPA_REQUIRE(self != nullptr);
intrusive_ptr<Actor> mself{self};
self->attach_to_scheduler();
std::thread([=] {
CPPA_PUSH_AID(mself->id());
CPPA_LOG_TRACE("");
detail::cs_thread fself;
for (;;) {
mself->set_state(actor_state::ready);
if (mself->resume(&fself, nullptr) == resumable::done) {
return;
}
// await new data before resuming actor
await_data(mself.get());
}
self->detach_from_scheduler();
}).detach();
}
// await_data is being called from no_scheduling (only)
template<class Actor>
void await_data(Actor* self) {
if (!self->has_next_message()) {
lock_type guard(m_mtx);
while (!self->has_next_message()) m_cv.wait(guard);
}
if (self->has_next_message()) return;
self->mailbox().synchronized_await(m_mtx, m_cv);
}
// this additional member function is needed to implement
// timer_actor (see scheduler.cpp)
template<class Actor, class TimePoint>
bool await_data(Actor* self, const TimePoint& tp) {
if (!self->has_next_message()) {
lock_type guard(m_mtx);
while (!self->has_next_message()) {
if (m_cv.wait_until(guard, tp) == std::cv_status::timeout) {
return false;
}
}
}
return true;
if (self->has_next_message()) return true;
return self->mailbox().synchronized_await(m_mtx, m_cv, tp);
}
private:
template<class Actor, typename F>
bool fetch_messages_impl(Actor* self, F cb) {
auto next = [&] { return self->mailbox().try_pop(); };
auto e = next();
if (e) {
for (; e != nullptr; e = next()) {
cb(e);
}
return true;
}
return false;
}
std::mutex m_mtx;
std::condition_variable m_cv;
......
......@@ -45,13 +45,15 @@ class priority_policy {
public:
/**
* @brief Returns the next message from the list of cached elements or
* @p nullptr. The latter indicates only that there is no element
* left in the cache.
* @brief Returns the next message from the mailbox or @p nullptr
* if it's empty.
*/
template<class Actor>
unique_mailbox_element_pointer next_message(Actor* self);
/**
* @brief Queries whether the mailbox is not empty.
*/
template<class Actor>
bool has_next_message(Actor* self);
......
......@@ -63,6 +63,8 @@ class typed_event_based_actor
public:
typed_event_based_actor() : m_initialized(false) { }
typedef util::type_list<Rs...> signatures;
typedef typed_behavior<Rs...> behavior_type;
......@@ -75,6 +77,8 @@ class typed_event_based_actor
virtual behavior_type make_behavior() = 0;
bool m_initialized;
};
} // namespace cppa
......
......@@ -35,9 +35,7 @@
namespace cppa {
event_based_actor::event_based_actor() {
m_state = actor_state::blocked;
}
event_based_actor::event_based_actor() : m_initialized(false) { }
event_based_actor::~event_based_actor() { }
......
......@@ -34,14 +34,6 @@ namespace cppa {
namespace detail {
behavior functor_based_actor::make_behavior() {
if (m_void_impl) {
enqueue({address(), this}, make_any_tuple(atom("RUN")), m_host);
return {
on(atom("RUN")) >> [=] {
become(m_make_behavior(this));
}
};
}
return m_make_behavior(this);
}
......
......@@ -74,8 +74,7 @@ class down_observer : public attachable {
local_actor::local_actor()
: m_trap_exit(false), m_dummy_node(), m_current_node(&m_dummy_node)
, m_planned_exit_reason(exit_reason::not_exited)
, m_state(actor_state::ready) {
, m_planned_exit_reason(exit_reason::not_exited) {
m_node = get_middleman()->node();
}
......
......@@ -34,6 +34,7 @@
#include <fstream>
#include <algorithm>
#include <pthread.h>
#include <condition_variable>
#ifndef CPPA_WINDOWS
#include <unistd.h>
......@@ -43,8 +44,10 @@
#include "cppa/cppa.hpp"
#include "cppa/logging.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/detail/singleton_manager.hpp"
#include "cppa/intrusive/blocking_single_reader_queue.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
using namespace std;
......@@ -89,7 +92,8 @@ class logging_impl : public logging {
void destroy() {
log("TRACE", "logging", "run", __FILE__, __LINE__, "EXIT");
// an empty string means: shut down
m_queue.push_back(new log_event{0, ""});
m_queue.synchronized_enqueue(m_queue_mtx, m_queue_cv,
new log_event{0, ""});
m_thread.join();
delete this;
}
......@@ -100,7 +104,7 @@ class logging_impl : public logging {
fstream out(fname.str().c_str(), ios::out | ios::app);
unique_ptr<log_event> event;
for (;;) {
event.reset(m_queue.pop());
event.reset(m_queue.synchronized_pop(m_queue_mtx, m_queue_cv));
if (event->msg.empty()) {
out.close();
return;
......@@ -137,13 +141,16 @@ class logging_impl : public logging {
<< file_name << ":" << line_num << " "
<< msg
<< endl;
m_queue.push_back(new log_event{nullptr, line.str()});
m_queue.synchronized_enqueue(m_queue_mtx, m_queue_cv,
new log_event{nullptr, line.str()});
}
private:
thread m_thread;
intrusive::blocking_single_reader_queue<log_event> m_queue;
mutex m_queue_mtx;
condition_variable m_queue_cv;
intrusive::single_reader_queue<log_event> m_queue;
};
......
......@@ -108,7 +108,7 @@ void remote_actor_proxy::forward_msg(msg_hdr_cref hdr, any_tuple msg) {
}
if (hdr.sender && hdr.id.is_request()) {
switch (m_pending_requests.enqueue(new_req_info(hdr.sender, hdr.id))) {
case intrusive::queue_closed: {
case intrusive::enqueue_result::queue_closed: {
auto rsn = exit_reason();
m_parent->run_later([rsn, hdr] {
CPPA_LOGC_TRACE("cppa::io::remote_actor_proxy",
......@@ -119,11 +119,11 @@ void remote_actor_proxy::forward_msg(msg_hdr_cref hdr, any_tuple msg) {
});
return; // no need to forward message
}
case intrusive::enqueued: {
case intrusive::enqueue_result::success: {
CPPA_LOG_DEBUG("enqueued pending request to non-empty queue");
break;
}
case intrusive::first_enqueued: {
case intrusive::enqueue_result::unblocked_reader: {
CPPA_LOG_DEBUG("enqueued pending request to empty queue");
break;
}
......
......@@ -321,10 +321,7 @@ void coordinator::destroy() {
}
coordinator::coordinator()
: m_timer(new timer_actor), m_printer(true)
, m_next_worker(0) {
// NOP
}
: m_timer(new timer_actor), m_printer(true) , m_next_worker(0) { }
coordinator* coordinator::create_singleton() {
return new coordinator;
......@@ -505,6 +502,7 @@ void worker::external_enqueue(job_ptr ptr) {
}
void worker::exec_later(job_ptr ptr) {
CPPA_REQUIRE(std::this_thread::get_id() == m_this_thread.get_id());
m_job_list.push_back(ptr);
}
......
......@@ -102,7 +102,6 @@ opencl::opencl_metainfo* singleton_manager::get_opencl_metainfo() {
}
actor_registry* singleton_manager::get_actor_registry() {
return lazy_get(s_actor_registry);
}
......
......@@ -36,6 +36,7 @@
#include <cstring> // memset
#include <iostream>
#include <stdexcept>
#include <condition_variable>
#ifndef CPPA_WINDOWS
#include <netinet/tcp.h>
......@@ -53,7 +54,6 @@
#include "cppa/detail/raw_access.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
#include "cppa/intrusive/blocking_single_reader_queue.hpp"
#include "cppa/io/acceptor.hpp"
#include "cppa/io/middleman.hpp"
......@@ -198,17 +198,19 @@ abstract_actor_ptr remote_actor_impl(stream_ptr_pair io, string_set expected) {
return ptr;
}
struct remote_actor_result { remote_actor_result* next; actor value; };
intrusive::blocking_single_reader_queue<remote_actor_result> q;
mm->run_later([mm, io, pinfptr, remote_aid, &q] {
std::mutex qmtx;
std::condition_variable qcv;
intrusive::single_reader_queue<remote_actor_result> q;
mm->run_later([mm, io, pinfptr, remote_aid, &q, &qmtx, &qcv] {
CPPA_LOGC_TRACE("cppa",
"remote_actor$create_connection", "");
auto pp = mm->get_peer(*pinfptr);
CPPA_LOGF_INFO_IF(pp, "connection already exists (re-use old one)");
if (!pp) mm->new_peer(io.first, io.second, pinfptr);
auto res = mm->get_namespace().get_or_put(pinfptr, remote_aid);
q.push_back(new remote_actor_result{0, res});
q.synchronized_enqueue(qmtx, qcv, new remote_actor_result{0, res});
});
std::unique_ptr<remote_actor_result> result(q.pop());
std::unique_ptr<remote_actor_result> result(q.synchronized_pop(qmtx, qcv));
CPPA_LOGF_DEBUG(CPPA_MARG(result, get));
return raw_access::get(result->value);
}
......
......@@ -22,9 +22,9 @@ string cppa_fill4(size_t value) {
return result;
}
const char* cppa_strip_path(const char* fname) {
auto res = fname;
auto i = fname;
const char* cppa_strip_path(const char* file) {
auto res = file;
auto i = file;
for (char c = *i; c != '\0'; c = *++i) {
if (c == '/') {
res = i + 1;
......@@ -33,12 +33,12 @@ const char* cppa_strip_path(const char* fname) {
return res;
}
void cppa_unexpected_message(const char* fname, size_t line_num) {
CPPA_PRINTERRC(fname, line_num, "unexpected message");
void cppa_unexpected_message(const char* file, size_t line, cppa::any_tuple t) {
CPPA_PRINTERRC(file, line, "unexpected message: " << to_string(t));
}
void cppa_unexpected_timeout(const char* fname, size_t line_num) {
CPPA_PRINTERRC(fname, line_num, "unexpected timeout");
void cppa_unexpected_timeout(const char* file, size_t line) {
CPPA_PRINTERRC(file, line, "unexpected timeout");
}
vector<string> split(const string& str, char delim, bool keep_empties) {
......
......@@ -27,9 +27,9 @@ void set_default_test_settings();
size_t cppa_error_count();
void cppa_inc_error_count();
std::string cppa_fill4(size_t value);
const char* cppa_strip_path(const char* fname);
void cppa_unexpected_message(const char* fname, size_t line_num);
void cppa_unexpected_timeout(const char* fname, size_t line_num);
const char* cppa_strip_path(const char* file);
void cppa_unexpected_message(const char* file, size_t line, cppa::any_tuple t);
void cppa_unexpected_timeout(const char* file, size_t line);
#define CPPA_STREAMIFY(fname, line, message) \
cppa_strip_path(fname) << ":" << cppa_fill4(line) << " " << message
......@@ -166,13 +166,14 @@ inline void cppa_check_value(V1 v1,
#define CPPA_UNEXPECTED_TOUT() \
cppa_unexpected_timeout(__FILE__, __LINE__)
#define CPPA_UNEXPECTED_MSG() \
cppa_unexpected_message(__FILE__, __LINE__)
#define CPPA_UNEXPECTED_MSG(selfptr) \
cppa_unexpected_message(__FILE__, __LINE__, selfptr ->last_dequeued())
// some convenience macros for defining callbacks
#define CPPA_CHECKPOINT_CB() [] { CPPA_CHECKPOINT(); }
#define CPPA_FAILURE_CB(err_msg) [] { CPPA_FAILURE(err_msg); }
#define CPPA_UNEXPECTED_MSG_CB() [] { CPPA_UNEXPECTED_MSG(); }
#define CPPA_UNEXPECTED_MSG_CB(selfptr) [=] { CPPA_UNEXPECTED_MSG(selfptr); }
#define CPPA_UNEXPECTED_MSG_CB_REF(selfref) [&] { CPPA_UNEXPECTED_MSG(selfref); }
#define CPPA_UNEXPECTED_TOUT_CB() [] { CPPA_UNEXPECTED_TOUT(); }
std::vector<std::string> split(const std::string& str, char delim = ' ', bool keep_empties = true);
......
......@@ -50,10 +50,10 @@ void ping(cppa::event_based_actor* self, size_t num_pings) {
if (++*count >= num_pings) self->quit();
return make_cow_tuple(atom("ping"), value + 1);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
}
......@@ -72,12 +72,12 @@ void pong(cppa::event_based_actor* self) {
on_arg_match >> [=](const down_msg& dm) {
self->quit(dm.reason);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
// reply to 'ping'
return {atom("pong"), value};
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
}
......@@ -116,7 +116,7 @@ void peer(io::broker* self, io::connection_handle hdl, const actor& buddy) {
on_arg_match >> [=](const down_msg& dm) {
if (dm.source == buddy) self->quit(dm.reason);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
}
......@@ -129,7 +129,7 @@ void peer_acceptor(io::broker* self, const actor& buddy) {
self->fork(peer, hdl, buddy);
self->quit();
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
}
......
......@@ -52,7 +52,7 @@ void spawn5_server_impl(event_based_actor* self, actor client, group grp) {
}
},
others() >> [=] {
CPPA_UNEXPECTED_MSG();
CPPA_UNEXPECTED_MSG(self);
self->quit(exit_reason::unhandled_exception);
},
after(chrono::seconds(10)) >> [=] {
......@@ -81,7 +81,7 @@ void spawn5_server_impl(event_based_actor* self, actor client, group grp) {
}
},
others() >> [=] {
CPPA_UNEXPECTED_MSG();
CPPA_UNEXPECTED_MSG(self);
//self->quit(exit_reason::unhandled_exception);
},
after(chrono::seconds(2)) >> [=] {
......
......@@ -8,6 +8,9 @@
#include "cppa/cppa.hpp"
#include "cppa/detail/cs_thread.hpp"
#include "cppa/detail/yield_interface.hpp"
using namespace std;
using namespace cppa;
......@@ -305,10 +308,10 @@ behavior high_priority_testee(event_based_actor* self) {
CPPA_CHECKPOINT();
self->quit();
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(self)
);
}
......@@ -335,7 +338,7 @@ struct slave : event_based_actor {
behavior make_behavior() override {
link_to(master);
return (
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(this)
);
}
......@@ -389,7 +392,8 @@ void test_serial_reply() {
);
}
);
});
}
);
{ // lifetime scope of self
scoped_actor self;
cout << "ID of main: " << self->id() << endl;
......@@ -397,7 +401,7 @@ void test_serial_reply() {
on(atom("hiho")) >> [] {
CPPA_CHECKPOINT();
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->send_exit(master, exit_reason::user_shutdown);
}
......@@ -500,10 +504,13 @@ void test_simple_reply_response() {
void test_spawn() {
test_simple_reply_response();
CPPA_CHECKPOINT();
test_serial_reply();
CPPA_CHECKPOINT();
test_or_else();
CPPA_CHECKPOINT();
test_continuation();
CPPA_CHECKPOINT();
scoped_actor self;
// check whether detached actors and scheduled actors interact w/o errors
auto m = spawn<master, detached>();
......@@ -518,13 +525,15 @@ void test_spawn() {
self->receive(on(1, 2, 3, true) >> [] { });
self->send_tuple(self, any_tuple{});
self->receive(on() >> [] { });
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
CPPA_PRINT("test self->receive with zero timeout");
self->receive (
others() >> CPPA_UNEXPECTED_MSG_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self),
after(chrono::seconds(0)) >> [] { /* mailbox empty */ }
);
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
CPPA_PRINT("test mirror"); {
......@@ -532,7 +541,7 @@ void test_spawn() {
self->send(mirror, "hello mirror");
self->receive (
on("hello mirror") >> CPPA_CHECKPOINT_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->send_exit(mirror, exit_reason::user_shutdown);
self->receive (
......@@ -540,9 +549,9 @@ void test_spawn() {
if (dm.reason == exit_reason::user_shutdown) {
CPPA_CHECKPOINT();
}
else { CPPA_UNEXPECTED_MSG(); }
else { CPPA_UNEXPECTED_MSG_CB_REF(self); }
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
......@@ -553,7 +562,7 @@ void test_spawn() {
self->send(mirror, "hello mirror");
self->receive (
on("hello mirror") >> CPPA_CHECKPOINT_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->send_exit(mirror, exit_reason::user_shutdown);
self->receive (
......@@ -561,9 +570,9 @@ void test_spawn() {
if (dm.reason == exit_reason::user_shutdown) {
CPPA_CHECKPOINT();
}
else { CPPA_UNEXPECTED_MSG(); }
else { CPPA_UNEXPECTED_MSG(self); }
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
......@@ -575,7 +584,7 @@ void test_spawn() {
self->send(mirror, "hello mirror");
self->receive (
on("hello mirror") >> CPPA_CHECKPOINT_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->send_exit(mirror, exit_reason::user_shutdown);
self->receive (
......@@ -583,9 +592,9 @@ void test_spawn() {
if (dm.reason == exit_reason::user_shutdown) {
CPPA_CHECKPOINT();
}
else { CPPA_UNEXPECTED_MSG(); }
else { CPPA_UNEXPECTED_MSG(self); }
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
......@@ -596,7 +605,7 @@ void test_spawn() {
self->send(mecho, "hello echo");
self->receive (
on("hello echo") >> [] { },
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
......@@ -627,7 +636,7 @@ void test_spawn() {
self->send(cstk, atom("put"), self);
self->send(cstk, atom("break"));
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
......@@ -663,6 +672,16 @@ void test_spawn() {
CPPA_CHECKPOINT();
auto sync_testee1 = spawn<blocking_api>([](blocking_actor* s) {
if (detail::cs_thread::is_disabled_feature) {
CPPA_LOGF_WARNING("compiled w/o context switching "
"(skip some tests)");
}
else {
CPPA_CHECKPOINT();
// scheduler should switch back immediately
detail::yield(detail::yield_state::ready);
CPPA_CHECKPOINT();
}
s->receive (
on(atom("get")) >> [] {
return make_cow_tuple(42, 2);
......@@ -687,14 +706,14 @@ void test_spawn() {
CPPA_CHECK_EQUAL(a, 42);
CPPA_CHECK_EQUAL(b, 2);
},
others() >> CPPA_UNEXPECTED_MSG_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self),
after(chrono::seconds(10)) >> CPPA_UNEXPECTED_TOUT_CB()
);
// dequeue remaining async. message
self->receive (on(0, 0) >> CPPA_CHECKPOINT_CB());
// make sure there's no other message in our mailbox
self->receive (
others() >> CPPA_UNEXPECTED_MSG_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self),
after(chrono::seconds(0)) >> [] { }
);
self->await_all_other_actors_done();
......@@ -718,7 +737,7 @@ void test_spawn() {
}
);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(s)
);
});
self->monitor(sync_testee);
......@@ -746,7 +765,7 @@ void test_spawn() {
self->sync_send(sync_testee, "!?").await(
on<sync_exited_msg>() >> CPPA_CHECKPOINT_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self),
after(chrono::milliseconds(5)) >> CPPA_UNEXPECTED_TOUT_CB()
);
......@@ -769,7 +788,7 @@ void test_spawn() {
self->send(bob, 1, "hello actor");
self->receive (
on(4, "hello actor from Bob from Joe") >> CPPA_CHECKPOINT_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
// kill joe and bob
auto poison_pill = make_any_tuple(atom("done"));
......@@ -831,6 +850,8 @@ void test_spawn() {
auto res1 = behavior_test<testee_actor>(self, spawn<blocking_api>(testee_actor{}));
CPPA_CHECK_EQUAL("wait4int", res1);
CPPA_CHECK_EQUAL(behavior_test<event_testee>(self, spawn<event_testee>()), "wait4int");
self->await_all_other_actors_done();
CPPA_CHECKPOINT();
// create some actors linked to one single actor
// and kill them all through killing the link
......@@ -839,7 +860,7 @@ void test_spawn() {
for (int i = 0; i < 100; ++i) {
s->spawn<event_testee, linked>();
}
s->become(others() >> CPPA_UNEXPECTED_MSG_CB());
s->become(others() >> CPPA_UNEXPECTED_MSG_CB(s));
});
self->send_exit(legion, exit_reason::user_shutdown);
self->await_all_other_actors_done();
......@@ -923,6 +944,8 @@ void test_spawn() {
int main() {
CPPA_TEST(test_spawn);
test_spawn();
CPPA_CHECKPOINT();
shutdown();
CPPA_CHECKPOINT();
return CPPA_TEST_RESULT();
}
......@@ -192,10 +192,10 @@ void test_sync_send() {
});
s->sync_send(foi, atom("i")).await(
[&](int i) { CPPA_CHECK_EQUAL(i, 0); ++invocations; },
[&](float) { CPPA_UNEXPECTED_MSG(); }
[&](float) { CPPA_UNEXPECTED_MSG(s); }
);
s->sync_send(foi, atom("f")).await(
[&](int) { CPPA_UNEXPECTED_MSG(); },
[&](int) { CPPA_UNEXPECTED_MSG(s); },
[&](float f) { CPPA_CHECK_EQUAL(f, 0.f); ++invocations; }
);
CPPA_CHECK_EQUAL(invocations, 2);
......@@ -219,7 +219,7 @@ void test_sync_send() {
on_arg_match >> [&](const down_msg& dm) {
CPPA_CHECK_EQUAL(dm.reason, exit_reason::user_shutdown);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
auto mirror = spawn<sync_mirror>();
bool continuation_called = false;
......@@ -256,7 +256,7 @@ void test_sync_send() {
CPPA_CHECKPOINT();
self->timed_sync_send(self, std::chrono::milliseconds(50), atom("NoWay")).await(
on<sync_timeout_msg>() >> CPPA_CHECKPOINT_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
// we should have received two DOWN messages with normal exit reason
// plus 'NoWay'
......@@ -270,13 +270,13 @@ void test_sync_send() {
CPPA_PRINT("trigger \"actor did not reply to a "
"synchronous request message\"");
},
others() >> CPPA_UNEXPECTED_MSG_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self),
after(std::chrono::seconds(0)) >> CPPA_UNEXPECTED_TOUT_CB()
);
CPPA_CHECKPOINT();
// mailbox should be empty now
self->receive (
others() >> CPPA_UNEXPECTED_MSG_CB(),
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self),
after(std::chrono::seconds(0)) >> CPPA_CHECKPOINT_CB()
);
// check wheter continuations are invoked correctly
......@@ -284,11 +284,11 @@ void test_sync_send() {
// first test: sync error must occur, continuation must not be called
bool timeout_occured = false;
self->on_sync_timeout([&] { timeout_occured = true; });
self->on_sync_failure(CPPA_UNEXPECTED_MSG_CB());
self->on_sync_failure(CPPA_UNEXPECTED_MSG_CB_REF(self));
self->timed_sync_send(c, std::chrono::milliseconds(500), atom("HiThere"))
.await(CPPA_FAILURE_CB("C replied to 'HiThere'!"));
CPPA_CHECK_EQUAL(timeout_occured, true);
self->on_sync_failure(CPPA_UNEXPECTED_MSG_CB());
self->on_sync_failure(CPPA_UNEXPECTED_MSG_CB_REF(self));
self->sync_send(c, atom("gogo")).await(CPPA_CHECKPOINT_CB());
self->send_exit(c, exit_reason::user_shutdown);
self->await_all_other_actors_done();
......@@ -320,17 +320,17 @@ void test_sync_send() {
CPPA_CHECKPOINT();
CPPA_CHECK_EQUAL(s->last_sender(), work);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB(s)
);
s->send(s, "Ever danced with the devil in the pale moonlight?");
// response: {'EXIT', exit_reason::user_shutdown}
s->receive_loop(others() >> CPPA_UNEXPECTED_MSG_CB());
s->receive_loop(others() >> CPPA_UNEXPECTED_MSG_CB(s));
});
self->receive (
on_arg_match >> [&](const down_msg& dm) {
CPPA_CHECK_EQUAL(dm.reason, exit_reason::user_shutdown);
},
others() >> CPPA_UNEXPECTED_MSG_CB()
others() >> CPPA_UNEXPECTED_MSG_CB_REF(self)
);
}
......
......@@ -204,11 +204,12 @@ void test_event_testee() {
self->send(et, "hello again event testee!");
self->send(et, "goodbye event testee!");
typed_actor<replies_to<get_state_msg>::with<string>> sub_et = et;
set<string> iface{"cppa::replies_to<get_state_msg>::with<@str>",
// $:: is the anonymous namespace
set<string> iface{"cppa::replies_to<$::get_state_msg>::with<@str>",
"cppa::replies_to<@str>::with<void>",
"cppa::replies_to<float>::with<void>",
"cppa::replies_to<@i32>::with<@i32>"};
CPPA_CHECK(sub_et->interface() == iface);
CPPA_CHECK_EQUAL(util::join(sub_et->interface()), util::join(iface));
self->send(sub_et, get_state_msg{});
// we expect three 42s
int i = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment