Commit af0866e2 authored by neverlord

actor_proxy synchronization(s)

parent 71144fb7
......@@ -176,4 +176,4 @@ cppa/detail/atom_val.hpp
src/atom.cpp
src/cppa.cpp
cppa/exit_reason.hpp
cppa/detail/actor_impl_util.hpp
cppa/detail/abstract_actor.hpp
......@@ -15,7 +15,6 @@
namespace cppa {
class serializer;
class actor_proxy;
class deserializer;
/**
......@@ -24,16 +23,13 @@ class deserializer;
class actor : public channel
{
friend class actor_proxy;
bool m_is_proxy;
std::uint32_t m_id;
actor(std::uint32_t aid);
protected:
actor();
actor(std::uint32_t aid);
public:
......@@ -86,13 +82,18 @@ class actor : public channel
* @brief
* @return
*/
virtual bool remove_backlink(const intrusive_ptr<actor>& to) = 0;
virtual bool remove_backlink(intrusive_ptr<actor>& to) = 0;
/**
* @brief
* @return
*/
virtual bool establish_backlink(const intrusive_ptr<actor>& to) = 0;
virtual bool establish_backlink(intrusive_ptr<actor>& to) = 0;
void link_to(intrusive_ptr<actor>&& other);
void unlink_from(intrusive_ptr<actor>&& other);
bool remove_backlink(intrusive_ptr<actor>&& to);
bool establish_backlink(intrusive_ptr<actor>&& to);
/**
* @brief Gets the {@link process_information} of the parent process.
......
#ifndef ACTOR_PROXY_HPP
#define ACTOR_PROXY_HPP
#include "cppa/config.hpp"
#include <list>
#include <mutex>
#include <atomic>
#include <vector>
#include <memory>
#include <cstdint>
#include "cppa/actor.hpp"
#include "cppa/detail/abstract_actor.hpp"
namespace cppa {
class actor_proxy : public actor
class actor_proxy : public detail::abstract_actor<actor>
{
typedef detail::abstract_actor<actor> super;
// implemented in unicast_network.cpp
static void forward_message(const process_information_ptr&, const message&);
process_information_ptr m_parent;
public:
actor_proxy(std::uint32_t mid, process_information_ptr&& parent);
actor_proxy(std::uint32_t mid, const process_information_ptr& parent);
bool attach(attachable* ptr);
void detach(const attachable::token&);
// implemented in unicast_network.cpp
void enqueue(const message& msg);
void link_to(intrusive_ptr<actor>& other);
void unlink_from(intrusive_ptr<actor>& other);
bool remove_backlink(const intrusive_ptr<actor>& to);
bool remove_backlink(intrusive_ptr<actor>& to);
bool establish_backlink(const intrusive_ptr<actor>& to);
bool establish_backlink(intrusive_ptr<actor>& to);
const process_information& parent_process() const;
process_information_ptr parent_process_ptr() const;
private:
// implemented in unicast_network.cpp
static void forward_message(const process_information_ptr&, const message&);
process_information_ptr m_parent;
std::atomic<std::uint32_t> m_exit_reason;
std::mutex m_mtx;
std::vector<unique_attachable_ptr> m_attachables;
};
typedef intrusive_ptr<actor_proxy> actor_proxy_ptr;
......
#ifndef ATTACHABLE_HPP
#define ATTACHABLE_HPP
#include <memory>
#include <cstdint>
#include <typeinfo>
......@@ -46,8 +45,6 @@ class attachable
};
typedef std::unique_ptr<attachable> unique_attachable_ptr;
} // namespace cppa
#endif // ATTACHABLE_HPP
......@@ -8,6 +8,7 @@
#if defined(__APPLE__)
# define CPPA_MACOS
# define _GLIBCXX_HAS_GTHREADS
# define _GLIBCXX_HAS_GTHREADS
#elif defined(__GNUC__) && defined(__linux__)
# define CPPA_LINUX
#elif defined(WIN32)
......
......@@ -71,6 +71,16 @@ inline void link(actor_ptr&& other)
self()->link_to(other);
}
inline void link(actor_ptr& lhs, actor_ptr& rhs)
{
if (lhs && rhs) lhs->link_to(rhs);
}
inline void unlink(actor_ptr& lhs, actor_ptr& rhs)
{
if (lhs && rhs) lhs->unlink_from(rhs);
}
void monitor(actor_ptr& whom);
void monitor(actor_ptr&& whom);
......
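For orientation only (not part of the commit): the two free functions added above let a third party manage links between two running actors. A minimal, hedged sketch; worker is a placeholder behavior and the spawn() signature is assumed from the test code further down:

#include "cppa/cppa.hpp"

void worker() { /* receive loop omitted */ }      // placeholder behavior

void supervise()                                  // hypothetical helper
{
    cppa::actor_ptr a = cppa::spawn(worker);
    cppa::actor_ptr b = cppa::spawn(worker);
    cppa::link(a, b);    // each side now receives ":Exit" if the other terminates
    // ...
    cppa::unlink(a, b);  // dissolve the link again
}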
#ifndef ABSTRACT_ACTOR_HPP
#define ABSTRACT_ACTOR_HPP
#include "cppa/config.hpp"
#include <list>
#include <mutex>
#include <atomic>
#include <vector>
#include <memory>
#include <algorithm>
#include "cppa/atom.hpp"
#include "cppa/actor.hpp"
#include "cppa/context.hpp"
#include "cppa/attachable.hpp"
#include "cppa/exit_reason.hpp"
namespace cppa { namespace detail {
template<class Base>
class abstract_actor /*[[base_check]]*/ : public Base
{
typedef std::lock_guard<std::mutex> guard_type;
typedef std::unique_ptr<attachable> attachable_ptr;
// true if the associated thread has finished execution
std::atomic<std::uint32_t> m_exit_reason;
// guards access to m_exited, m_subscriptions and m_links
std::mutex m_mtx;
// manages actor links
std::list<actor_ptr> m_links;
std::vector<attachable_ptr> m_attachables;
// @pre m_mtx.locked()
bool exited() const
{
return m_exit_reason.load() != exit_reason::not_exited;
}
template<class List, typename Element>
bool unique_insert(List& lst, const Element& e)
{
auto end = lst.end();
auto i = std::find(lst.begin(), end, e);
if (i == end)
{
lst.push_back(e);
return true;
}
return false;
}
template<class List, typename Iterator, typename Element>
int erase_all(List& lst, Iterator begin, Iterator end, const Element& e)
{
auto i = std::find(begin, end, e);
if (i != end)
{
return 1 + erase_all(lst, lst.erase(i), end, e);
}
return 0;
}
template<class List, typename Element>
int erase_all(List& lst, const Element& e)
{
return erase_all(lst, lst.begin(), lst.end(), e);
}
protected:
//abstract_actor() : Base() { }
template<typename... Args>
abstract_actor(const Args&... args) : Base(args...), m_exit_reason(0) { }
void cleanup(std::uint32_t reason)
{
if (reason == exit_reason::not_exited) return;
decltype(m_links) mlinks;
decltype(m_attachables) mattachables;
// lifetime scope of guard
{
std::lock_guard<std::mutex> guard(m_mtx);
m_exit_reason = reason;
mlinks = std::move(m_links);
mattachables = std::move(m_attachables);
// make sure lists are definitely empty
m_links.clear();
m_attachables.clear();
}
if (!mlinks.empty())
{
actor_ptr mself = this;
// send exit messages
for (actor_ptr& aptr : mlinks)
{
aptr->enqueue(message(mself, aptr, atom(":Exit"), reason));
}
}
for (attachable_ptr& ptr : mattachables)
{
ptr->detach(reason);
}
}
bool link_to_impl(intrusive_ptr<actor>& other)
{
guard_type guard(m_mtx);
if (other && !exited() && other->establish_backlink(this))
{
m_links.push_back(other);
return true;
}
return false;
}
bool unlink_from_impl (intrusive_ptr<actor>& other)
{
std::lock_guard<std::mutex> guard(m_mtx);
if (other && !exited() && other->remove_backlink(this))
{
return erase_all(m_links, other) > 0;
}
return false;
}
public:
bool attach /*[[override]]*/ (attachable* ptr)
{
if (ptr == nullptr)
{
guard_type guard(m_mtx);
return m_exit_reason.load() == exit_reason::not_exited;
}
else
{
attachable_ptr uptr(ptr);
std::uint32_t reason;
// lifetime scope of guard
{
guard_type guard(m_mtx);
reason = m_exit_reason.load();
if (reason == exit_reason::not_exited)
{
m_attachables.push_back(std::move(uptr));
return true;
}
}
uptr->detach(reason);
return false;
}
}
void detach /*[[override]]*/ (const attachable::token& what)
{
attachable_ptr uptr;
// lifetime scope of guard
{
guard_type guard(m_mtx);
for (auto i = m_attachables.begin(); i != m_attachables.end(); ++i)
{
if ((*i)->matches(what))
{
uptr = std::move(*i);
m_attachables.erase(i);
// exit loop (and release lock)
break;
}
}
}
// uptr will be destroyed here, without locked mutex
}
void link_to /*[[override]]*/ (intrusive_ptr<actor>& other)
{
(void) link_to_impl(other);
}
void unlink_from /*[[override]]*/ (intrusive_ptr<actor>& other)
{
(void) unlink_from_impl(other);
}
bool remove_backlink /*[[override]]*/ (intrusive_ptr<actor>& other)
{
if (other && other != this)
{
std::lock_guard<std::mutex> guard(m_mtx);
return erase_all(m_links, other) > 0;//m_links.erase(other) > 0;
}
return false;
}
bool establish_backlink /*[[override]]*/ (intrusive_ptr<actor>& other)
{
bool result = false;
std::uint32_t reason = exit_reason::not_exited;
if (other && other != this)
{
// lifetime scope of guard
{
std::lock_guard<std::mutex> guard(m_mtx);
reason = m_exit_reason.load();
if (reason == exit_reason::not_exited)
{
result = unique_insert(m_links, other);
//result = m_links.insert(other).second;
}
}
}
if (reason != exit_reason::not_exited)
{
other->enqueue(message(this, other, atom(":Exit"), reason));
}
return result;
}
};
} } // namespace cppa::detail
#endif // ABSTRACT_ACTOR_HPP
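A hedged sketch (not part of the commit) of what the attach/cleanup contract above buys client code; it mirrors the exit_observer further down in mock_scheduler.cpp and assumes, as that code does, that an attachable subclass only needs a destructor:

#include <iostream>
#include "cppa/attachable.hpp"

// hypothetical attachable: runs when the actor it is attached to terminates,
// because abstract_actor::cleanup() moves all attachables out of the actor
// (outside the lock) and then detaches/destroys them
struct termination_logger : cppa::attachable
{
    ~termination_logger() { std::cout << "monitored actor terminated\n"; }
};

// usage (hypothetical): whom->attach(new termination_logger);
// abstract_actor takes ownership; attach() returns false and destroys the
// observer immediately if the actor has already exited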
#ifndef ACTOR_IMPL_UTIL_HPP
#define ACTOR_IMPL_UTIL_HPP
#include <atomic>
#include <memory>
#include "cppa/attachable.hpp"
#include "cppa/exit_reason.hpp"
namespace cppa { namespace detail {
template<class Guard, class List, class Mutex>
bool do_attach(std::atomic<std::uint32_t>& reason,
unique_attachable_ptr&& uptr,
List& ptr_list,
Mutex& mtx)
{
if (uptr == nullptr)
{
Guard guard(mtx);
return reason.load() == exit_reason::not_exited;
}
else
{
std::uint32_t reason_value;
// lifetime scope of guard
{
Guard guard(mtx);
reason_value = reason.load();
if (reason_value == exit_reason::not_exited)
{
ptr_list.push_back(std::move(uptr));
return true;
}
}
uptr->detach(reason_value);
return false;
}
}
template<class Guard, class List, class Mutex>
void do_detach(const attachable::token& what, List& ptr_list, Mutex& mtx)
{
Guard guard(mtx);
for (auto i = ptr_list.begin(); i != ptr_list.end(); ++i)
{
if ((*i)->matches(what))
{
ptr_list.erase(i);
return;
}
}
}
} } // namespace cppa::detail
#endif // ACTOR_IMPL_UTIL_HPP
......@@ -13,56 +13,28 @@
#include "cppa/context.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/detail/abstract_actor.hpp"
#include "cppa/detail/blocking_message_queue.hpp"
namespace cppa { namespace detail {
class converted_thread_context : public context
class converted_thread_context : public abstract_actor<context>
{
// true if the associated thread has finished execution
std::atomic<std::uint32_t> m_exit_reason;
typedef abstract_actor<context> super;
// mailbox implementation
detail::blocking_message_queue m_mailbox;
// guards access to m_exited, m_subscriptions and m_links
std::mutex m_mtx;
// manages actor links
std::list<actor_ptr> m_links;
std::vector<unique_attachable_ptr> m_attachables;
// @pre m_mtx is locked
inline bool exited() const
{
return m_exit_reason != exit_reason::not_exited;
}
public:
converted_thread_context();
message_queue& mailbox /*[[override]]*/ ();
// called if the converted thread finished execution
void cleanup(std::uint32_t reason = exit_reason::normal);
bool attach /*[[override]]*/ (attachable* ptr);
void detach /*[[override]]*/ (const attachable::token&);
void quit /*[[override]]*/ (std::uint32_t reason);
void link_to /*[[override]]*/ (intrusive_ptr<actor>& other);
void unlink_from /*[[override]]*/ (intrusive_ptr<actor>& other);
bool remove_backlink /*[[override]]*/ (const intrusive_ptr<actor>& to);
bool establish_backlink /*[[override]]*/ (const intrusive_ptr<actor>& to);
};
} } // namespace cppa::detail
......
......@@ -12,7 +12,7 @@ class mock_scheduler : public scheduler
void await_others_done();
void register_converted_context(context*);
void unregister_converted_context(context*);
//void unregister_converted_context(context*);
actor_ptr spawn(actor_behavior*, scheduling_hint);
};
......
......@@ -143,6 +143,20 @@ class intrusive_ptr : util::comparable<intrusive_ptr<T>, const T*>,
return compare(other.get());
}
template<class C>
intrusive_ptr<C> downcast() const
{
if (m_ptr) return dynamic_cast<C*>(const_cast<T*>(m_ptr));
return nullptr;
}
template<class C>
intrusive_ptr<C> upcast() const
{
if (m_ptr) return static_cast<C*>(const_cast<T*>(m_ptr));
return nullptr;
}
};
template<typename X, typename Y>
......
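Illustrative only: the new downcast/upcast helpers make it possible to recover a concrete proxy from a plain actor_ptr, which the networking layer needs. A minimal sketch; as_proxy is a hypothetical helper:

#include "cppa/actor.hpp"
#include "cppa/actor_proxy.hpp"

// hypothetical helper: yields a non-null pointer only if aptr actually
// refers to an actor_proxy (downcast() uses dynamic_cast internally)
cppa::actor_proxy_ptr as_proxy(const cppa::actor_ptr& aptr)
{
    return aptr.downcast<cppa::actor_proxy>();
}

// the reverse direction never fails and needs no runtime check:
// cppa::actor_ptr back = some_proxy.upcast<cppa::actor>();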
......@@ -43,7 +43,7 @@ class scheduler
* @brief Informs the scheduler that the converted context @p what
* finished execution.
*/
virtual void unregister_converted_context(context* what) = 0;
//virtual void unregister_converted_context(context* what) = 0;
/**
* @brief Wait until all other actors finished execution.
......
......@@ -68,4 +68,24 @@ void actor::leave(const group_ptr& what)
detach(group_token);
}
void actor::link_to(intrusive_ptr<actor>&& other)
{
link_to(static_cast<actor_ptr&>(other));
}
void actor::unlink_from(intrusive_ptr<actor>&& other)
{
unlink_from(static_cast<actor_ptr&>(other));
}
bool actor::remove_backlink(intrusive_ptr<actor>&& to)
{
return remove_backlink(static_cast<actor_ptr&>(to));
}
bool actor::establish_backlink(intrusive_ptr<actor>&& to)
{
return establish_backlink(static_cast<actor_ptr&>(to));
}
} // namespace cppa
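For orientation (not part of the commit): the rvalue overloads above simply bind the temporary to an lvalue reference and delegate, so callers can link against unnamed temporaries. A hypothetical fragment, using ping/pong from the test code below:

// link the calling actor directly to a freshly spawned one; the temporary
// actor_ptr returned by spawn() binds to link_to(intrusive_ptr<actor>&&)
cppa::self()->link_to(cppa::spawn(pong, cppa::spawn(ping)));
// without the && overloads this would not compile, because a temporary
// cannot bind to the existing link_to(intrusive_ptr<actor>&) overload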
......@@ -2,27 +2,17 @@
#include "cppa/message.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/detail/actor_impl_util.hpp"
using cppa::exit_reason::not_exited;
namespace {
constexpr auto s_kp = cppa::atom(":KillProxy");
typedef std::lock_guard<std::mutex> guard_type;
} // namespace <anonymous>
namespace cppa {
actor_proxy::actor_proxy(std::uint32_t mid, const process_information_ptr& pptr)
: actor(mid), m_parent(pptr), m_exit_reason(not_exited)
: super(mid), m_parent(pptr)
{
if (!m_parent) throw std::runtime_error("parent == nullptr");
}
actor_proxy::actor_proxy(std::uint32_t mid, process_information_ptr&& pptr)
: actor(mid), m_parent(std::move(pptr)), m_exit_reason(not_exited)
: super(mid), m_parent(std::move(pptr))
{
if (!m_parent) throw std::runtime_error("parent == nullptr");
}
......@@ -30,60 +20,80 @@ actor_proxy::actor_proxy(std::uint32_t mid, process_information_ptr&& pptr)
void actor_proxy::enqueue(const message& msg)
{
const any_tuple& content = msg.content();
if ( content.size() == 2
&& content.utype_info_at(0) == typeid(atom_value)
&& *reinterpret_cast<const atom_value*>(content.at(0)) == s_kp
&& content.utype_info_at(1) == typeid(std::uint32_t))
if ( content.size() > 0
&& content.utype_info_at(0) == typeid(atom_value))
{
decltype(m_attachables) mattachables;
auto r = *reinterpret_cast<const std::uint32_t*>(content.at(1));
// lifetime scope of guard
{
guard_type guard(m_mtx);
m_exit_reason = r;
mattachables = std::move(m_attachables);
m_attachables.clear();
}
for (auto i = mattachables.begin(); i != mattachables.end(); ++i)
auto val = *reinterpret_cast<const atom_value*>(content.at(0));
switch(val)
{
(*i)->detach(r);
case atom(":Link"):
{
auto s = msg.sender();
link_to(s);
return;
}
case atom(":Unlink"):
{
auto s = msg.sender();
unlink_from(s);
return;
}
case atom(":KillProxy"):
{
if ( content.size() == 2
&& content.utype_info_at(1) == typeid(std::uint32_t))
{
const void* reason = content.at(1);
cleanup(*reinterpret_cast<const std::uint32_t*>(reason));
}
return;
}
default: break;
}
}
else
{
forward_message(m_parent, msg);
}
}
bool actor_proxy::attach(attachable* ptr)
{
return detail::do_attach<guard_type>(m_exit_reason,
unique_attachable_ptr(ptr),
m_attachables,
m_mtx);
}
void actor_proxy::detach(const attachable::token& what)
{
detail::do_detach<guard_type>(what, m_attachables, m_mtx);
forward_message(m_parent, msg);
}
void actor_proxy::link_to(intrusive_ptr<actor>& other)
{
if (link_to_impl(other))
{
// causes remote actor to link to (proxy of) other
forward_message(m_parent, message(this, other, atom(":Link")));
//enqueue(message(this, other, atom(":Link")));
}
}
void actor_proxy::unlink_from(intrusive_ptr<actor>& other)
{
if (unlink_from_impl(other))
{
// causes remote actor to unlink from (proxy of) other
forward_message(m_parent, message(this, other, atom(":Unlink")));
//enqueue(message(this, other, atom(":Unlink")));
}
}
bool actor_proxy::remove_backlink(const intrusive_ptr<actor>& to)
bool actor_proxy::establish_backlink(intrusive_ptr<actor>& other)
{
return true;
bool result = super::establish_backlink(other);
if (result)
{
forward_message(m_parent, message(this, other, atom(":Link")));
}
//enqueue(message(to, this, atom(":Link")));
return result;
}
bool actor_proxy::establish_backlink(const intrusive_ptr<actor>& to)
bool actor_proxy::remove_backlink(intrusive_ptr<actor>& other)
{
return true;
bool result = super::remove_backlink(other);
if (result)
{
forward_message(m_parent, message(this, other, atom(":Unlink")));
}
//enqueue(message(to, this, atom(":Unlink")));
return result;
}
const process_information& actor_proxy::parent_process() const
......
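A hedged sketch (not part of the commit) of the message-level protocol that actor_proxy::enqueue implements above: ":Link", ":Unlink" and ":KillProxy" are consumed locally by the proxy, everything else is forwarded to the remote parent process. kill_proxy is a hypothetical helper and assumes the message(sender, receiver, args...) constructor used elsewhere in this commit:

#include "cppa/atom.hpp"
#include "cppa/message.hpp"
#include "cppa/actor_proxy.hpp"

// hypothetical: terminate a proxy locally, e.g. after its remote peer died;
// the reason must be a std::uint32_t so the type check in enqueue() matches
void kill_proxy(cppa::actor_proxy& proxy, std::uint32_t reason)
{
    proxy.enqueue(cppa::message(&proxy, &proxy, cppa::atom(":KillProxy"), reason));
}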
#include <iostream>
// for thread_specific_ptr
// needed unless the new keyword "thread_local" works in GCC
#include <boost/thread.hpp>
#include "cppa/context.hpp"
......@@ -6,24 +10,19 @@
#include "cppa/detail/converted_thread_context.hpp"
using std::cout;
using std::endl;
using cppa::detail::converted_thread_context;
namespace {
void cleanup_fun(cppa::context* what)
{
if (what)
{
auto converted = dynamic_cast<converted_thread_context*>(what);
if (converted)
{
converted->cleanup();
}
if (!what->deref()) delete what;
}
if (what && !what->deref()) delete what;
}
boost::thread_specific_ptr<cppa::context> m_this_context(cleanup_fun);
boost::thread_specific_ptr<cppa::context> s_this_context(cleanup_fun);
} // namespace <anonymous>
......@@ -41,18 +40,19 @@ void context::trap_exit(bool new_value)
context* unchecked_self()
{
return m_this_context.get();
return s_this_context.get();
}
context* self()
{
context* result = m_this_context.get();
if (!result)
context* result = s_this_context.get();
if (result == nullptr)
{
cout << "converted a native thread to an actor" << endl;
result = new converted_thread_context;
result->ref();
get_scheduler()->register_converted_context(result);
m_this_context.reset(result);
s_this_context.reset(result);
}
return result;
}
......@@ -60,12 +60,9 @@ context* self()
void set_self(context* ctx)
{
if (ctx) ctx->ref();
context* old = m_this_context.get();
m_this_context.reset(ctx);
if (old)
{
cleanup_fun(ctx);
}
context* old = s_this_context.get();
s_this_context.reset(ctx);
cleanup_fun(old);
}
} // namespace cppa
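Illustrative only: the thread-specific pointer above means a plain std::thread turns into an actor the first time it calls self(); the exit_observer registered by the scheduler (see mock_scheduler.cpp below) then keeps await_all_others_done() accurate. A minimal sketch assuming self() and await_all_others_done() are exported via cppa/cppa.hpp, as the tests suggest:

#include <thread>
#include "cppa/cppa.hpp"

int main()
{
    std::thread worker([] {
        // first self() call creates a converted_thread_context for this
        // thread and registers it with the scheduler
        (void) cppa::self();
        // ... the thread can now send and receive messages like any actor
    });
    worker.join();               // context is cleaned up on thread exit
    cppa::await_all_others_done();
    return 0;
}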
......@@ -3,153 +3,22 @@
#include "cppa/atom.hpp"
#include "cppa/exception.hpp"
#include "cppa/detail/actor_impl_util.hpp"
#include "cppa/detail/converted_thread_context.hpp"
namespace {
template<class List, typename Element>
bool unique_insert(List& lst, const Element& e)
{
auto end = lst.end();
auto i = std::find(lst.begin(), end, e);
if (i == end)
{
lst.push_back(e);
return true;
}
return false;
}
template<class List, typename Iterator, typename Element>
int erase_all(List& lst, Iterator from, Iterator end, const Element& e)
{
auto i = std::find(from, end, e);
if (i != end)
{
return 1 + erase_all(lst, lst.erase(i), end, e);
}
return 0;
}
template<class List, typename Element>
int erase_all(List& lst, const Element& e)
{
return erase_all(lst, lst.begin(), lst.end(), e);
}
typedef std::lock_guard<std::mutex> guard_type;
} // namespace <anonymous>
namespace cppa { namespace detail {
converted_thread_context::converted_thread_context()
: m_exit_reason(exit_reason::not_exited)
{
}
bool converted_thread_context::attach(attachable* ptr)
{
return detail::do_attach<guard_type>(m_exit_reason,
unique_attachable_ptr(ptr),
m_attachables,
m_mtx);
}
void converted_thread_context::detach(const attachable::token& what)
{
detail::do_detach<guard_type>(what, m_attachables, m_mtx);
}
void converted_thread_context::cleanup(std::uint32_t reason)
{
if (reason == exit_reason::not_exited) return;
decltype(m_links) mlinks;
decltype(m_attachables) mattachables;
// lifetime scope of guard
{
std::lock_guard<std::mutex> guard(m_mtx);
m_exit_reason = reason;
mlinks = std::move(m_links);
mattachables = std::move(m_attachables);
// make sure lists are definitely empty
m_links.clear();
m_attachables.clear();
}
actor_ptr mself = self();
// send exit messages
for (actor_ptr& aptr : mlinks)
{
aptr->enqueue(message(mself, aptr, atom(":Exit"), reason));
}
for (std::unique_ptr<attachable>& ptr : mattachables)
{
ptr->detach(reason);
}
}
void converted_thread_context::quit(std::uint32_t reason)
{
cleanup(reason);
super::cleanup(reason);
// actor_exited should not be caught, but if anyone does,
// the next call to self() must return a newly created instance
set_self(nullptr);
throw actor_exited(reason);
}
void converted_thread_context::link_to(intrusive_ptr<actor>& other)
{
std::lock_guard<std::mutex> guard(m_mtx);
if (other && !exited() && other->establish_backlink(this))
{
m_links.push_back(other);
//m_links.insert(other);
}
}
bool converted_thread_context::remove_backlink(const intrusive_ptr<actor>& other)
{
if (other && other != this)
{
std::lock_guard<std::mutex> guard(m_mtx);
return erase_all(m_links, other) > 0;//m_links.erase(other) > 0;
}
return false;
}
bool converted_thread_context::establish_backlink(const intrusive_ptr<actor>& other)
{
bool send_exit_message = false;
bool result = false;
if (other && other != this)
{
// lifetime scope of guard
{
std::lock_guard<std::mutex> guard(m_mtx);
if (!exited())
{
result = unique_insert(m_links, other);
//result = m_links.insert(other).second;
}
else
{
send_exit_message = true;
}
}
}
if (send_exit_message)
{
}
return result;
}
void converted_thread_context::unlink_from(intrusive_ptr<actor>& other)
void converted_thread_context::cleanup(std::uint32_t reason)
{
std::lock_guard<std::mutex> guard(m_mtx);
if (other && !exited() && other->remove_backlink(this))
{
erase_all(m_links, other);
//m_links.erase(other);
}
super::cleanup(reason);
}
message_queue& converted_thread_context::mailbox()
......
#include "cppa/config.hpp"
#include <set>
#include <map>
#include <thread>
#include <atomic>
#include <iostream>
// for thread_specific_ptr
// needed unless the new keyword "thread_local" works in GCC
#include <boost/thread.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include "cppa/message.hpp"
#include "cppa/context.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/attachable.hpp"
#include "cppa/invoke_rules.hpp"
#include "cppa/actor_behavior.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/to_uniform_name.hpp"
#include "cppa/detail/converted_thread_context.hpp"
using std::cout;
using std::endl;
namespace {
std::atomic<int> m_running_actors(0);
boost::mutex m_ra_mtx;
boost::condition_variable m_ra_cv;
boost::mutex s_ra_mtx;
boost::condition_variable s_ra_cv;
std::atomic<int> s_running_actors(0);
typedef boost::unique_lock<boost::mutex> guard_type;
void inc_actor_count()
{
++s_running_actors;
}
void dec_actor_count()
{
if (--s_running_actors <= 1)
{
guard_type guard(s_ra_mtx);
s_ra_cv.notify_all();
}
}
void run_actor(cppa::intrusive_ptr<cppa::context> m_self,
cppa::actor_behavior* behavior)
......@@ -29,17 +48,21 @@ void run_actor(cppa::intrusive_ptr<cppa::context> m_self,
if (behavior)
{
try { behavior->act(); }
catch(...) { }
catch (...) { }
try { behavior->on_exit(); }
catch(...) { }
catch (...) { }
delete behavior;
}
if (--m_running_actors <= 1)
dec_actor_count();
}
struct exit_observer : cppa::attachable
{
~exit_observer()
{
boost::mutex::scoped_lock lock(m_ra_mtx);
m_ra_cv.notify_all();
dec_actor_count();
}
}
};
} // namespace <anonymous>
......@@ -47,33 +70,28 @@ namespace cppa { namespace detail {
actor_ptr mock_scheduler::spawn(actor_behavior* ab, scheduling_hint)
{
++m_running_actors;
inc_actor_count();
intrusive_ptr<context> ctx(new detail::converted_thread_context);
boost::thread(run_actor, ctx, ab).detach();
return ctx;
}
void mock_scheduler::register_converted_context(context*)
{
++m_running_actors;
}
void mock_scheduler::unregister_converted_context(context*)
void mock_scheduler::register_converted_context(context* ctx)
{
if (--m_running_actors <= 1)
if (ctx)
{
boost::mutex::scoped_lock lock(m_ra_mtx);
m_ra_cv.notify_all();
inc_actor_count();
ctx->attach(new exit_observer);
}
}
void mock_scheduler::await_others_done()
{
auto expected = (unchecked_self() == nullptr) ? 0 : 1;
boost::mutex::scoped_lock lock(m_ra_mtx);
while (m_running_actors.load() > expected)
guard_type lock(s_ra_mtx);
while (s_running_actors.load() != expected)
{
m_ra_cv.wait(lock);
s_ra_cv.wait(lock);
}
}
......
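A standalone illustration (not from the commit) of the synchronization pattern the scheduler now uses: an atomic actor counter paired with a condition variable, where the decrement lives in a destructor (count_guard plays the role of exit_observer) so every exit path of an actor is covered:

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

std::atomic<int> s_running(0);
std::mutex s_mtx;
std::condition_variable s_cv;

// decrementing in the destructor covers normal returns and exceptions alike
struct count_guard
{
    count_guard() { ++s_running; }
    ~count_guard()
    {
        if (--s_running == 0)
        {
            std::lock_guard<std::mutex> guard(s_mtx);
            s_cv.notify_all();
        }
    }
};

void await_all_done()
{
    std::unique_lock<std::mutex> lock(s_mtx);
    s_cv.wait(lock, [] { return s_running.load() == 0; });
}

int main()
{
    std::thread t([] { count_guard g; /* actor body would run here */ });
    t.join();
    await_all_done();
    return 0;
}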
......@@ -25,7 +25,6 @@
#include "cppa/detail/actor_proxy_cache.hpp"
using std::cout;
using std::cerr;
using std::endl;
using cppa::detail::get_actor_proxy_cache;
......@@ -295,7 +294,7 @@ void mailman_loop()
{
bs << out_msg;
auto size32 = static_cast<std::uint32_t>(bs.size());
cout << "--> " << to_string(out_msg) << endl;
cout << "--> " << (to_string(out_msg) + "\n");
auto sent = ::send(peer, &size32, sizeof(size32), flags);
if (sent != -1)
{
......@@ -405,7 +404,7 @@ void post_office_loop(native_socket_t socket_fd,
read_from_socket(socket_fd, buf, buf_size);
binary_deserializer bd(buf, buf_size);
meta_msg->deserialize(&msg, &bd);
cout << "<-- " << to_string(msg) << endl;
cout << "<-- " << (to_string(msg) + "\n");
if ( msg.content().size() == 1
&& msg.content().utype_info_at(0) == atom_tinfo
&& *reinterpret_cast<const atom_value*>(msg.content().at(0))
......@@ -420,11 +419,12 @@ cout << "<-- " << to_string(msg) << endl;
// this message was sent from a proxy
sender->attach(new remote_observer(peer));
}
// don't "deliver" message
continue;
}
auto r = msg.receiver();
if (r) r->enqueue(msg);
else
{
auto r = msg.receiver();
if (r) r->enqueue(msg);
}
}
}
catch (std::ios_base::failure& e)
......@@ -535,6 +535,7 @@ void middle_man_loop(native_socket_t server_socket_fd,
children.clear();
// wait for handshake
barrier->wait();
//cout << "middle_man_loop finished\n";
}
} // namespace <anonymous>
......
#include "ping_pong.hpp"
#include "cppa/cppa.hpp"
#include "cppa/to_string.hpp"
namespace { int s_pongs = 0; }
......@@ -9,15 +10,16 @@ using namespace cppa;
void pong(actor_ptr ping_actor)
{
link(ping_actor);
bool done = false;
// kickoff
ping_actor << make_tuple(atom("Pong"), 0); // or: send(ping_actor, 0);
// invoke rules
auto pattern =
(
on<atom("Ping"), std::int32_t>(9) >> [&]()
on<atom("Ping"), std::int32_t>(9) >> []()
{
done = true;
// terminate with non-normal exit reason
// to force ping actor to quit
quit(exit_reason::user_defined);
},
on<atom("Ping"), std::int32_t>() >> [](int v)
{
......@@ -25,10 +27,7 @@ void pong(actor_ptr ping_actor)
}
);
// loop
while (!done) receive(pattern);
// terminate with non-normal exit reason
// to force ping actor to quit
quit(exit_reason::user_defined);
for (;;) receive(pattern);
}
void ping()
......@@ -37,11 +36,17 @@ void ping()
// invoke rule
auto pattern =
(
on<atom("Pong"), std::int32_t>() >> [](std::int32_t v)
on<atom("Pong"), std::int32_t>() >> [](std::int32_t value)
{
++s_pongs;
reply(atom("Ping"), v+1);
reply(atom("Ping"), value + 1);
},
others() >> []()
{
throw std::runtime_error( "unexpected message: "
+ to_string(last_received()));
}
);
// loop
for (;;) receive(pattern);
......
#include <string>
#include <iostream>
#include <boost/thread.hpp>
#include "test.hpp"
......@@ -13,6 +14,7 @@ namespace {
void client_part(const std::vector<std::string>& argv)
{
if (argv.size() != 2) throw std::logic_error("argv.size() != 2");
(void) self();
std::istringstream iss(argv[1]);
std::uint16_t port;
iss >> port;
......@@ -57,7 +59,7 @@ size_t test__remote_actor(const char* app_path, bool is_client,
std::string cmd;
{
std::ostringstream oss;
oss << app_path << " test__remote_actor " << port << " &>/dev/null";
oss << app_path << " test__remote_actor " << port;// << " &>/dev/null";
cmd = oss.str();
}
// execute client_part() in a separate process,
......@@ -66,6 +68,7 @@ size_t test__remote_actor(const char* app_path, bool is_client,
await_all_others_done();
CPPA_CHECK_EQUAL(pongs(), 5);
// wait until separate process (in sep. thread) finished execution
std::cout << "child.join()" << std::endl;
child.join();
return CPPA_TEST_RESULT;
}
......@@ -24,10 +24,17 @@ size_t test__spawn()
cerr << "unexpected message: " << to_string(last_received()) << endl;
CPPA_CHECK(false);
};
trap_exit(true);
auto pong_actor = spawn(pong, spawn(ping));
monitor(pong_actor);
link(pong_actor);
auto pattern =
(
on<atom(":Exit"), std::uint32_t>() >> [&] (std::uint32_t reason)
{
CPPA_CHECK_EQUAL(reason, exit_reason::user_defined);
CPPA_CHECK_EQUAL(last_received().sender(), pong_actor);
},
on<atom(":Down"), std::uint32_t>() >> [&] (std::uint32_t reason)
{
CPPA_CHECK_EQUAL(reason, exit_reason::user_defined);
......@@ -38,8 +45,8 @@ size_t test__spawn()
report_unexpected();
}
);
// wait for :Down message of pong
receive(pattern);
// wait for :Down and :Exit messages of pong
for (auto i = 0; i < 2; ++i) receive(pattern);
// wait for termination of all spawned actors
await_all_others_done();
// mailbox has to be empty
......