Commit 070eeb98 authored by Dominik Charousset

maintenance

parent 5aa929e6
@@ -264,4 +264,4 @@ unit_testing/test__tuple.cpp
unit_testing/test__type_list.cpp
unit_testing/test__uniform_type.cpp
unit_testing/test__yield_interface.cpp
cppa/detail/nestable_receive_policy.hpp
cppa/detail/receive_policy.hpp
@@ -40,6 +40,7 @@
#include "cppa/pattern.hpp"
#include "cppa/behavior.hpp"
#include "cppa/detail/disablable_delete.hpp"
#include "cppa/detail/receive_policy.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa {
@@ -49,6 +50,7 @@ namespace cppa {
*/
class abstract_event_based_actor : public detail::abstract_scheduled_actor {
friend class detail::receive_policy;
typedef detail::abstract_scheduled_actor super;
public:
@@ -71,15 +73,10 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor {
protected:
std::vector<std::unique_ptr<detail::recursive_queue_node> > m_cache;
enum handle_message_result {
drop_msg,
msg_handled,
cache_msg
};
auto handle_message(mailbox_element& node) -> handle_message_result;
inline behavior& current_behavior() {
CPPA_REQUIRE(m_behavior_stack.empty() == false);
return *(m_behavior_stack.back());
}
abstract_event_based_actor();
@@ -87,7 +84,8 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor {
typedef std::unique_ptr<behavior, detail::disablable_delete<behavior>>
stack_element;
std::vector<stack_element> m_loop_stack;
std::vector<stack_element> m_behavior_stack;
detail::receive_policy m_recv_policy;
// provoke compiler errors for usage of receive() and related functions
@@ -126,6 +124,20 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor {
receive(std::forward<Args>(args)...);
}
private:
// required by detail::receive_policy
static const detail::receive_policy_flag receive_flag = detail::rp_event_based;
inline void handle_timeout(behavior& bhvr) {
m_has_pending_timeout_request = false;
CPPA_REQUIRE(bhvr.timeout().valid());
bhvr.handle_timeout();
if (!m_behavior_stack.empty()) {
auto& next_bhvr = *(m_behavior_stack.back());
request_timeout(next_bhvr.timeout());
}
}
};
} // namespace cppa
@@ -41,7 +41,7 @@
#include "cppa/pattern.hpp"
#include "cppa/detail/yield_interface.hpp"
#include "cppa/detail/nestable_receive_policy.hpp"
#include "cppa/detail/receive_policy.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa {
@@ -69,7 +69,7 @@ class context_switching_actor : public scheduled_actor {
class context_switching_actor : public detail::abstract_scheduled_actor {
friend class detail::nestable_receive_policy;
friend class detail::receive_policy;
typedef detail::abstract_scheduled_actor super;
@@ -92,6 +92,7 @@ class context_switching_actor : public detail::abstract_scheduled_actor {
private:
// required by detail::receive_policy
static const detail::receive_policy_flag receive_flag = detail::rp_nestable;
detail::recursive_queue_node* receive_node();
inline void push_timeout() { ++m_active_timeout_id; }
inline void pop_timeout() { --m_active_timeout_id; }
@@ -102,7 +103,7 @@ class context_switching_actor : public detail::abstract_scheduled_actor {
// members
util::fiber m_fiber;
std::function<void()> m_behavior;
detail::nestable_receive_policy m_recv_policy;
detail::receive_policy m_recv_policy;
};
@@ -105,6 +105,11 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
}
}
inline void handle_timeout(behavior& bhvr) {
bhvr.handle_timeout();
reset_timeout();
}
bool m_has_pending_timeout_request;
std::uint32_t m_active_timeout_id;
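
The hunks above rely on `m_active_timeout_id` to invalidate stale timeouts: each timeout request is presumably stamped with the current id, `push_timeout`/`pop_timeout` bump and restore that id around nested receives, and a timeout whose id no longer matches is filtered as `expired_timeout_message`. A minimal sketch of that idea, with hypothetical message and filter types (the library's real `filter_msg` inspects full mailbox elements):

```cpp
#include <cassert>
#include <cstdint>

enum filter_result {
    ordinary_message,
    timeout_message,
    expired_timeout_message
};

// hypothetical stand-in for a mailbox element carrying a timeout id
struct timeout_msg { std::uint32_t id; };

struct timeout_filter {
    std::uint32_t m_active_timeout_id;
    timeout_filter() : m_active_timeout_id(0) { }
    // entering a nested receive invalidates pending timeouts ...
    void push_timeout() { ++m_active_timeout_id; }
    // ... and leaving it restores the previous id
    void pop_timeout() { --m_active_timeout_id; }
    filter_result filter(const timeout_msg& msg) const {
        // only a timeout stamped with the *current* id is still relevant
        return msg.id == m_active_timeout_id ? timeout_message
                                             : expired_timeout_message;
    }
};

int main() {
    timeout_filter f;
    timeout_msg msg = { f.m_active_timeout_id }; // stamped with current id
    f.push_timeout(); // nested receive begins: the old timeout expires
    assert(f.filter(msg) == expired_timeout_message);
    f.pop_timeout();  // nested receive ends: the timeout is valid again
    assert(f.filter(msg) == timeout_message);
}
```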
@@ -33,6 +33,7 @@
#include <list>
#include <memory>
#include <type_traits>
#include "cppa/behavior.hpp"
#include "cppa/partial_function.hpp"
@@ -41,7 +42,12 @@
namespace cppa { namespace detail {
class nestable_receive_policy {
enum receive_policy_flag {
rp_nestable,
rp_event_based
};
class receive_policy {
public:
@@ -50,16 +56,17 @@ class nestable_receive_policy {
hm_skip_msg,
hm_drop_msg,
hm_cache_msg,
hm_success
hm_msg_handled
};
template<class Client, class FunOrBehavior>
bool invoke_from_cache(Client* client, FunOrBehavior& fun) {
std::integral_constant<receive_policy_flag, Client::receive_flag> token;
auto i = m_cache.begin();
auto e = m_cache.end();
while (i != e) {
switch (this->handle_message(client, *(*i), fun)) {
case hm_success: {
switch (this->handle_message(client, *(*i), fun, token)) {
case hm_msg_handled: {
client->release_node(i->release());
m_cache.erase(i);
return true;
@@ -84,8 +91,9 @@ class nestable_receive_policy {
template<class Client, class FunOrBehavior>
bool invoke(Client* client, recursive_queue_node* node, FunOrBehavior& fun) {
switch (this->handle_message(client, *node, fun)) {
case hm_success: {
std::integral_constant<receive_policy_flag, Client::receive_flag> token;
switch (this->handle_message(client, *node, fun, token)) {
case hm_msg_handled: {
client->release_node(node);
return true;
}
@@ -116,20 +124,26 @@ class nestable_receive_policy {
private:
typedef std::integral_constant<receive_policy_flag, rp_nestable> rp_n;
typedef std::integral_constant<receive_policy_flag, rp_event_based> rp_eb;
std::list<std::unique_ptr<recursive_queue_node> > m_cache;
inline void handle_timeout(behavior& bhvr) {
bhvr.handle_timeout();
template<class Client>
inline void handle_timeout(Client* client, behavior& bhvr) {
client->handle_timeout(bhvr);
}
inline void handle_timeout(partial_function&) {
template<class Client>
inline void handle_timeout(Client*, partial_function&) {
CPPA_CRITICAL("handle_timeout(partial_function&)");
}
template<class Client, class FunOrBehavior>
handle_message_result handle_message(Client* client,
recursive_queue_node& node,
FunOrBehavior& fun) {
FunOrBehavior& fun,
rp_n) {
if (node.marked) {
return hm_skip_msg;
}
@@ -139,8 +153,8 @@ class nestable_receive_policy {
return hm_drop_msg;
}
case timeout_message: {
handle_timeout(fun);
return hm_success;
handle_timeout(client, fun);
return hm_msg_handled;
}
case ordinary_message: {
std::swap(client->m_last_dequeued, node.msg);
@@ -150,7 +164,7 @@ class nestable_receive_policy {
if (fun(client->m_last_dequeued)) {
client->m_last_dequeued.reset();
client->m_last_sender.reset();
return hm_success;
return hm_msg_handled;
}
// no match (restore client members)
std::swap(client->m_last_dequeued, node.msg);
@@ -159,9 +173,41 @@ class nestable_receive_policy {
node.marked = false;
return hm_cache_msg;
}
default: {
CPPA_CRITICAL("illegal result of filter_msg");
default: CPPA_CRITICAL("illegal result of filter_msg");
}
}
template<class Client, class FunOrBehavior>
handle_message_result handle_message(Client* client,
recursive_queue_node& node,
FunOrBehavior& fun,
rp_eb) {
CPPA_REQUIRE(node.marked == false);
switch (client->filter_msg(node.msg)) {
case normal_exit_signal:
case expired_timeout_message: {
return hm_drop_msg;
}
case timeout_message: {
handle_timeout(client, fun);
return hm_msg_handled;
}
case ordinary_message: {
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
if (fun(client->m_last_dequeued)) {
client->m_last_dequeued.reset();
client->m_last_sender.reset();
// we definitely don't have a pending timeout now
client->m_has_pending_timeout_request = false;
return hm_msg_handled;
}
// no match, restore members
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
return hm_cache_msg;
}
default: CPPA_CRITICAL("illegal result of filter_msg");
}
}
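
The renamed `receive_policy` keeps one copy of the cache/invoke machinery and selects between the nestable and event-based `handle_message` overloads at compile time: each client exposes a static `receive_flag`, which `invoke` wraps in a `std::integral_constant` token so that overload resolution picks the `rp_n` or `rp_eb` variant. A stripped-down sketch of that tag-dispatch technique, with simplified, hypothetical client types:

```cpp
#include <iostream>
#include <type_traits>

enum receive_policy_flag { rp_nestable, rp_event_based };

class receive_policy {
public:
    template<class Client>
    void invoke(Client* client) {
        // Client::receive_flag becomes part of a distinct *type*, so
        // overload resolution picks the right handle_message at compile time
        std::integral_constant<receive_policy_flag, Client::receive_flag> token;
        handle_message(client, token);
    }
private:
    typedef std::integral_constant<receive_policy_flag, rp_nestable> rp_n;
    typedef std::integral_constant<receive_policy_flag, rp_event_based> rp_eb;
    template<class Client>
    void handle_message(Client*, rp_n) {
        std::cout << "nestable: may mark, skip and cache messages\n";
    }
    template<class Client>
    void handle_message(Client*, rp_eb) {
        std::cout << "event-based: never sees marked messages\n";
    }
};

// hypothetical clients; the real ones also provide receive_node(),
// handle_timeout() and friends
struct nestable_client {
    static const receive_policy_flag receive_flag = rp_nestable;
};
struct event_based_client {
    static const receive_policy_flag receive_flag = rp_event_based;
};

int main() {
    receive_policy policy;
    nestable_client a;
    event_based_client b;
    policy.invoke(&a); // dispatches to the rp_n overload
    policy.invoke(&b); // dispatches to the rp_eb overload
}
```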
@@ -52,7 +52,7 @@
#include "cppa/intrusive/singly_linked_list.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
#include "cppa/detail/nestable_receive_policy.hpp"
#include "cppa/detail/receive_policy.hpp"
namespace cppa {
@@ -68,7 +68,7 @@ class thread_mapped_actor : public local_actor { };
class thread_mapped_actor : public abstract_actor<local_actor> {
friend class detail::nestable_receive_policy;
friend class detail::receive_policy;
typedef abstract_actor<local_actor> super;
@@ -88,14 +88,18 @@ class thread_mapped_actor : public abstract_actor<local_actor> {
private:
detail::nestable_receive_policy m_recv_policy;
detail::receive_policy m_recv_policy;
// required by detail::receive_policy
static const detail::receive_policy_flag receive_flag = detail::rp_nestable;
inline void push_timeout() { }
inline void pop_timeout() { }
inline detail::recursive_queue_node* receive_node() {
return m_mailbox.pop();
}
inline void handle_timeout(behavior& bhvr) {
bhvr.handle_timeout();
}
};
@@ -38,8 +38,8 @@
namespace cppa {
abstract_event_based_actor::abstract_event_based_actor() : super(super::blocked) {
}
abstract_event_based_actor::abstract_event_based_actor()
: super(super::blocked) { }
void abstract_event_based_actor::dequeue(behavior&) {
quit(exit_reason::unallowed_function_call);
@@ -49,47 +49,10 @@ void abstract_event_based_actor::dequeue(partial_function&) {
quit(exit_reason::unallowed_function_call);
}
auto abstract_event_based_actor::handle_message(mailbox_element& node) -> handle_message_result {
CPPA_REQUIRE(node.marked == false);
CPPA_REQUIRE(m_loop_stack.empty() == false);
auto& bhvr = *(m_loop_stack.back());
switch (filter_msg(node.msg)) {
case detail::normal_exit_signal:
case detail::expired_timeout_message:
return drop_msg;
case detail::timeout_message:
m_has_pending_timeout_request = false;
CPPA_REQUIRE(bhvr.timeout().valid());
bhvr.handle_timeout();
if (!m_loop_stack.empty()) {
auto& next_bhvr = *(m_loop_stack.back());
request_timeout(next_bhvr.timeout());
}
return msg_handled;
default:
break;
}
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
if ((bhvr.get_partial_function())(m_last_dequeued)) {
m_last_dequeued.reset();
m_last_sender.reset();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
return msg_handled;
}
// no match, restore members
std::swap(m_last_dequeued, node.msg);
std::swap(m_last_sender, node.sender);
return cache_msg;
}
resume_result abstract_event_based_actor::resume(util::fiber*) {
auto done_cb = [&]() {
m_state.store(abstract_scheduled_actor::done);
m_loop_stack.clear();
m_behavior_stack.clear();
on_exit();
};
self.set(this);
@@ -113,50 +76,14 @@ resume_result abstract_event_based_actor::resume(util::fiber*) {
}
}
else {
switch (handle_message(*e)) {
case drop_msg: {
release_node(e);
break; // nop
}
case msg_handled: {
release_node(e);
if (m_loop_stack.empty()) {
if (m_recv_policy.invoke(this, e, current_behavior())) {
// try to match cached message before receiving new ones
do {
if (m_behavior_stack.empty()) {
done_cb();
return resume_result::actor_done;
}
// try to match cached messages before receiving new ones
auto i = m_cache.begin();
while (i != m_cache.end()) {
switch (handle_message(*(*i))) {
case drop_msg: {
release_node(i->release());
i = m_cache.erase(i);
break;
}
case msg_handled: {
release_node(i->release());
m_cache.erase(i);
if (m_loop_stack.empty()) {
done_cb();
return resume_result::actor_done;
}
i = m_cache.begin();
break;
}
case cache_msg: {
++i;
break;
}
default: CPPA_CRITICAL("illegal result of handle_message");
}
}
break;
}
case cache_msg: {
m_cache.emplace_back(e);
break;
}
default: CPPA_CRITICAL("illegal result of handle_message");
} while (m_recv_policy.invoke_from_cache(this, current_behavior()));
}
}
}
@@ -33,13 +33,13 @@
namespace cppa {
event_based_actor::event_based_actor() {
m_loop_stack.reserve(2);
m_behavior_stack.reserve(2);
}
void event_based_actor::quit(std::uint32_t reason) {
if (reason == exit_reason::normal) {
cleanup(exit_reason::normal);
m_loop_stack.clear();
m_behavior_stack.clear();
}
else {
abstract_scheduled_actor::quit(reason);
@@ -54,12 +54,12 @@ void event_based_actor::do_become(behavior* bhvr, bool has_ownership) {
// always keep the latest element in the stack to prevent subtle errors,
// e.g., the addresses of all variables captured by a lambda expression
// calling become() would become invalid if we popped the behavior!
if (m_loop_stack.size() < 2) {
m_loop_stack.push_back(std::move(new_element));
if (m_behavior_stack.size() < 2) {
m_behavior_stack.push_back(std::move(new_element));
}
else {
m_loop_stack[0] = std::move(m_loop_stack[1]);
m_loop_stack[1] = std::move(new_element);
m_behavior_stack[0] = std::move(m_behavior_stack[1]);
m_behavior_stack[1] = std::move(new_element);
}
}
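
The two-slot logic in `do_become` exists because the behavior at the top of the stack may be the one currently executing: the lambda that called `become()` is owned by it, so destroying it mid-call would invalidate the lambda's captures. The sketch below, using hypothetical names, isolates that slot shuffle: hold at most two elements and overwrite slot 0 with slot 1 instead of ever popping the active behavior.

```cpp
#include <iostream>
#include <memory>
#include <vector>

// hypothetical stand-in for a behavior object
struct behavior {
    int id;
    explicit behavior(int i) : id(i) { }
    ~behavior() { std::cout << "behavior " << id << " destroyed\n"; }
};

typedef std::unique_ptr<behavior> stack_element;

// mirrors the do_become logic above: back() may be the behavior that is
// executing right now, so it must survive until the *next* become()
void do_become(std::vector<stack_element>& stack, stack_element new_element) {
    if (stack.size() < 2) {
        stack.push_back(std::move(new_element));
    }
    else {
        // slot 1 (the caller of become) stays alive; only the element
        // *before* it can be released safely
        stack[0] = std::move(stack[1]);
        stack[1] = std::move(new_element);
    }
}

int main() {
    std::vector<stack_element> stack;
    stack.reserve(2); // matches m_behavior_stack.reserve(2) above
    for (int i = 0; i < 4; ++i) {
        do_become(stack, stack_element(new behavior(i)));
    }
    std::cout << "behaviors 2 and 3 are still alive\n";
}
```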
@@ -33,8 +33,8 @@
namespace cppa {
void stacked_event_based_actor::unbecome() {
if (!m_loop_stack.empty()) {
m_loop_stack.pop_back();
if (!m_behavior_stack.empty()) {
m_behavior_stack.pop_back();
}
}
@@ -44,7 +44,7 @@ void stacked_event_based_actor::do_become(behavior* bhvr,
request_timeout(bhvr->timeout());
stack_element se{bhvr};
if (!has_ownership) se.get_deleter().disable();
m_loop_stack.push_back(std::move(se));
m_behavior_stack.push_back(std::move(se));
}
} // namespace cppa
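
`stack_element` pairs each behavior with `detail::disablable_delete`, a deleter that can be switched off (see `se.get_deleter().disable()` above) so the same `unique_ptr` type can hold both owned and non-owned behaviors. A minimal sketch of such a deleter, assuming a simplified interface:

```cpp
#include <iostream>
#include <memory>

// a deleter that can be switched off, so one unique_ptr type can express
// both "owns its pointee" and "merely references it"
template<typename T>
struct disablable_delete {
    bool enabled;
    disablable_delete() : enabled(true) { }
    void disable() { enabled = false; }
    void operator()(T* ptr) const { if (enabled) delete ptr; }
};

struct behavior { };

typedef std::unique_ptr<behavior, disablable_delete<behavior>> stack_element;

int main() {
    behavior not_owned; // owned by this scope, must not be deleted
    stack_element owning(new behavior);   // deleter fires on destruction
    stack_element referencing(&not_owned);
    referencing.get_deleter().disable();  // deleter becomes a no-op
    std::cout << "no double delete on scope exit\n";
}
```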
@@ -61,8 +61,7 @@ struct thread_pool_scheduler::worker {
job_ptr m_dummy;
std::thread m_thread;
worker(job_queue* jq, job_ptr dummy) : m_job_queue(jq), m_dummy(dummy) {
}
worker(job_queue* jq, job_ptr dummy) : m_job_queue(jq), m_dummy(dummy) { }
void start() {
m_thread = std::thread(&thread_pool_scheduler::worker_loop, this);
@@ -103,26 +102,17 @@ struct thread_pool_scheduler::worker {
if (result) {
return result;
}
# ifdef CPPA_USE_BOOST_THREADS
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(10);
boost::this_thread::sleep(timeout);
# else
std::this_thread::sleep_for(std::chrono::milliseconds(10));
# endif
}
}
void operator()() {
util::fiber fself;
scheduled_actor* job = nullptr;
auto fetch_pending = [&job]() -> scheduled_actor* {
job_ptr job = nullptr;
auto fetch_pending = [&job]() -> job_ptr {
CPPA_REQUIRE(job != nullptr);
scheduled_actor* result = nullptr;
if (job->chained_actor() != nullptr) {
result = static_cast<scheduled_actor*>(job->chained_actor().release());
}
return result;
auto ptr = job->chained_actor().release();
return ptr ? static_cast<scheduled_actor*>(ptr) : nullptr;
};
for (;;) {
job = aggressive_polling();
@@ -135,13 +125,13 @@ struct thread_pool_scheduler::worker {
if (job == m_dummy) {
// dummy of doom received ...
m_job_queue->push_back(job); // kill the next guy
return; // and say goodbye
return; // and say goodbye
}
else {
do {
switch (job->resume(&fself)) {
case resume_result::actor_done: {
scheduled_actor* pending = fetch_pending();
auto pending = fetch_pending();
if (!job->deref()) delete job;
std::atomic_thread_fence(std::memory_order_seq_cst);
dec_actor_count();
@@ -166,9 +156,8 @@ void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w) {
void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
scheduled_actor* dummy) {
typedef std::unique_ptr<thread_pool_scheduler::worker> worker_ptr;
std::vector<worker_ptr> workers;
size_t num_workers = std::max<size_t>(std::thread::hardware_concurrency() * 2, 8);
std::vector<std::unique_ptr<thread_pool_scheduler::worker> > workers;
size_t num_workers = std::max<size_t>(std::thread::hardware_concurrency() * 2, 4);
for (size_t i = 0; i < num_workers; ++i) {
workers.emplace_back(new worker(jqueue, dummy));
workers.back()->start();
@@ -188,6 +177,17 @@ void thread_pool_scheduler::start() {
void thread_pool_scheduler::stop() {
m_queue.push_back(&m_dummy);
m_supervisor.join();
// make sure the job queue is empty, because the destructor of m_queue
// would otherwise delete elements it shouldn't
auto ptr = m_queue.try_pop();
while (ptr != nullptr) {
if (ptr != &m_dummy) {
if (!ptr->deref()) delete ptr;
std::atomic_thread_fence(std::memory_order_seq_cst);
dec_actor_count();
}
// advance on every iteration, even when the dummy was popped;
// otherwise the loop would spin on the dummy forever
ptr = m_queue.try_pop();
}
super::stop();
}
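
`stop()` builds on the "dummy of doom" protocol visible in the worker loop above: the scheduler pushes a single dummy job, and every worker that pops it re-publishes it for the next worker before exiting, so one sentinel terminates the whole pool. A self-contained sketch of that protocol with hypothetical queue and job types (the real worker also resumes actors and handles chaining):

```cpp
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// a minimal blocking queue standing in for the scheduler's job_queue
template<typename T>
class blocking_queue {
    std::mutex m_mtx;
    std::condition_variable m_cv;
    std::deque<T> m_items;
public:
    void push_back(T item) {
        { std::lock_guard<std::mutex> guard(m_mtx); m_items.push_back(item); }
        m_cv.notify_one();
    }
    T pop() {
        std::unique_lock<std::mutex> lock(m_mtx);
        m_cv.wait(lock, [this] { return !m_items.empty(); });
        T result = m_items.front();
        m_items.pop_front();
        return result;
    }
};

struct job { bool is_dummy; };

// each worker that receives the dummy re-publishes it ("kill the next
// guy") and then exits, so one dummy terminates the whole pool
void worker_loop(blocking_queue<job*>* q) {
    for (;;) {
        job* j = q->pop();
        if (j->is_dummy) {
            q->push_back(j); // kill the next guy
            return;          // and say goodbye
        }
        // ... resume the actor here ...
    }
}

int main() {
    blocking_queue<job*> queue;
    job dummy = { true };
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i) workers.emplace_back(worker_loop, &queue);
    queue.push_back(&dummy); // one dummy of doom shuts down all four
    for (auto& w : workers) w.join();
    std::cout << "all workers terminated\n";
}
```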
@@ -205,7 +205,6 @@ actor_ptr thread_pool_scheduler::spawn_impl(scheduled_actor* what,
return std::move(ctx);
}
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* what) {
// do NOT push event-based actors to the queue on startup
return spawn_impl(what->attach_to_scheduler(this), false);
@@ -383,8 +383,6 @@ void foobar(const str_wrapper& x, const std::string& y) {
);
}
size_t test__spawn() {
using std::string;
CPPA_TEST(test__spawn);