Commit c0935b98 authored by Dominik Charousset's avatar Dominik Charousset

solved bug in chaining impl, fixes #83

parent 48ad408a
...@@ -80,7 +80,7 @@ class context_switching_actor : public detail::stacked_actor_mixin< ...@@ -80,7 +80,7 @@ class context_switching_actor : public detail::stacked_actor_mixin<
context_switching_actor(std::function<void()> fun); context_switching_actor(std::function<void()> fun);
resume_result resume(util::fiber* from); //override resume_result resume(util::fiber* from, actor_ptr& next_job); //override
scheduled_actor_type impl_type(); scheduled_actor_type impl_type();
......
...@@ -64,8 +64,10 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> { ...@@ -64,8 +64,10 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
if (d.valid()) { if (d.valid()) {
if (d.is_zero()) { if (d.is_zero()) {
// immediately enqueue timeout // immediately enqueue timeout
enqueue(nullptr, make_any_tuple(atom("TIMEOUT"), auto node = super::fetch_node(this,
++m_active_timeout_id)); make_any_tuple(atom("TIMEOUT"),
++m_active_timeout_id));
this->m_mailbox._push_back(node);
} }
else { else {
get_scheduler()->delayed_send( get_scheduler()->delayed_send(
...@@ -125,9 +127,7 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> { ...@@ -125,9 +127,7 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
return enqueue_node(super::fetch_node(sender, std::move(msg)), pending); return enqueue_node(super::fetch_node(sender, std::move(msg)), pending);
} }
bool chained_sync_enqueue(actor* sender, bool chained_sync_enqueue(actor* sender, message_id_t id, any_tuple msg) {
message_id_t id,
any_tuple msg) {
return enqueue_node(super::fetch_node(sender, std::move(msg), id), pending); return enqueue_node(super::fetch_node(sender, std::move(msg), id), pending);
} }
......
...@@ -101,7 +101,6 @@ class behavior_stack ...@@ -101,7 +101,6 @@ class behavior_stack
m_elements.erase(i); m_elements.erase(i);
} }
} }
cleanup();
id = empty() ? message_id_t() : m_elements.back().second; id = empty() ? message_id_t() : m_elements.back().second;
} while (!empty() && policy.invoke_from_cache(client, back(), id)); } while (!empty() && policy.invoke_from_cache(client, back(), id));
return true; return true;
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
namespace cppa { namespace detail { namespace cppa { namespace detail {
struct scheduled_actor_dummy : abstract_scheduled_actor { struct scheduled_actor_dummy : abstract_scheduled_actor {
resume_result resume(util::fiber*); resume_result resume(util::fiber*, actor_ptr&);
void quit(std::uint32_t); void quit(std::uint32_t);
void dequeue(behavior&); void dequeue(behavior&);
void dequeue(partial_function&); void dequeue(partial_function&);
......
...@@ -75,7 +75,7 @@ class event_based_actor : public detail::abstract_scheduled_actor { ...@@ -75,7 +75,7 @@ class event_based_actor : public detail::abstract_scheduled_actor {
*/ */
void dequeue_response(behavior&, message_id_t); void dequeue_response(behavior&, message_id_t);
resume_result resume(util::fiber*); //override resume_result resume(util::fiber*, actor_ptr&); //override
/** /**
* @brief Initializes the actor. * @brief Initializes the actor.
......
...@@ -63,8 +63,9 @@ class scheduled_actor : public local_actor { ...@@ -63,8 +63,9 @@ class scheduled_actor : public local_actor {
*/ */
scheduled_actor* next; scheduled_actor* next;
// called from worker thread // called from worker thread,
virtual resume_result resume(util::fiber* from) = 0; // actors sets next_job to its chained actor
virtual resume_result resume(util::fiber* from, actor_ptr& next_job) = 0;
void attach_to_scheduler(scheduler* sched, bool hidden); void attach_to_scheduler(scheduler* sched, bool hidden);
......
...@@ -88,7 +88,9 @@ scheduled_actor_type context_switching_actor::impl_type() { ...@@ -88,7 +88,9 @@ scheduled_actor_type context_switching_actor::impl_type() {
return context_switching_impl; return context_switching_impl;
} }
resume_result context_switching_actor::resume(util::fiber* from) { resume_result context_switching_actor::resume(util::fiber* from, actor_ptr& next_job) {
CPPA_REQUIRE(from != nullptr);
CPPA_REQUIRE(next_job == nullptr);
using namespace detail; using namespace detail;
scoped_self_setter sss{this}; scoped_self_setter sss{this};
for (;;) { for (;;) {
...@@ -100,9 +102,13 @@ resume_result context_switching_actor::resume(util::fiber* from) { ...@@ -100,9 +102,13 @@ resume_result context_switching_actor::resume(util::fiber* from) {
break; break;
} }
case yield_state::blocked: { case yield_state::blocked: {
m_chained_actor.swap(next_job);
CPPA_REQUIRE(m_chained_actor == nullptr);
switch (compare_exchange_state(abstract_scheduled_actor::about_to_block, switch (compare_exchange_state(abstract_scheduled_actor::about_to_block,
abstract_scheduled_actor::blocked)) { abstract_scheduled_actor::blocked)) {
case abstract_scheduled_actor::ready: { case abstract_scheduled_actor::ready: {
// restore variables
m_chained_actor.swap(next_job);
break; break;
} }
case abstract_scheduled_actor::blocked: { case abstract_scheduled_actor::blocked: {
......
...@@ -50,7 +50,7 @@ void event_based_actor::dequeue_response(behavior&, message_id_t) { ...@@ -50,7 +50,7 @@ void event_based_actor::dequeue_response(behavior&, message_id_t) {
quit(exit_reason::unallowed_function_call); quit(exit_reason::unallowed_function_call);
} }
resume_result event_based_actor::resume(util::fiber*) { resume_result event_based_actor::resume(util::fiber*, actor_ptr& next_job) {
# ifdef CPPA_DEBUG # ifdef CPPA_DEBUG
auto st = m_state.load(); auto st = m_state.load();
switch (st) { switch (st) {
...@@ -72,32 +72,54 @@ resume_result event_based_actor::resume(util::fiber*) { ...@@ -72,32 +72,54 @@ resume_result event_based_actor::resume(util::fiber*) {
m_bhvr_stack.clear(); m_bhvr_stack.clear();
m_bhvr_stack.cleanup(); m_bhvr_stack.cleanup();
on_exit(); on_exit();
CPPA_REQUIRE(next_job == nullptr);
next_job.swap(m_chained_actor);
}; };
CPPA_REQUIRE(next_job == nullptr);
try { try {
detail::recursive_queue_node* e; detail::recursive_queue_node* e = nullptr;
for (;;) { for (;;) {
e = m_mailbox.try_pop(); e = m_mailbox.try_pop();
if (!e) { if (e == nullptr) {
CPPA_REQUIRE(next_job == nullptr);
next_job.swap(m_chained_actor);
m_state.store(abstract_scheduled_actor::about_to_block); m_state.store(abstract_scheduled_actor::about_to_block);
std::atomic_thread_fence(std::memory_order_seq_cst);
if (m_mailbox.can_fetch_more() == false) { if (m_mailbox.can_fetch_more() == false) {
switch (compare_exchange_state( switch (compare_exchange_state(
abstract_scheduled_actor::about_to_block, abstract_scheduled_actor::about_to_block,
abstract_scheduled_actor::blocked)) { abstract_scheduled_actor::blocked)) {
case abstract_scheduled_actor::ready: { case abstract_scheduled_actor::ready:
// interrupted by arriving message
// restore members
CPPA_REQUIRE(m_chained_actor == nullptr);
next_job.swap(m_chained_actor);
break; break;
} case abstract_scheduled_actor::blocked:
case abstract_scheduled_actor::blocked: { // done setting actor to blocked
return resume_result::actor_blocked; return resume_result::actor_blocked;
} case abstract_scheduled_actor::pending:
default: CPPA_CRITICAL("illegal actor state"); CPPA_CRITICAL("illegal state: pending");
case abstract_scheduled_actor::done:
CPPA_CRITICAL("illegal state: done");
case abstract_scheduled_actor::about_to_block:
CPPA_CRITICAL("illegal state: about_to_block");
default:
CPPA_CRITICAL("invalid state");
}; };
} }
else {
m_state.store(abstract_scheduled_actor::ready);
CPPA_REQUIRE(m_chained_actor == nullptr);
next_job.swap(m_chained_actor);
}
} }
else if (m_bhvr_stack.invoke(m_policy, this, e)) { else if (m_bhvr_stack.invoke(m_policy, this, e)) {
if (m_bhvr_stack.empty()) { if (m_bhvr_stack.empty()) {
done_cb(); done_cb();
return resume_result::actor_done; return resume_result::actor_done;
} }
m_bhvr_stack.cleanup();
} }
} }
} }
......
...@@ -45,7 +45,7 @@ void scheduled_actor_dummy::do_become(behavior&&, bool) { } ...@@ -45,7 +45,7 @@ void scheduled_actor_dummy::do_become(behavior&&, bool) { }
void scheduled_actor_dummy::become_waiting_for(behavior&&, message_id_t) { } void scheduled_actor_dummy::become_waiting_for(behavior&&, message_id_t) { }
bool scheduled_actor_dummy::has_behavior() { return false; } bool scheduled_actor_dummy::has_behavior() { return false; }
resume_result scheduled_actor_dummy::resume(util::fiber*) { resume_result scheduled_actor_dummy::resume(util::fiber*,actor_ptr&) {
return resume_result::actor_blocked; return resume_result::actor_blocked;
} }
......
...@@ -110,15 +110,7 @@ struct thread_pool_scheduler::worker { ...@@ -110,15 +110,7 @@ struct thread_pool_scheduler::worker {
void operator()() { void operator()() {
util::fiber fself; util::fiber fself;
job_ptr job = nullptr; job_ptr job = nullptr;
auto fetch_pending = [&job]() -> job_ptr { actor_ptr next_job;
CPPA_REQUIRE(job != nullptr);
auto ptr = job->chained_actor().get();
if (ptr) {
job->chained_actor(nullptr);
return static_cast<scheduled_actor*>(ptr);
}
return nullptr;
};
for (;;) { for (;;) {
job = aggressive_polling(); job = aggressive_polling();
if (job == nullptr) { if (job == nullptr) {
...@@ -134,22 +126,20 @@ struct thread_pool_scheduler::worker { ...@@ -134,22 +126,20 @@ struct thread_pool_scheduler::worker {
} }
else { else {
do { do {
switch (job->resume(&fself)) { next_job.reset();
case resume_result::actor_done: { if (job->resume(&fself, next_job) == resume_result::actor_done) {
auto pending = fetch_pending(); bool hidden = job->is_hidden();
bool hidden = job->is_hidden(); job->deref();
job->deref(); //std::atomic_thread_fence(std::memory_order_seq_cst);
std::atomic_thread_fence(std::memory_order_seq_cst); if (!hidden) dec_actor_count();
if (!hidden) dec_actor_count(); }
job = pending; if (next_job) {
break; job = static_cast<job_ptr>(next_job.get());
} //get_scheduler()->printer()->enqueue(job, make_any_tuple("fast-forwarded execution (chained actor)\n"));
case resume_result::actor_blocked: {
job = fetch_pending();
}
} }
else job = nullptr;
} }
while (job); while (job); // loops until next_job was nullptr
} }
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment