Commit c7f82fad authored by Dominik Charousset

maintenance

parent 8cdb0a4f
@@ -84,6 +84,10 @@ class converted_thread_context : public abstract_actor<local_actor> {
 
     nestable_receive_policy m_recv_policy;
 
+    inline recursive_queue_node* receive_node() {
+        return m_mailbox.pop();
+    }
+
 };
 
 } } // namespace cppa::detail
...
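Review note: `receive_node()` is the per-client hook that the new `nestable_receive_policy::receive` template (next hunk) calls to fetch mail. A thread-mapped `converted_thread_context` can simply block in `m_mailbox.pop()`, while `yielding_actor` (further below) implements the same hook by yielding its fiber until the mailbox has been refilled.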
@@ -107,6 +107,13 @@ class nestable_receive_policy {
         return false;
     }
 
+    template<class Client>
+    void receive(Client* client, partial_function& fun) {
+        if (invoke_from_cache(client, fun) == false) {
+            while (invoke(client, client->receive_node(), fun) == false) { }
+        }
+    }
+
  private:
 
     std::list<std::unique_ptr<recursive_queue_node> > m_cache;
@@ -136,27 +143,26 @@ class nestable_receive_policy {
                 return hm_success;
             }
             case ordinary_message: {
-                break;
+                std::swap(client->m_last_dequeued, node.msg);
+                std::swap(client->m_last_sender, node.sender);
+                client->push_timeout();
+                node.marked = true;
+                if (fun(client->m_last_dequeued)) {
+                    client->m_last_dequeued.reset();
+                    client->m_last_sender.reset();
+                    return hm_success;
+                }
+                // no match (restore client members)
+                std::swap(client->m_last_dequeued, node.msg);
+                std::swap(client->m_last_sender, node.sender);
+                client->pop_timeout();
+                node.marked = false;
+                return hm_cache_msg;
             }
             default: {
                 CPPA_CRITICAL("illegal result of filter_msg");
            }
        }
-        std::swap(client->m_last_dequeued, node.msg);
-        std::swap(client->m_last_sender, node.sender);
-        client->push_timeout();
-        node.marked = true;
-        if (fun(client->m_last_dequeued)) {
-            client->m_last_dequeued.reset();
-            client->m_last_sender.reset();
-            return hm_success;
-        }
-        // no match (restore client members)
-        std::swap(client->m_last_dequeued, node.msg);
-        std::swap(client->m_last_sender, node.sender);
-        client->pop_timeout();
-        node.marked = false;
-        return hm_cache_msg;
    }
};
...
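Review note: `receive` now owns the drain-cache-then-pop loop that both `converted_thread_context::dequeue` and `yielding_actor::dequeue` previously duplicated (see the *.cpp hunks below). A minimal, self-contained sketch of this policy pattern; the types (`node`, `handler`, `client_t`) and the matched/cached logic are illustrative assumptions, not libcppa's real API:

```cpp
#include <functional>
#include <iostream>
#include <list>
#include <memory>

struct node { int msg; };                    // stand-in for recursive_queue_node
using handler = std::function<bool(int)>;    // stand-in for partial_function

class receive_policy {
 public:
    // the client only provides receive_node(); the policy owns cache + loop
    template<class Client>
    void receive(Client* client, handler& fun) {
        if (invoke_from_cache(fun) == false) {
            while (invoke(client->receive_node(), fun) == false) { }
        }
    }
 private:
    bool invoke_from_cache(handler& fun) {
        for (auto i = m_cache.begin(); i != m_cache.end(); ++i) {
            if (fun((*i)->msg)) { m_cache.erase(i); return true; }
        }
        return false;
    }
    bool invoke(std::unique_ptr<node> n, handler& fun) {
        if (fun(n->msg)) return true;
        m_cache.push_back(std::move(n));     // unmatched => keep for later
        return false;
    }
    std::list<std::unique_ptr<node>> m_cache;
};

struct client_t {
    std::list<int> mailbox{1, 2, 3};
    std::unique_ptr<node> receive_node() {   // the per-client hook
        auto n = std::make_unique<node>(node{mailbox.front()});
        mailbox.pop_front();
        return n;
    }
};

int main() {
    receive_policy policy;
    client_t self;
    handler only_even = [](int m) { return m % 2 == 0; };
    policy.receive(&self, only_even);        // caches 1, consumes 2
    handler anything = [](int m) { std::cout << "got " << m << '\n'; return true; };
    policy.receive(&self, anything);         // served from cache: prints "got 1"
}
```

Unmatched messages migrate into the policy's cache, so a later `receive` with a broader handler can still consume them in their original arrival order.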
@@ -78,20 +78,11 @@ class yielding_actor : public abstract_scheduled_actor {
 
     static void trampoline(void* _this);
 
-    void yield_until_not_empty();
-
     util::fiber m_fiber;
     std::function<void()> m_behavior;
     nestable_receive_policy m_recv_policy;
 
-    inline recursive_queue_node* receive_node() {
-        recursive_queue_node* e = m_mailbox.try_pop();
-        while (e == nullptr) {
-            yield_until_not_empty();
-            e = m_mailbox.try_pop();
-        }
-        return e;
-    }
+    recursive_queue_node* receive_node();
 
 };
...
@@ -57,13 +57,7 @@ void converted_thread_context::enqueue(actor* sender, any_tuple msg) {
 }
 
 void converted_thread_context::dequeue(partial_function& fun) { // override
-    if (m_recv_policy.invoke_from_cache(this, fun) == false) {
-        recursive_queue_node* e = m_mailbox.pop();
-        CPPA_REQUIRE(e->marked == false);
-        while (m_recv_policy.invoke(this, e, fun) == false) {
-            e = m_mailbox.pop();
-        }
-    }
+    m_recv_policy.receive(this, fun);
 }
 
 void converted_thread_context::dequeue(behavior& bhvr) { // override
...
@@ -66,26 +66,30 @@ void yielding_actor::trampoline(void* ptr_arg) {
     reinterpret_cast<yielding_actor*>(ptr_arg)->run();
 }
 
-void yielding_actor::yield_until_not_empty() {
-    if (m_mailbox.can_fetch_more() == false) {
-        m_state.store(abstract_scheduled_actor::about_to_block);
-        std::atomic_thread_fence(std::memory_order_seq_cst);
-        //CPPA_MEMORY_BARRIER();
-        // make sure mailbox is empty
-        if (m_mailbox.can_fetch_more() == false) {
-            m_state.store(abstract_scheduled_actor::ready);
-            return;
-        }
-        else {
-            yield(yield_state::blocked);
-        }
-    }
-}
+recursive_queue_node* yielding_actor::receive_node() {
+    recursive_queue_node* e = m_mailbox.try_pop();
+    while (e == nullptr) {
+        if (m_mailbox.can_fetch_more() == false) {
+            m_state.store(abstract_scheduled_actor::about_to_block);
+            std::atomic_thread_fence(std::memory_order_seq_cst);
+            //CPPA_MEMORY_BARRIER();
+            // make sure mailbox is empty
+            if (m_mailbox.can_fetch_more()) {
+                // someone preempt us => continue
+                m_state.store(abstract_scheduled_actor::ready);
+            }
+            else {
+                // wait until actor becomes rescheduled
+                yield(yield_state::blocked);
+            }
+        }
+        e = m_mailbox.try_pop();
+    }
+    return e;
+}
 
 void yielding_actor::dequeue(partial_function& fun) {
-    if (m_recv_policy.invoke_from_cache(this, fun) == false) {
-        while (m_recv_policy.invoke(this, receive_node(), fun) == false) { }
-    }
+    m_recv_policy.receive(this, fun);
 }
 
 void yielding_actor::dequeue(behavior& bhvr) {
...
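Review note: the interesting part of the new `receive_node()` is the block attempt. The actor publishes `about_to_block`, issues a `seq_cst` fence, and only then re-checks the mailbox, so an enqueue that raced with the state change is never lost. A minimal sketch of that fence-and-recheck idiom; the names (`try_block`, `enqueue`, `m_mailbox_nonempty`) and the CAS steps are illustrative assumptions, since the actual commit delegates blocking to `yield()` and the scheduler:

```cpp
#include <atomic>

enum state_t { ready, about_to_block, blocked };

std::atomic<state_t> m_state{ready};
std::atomic<bool>    m_mailbox_nonempty{false};

// consumer side: runs after try_pop() returned nullptr
bool try_block() {
    m_state.store(about_to_block);
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // re-check AFTER publishing about_to_block: a message enqueued before
    // the producer saw our state change must not be lost
    if (m_mailbox_nonempty.load()) {
        m_state.store(ready);                // someone preempted us => continue
        return false;                        // caller retries try_pop()
    }
    state_t expected = about_to_block;
    // CAS so a producer flipping us back to ready right now wins the race
    if (m_state.compare_exchange_strong(expected, blocked)) {
        return true;                         // stands in for yield(yield_state::blocked)
    }
    return false;
}

// producer side: mirrors the consumer's order (publish, fence, check)
void enqueue() {
    m_mailbox_nonempty.store(true);
    std::atomic_thread_fence(std::memory_order_seq_cst);
    state_t expected = about_to_block;
    if (m_state.compare_exchange_strong(expected, ready)) {
        return;                              // consumer re-checks and keeps running
    }
    if (expected == blocked) {
        m_state.store(ready);                // a real scheduler would resume the actor here
    }
}
```

Because both sides separate their write from their read with a full fence, at least one of them is guaranteed to observe the other's update, which is what makes the lock-free handshake safe.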