Commit c7f82fad authored by Dominik Charousset's avatar Dominik Charousset

maintenance

parent 8cdb0a4f
......@@ -84,6 +84,10 @@ class converted_thread_context : public abstract_actor<local_actor> {
nestable_receive_policy m_recv_policy;
// Fetches the next mailbox node for the receive policy; presumably
// pop() waits for a node — TODO confirm against intrusive queue impl.
inline recursive_queue_node* receive_node() {
    recursive_queue_node* next = m_mailbox.pop();
    return next;
}
};
} } // namespace cppa::detail
......
......@@ -107,6 +107,13 @@ class nestable_receive_policy {
return false;
}
// Drives one receive operation for @p client: first tries to match a
// message already sitting in the cache; if nothing there matched, keeps
// pulling nodes via client->receive_node() until one is invoked.
template<class Client>
void receive(Client* client, partial_function& fun) {
    if (!invoke_from_cache(client, fun)) {
        bool handled = false;
        while (!handled) {
            handled = invoke(client, client->receive_node(), fun);
        }
    }
}
private:
std::list<std::unique_ptr<recursive_queue_node> > m_cache;
......@@ -136,12 +143,6 @@ class nestable_receive_policy {
return hm_success;
}
case ordinary_message: {
break;
}
default: {
CPPA_CRITICAL("illegal result of filter_msg");
}
}
std::swap(client->m_last_dequeued, node.msg);
std::swap(client->m_last_sender, node.sender);
client->push_timeout();
......@@ -158,6 +159,11 @@ class nestable_receive_policy {
node.marked = false;
return hm_cache_msg;
}
default: {
CPPA_CRITICAL("illegal result of filter_msg");
}
}
}
};
......
......@@ -78,20 +78,11 @@ class yielding_actor : public abstract_scheduled_actor {
static void trampoline(void* _this);
void yield_until_not_empty();
util::fiber m_fiber;
std::function<void()> m_behavior;
nestable_receive_policy m_recv_policy;
// Blocking fetch: retries try_pop(), yielding the fiber whenever the
// mailbox is empty, until a node becomes available.
// NOTE(review): removed by this commit in favor of an out-of-line
// definition (see the declaration that replaces it below).
inline recursive_queue_node* receive_node() {
recursive_queue_node* e = m_mailbox.try_pop();
while (e == nullptr) {
// Mailbox empty: suspend this fiber until something is enqueued.
yield_until_not_empty();
e = m_mailbox.try_pop();
}
return e;
}
recursive_queue_node* receive_node();
};
......
......@@ -57,13 +57,7 @@ void converted_thread_context::enqueue(actor* sender, any_tuple msg) {
}
// NOTE(review): diff view — the hand-rolled cache-then-pop loop (removed
// lines) is replaced by the equivalent, shared
// nestable_receive_policy::receive() helper (added line).
void converted_thread_context::dequeue(partial_function& fun) { // override
if (m_recv_policy.invoke_from_cache(this, fun) == false) {
recursive_queue_node* e = m_mailbox.pop();
// Nodes from the mailbox must not carry the 'marked' flag yet.
CPPA_REQUIRE(e->marked == false);
while (m_recv_policy.invoke(this, e, fun) == false) {
e = m_mailbox.pop();
}
}
m_recv_policy.receive(this, fun);
}
void converted_thread_context::dequeue(behavior& bhvr) { // override
......
......@@ -66,26 +66,30 @@ void yielding_actor::trampoline(void* ptr_arg) {
reinterpret_cast<yielding_actor*>(ptr_arg)->run();
}
// NOTE(review): diff view — the old yield_until_not_empty() signature
// (removed) is shown alongside the new receive_node() that absorbs its
// blocking logic; several lines below likewise pair a removed line with
// its replacement (the '== false' check vs the positive check, and the
// bare 'return;').
void yielding_actor::yield_until_not_empty() {
recursive_queue_node* yielding_actor::receive_node() {
// Blocking pop: loop until the mailbox yields a node, cooperatively
// yielding the fiber while it stays empty.
recursive_queue_node* e = m_mailbox.try_pop();
while (e == nullptr) {
if (m_mailbox.can_fetch_more() == false) {
// Publish intent to block *before* re-checking the mailbox.
m_state.store(abstract_scheduled_actor::about_to_block);
// Full fence so the store above is ordered before the re-check,
// avoiding a lost-wakeup against a concurrent enqueue.
std::atomic_thread_fence(std::memory_order_seq_cst);
//CPPA_MEMORY_BARRIER();
// make sure mailbox is empty
if (m_mailbox.can_fetch_more() == false) {
if (m_mailbox.can_fetch_more()) {
// someone preempt us => continue
m_state.store(abstract_scheduled_actor::ready);
return;
}
else {
// wait until actor becomes rescheduled
yield(yield_state::blocked);
}
}
e = m_mailbox.try_pop();
}
return e;
}
// NOTE(review): diff view — the manual cache/receive loop (removed lines)
// is replaced by the shared nestable_receive_policy::receive() helper,
// matching the identical change in converted_thread_context::dequeue.
void yielding_actor::dequeue(partial_function& fun) {
if (m_recv_policy.invoke_from_cache(this, fun) == false) {
while (m_recv_policy.invoke(this, receive_node(), fun) == false) { }
}
m_recv_policy.receive(this, fun);
}
void yielding_actor::dequeue(behavior& bhvr) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment