Commit 136c5494 authored by neverlord

maintenance

parent 9916b74e
@@ -231,17 +231,20 @@ class single_reader_queue
     {
         if (m_stack.compare_exchange_weak(e, 0))
         {
-            auto insert_pos = m_cache.before_end();
+            // temporary list to convert LIFO to FIFO order
+            cache_type tmp;
             while (e)
             {
                 // next iteration element
                 pointer next = e->next;
-                // insert e to private cache (convert to LIFO order)
-                m_cache.insert_after(insert_pos, e);
-                //m_cache.insert(iter, unique_value_ptr{e});
+                // prepend to tmp (reverses the stack's LIFO order to FIFO)
+                tmp.push_front(e);
                 // next iteration
                 e = next;
             }
-            if (iter) *iter = insert_pos;
+            if (iter) *iter = m_cache.before_end();
+            m_cache.splice_after(m_cache.before_end(), std::move(tmp));
             return true;
         }
         // next iteration
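For context, the pattern behind this hunk: producers push onto a lock-free stack, so a consumer that takes the whole stack receives the elements newest-first; prepending each element to a scratch list while walking the chain restores arrival (FIFO) order before splicing it into the private cache in one step. A minimal standalone sketch of that idea, using a hand-rolled `node` type and a plain `exchange` instead of the `compare_exchange_weak` loop the actual class uses:

```cpp
#include <atomic>

// illustrative stand-in for the queue's node type
struct node { int value; node* next = nullptr; };

std::atomic<node*> m_stack{nullptr}; // producers push here (LIFO)

// Take every node currently on the shared stack and return them in
// FIFO order by reversing the chain while walking it, mirroring the
// patch's tmp.push_front() loop.
node* fetch_all_fifo()
{
    node* e = m_stack.exchange(nullptr); // grab the whole LIFO chain
    node* head = nullptr;
    while (e)
    {
        node* next = e->next; // remember the rest of the chain
        e->next = head;       // prepend: newest-first becomes oldest-first
        head = e;
        e = next;
    }
    return head;
}
```

Building the reversed chain in a temporary and splicing it once also lets `*iter` capture `m_cache.before_end()` before the splice, which marks exactly where the newly fetched elements begin.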
@@ -208,6 +208,7 @@ class singly_linked_list
     {
         if (next == m_tail) m_tail = pos.ptr();
         pos->next = next->next;
+        next->next = nullptr;
     }
     return next;
 }
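This one-line fix matters because the removed node is handed back to the caller: without clearing `next->next`, the detached node would keep a dangling pointer into the list, and any owning wrapper that deletes a chain starting at the returned node would free elements still linked into the container. A sketch of the idea with simplified, illustrative names (the real `singly_linked_list` works with iterators, not raw pointers):

```cpp
// illustrative node type, not the library's
struct node { int value; node* next = nullptr; };

// Unlink the node following 'pos' and hand it to the caller.
// Clearing next->next is the point of the patch: the detached node
// must not retain a pointer into the list it no longer belongs to.
node* take_after(node* pos, node*& tail)
{
    node* next = pos->next;
    if (next != nullptr)
    {
        if (next == tail) tail = pos; // removing the last element moves the tail
        pos->next = next->next;       // bridge the gap
        next->next = nullptr;         // fully detach the removed node
    }
    return next;
}
```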
@@ -103,36 +103,45 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
     }
     auto mbox_end = m_mailbox.cache().end();
     auto rm_fun = [this](queue_node& node) { return handle_message(node); };
-    for (;;)
-    {
-        try
+    try
+    {
+        for (;;)
         {
             auto iter = m_mailbox.cache().remove_first(rm_fun, m_mailbox_pos);
             if (iter == mbox_end)
             {
+                // try fetch more
                 m_mailbox_pos = m_mailbox.cache().before_end();
-                // try fetch more
                 if (m_mailbox.can_fetch_more() == false)
                 {
                     m_state.store(abstract_scheduled_actor::about_to_block);
                     CPPA_MEMORY_BARRIER();
-                    if (   m_mailbox.can_fetch_more()
-                        || compare_exchange_state(abstract_scheduled_actor::about_to_block,
-                                                  abstract_scheduled_actor::blocked) != abstract_scheduled_actor::blocked)
+                    if (m_mailbox.can_fetch_more() == false)
                     {
-                        // someone preempt us
-                        m_mailbox.try_fetch_more();
-                    }
-                    else
-                    {
-                        // try again next time
-                        return;
+                        switch (compare_exchange_state(abstract_scheduled_actor::about_to_block,
+                                                       abstract_scheduled_actor::blocked))
+                        {
+                            case abstract_scheduled_actor::ready:
+                            {
+                                // someone preempted us
+                                break;
+                            }
+                            case abstract_scheduled_actor::blocked:
+                            {
+                                // done
+                                return;
+                            }
+                            default: exit(7); // illegal state
+                        };
+                    }
                 }
-                else
-                {
-                    m_mailbox.try_fetch_more();
-                }
+                m_mailbox.try_fetch_more();
             }
+            else if (m_loop_stack.empty())
+            {
+                cleanup(exit_reason::normal);
+                done_cb();
+                return;
+            }
             else
             {
@@ -140,25 +149,16 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
                 m_mailbox_pos = m_mailbox.cache().before_begin();
             }
         }
-        catch (actor_exited& what)
-        {
-            cleanup(what.reason());
-            done_cb();
-            return;
-        }
-        catch (...)
-        {
-            cleanup(exit_reason::unhandled_exception);
-            done_cb();
-            return;
-        }
-        if (m_loop_stack.empty())
-        {
-            cleanup(exit_reason::normal);
-            done_cb();
-            return;
-        }
     }
+    catch (actor_exited& what)
+    {
+        cleanup(what.reason());
+    }
+    catch (...)
+    {
+        cleanup(exit_reason::unhandled_exception);
+    }
+    done_cb();
 }
 void abstract_event_based_actor::on_exit()
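The switch introduced above makes the consumer side of a two-phase blocking handshake explicit: the actor first announces `about_to_block`, re-checks the mailbox, and only then tries to commit the transition to `blocked` with a compare-and-swap; a producer that enqueues concurrently flips the state back to `ready`, which makes the CAS fail and keeps the actor running. A self-contained sketch of both sides, with the mailbox reduced to an atomic counter (an assumption for illustration, not the real mailbox type):

```cpp
#include <atomic>

enum actor_state { ready, about_to_block, blocked };

std::atomic<actor_state> m_state{ready};
std::atomic<int> m_pending{0}; // stand-in for the real mailbox

// Consumer: returns true if the actor may sleep, false if new work
// arrived during the handshake and it must keep running.
bool try_block()
{
    m_state.store(about_to_block);           // phase 1: announce intent
    if (m_pending.load() != 0) return false; // re-check after announcing
    actor_state expected = about_to_block;
    // phase 2: commit; fails iff a producer reset us to 'ready'
    return m_state.compare_exchange_strong(expected, blocked);
}

// Producer: enqueue work, then abort a consumer that is between
// phases by flipping about_to_block back to ready.
void enqueue_one()
{
    m_pending.fetch_add(1);
    actor_state expected = about_to_block;
    m_state.compare_exchange_strong(expected, ready);
}
```

Under this protocol the exchange can only ever observe `ready` (preempted, keep going) or `blocked` (committed, stop); the `default: exit(7)` branch simply asserts that any other outcome means the state machine was corrupted.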
@@ -142,7 +142,7 @@ void abstract_scheduled_actor::request_timeout(util::duration const& d)
     }
 }
-auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result
+auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result
 {
     if (   msg.size() == 2
         && msg.type_at(0) == t_atom_ui32_types[0]
@@ -194,11 +194,11 @@ auto abstract_scheduled_actor::dq(queue_node& node,
     std::swap(m_last_sender, node.sender);
     //m_last_dequeued = node.msg;
     //m_last_sender = node.sender;
-    // make sure no timeout is handled incorrectly
+    // make sure no timeout is handled incorrectly in a nested receive
     ++m_active_timeout_id;
     // lifetime scope of qguard
     {
-        // make sure nested received do not process this node again
+        // make sure nested receives do not process this node again
         queue_node_guard qguard{&node};
         // try to invoke given function
         if (rules(m_last_dequeued))
@@ -213,7 +213,7 @@ auto abstract_scheduled_actor::dq(queue_node& node,
         return dq_done;
     }
-    // no match (restore members)
+    // no match, restore members
     --m_active_timeout_id;
     std::swap(m_last_dequeued, node.msg);
     std::swap(m_last_sender, node.sender);
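The comments touched in this hunk describe a swap-in/swap-out protocol: `dq` moves the node's message into `m_last_dequeued` (and its sender into `m_last_sender`) for the duration of the match attempt, and on a non-match swaps them back so the node returns to the cache untouched; bumping `m_active_timeout_id` while the handler runs keeps a nested receive from treating a stale timeout message as current. A reduced sketch of the restore-on-failure part, with illustrative names (the real code passes `any_tuple` messages and a partial-function rule set):

```cpp
#include <string>
#include <utility>

struct message { std::string payload; }; // stand-in for any_tuple

struct actor_sketch
{
    message m_last_dequeued; // exposed to the handler, as in dq()

    // Swap the node's message in, run the handler, and swap it back
    // if nothing matched, leaving the node exactly as it was.
    template <class Handler>
    bool try_handle(message& node_msg, Handler handler)
    {
        std::swap(m_last_dequeued, node_msg); // move in without copying
        if (handler(m_last_dequeued))
        {
            return true; // matched: caller may now discard the node
        }
        std::swap(m_last_dequeued, node_msg); // no match: restore the node
        return false;
    }
};
```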