Commit 136c5494 authored by neverlord

maintenance

parent 9916b74e
@@ -231,17 +231,20 @@ class single_reader_queue
         {
             if (m_stack.compare_exchange_weak(e, 0))
             {
-                auto insert_pos = m_cache.before_end();
+                // temporary list to convert LIFO to FIFO order
+                cache_type tmp;
                 while (e)
                 {
                     // next iteration element
                     pointer next = e->next;
-                    // insert e to private cache (convert to LIFO order)
-                    m_cache.insert_after(insert_pos, e);
+                    tmp.push_front(e);
                     //m_cache.insert(iter, unique_value_ptr{e});
                     // next iteration
                     e = next;
                 }
-                if (iter) *iter = insert_pos;
+                if (iter) *iter = m_cache.before_end();
+                m_cache.splice_after(m_cache.before_end(), std::move(tmp));
                 return true;
             }
             // next iteration
......
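Reviewer note (not part of the commit): the hunk above drains the producer stack, which holds elements in LIFO order, into a temporary list via push_front and then splices that list onto the end of the consumer-side cache, so messages come out in FIFO order. Below is a minimal standalone sketch of the same idea under the assumption that std::atomic and std::forward_list stand in for the project's m_stack and cache_type; all names in the sketch are illustrative, not taken from the source.

#include <atomic>
#include <forward_list>
#include <iostream>

// Sketch only: illustrates the LIFO-to-FIFO drain with standard containers.
struct node { int value; node* next; };

// producers push onto a lock-free LIFO stack
std::atomic<node*> stack_head{nullptr};

void push(int value)
{
    auto n = new node{value, stack_head.load()};
    // retry until the new node becomes the head
    while (!stack_head.compare_exchange_weak(n->next, n)) { }
}

// single consumer: grab the whole stack and append it to 'cache' in FIFO order
void fetch_new_data(std::forward_list<int>& cache)
{
    node* e = stack_head.exchange(nullptr);
    std::forward_list<int> tmp; // temporary list to convert LIFO to FIFO order
    while (e)
    {
        node* next = e->next;
        tmp.push_front(e->value); // reverses the stack order
        delete e;
        e = next;
    }
    // find the position before end() so tmp is appended at the back
    auto pos = cache.before_begin();
    for (auto i = cache.begin(); i != cache.end(); ++i) ++pos;
    cache.splice_after(pos, std::move(tmp));
}

int main()
{
    push(1); push(2); push(3);
    std::forward_list<int> cache;
    fetch_new_data(cache);
    for (int v : cache) std::cout << v << ' '; // prints 1 2 3
}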
@@ -208,6 +208,7 @@ class singly_linked_list
     {
         if (next == m_tail) m_tail = pos.ptr();
         pos->next = next->next;
+        next->next = nullptr;
     }
     return next;
 }
......
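Reviewer note (not part of the commit): the single added line detaches the removed element from the remainder of the list, presumably so the node handed back to the caller no longer points into elements that are still linked. A small sketch of an erase-after step with the same post-condition, using hand-rolled types rather than the project's singly_linked_list; the names are illustrative.

#include <cassert>

struct list_node { int value; list_node* next; };

// unlink and return the element following 'pos'; the removed node's
// next pointer is cleared so it cannot be used to reach the rest of the list
list_node* erase_after(list_node* pos, list_node*& tail)
{
    list_node* next = pos->next;
    if (next)
    {
        if (next == tail) tail = pos; // removing the last element moves the tail
        pos->next = next->next;
        next->next = nullptr;         // detach the removed node
    }
    return next;
}

int main()
{
    list_node c{3, nullptr};
    list_node b{2, &c};
    list_node a{1, &b};
    list_node* tail = &c;
    list_node* removed = erase_after(&a, tail);
    assert(removed == &b);
    assert(removed->next == nullptr); // no stale link into the list
    assert(a.next == &c && tail == &c);
}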
@@ -103,36 +103,45 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
}
auto mbox_end = m_mailbox.cache().end();
auto rm_fun = [this](queue_node& node) { return handle_message(node); };
for (;;)
{
try
{
for (;;)
{
auto iter = m_mailbox.cache().remove_first(rm_fun, m_mailbox_pos);
if (iter == mbox_end)
{
// try fetch more
m_mailbox_pos = m_mailbox.cache().before_end();
// try fetch more
if (m_mailbox.can_fetch_more() == false)
{
m_state.store(abstract_scheduled_actor::about_to_block);
CPPA_MEMORY_BARRIER();
if ( m_mailbox.can_fetch_more()
|| compare_exchange_state(abstract_scheduled_actor::about_to_block,
abstract_scheduled_actor::blocked ) != abstract_scheduled_actor::blocked)
if (m_mailbox.can_fetch_more() == false)
{
switch (compare_exchange_state(abstract_scheduled_actor::about_to_block,
abstract_scheduled_actor::blocked))
{
case abstract_scheduled_actor::ready:
{
// someone preempt us
m_mailbox.try_fetch_more();
break;
}
else
case abstract_scheduled_actor::blocked:
{
// try again next time
// done
return;
}
default: exit(7); // illegal state
};
}
}
else
{
m_mailbox.try_fetch_more();
}
else if (m_loop_stack.empty())
{
cleanup(exit_reason::normal);
done_cb();
return;
}
else
{
@@ -140,25 +149,16 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
m_mailbox_pos = m_mailbox.cache().before_begin();
}
}
}
catch (actor_exited& what)
{
cleanup(what.reason());
done_cb();
return;
}
catch (...)
{
cleanup(exit_reason::unhandled_exception);
done_cb();
return;
}
if (m_loop_stack.empty())
{
cleanup(exit_reason::normal);
done_cb();
return;
}
}
}
void abstract_event_based_actor::on_exit()
......
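Reviewer note (not part of the commit): the rewritten resume() loop replaces the combined can_fetch_more-or-CAS condition with an explicit switch on the result of compare_exchange_state, so "a message arrived while we were about to block" (state flipped back to ready) and "we are now blocked" are handled as distinct cases. Below is a minimal sketch of that handshake between the consumer and a producer using std::atomic directly. The state names mirror the diff; the mailbox flag, the scheduling hook, and the compare_exchange_state helper (assumed here to return the state in effect after the attempt) are my own placeholders, not the project's implementation.

#include <atomic>

enum actor_state { ready, about_to_block, blocked };

std::atomic<int>  state{ready};
std::atomic<bool> mailbox_has_messages{false};

// assumed semantics: returns 'desired' if the exchange succeeded,
// otherwise the state that was actually found
int compare_exchange_state(int expected, int desired)
{
    return state.compare_exchange_strong(expected, desired) ? desired : expected;
}

// consumer side: called when the mailbox cache looks empty;
// returns true if the actor blocked and the caller should stop resuming it
bool try_block()
{
    state.store(about_to_block);
    if (mailbox_has_messages.load())
    {
        state.store(ready); // a message arrived in the meantime; keep running
        return false;
    }
    switch (compare_exchange_state(about_to_block, blocked))
    {
        case ready:   return false; // a producer preempted us; fetch more and continue
        case blocked: return true;  // successfully blocked; wait to be rescheduled
        default:      return false; // the diff treats this as an illegal state (exit(7))
    }
}

// producer side: enqueue a message and wake the consumer if it was about to block
void enqueue()
{
    mailbox_has_messages.store(true);
    int expected = about_to_block;
    if (state.compare_exchange_strong(expected, ready))
    {
        // the consumer had announced it would block: reschedule it here
    }
}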
@@ -142,7 +142,7 @@ void abstract_scheduled_actor::request_timeout(util::duration const& d)
     }
 }
-auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result
+auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result
 {
     if (   msg.size() == 2
         && msg.type_at(0) == t_atom_ui32_types[0]
@@ -194,11 +194,11 @@ auto abstract_scheduled_actor::dq(queue_node& node,
     std::swap(m_last_sender, node.sender);
     //m_last_dequeued = node.msg;
     //m_last_sender = node.sender;
-    // make sure no timeout is handled incorrectly
+    // make sure no timeout is handled incorrectly in a nested receive
     ++m_active_timeout_id;
     // lifetime scope of qguard
     {
-        // make sure nested received do not process this node again
+        // make sure nested receives do not process this node again
         queue_node_guard qguard{&node};
         // try to invoke given function
         if (rules(m_last_dequeued))
@@ -213,7 +213,7 @@ auto abstract_scheduled_actor::dq(queue_node& node,
             return dq_done;
         }
     }
-    // no match (restore members)
+    // no match, restore members
     --m_active_timeout_id;
     std::swap(m_last_dequeued, node.msg);
     std::swap(m_last_sender, node.sender);
......
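Reviewer note (not part of the commit): the comment edits above spell out what m_active_timeout_id is for: the actor tags each armed timeout with a counter and ignores timeout messages whose id no longer matches, so a timeout requested for an outer receive cannot fire while a nested receive is processing a message. A small sketch of that counter trick; the types and names below are illustrative, not the project's.

#include <cstdint>
#include <iostream>

struct timeout_msg { std::uint32_t id; };

class receiver
{
 public:
    // arming a (possibly nested) timeout bumps the counter and tags the message
    timeout_msg arm_timeout() { return timeout_msg{++m_active_timeout_id}; }

    // only the most recently armed timeout is still valid
    bool is_current(timeout_msg const& msg) const
    {
        return msg.id == m_active_timeout_id;
    }

 private:
    std::uint32_t m_active_timeout_id = 0;
};

int main()
{
    receiver r;
    timeout_msg outer = r.arm_timeout(); // armed for the outer receive
    timeout_msg inner = r.arm_timeout(); // a nested receive bumps the id
    std::cout << std::boolalpha
              << r.is_current(outer) << '\n'  // false: the stale timeout is dropped
              << r.is_current(inner) << '\n'; // true: the current timeout fires
}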