Commit 1620a761 authored by neverlord's avatar neverlord

iterating over mailbox cache instead of using mailbox.pop()

parent e7a5f306
......@@ -186,6 +186,8 @@ class any_tuple
return any_tuple{simple_view(std::forward<T>(value), token)};
}
void reset();
private:
cow_ptr<detail::abstract_tuple> m_vals;
......
......@@ -89,7 +89,9 @@ class cow_ptr
return *this;
}
inline T* get() { return detached_ptr(); }
inline void reset(T* value = nullptr) { m_ptr.reset(value); }
inline T* get() { return (m_ptr) ? detached_ptr() : nullptr; }
inline T& operator*() { return *detached_ptr(); }
......
......@@ -184,8 +184,10 @@ auto abstract_scheduled_actor::dq(queue_node_iterator iter,
}
default: break;
}
m_last_dequeued = node->msg;
m_last_sender = node->sender;
std::swap(m_last_dequeued, node->msg);
std::swap(m_last_sender, node->sender);
//m_last_dequeued = node->msg;
//m_last_sender = node->sender;
// make sure no timeout is handled incorrectly
++m_active_timeout_id;
// lifetime scope of qguard
......@@ -193,17 +195,22 @@ auto abstract_scheduled_actor::dq(queue_node_iterator iter,
// make sure nested receives do not process this node again
queue_node_guard qguard{node.get()};
// try to invoke given function
if (rules(node->msg))
if (rules(m_last_dequeued))
{
// client erases node later (keep it marked until it's removed)
qguard.release();
// these members are only valid during invocation
m_last_dequeued.reset();
m_last_sender.reset();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
return dq_done;
}
}
// no match
// no match (restore members)
--m_active_timeout_id;
std::swap(m_last_dequeued, node->msg);
std::swap(m_last_sender, node->sender);
return dq_indeterminate;
}
......
......@@ -66,6 +66,11 @@ any_tuple& any_tuple::operator=(any_tuple&& other)
return *this;
}
/// @brief Resets this tuple to the empty tuple.
///
/// Installs the result of s_empty_tuple() (presumably a shared static
/// empty-tuple instance -- TODO confirm) into the cow_ptr instead of
/// nullptr, so member functions that dereference m_vals (e.g. size())
/// stay safe to call after a reset.
void any_tuple::reset()
{
m_vals.reset(s_empty_tuple());
}
size_t any_tuple::size() const
{
return m_vals->size();
......
......@@ -76,7 +76,11 @@ void converted_thread_context::dequeue(partial_function& rules) /*override*/
{
for ( ; iter != mbox_end; ++iter)
{
if (dq(iter, rules)) return;
if (dq(iter, rules))
{
m_mailbox.cache().erase(iter);
return;
}
}
iter = m_mailbox.fetch_more();
}
......@@ -103,6 +107,7 @@ void converted_thread_context::dequeue(behavior& rules) /*override*/
}
}
while (dq(iter, rules.get_partial_function()) == false);
m_mailbox.cache().erase(iter);
}
else
{
......@@ -138,17 +143,22 @@ bool converted_thread_context::dq(queue_node_iterator iter,
{
return false;
}
m_last_dequeued = node->msg;
m_last_sender = node->sender;
std::swap(m_last_dequeued, node->msg);
std::swap(m_last_sender, node->sender);
{
queue_node_guard qguard{node.get()};
if (rules(node->msg))
if (rules(m_last_dequeued))
{
// client calls erase(iter)
qguard.release();
m_mailbox.cache().erase(iter);
m_last_dequeued.reset();
m_last_sender.reset();
return true;
}
}
// no match (restore members)
std::swap(m_last_dequeued, node->msg);
std::swap(m_last_sender, node->sender);
return false;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment