Commit e7a5f306 authored by neverlord's avatar neverlord

RIP detail::intermediate

parent 6660002d
......@@ -30,7 +30,6 @@ libcppa_la_SOURCES = \
src/fiber.cpp \
src/group.cpp \
src/group_manager.cpp \
src/intermediate.cpp \
src/invokable.cpp \
src/local_actor.cpp \
src/mailman.cpp \
......@@ -108,7 +107,6 @@ nobase_library_include_HEADERS = \
cppa/detail/get_behavior.hpp \
cppa/detail/group_manager.hpp \
cppa/detail/implicit_conversions.hpp \
cppa/detail/intermediate.hpp \
cppa/detail/invokable.hpp \
cppa/detail/list_member.hpp \
cppa/detail/mailman.hpp \
......
......@@ -25,7 +25,6 @@ unit_testing/test__type_list.cpp
cppa/util/compare_tuples.hpp
cppa/get.hpp
cppa/detail/tdata.hpp
cppa/detail/intermediate.hpp
cppa/detail/invokable.hpp
cppa/on.hpp
unit_testing/test__serialization.cpp
......@@ -154,7 +153,6 @@ cppa/detail/yield_interface.hpp
src/yield_interface.cpp
cppa/detail/abstract_scheduled_actor.hpp
src/abstract_scheduled_actor.cpp
src/intermediate.cpp
src/invokable.cpp
cppa/detail/thread_pool_scheduler.hpp
src/thread_pool_scheduler.cpp
......
......@@ -64,17 +64,29 @@ class abstract_actor : public Base
struct queue_node
{
queue_node* next;
queue_node* next; // intrusive next pointer
bool marked; // denotes if this node is currently processed
actor_ptr sender;
any_tuple msg;
queue_node() : next(nullptr) { }
queue_node(actor* from, any_tuple&& content)
: next(nullptr), sender(from), msg(std::move(content)) { }
queue_node(actor* from, any_tuple const& content)
: next(nullptr), sender(from), msg(content) { }
queue_node() : next(nullptr), marked(false) { }
queue_node(actor* from, any_tuple content)
: next(nullptr), marked(false), sender(from), msg(std::move(content))
{
}
};
// Scope guard that marks a queue_node as "currently processed" for its
// lifetime, so nested receives do not handle the same node twice.
struct queue_node_guard
{
queue_node* m_node;
// marks the node immediately on construction
queue_node_guard(queue_node* ptr) : m_node(ptr) { ptr->marked = true; }
// keeps the node marked beyond the guard's lifetime
// (caller erases the node from the cache later)
inline void release() { m_node = nullptr; }
// unmarks the node unless release() was called
~queue_node_guard() { if (m_node) m_node->marked = false; }
};
typedef std::unique_ptr<queue_node> queue_node_ptr;
typedef intrusive::single_reader_queue<queue_node> mailbox_type;
typedef typename mailbox_type::unique_value_ptr queue_node_ptr;
typedef typename mailbox_type::cache_type mailbox_cache_type;
typedef typename mailbox_cache_type::iterator queue_node_iterator;
bool attach(attachable* ptr) /*override*/
{
......@@ -173,7 +185,7 @@ class abstract_actor : public Base
protected:
intrusive::single_reader_queue<queue_node> m_mailbox;
mailbox_type m_mailbox;
template<typename T>
inline queue_node* fetch_node(actor* sender, T&& msg)
......
......@@ -52,7 +52,6 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
typedef detail::abstract_scheduled_actor super;
typedef super::queue_node queue_node;
typedef super::queue_node_buffer queue_node_buffer;
public:
......@@ -90,9 +89,11 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
typedef std::unique_ptr<behavior, detail::disablable_delete<behavior>>
stack_element;
queue_node_buffer m_buffer;
std::vector<stack_element> m_loop_stack;
// current position in mailbox
mailbox_cache_type::iterator m_mailbox_pos;
// provoke compiler errors for usage of receive() and related functions
/**
......@@ -136,7 +137,7 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
private:
void handle_message(queue_node_ptr& node);
bool handle_message(queue_node_iterator iter);
};
......
......@@ -85,11 +85,6 @@ class behavior
m_fun(value);
}
inline detail::intermediate* get_intermediate(any_tuple const& value)
{
return m_fun.get_intermediate(value);
}
inline partial_function& get_partial_function()
{
return m_fun;
......
......@@ -60,8 +60,9 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
delegate m_enqueue_to_scheduler;
typedef abstract_actor super;
typedef super::queue_node_guard queue_node_guard;
typedef super::queue_node queue_node;
typedef intrusive::singly_linked_list<queue_node> queue_node_buffer;
typedef super::queue_node_ptr queue_node_ptr;
enum dq_result
{
......@@ -80,9 +81,7 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
filter_result filter_msg(any_tuple const& msg);
dq_result dq(queue_node_ptr& node,
partial_function& rules,
queue_node_buffer& buffer);
auto dq(queue_node_iterator node, partial_function& rules) -> dq_result;
bool has_pending_timeout()
{
......
......@@ -95,9 +95,7 @@ class converted_thread_context : public abstract_actor<local_actor>
};
// returns true if node->msg was accepted by rules
bool dq(queue_node_ptr& node,
partial_function& rules,
queue_node_buffer& buffer);
bool dq(queue_node_iterator node, partial_function& rules);
throw_on_exit_result throw_on_exit(any_tuple const& msg);
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef INTERMEDIATE_HPP
#define INTERMEDIATE_HPP
#include <utility>
namespace cppa { namespace detail {
// Type-erased handler whose arguments have already been bound;
// call invoke() to run it.
class intermediate
{
// non-copyable: an intermediate may hold bound arguments that
// are consumed by invoke()
intermediate(intermediate const&) = delete;
intermediate& operator=(intermediate const&) = delete;
public:
intermediate() = default;
// defined out-of-line (intermediate.cpp)
virtual ~intermediate();
// runs the stored functor with its bound arguments
virtual void invoke() = 0;
};
/*
 * Invokes a stored functor with a pre-bound "view" argument.
 * @tparam Impl Function or functor type.
 * @tparam View Type of the argument passed to the functor on invoke().
 */
template<typename Impl, typename View = void>
class intermediate_impl : public intermediate
{

    Impl m_impl; // wrapped functor
    View m_view; // argument forwarded to m_impl on each invoke()

 public:

    template<typename F, typename V>
    intermediate_impl(F&& fun, V&& vw)
        : intermediate()
        , m_impl(std::forward<F>(fun))
        , m_view(std::forward<V>(vw)) { }

    // calls the functor with the stored view
    virtual void invoke() /*override*/ { m_impl(m_view); }

};
/*
 * Specialization for nullary functors: no view argument is stored,
 * invoke() simply calls the functor.
 */
template<typename Impl>
class intermediate_impl<Impl, void> : public intermediate
{

    Impl m_impl; // wrapped functor

 public:

    // takes ownership by moving the functor in
    intermediate_impl(Impl&& fun) : m_impl(std::move(fun)) { }

    // copies the functor
    intermediate_impl(Impl const& fun) : m_impl(fun) { }

    virtual void invoke() /*override*/ { m_impl(); }

};
} } // namespace cppa::detail
#endif // INTERMEDIATE_HPP
......@@ -45,7 +45,6 @@
#include "cppa/util/callable_trait.hpp"
#include "cppa/detail/matches.hpp"
#include "cppa/detail/intermediate.hpp"
namespace cppa { namespace detail {
......@@ -72,88 +71,6 @@ class invokable
// Suppress type checking.
virtual bool unsafe_invoke(any_tuple& value) const;
// Prepare this invokable.
virtual intermediate* get_intermediate(any_tuple& value);
// Prepare this invokable and suppress type checking.
virtual intermediate* get_unsafe_intermediate(any_tuple& value);
};
/*
 * Stores a functor together with pattern-matched arguments for
 * deferred invocation via the intermediate interface.
 * @tparam Fun Function or functor
 * @tparam FunArgs Type list of functor parameters *without* any qualifiers
 * @tparam TupleTypes Element types of the tuple produced by the pattern match
 */
template<typename Fun, class FunArgs, class TupleTypes>
struct iimpl : intermediate
{
typedef Fun functor_type;
// cow_tuple type capable of holding the matched arguments
typedef typename cow_tuple_from_type_list<TupleTypes>::type tuple_type;
functor_type m_fun;
// default-constructed tuple used to reset m_args after invocation
tuple_type m_default_args;
// arguments bound by prepare(), consumed by invoke()
tuple_type m_args;
template<typename F> iimpl(F&& fun) : m_fun(std::forward<F>(fun)) { }
void invoke()
{
util::apply_tuple(m_fun, m_args);
// "forget" arguments after invocation
m_args = m_default_args;
}
// binds the matched tuple and returns this; nullptr if the cast failed
inline intermediate* prepare(option<tuple_type>&& tup)
{
if (tup)
{
m_args = std::move(*tup);
return this;
}
return nullptr;
}
// immediate invocation; returns false if the tuple cast failed
inline bool operator()(option<tuple_type>&& tup) const
{
if (tup)
{
util::apply_tuple(m_fun, *tup);
return true;
}
return false;
}
};
// Specialization for handlers that take no arguments; the policy
// only reports via bool whether the pattern matched.
template<typename Fun, class TupleTypes>
struct iimpl<Fun, util::type_list<>, TupleTypes> : intermediate
{
    typedef Fun functor_type;
    functor_type m_fun;
    template<typename F> iimpl(F&& fun) : m_fun(std::forward<F>(fun)) { }
    void invoke() { m_fun(); }
    // result passed by policy
    inline intermediate* prepare(bool matched)
    {
        if (matched) return this;
        return nullptr;
    }
    // result passed by policy
    inline bool operator()(bool matched) const
    {
        if (!matched) return false;
        m_fun();
        return true;
    }
};
// Specialization for handlers that take the whole message as one any_tuple.
template<typename Fun, class TupleTypes>
struct iimpl<Fun, util::type_list<any_tuple>, TupleTypes> : intermediate
{
typedef Fun functor_type;
functor_type m_fun;
// argument bound by prepare(), consumed by invoke()
any_tuple m_arg;
template<typename F> iimpl(F&& fun) : m_fun(std::forward<F>(fun)) { }
// invokes m_fun, then resets m_arg to drop the stored tuple
void invoke() { m_fun(m_arg); m_arg = any_tuple{}; }
// binds the message; always succeeds for this specialization
inline intermediate* prepare(any_tuple arg)
{
m_arg = std::move(arg);
return this;
}
// immediate invocation; always succeeds for this specialization
inline bool operator()(any_tuple arg) const
{
m_fun(arg);
return true;
}
};
enum mapping_policy
......@@ -177,13 +94,17 @@ struct pattern_policy
{
return matches(value, m_pattern);
}
any_tuple map(any_tuple& value) const
template<typename Fun>
bool call(Fun const& fun, any_tuple& value) const
{
return std::move(value);
fun(value);
return true;
}
any_tuple map_unsafe(any_tuple& value) const
template<typename Fun>
bool call_unsafe(Fun const& fun, any_tuple& value) const
{
return std::move(value);
fun(value);
return true;
}
};
......@@ -201,16 +122,27 @@ struct pattern_policy<map_to_option, Pattern>
{
return matches(value, m_pattern);
}
auto map(any_tuple& value) const
-> decltype(moving_tuple_cast(value, m_pattern))
template<typename Fun>
bool call(Fun const& fun, any_tuple& value) const
{
auto result = moving_tuple_cast(value, m_pattern);
return result;
if (result)
{
util::apply_tuple(fun, *result);
return true;
}
return false;
}
auto map_unsafe(any_tuple& value) const
-> decltype(unsafe_tuple_cast(value, m_pattern))
template<typename Fun>
bool call_unsafe(Fun const& fun, any_tuple& value) const
{
auto result = unsafe_tuple_cast(value, m_pattern);
if (result)
{
return unsafe_tuple_cast(value, m_pattern);
util::apply_tuple(fun, *result);
return true;
}
return false;
}
};
......@@ -228,13 +160,25 @@ struct pattern_policy<map_to_bool, Pattern>
{
return matches(value, m_pattern);
}
bool map(any_tuple& value) const
template<typename Fun>
bool call(Fun const& fun, any_tuple& value) const
{
if (could_invoke(value))
{
return could_invoke(value);
fun();
return true;
}
bool map_unsafe(any_tuple& value) const
return false;
}
template<typename Fun>
bool call_unsafe(Fun const& fun, any_tuple& value) const
{
return could_invoke(value);
if (could_invoke(value))
{
fun();
return true;
}
return false;
}
};
......@@ -248,38 +192,42 @@ struct dummy_policy
{
return true;
}
inline any_tuple map(any_tuple& value) const
template<typename Fun>
inline bool call(Fun const& fun, any_tuple&) const
{
return std::move(value);
fun();
return true;
}
inline any_tuple map_unsafe(any_tuple& value) const
template<typename Fun>
inline bool call_unsafe(Fun const& fun, any_tuple&) const
{
return std::move(value);
fun();
return true;
}
};
template<class IntermediateImpl, class Policy>
template<typename Fun, class Policy>
struct invokable_impl : public invokable
{
IntermediateImpl m_ii;
Fun m_fun;
Policy m_policy;
template<typename Arg0, typename... Args>
invokable_impl(Arg0&& arg0, Args&&... args)
: m_ii(std::forward<Arg0>(arg0))
: m_fun(std::forward<Arg0>(arg0))
, m_policy(std::forward<Args>(args)...)
{
}
bool invoke(any_tuple& value) const
{
return m_ii(m_policy.map(value));
return m_policy.call(m_fun, value);
}
bool unsafe_invoke(any_tuple& value) const
{
return m_ii(m_policy.map_unsafe(value));
return m_policy.call_unsafe(m_fun, value);
}
bool types_match(any_tuple const& value) const
......@@ -292,16 +240,6 @@ struct invokable_impl : public invokable
return m_policy.could_invoke(value);
}
intermediate* get_intermediate(any_tuple& value)
{
return m_ii.prepare(m_policy.map(value));
}
intermediate* get_unsafe_intermediate(any_tuple& value)
{
return m_ii.prepare(m_policy.map_unsafe(value));
}
};
template<class ArgTypes>
......@@ -339,8 +277,7 @@ struct select_invokable_impl
typedef typename util::tl_apply<qualified_arg_types, util::rm_ref>::type
arg_types;
static constexpr mapping_policy mp = get_mapping_policy<arg_types>();
typedef invokable_impl<iimpl<Fun, arg_types, typename filtered<Pattern>::types>,
pattern_policy<mp, Pattern> > type;
typedef invokable_impl<Fun, pattern_policy<mp, Pattern> > type;
};
template<typename Fun>
......@@ -354,8 +291,7 @@ struct select_invokable_impl<Fun, pattern<anything> >
static_assert( arg_types::size == 0
|| std::is_same<any_tuple, arg0>::value,
"bad signature");
typedef invokable_impl<iimpl<Fun, arg_types, util::type_list<> >,
dummy_policy> type;
typedef invokable_impl<Fun, dummy_policy> type;
};
template<class Pattern, typename Fun>
......
......@@ -52,7 +52,6 @@ class yielding_actor : public abstract_scheduled_actor
typedef abstract_scheduled_actor super;
typedef super::queue_node queue_node;
typedef super::queue_node_ptr queue_node_ptr;
typedef super::queue_node_buffer queue_node_buffer;
util::fiber m_fiber;
scheduled_actor* m_behavior;
......@@ -77,9 +76,9 @@ class yielding_actor : public abstract_scheduled_actor
~yielding_actor() /*override*/;
void dequeue(behavior& rules) /*override*/;
void dequeue(behavior& bhvr) /*override*/;
void dequeue(partial_function& rules) /*override*/;
void dequeue(partial_function& fun) /*override*/;
void resume(util::fiber* from, resume_callback* callback) /*override*/;
......
......@@ -31,10 +31,11 @@
#ifndef SINGLE_READER_QUEUE_HPP
#define SINGLE_READER_QUEUE_HPP
#include <list>
#include <atomic>
#include <memory>
#include "cppa/detail/thread.hpp"
#include "cppa/intrusive/singly_linked_list.hpp"
namespace cppa { namespace intrusive {
......@@ -59,7 +60,9 @@ class single_reader_queue
typedef value_type* pointer;
typedef value_type const* const_pointer;
typedef singly_linked_list<value_type> cache_type;
typedef std::unique_ptr<value_type> unique_value_ptr;
typedef std::list<unique_value_ptr> cache_type;
typedef typename cache_type::iterator cache_iterator;
/**
* @warning call only from the reader (owner)
......@@ -87,22 +90,6 @@ class single_reader_queue
return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
}
/**
* @warning call only from the reader (owner)
*/
void push_front(pointer element)
{
m_cache.push_front(element);
}
/**
* @warning call only from the reader (owner)
*/
void push_front(cache_type&& list)
{
m_cache.splice_after(m_cache.before_begin(), std::move(list));
}
// returns true if the queue was empty
bool _push_back(pointer new_element)
{
......@@ -144,6 +131,11 @@ class single_reader_queue
inline cache_type& cache() { return m_cache; }
inline bool can_fetch_more() const
{
return m_stack.load() != nullptr;
}
/**
* @warning call only from the reader (owner)
*/
......@@ -170,6 +162,29 @@ class single_reader_queue
(void) fetch_new_data();
}
cache_iterator try_fetch_more()
{
cache_iterator result = m_cache.end();
fetch_new_data(&result);
return result;
}
template<typename TimePoint>
cache_iterator try_fetch_more(TimePoint const& abs_time)
{
cache_iterator result = m_cache.end();
if (timed_wait_for_data(abs_time)) fetch_new_data(&result);
return result;
}
cache_iterator fetch_more()
{
cache_iterator result = m_cache.end();
wait_for_data();
fetch_new_data(&result);
return result;
}
private:
// exposed to "outside" access
......@@ -209,15 +224,15 @@ class single_reader_queue
}
// atomically sets m_stack to nullptr and enqueues all elements to the cache
bool fetch_new_data()
bool fetch_new_data(cache_iterator* iter = nullptr)
{
pointer e = m_stack.load();
while (e)
{
if (m_stack.compare_exchange_weak(e, 0))
{
// iterator to the last element before insertions begin
auto iter = m_cache.before_end();
// temporary list to convert LIFO to FIFO order
cache_type tmp;
// public_tail (e) has LIFO order,
// but private_head requires FIFO order
while (e)
......@@ -225,10 +240,13 @@ class single_reader_queue
// next iteration element
pointer next = e->next;
// insert e to private cache (convert to FIFO order)
m_cache.insert_after(iter, e);
tmp.push_front(unique_value_ptr{e});
//m_cache.insert(iter, unique_value_ptr{e});
// next iteration
e = next;
}
if (iter) *iter = tmp.begin();
m_cache.splice(m_cache.end(), tmp);
return true;
}
// next iteration
......@@ -239,9 +257,12 @@ class single_reader_queue
pointer take_head()
{
if (m_cache.not_empty() || fetch_new_data())
if (!m_cache.empty() || fetch_new_data())
{
return m_cache.take_after(m_cache.before_begin());
auto result = m_cache.front().release();
m_cache.pop_front();
return result;
//return m_cache.take_after(m_cache.before_begin());
}
return nullptr;
}
......
......@@ -65,12 +65,10 @@ class partial_function
bool defined_at(any_tuple const& value);
void operator()(any_tuple value);
bool operator()(any_tuple value);
detail::invokable const* definition_at(any_tuple value);
detail::intermediate* get_intermediate(any_tuple value);
template<class... Args>
partial_function& splice(partial_function&& arg0, Args&&... args)
{
......
......@@ -28,6 +28,9 @@
\******************************************************************************/
#include <iostream>
#include "cppa/to_string.hpp"
#include "cppa/self.hpp"
#include "cppa/detail/invokable.hpp"
#include "cppa/abstract_event_based_actor.hpp"
......@@ -37,6 +40,7 @@ namespace cppa {
abstract_event_based_actor::abstract_event_based_actor()
: super(abstract_event_based_actor::blocked)
{
m_mailbox_pos = m_mailbox.cache().begin();
}
void abstract_event_based_actor::dequeue(behavior&)
......@@ -49,12 +53,12 @@ void abstract_event_based_actor::dequeue(partial_function&)
quit(exit_reason::unallowed_function_call);
}
void abstract_event_based_actor::handle_message(queue_node_ptr& node)
bool abstract_event_based_actor::handle_message(queue_node_iterator iter)
{
auto& bhvr = *(m_loop_stack.back());
if (bhvr.timeout().valid())
{
switch (dq(node, bhvr.get_partial_function(), m_buffer))
switch (dq(iter, bhvr.get_partial_function()))
{
case dq_timeout_occured:
{
......@@ -70,15 +74,14 @@ void abstract_event_based_actor::handle_message(queue_node_ptr& node)
auto& next_bhvr = *(m_loop_stack.back());
request_timeout(next_bhvr.timeout());
}
break;
return true;
}
default: break;
default: return false;
}
}
else
{
// no need to handle result
(void) dq(node, bhvr.get_partial_function(), m_buffer);
return dq(iter, bhvr.get_partial_function()) == dq_done;
}
}
......@@ -92,22 +95,53 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
on_exit();
callback->exec_done();
};
queue_node_ptr node;
if (m_loop_stack.empty())
{
cleanup(exit_reason::normal);
done_cb();
return;
}
auto mbox_end = m_mailbox.cache().end();
for (;;)
//do
{
while (m_mailbox_pos != mbox_end)
//for ( ; m_mailbox_pos != mbox_end; ++m_mailbox_pos)
{
try
{
if (handle_message(m_mailbox_pos))
{
m_mailbox_pos = m_mailbox.cache().erase(m_mailbox_pos);
}
else
{
++m_mailbox_pos;
}
}
catch (actor_exited& what)
{
cleanup(what.reason());
done_cb();
return;
}
catch (...)
{
cleanup(exit_reason::unhandled_exception);
done_cb();
return;
}
if (m_loop_stack.empty())
{
cleanup(exit_reason::normal);
done_cb();
return;
}
else if (m_mailbox.empty())
}
if (m_mailbox.can_fetch_more() == false)
{
m_state.store(abstract_scheduled_actor::about_to_block);
CPPA_MEMORY_BARRIER();
if (!m_mailbox.empty())
if (m_mailbox.can_fetch_more())
{
// someone preempt us
m_state.store(abstract_scheduled_actor::ready);
......@@ -120,7 +154,7 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
{
case abstract_scheduled_actor::ready:
{
// got a new job
// someone preempt us
break;
}
case abstract_scheduled_actor::blocked:
......@@ -132,25 +166,8 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
};
}
}
node.reset(m_mailbox.pop());
try
{
handle_message(node);
}
catch (actor_exited& what)
{
cleanup(what.reason());
done_cb();
return;
}
catch (...)
{
cleanup(exit_reason::unhandled_exception);
done_cb();
return;
}
m_mailbox_pos = m_mailbox.try_fetch_more();
}
//while (callback->still_ready());
}
void abstract_event_based_actor::on_exit()
......
......@@ -163,10 +163,11 @@ auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result
return ordinary_message;
}
auto abstract_scheduled_actor::dq(queue_node_ptr& node,
partial_function& rules,
queue_node_buffer& buffer) -> dq_result
auto abstract_scheduled_actor::dq(queue_node_iterator iter,
partial_function& rules) -> dq_result
{
auto& node = *iter;
if (node->marked) return dq_indeterminate;
switch (filter_msg(node->msg))
{
case normal_exit_signal:
......@@ -179,47 +180,31 @@ auto abstract_scheduled_actor::dq(queue_node_ptr& node,
{
// m_active_timeout_id is already invalid
m_has_pending_timeout_request = false;
// restore mailbox before calling client
if (!buffer.empty())
{
m_mailbox.push_front(std::move(buffer));
}
return dq_timeout_occured;
}
default: break;
}
auto imd = rules.get_intermediate(node->msg);
if (imd)
{
m_last_dequeued = std::move(node->msg);
m_last_sender = std::move(node->sender);
// restore mailbox before invoking imd
if (!buffer.empty())
m_last_dequeued = node->msg;
m_last_sender = node->sender;
// make sure no timeout is handled incorrectly
++m_active_timeout_id;
// lifetime scope of qguard
{
m_mailbox.push_front(std::move(buffer));
}
// expire pending request
if (m_has_pending_timeout_request)
// make sure nested received do not process this node again
queue_node_guard qguard{node.get()};
// try to invoke given function
if (rules(node->msg))
{
++m_active_timeout_id;
// client erases node later (keep it marked until it's removed)
qguard.release();
// we definitely don't have a pending timeout now
m_has_pending_timeout_request = false;
}
imd->invoke();
return dq_done;
}
else
{
/*
std::string err_msg = "unhandled message in actor ";
err_msg += std::to_string(id());
err_msg += ": ";
err_msg += to_string(node->msg);
err_msg += "\n";
cout << err_msg;
*/
buffer.push_back(node.release());
return dq_indeterminate;
}
// no match
--m_active_timeout_id;
return dq_indeterminate;
}
// dummy
......
......@@ -35,7 +35,6 @@
#include "cppa/exception.hpp"
#include "cppa/detail/matches.hpp"
#include "cppa/detail/invokable.hpp"
#include "cppa/detail/intermediate.hpp"
#include "cppa/detail/converted_thread_context.hpp"
namespace cppa { namespace detail {
......@@ -71,11 +70,15 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
void converted_thread_context::dequeue(partial_function& rules) /*override*/
{
queue_node_buffer buffer;
queue_node_ptr node(m_mailbox.pop());
while (dq(node, rules, buffer) == false)
auto iter = m_mailbox.cache().begin();
auto mbox_end = m_mailbox.cache().end();
for (;;)
{
node.reset(m_mailbox.pop());
for ( ; iter != mbox_end; ++iter)
{
if (dq(iter, rules)) return;
}
iter = m_mailbox.fetch_more();
}
}
......@@ -85,22 +88,21 @@ void converted_thread_context::dequeue(behavior& rules) /*override*/
{
auto timeout = now();
timeout += rules.timeout();
queue_node_buffer buffer;
queue_node_ptr node(m_mailbox.try_pop());
auto iter = m_mailbox.cache().begin();
auto mbox_end = m_mailbox.cache().end();
do
{
if (!node)
if (iter == mbox_end)
{
node.reset(m_mailbox.try_pop(timeout));
if (!node)
iter = m_mailbox.try_fetch_more(timeout);
if (iter == mbox_end)
{
if (!buffer.empty()) m_mailbox.push_front(std::move(buffer));
rules.handle_timeout();
return;
}
}
}
while (dq(node, rules.get_partial_function(), buffer) == false);
while (dq(iter, rules.get_partial_function()) == false);
}
else
{
......@@ -127,29 +129,27 @@ converted_thread_context::throw_on_exit(any_tuple const& msg)
return not_an_exit_signal;
}
bool converted_thread_context::dq(queue_node_ptr& node,
partial_function& rules,
queue_node_buffer& buffer)
bool converted_thread_context::dq(queue_node_iterator iter,
partial_function& rules)
{
if (m_trap_exit == false && throw_on_exit(node->msg) == normal_exit_signal)
auto& node = *iter;
if ( m_trap_exit == false
&& throw_on_exit(node->msg) == normal_exit_signal)
{
return false;
}
auto imd = rules.get_intermediate(node->msg);
if (imd)
m_last_dequeued = node->msg;
m_last_sender = node->sender;
{
m_last_dequeued = std::move(node->msg);
m_last_sender = std::move(node->sender);
// restore mailbox before invoking imd
if (!buffer.empty()) m_mailbox.push_front(std::move(buffer));
imd->invoke();
queue_node_guard qguard{node.get()};
if (rules(node->msg))
{
qguard.release();
m_mailbox.cache().erase(iter);
return true;
}
else
{
buffer.push_back(node.release());
return false;
}
return false;
}
} } // namespace cppa::detail
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#include "cppa/detail/intermediate.hpp"
namespace cppa { namespace detail {
// out-of-line definition of the pure interface's virtual destructor
intermediate::~intermediate()
{
}
} } // namespace cppa::detail
......@@ -44,8 +44,4 @@ bool invokable::types_match(any_tuple const&) const { return false; }
bool invokable::could_invoke(any_tuple const&) const { return false; }
intermediate* invokable::get_intermediate(any_tuple&) { return 0; }
intermediate* invokable::get_unsafe_intermediate(any_tuple&) { return 0; }
} } // namespace cppa::detail
......@@ -93,18 +93,18 @@ auto partial_function::get_cache_entry(any_tuple const& value) -> cache_entry&
return i->second;
}
void partial_function::operator()(any_tuple value)
bool partial_function::operator()(any_tuple value)
{
using detail::invokable;
auto& v = get_cache_entry(value);
if (value.impl_type() == detail::tuple_impl_info::statically_typed)
{
std::any_of(v.begin(), v.end(),
return std::any_of(v.begin(), v.end(),
[&](invokable* i) { return i->unsafe_invoke(value); });
}
else
{
std::any_of(v.begin(), v.end(),
return std::any_of(v.begin(), v.end(),
[&](invokable* i) { return i->invoke(value); });
}
}
......@@ -126,24 +126,6 @@ bool partial_function::defined_at(any_tuple const& value)
return definition_at(value) != nullptr;
}
detail::intermediate* partial_function::get_intermediate(any_tuple value)
{
detail::intermediate* result = nullptr;
if (value.impl_type() == detail::tuple_impl_info::statically_typed)
{
for (auto& i : get_cache_entry(value))
if ((result = i->get_unsafe_intermediate(value)) != nullptr)
return result;
}
else
{
for (auto& i : get_cache_entry(value))
if ((result = i->get_intermediate(value)) != nullptr)
return result;
}
return nullptr;
}
behavior operator,(partial_function&& lhs, behavior&& rhs)
{
behavior bhvr{rhs.m_timeout, std::move(rhs.m_timeout_handler)};
......
......@@ -36,7 +36,6 @@
#include "cppa/cppa.hpp"
#include "cppa/self.hpp"
#include "cppa/detail/invokable.hpp"
#include "cppa/detail/intermediate.hpp"
namespace cppa { namespace detail {
......@@ -73,16 +72,13 @@ void yielding_actor::run(void* ptr_arg)
void yielding_actor::yield_until_not_empty()
{
while (m_mailbox.empty())
if (m_mailbox.can_fetch_more() == false)
{
m_state.store(abstract_scheduled_actor::about_to_block);
CPPA_MEMORY_BARRIER();
// make sure mailbox is empty
if (!m_mailbox.empty())
// make sure mailbox is 'empty'
if (m_mailbox.can_fetch_more() == false)
{
// someone preempt us
//compare_exchange_state(scheduled_actor::about_to_block,
// scheduled_actor::ready);
m_state.store(abstract_scheduled_actor::ready);
return;
}
......@@ -93,52 +89,58 @@ void yielding_actor::yield_until_not_empty()
}
}
void yielding_actor::dequeue(partial_function& rules)
void yielding_actor::dequeue(partial_function& fun)
{
queue_node_buffer buffer;
yield_until_not_empty();
queue_node_ptr node(m_mailbox.pop());
while (dq(node, rules, buffer) != dq_done)
auto iter = m_mailbox.cache().begin();
auto mbox_end = m_mailbox.cache().end();
for (;;)
{
for ( ; iter != mbox_end; ++iter)
{
if (dq(iter, fun) == dq_done)
{
m_mailbox.cache().erase(iter);
return;
}
}
yield_until_not_empty();
node.reset(m_mailbox.pop());
iter = m_mailbox.try_fetch_more();
}
}
void yielding_actor::dequeue(behavior& rules)
void yielding_actor::dequeue(behavior& bhvr)
{
if (rules.timeout().valid())
if (bhvr.timeout().valid())
{
queue_node_buffer buffer;
// try until a message was successfully dequeued
request_timeout(rules.timeout());
request_timeout(bhvr.timeout());
auto iter = m_mailbox.cache().begin();
auto mbox_end = m_mailbox.cache().end();
for (;;)
{
//if (m_mailbox.empty() && has_pending_timeout() == false)
//{
// request_timeout(rules.timeout());
//}
yield_until_not_empty();
queue_node_ptr node(m_mailbox.pop());
switch (dq(node, rules.get_partial_function(), buffer))
while (iter != mbox_end)
{
case dq_done:
switch (dq(iter, bhvr.get_partial_function()))
{
return;
}
case dq_timeout_occured:
{
rules.handle_timeout();
bhvr.handle_timeout();
// fall through
case dq_done:
iter = m_mailbox.cache().erase(iter);
return;
default:
++iter;
break;
}
default: break;
}
yield_until_not_empty();
iter = m_mailbox.try_fetch_more();
}
}
else
{
// suppress virtual function call
yielding_actor::dequeue(rules.get_partial_function());
yielding_actor::dequeue(bhvr.get_partial_function());
}
}
......@@ -199,4 +201,4 @@ void yielding_actor::resume(util::fiber* from, resume_callback* callback)
namespace { int keep_compiler_happy() { return 42; } }
#endif // CPPA_DISABLE_CONTEXT_SWITCHING
#endif // ifdef CPPA_DISABLE_CONTEXT_SWITCHING
......@@ -130,7 +130,7 @@ size_t test__intrusive_containers()
tmp.push_back(iq.pop());
}
delete iq.pop();
iq.push_front(std::move(tmp));
/*iq.push_front(std::move(tmp));
CPPA_CHECK(tmp.empty());
CPPA_CHECK_EQUAL(std::distance(iq.cache().begin(), iq.cache().end()), 19);
std::unique_ptr<iint> iptr;
......@@ -146,6 +146,7 @@ size_t test__intrusive_containers()
CPPA_CHECK(iptr);
if (iptr) CPPA_CHECK_EQUAL(iptr->value, i);
}
*/
}
// two dummies
......
......@@ -74,25 +74,6 @@ void invoke_test(std::vector<any_tuple>& test_tuples, Testee& x)
}
}
inline detail::intermediate* get_i(partial_function& pf, any_tuple const& value)
{
return pf.get_intermediate(value);
}
template<class Testee>
void intermediate_test(std::vector<any_tuple>& test_tuples, Testee& x)
{
boost::progress_timer t;
for (int i = 0; i < 1000000; ++i)
{
for (auto& t : test_tuples)
{
auto i = get_i(x, t);
if (i) i->invoke();
}
}
}
size_t test__pattern()
{
CPPA_TEST(test__pattern);
......
......@@ -339,8 +339,12 @@ size_t test__spawn()
receive(after(std::chrono::seconds(1)) >> []() { });
CPPA_IF_VERBOSE(cout << "ok" << endl);
CPPA_IF_VERBOSE(cout << "testee1 & event_testee2 ... " << std::flush);
CPPA_IF_VERBOSE(cout << "testee1 ... " << std::flush);
spawn(testee1);
await_all_others_done();
CPPA_IF_VERBOSE(cout << "ok" << endl);
CPPA_IF_VERBOSE(cout << "event_testee2 ... " << std::flush);
spawn(event_testee2());
await_all_others_done();
CPPA_IF_VERBOSE(cout << "ok" << endl);
......
......@@ -18,7 +18,6 @@
#include "cppa/uniform_type_info.hpp"
#include "cppa/detail/invokable.hpp"
#include "cppa/detail/intermediate.hpp"
using std::cout;
using std::endl;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment