Commit 82ae3d32 authored by neverlord's avatar neverlord

bugfixing

parent 136c5494
......@@ -35,14 +35,14 @@
#include "utility.hpp"
#include "boost/threadpool.hpp"
//#include "boost/threadpool.hpp"
#include "cppa/cppa.hpp"
#include "cppa/fsm_actor.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/yielding_actor.hpp"
/*
namespace cppa { namespace detail {
struct pool_job
......@@ -107,6 +107,7 @@ class boost_threadpool_scheduler : public scheduler
};
} } // namespace cppa::detail
*/
using std::cout;
using std::cerr;
......@@ -367,15 +368,17 @@ enum mode_type { event_based, fiber_based };
int main(int argc, char** argv)
{
announce<factors>();
if (argc != 6 && argc != 7) usage();
if (argc != 6) usage();
auto iter = argv;
++iter; // argv[0] (app name)
/*
if (argc == 7)
{
if (strcmp(*iter++, "--boost_pool") == 0)
cppa::set_scheduler(new cppa::detail::boost_threadpool_scheduler);
else usage();
}
*/
mode_type mode;
std::string mode_str = *iter++;
if (mode_str == "event-based") mode = event_based;
......
......@@ -78,17 +78,15 @@ class yielding_actor : public abstract_scheduled_actor
template<typename Fun>
void dequeue_impl(Fun rm_fun)
{
auto iter = m_mailbox.cache().remove_first(rm_fun);
auto mbox_end = m_mailbox.cache().end();
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
while (iter == mbox_end)
{
yield_until_not_empty();
iter = m_mailbox.try_fetch_more();
if (iter != mbox_end)
{
iter = m_mailbox.cache().remove_first(rm_fun, iter);
}
iter = std::find_if(m_mailbox.try_fetch_more(), mbox_end, rm_fun);
}
mbox_cache.erase(iter);
}
};
......
......@@ -31,13 +31,12 @@
#ifndef SINGLE_READER_QUEUE_HPP
#define SINGLE_READER_QUEUE_HPP
#include <list>
#include <atomic>
#include <memory>
#include "cppa/detail/thread.hpp"
#include "cppa/intrusive/singly_linked_list.hpp"
namespace cppa { namespace intrusive {
/**
......@@ -53,16 +52,17 @@ class single_reader_queue
public:
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
typedef std::unique_ptr<value_type> unique_pointer;
typedef singly_linked_list<value_type> cache_type;
typedef typename cache_type::iterator cache_iterator;
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
typedef std::unique_ptr<value_type> unique_pointer;
typedef std::list<unique_pointer> cache_type;
typedef typename cache_type::iterator cache_iterator;
/**
* @warning call only from the reader (owner)
......@@ -233,18 +233,20 @@ class single_reader_queue
{
// temporary list to convert LIFO to FIFO order
cache_type tmp;
// public_tail (e) has LIFO order,
// but private_head requires FIFO order
while (e)
{
// next iteration element
pointer next = e->next;
// insert e to private cache (convert to LIFO order)
tmp.push_front(e);
tmp.push_front(unique_pointer{e});
//m_cache.insert(iter, unique_value_ptr{e});
// next iteration
e = next;
}
if (iter) *iter = m_cache.before_end();
m_cache.splice_after(m_cache.before_end(), std::move(tmp));
if (iter) *iter = tmp.begin();
m_cache.splice(m_cache.end(), tmp);
return true;
}
// next iteration
......@@ -257,9 +259,11 @@ class single_reader_queue
{
if (!m_cache.empty() || fetch_new_data())
{
return unique_pointer{m_cache.take_after(m_cache.before_begin())};
auto result = std::move(m_cache.front());
m_cache.pop_front();
return std::move(result);
}
return {};
return nullptr;
}
};
......
......@@ -40,7 +40,7 @@ namespace cppa {
abstract_event_based_actor::abstract_event_based_actor()
: super(abstract_event_based_actor::blocked)
{
m_mailbox_pos = m_mailbox.cache().before_begin();
m_mailbox_pos = m_mailbox.cache().end();
}
void abstract_event_based_actor::dequeue(behavior&)
......@@ -55,6 +55,7 @@ void abstract_event_based_actor::dequeue(partial_function&)
bool abstract_event_based_actor::handle_message(queue_node& node)
{
CPPA_REQUIRE(m_loop_stack.empty() == false);
auto& bhvr = *(m_loop_stack.back());
if (bhvr.timeout().valid())
{
......@@ -88,7 +89,7 @@ bool abstract_event_based_actor::handle_message(queue_node& node)
void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
{
self.set(this);
auto done_cb = [&]()
auto done_cb = [this, callback]()
{
m_state.store(abstract_scheduled_actor::done);
while (!m_loop_stack.empty()) m_loop_stack.pop_back();
......@@ -101,16 +102,20 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
done_cb();
return;
}
auto mbox_end = m_mailbox.cache().end();
auto rm_fun = [this](queue_node& node) { return handle_message(node); };
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto rm_fun = [this](queue_node_ptr& ptr) { return handle_message(*ptr); };
try
{
for (;;)
{
auto iter = m_mailbox.cache().remove_first(rm_fun, m_mailbox_pos);
if (iter == mbox_end)
if (m_loop_stack.empty())
{
done_cb();
return;
}
while (m_mailbox_pos == mbox_end)
{
m_mailbox_pos = m_mailbox.cache().before_end();
// try fetch more
if (m_mailbox.can_fetch_more() == false)
{
......@@ -135,18 +140,13 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
};
}
}
m_mailbox.try_fetch_more();
}
else if (m_loop_stack.empty())
{
cleanup(exit_reason::normal);
done_cb();
return;
m_mailbox_pos = m_mailbox.try_fetch_more();
}
else
m_mailbox_pos = std::find_if(m_mailbox_pos, mbox_end, rm_fun);
if (m_mailbox_pos != mbox_end)
{
// prepare for next receive
m_mailbox_pos = m_mailbox.cache().before_begin();
mbox_cache.erase(m_mailbox_pos);
m_mailbox_pos = mbox_cache.begin();
}
}
}
......
......@@ -70,13 +70,15 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
void converted_thread_context::dequeue(partial_function& rules) /*override*/
{
auto rm_fun = [&](queue_node& node) { return dq(node, rules); };
auto iter = m_mailbox.cache().remove_first(rm_fun);
auto mbox_end = m_mailbox.cache().end();
auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, rules); };
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
while (iter == mbox_end)
{
iter = m_mailbox.cache().remove_first(rm_fun, m_mailbox.fetch_more());
iter = std::find_if(m_mailbox.fetch_more(), mbox_end, rm_fun);
}
mbox_cache.erase(iter);
}
void converted_thread_context::dequeue(behavior& rules) /*override*/
......@@ -85,22 +87,24 @@ void converted_thread_context::dequeue(behavior& rules) /*override*/
{
auto timeout = now();
timeout += rules.timeout();
auto rm_fun = [&](queue_node& node)
auto rm_fun = [&](queue_node_ptr& node)
{
return dq(node, rules.get_partial_function());
return dq(*node, rules.get_partial_function());
};
auto iter = m_mailbox.cache().remove_first(rm_fun);
auto mbox_end = m_mailbox.cache().end();
auto& mbox_cache = m_mailbox.cache();
auto mbox_end = mbox_cache.end();
auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
while (iter == mbox_end)
{
iter = m_mailbox.try_fetch_more(timeout);
if (iter == mbox_end)
auto next = m_mailbox.try_fetch_more(timeout);
if (next == mbox_end)
{
rules.handle_timeout();
return;
}
iter = m_mailbox.cache().remove_first(rm_fun, iter);
iter = std::find_if(next, mbox_end, rm_fun);
}
mbox_cache.erase(iter);
}
else
{
......
......@@ -98,7 +98,7 @@ void yielding_actor::yield_until_not_empty()
void yielding_actor::dequeue(partial_function& fun)
{
auto rm_fun = [&](queue_node& node) { return dq(node, fun) == dq_done; };
auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, fun) == dq_done; };
dequeue_impl(rm_fun);
}
......@@ -107,9 +107,9 @@ void yielding_actor::dequeue(behavior& bhvr)
if (bhvr.timeout().valid())
{
request_timeout(bhvr.timeout());
auto rm_fun = [&](queue_node& node) -> bool
auto rm_fun = [&](queue_node_ptr& node) -> bool
{
switch (dq(node, bhvr.get_partial_function()))
switch (dq(*node, bhvr.get_partial_function()))
{
case dq_timeout_occured:
bhvr.handle_timeout();
......
......@@ -145,6 +145,14 @@ int main(int argc, char** argv)
return 0;
//*/
/*
auto nao = remote_actor("192.168.1.148", 12000);
send(nao, atom("speak"), "i am an actor! seriously!");
return 0;
*/
auto args = get_kv_pairs(argc, argv);
match_each(args)
(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment