Commit 82ae3d32 authored by neverlord

bugfixing

parent 136c5494
@@ -35,14 +35,14 @@
 #include "utility.hpp"
-#include "boost/threadpool.hpp"
+//#include "boost/threadpool.hpp"
 #include "cppa/cppa.hpp"
 #include "cppa/fsm_actor.hpp"
 #include "cppa/detail/mock_scheduler.hpp"
 #include "cppa/detail/yielding_actor.hpp"
+/*
 namespace cppa { namespace detail {
 struct pool_job
@@ -107,6 +107,7 @@ class boost_threadpool_scheduler : public scheduler
 };
 } } // namespace cppa::detail
+*/
 using std::cout;
 using std::cerr;
@@ -367,15 +368,17 @@ enum mode_type { event_based, fiber_based };
 int main(int argc, char** argv)
 {
     announce<factors>();
-    if (argc != 6 && argc != 7) usage();
+    if (argc != 6) usage();
     auto iter = argv;
     ++iter; // argv[0] (app name)
+    /*
     if (argc == 7)
     {
         if (strcmp(*iter++, "--boost_pool") == 0)
             cppa::set_scheduler(new cppa::detail::boost_threadpool_scheduler);
         else usage();
     }
+    */
     mode_type mode;
     std::string mode_str = *iter++;
     if (mode_str == "event-based") mode = event_based;
......
@@ -78,17 +78,15 @@ class yielding_actor : public abstract_scheduled_actor
     template<typename Fun>
     void dequeue_impl(Fun rm_fun)
     {
-        auto iter = m_mailbox.cache().remove_first(rm_fun);
-        auto mbox_end = m_mailbox.cache().end();
+        auto& mbox_cache = m_mailbox.cache();
+        auto mbox_end = mbox_cache.end();
+        auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
         while (iter == mbox_end)
         {
            yield_until_not_empty();
-            iter = m_mailbox.try_fetch_more();
-            if (iter != mbox_end)
-            {
-                iter = m_mailbox.cache().remove_first(rm_fun, iter);
-            }
+            iter = std::find_if(m_mailbox.try_fetch_more(), mbox_end, rm_fun);
         }
+        mbox_cache.erase(iter);
     }
 };
......
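Editor's note: the dequeue_impl() change above is the core pattern of this commit. Instead of the intrusive cache's remove_first() helper, the cached mailbox is now scanned with std::find_if and the matching node is erased explicitly once the predicate has handled it. A minimal standalone sketch of that pattern, assuming a std::list-based cache of unique_ptr nodes; message, matches, and fetch_more are hypothetical stand-ins for the real queue_node_ptr, rm_fun, and try_fetch_more():

#include <algorithm>
#include <list>
#include <memory>

struct message { int value; };
using message_ptr = std::unique_ptr<message>;
using cache_type  = std::list<message_ptr>;

// stands in for rm_fun: returns true once it has handled the message
bool matches(message_ptr& m) { return m->value == 42; }

// stands in for try_fetch_more(): appends newly arrived messages and
// returns an iterator to the first new element
cache_type::iterator fetch_more(cache_type& cache)
{
    return cache.insert(cache.end(), message_ptr{new message{42}});
}

void dequeue_one(cache_type& cache)
{
    auto mbox_end = cache.end();
    // scan what is already cached
    auto iter = std::find_if(cache.begin(), mbox_end, matches);
    while (iter == mbox_end)
    {
        // nothing matched: fetch more and scan only the new elements
        iter = std::find_if(fetch_more(cache), mbox_end, matches);
    }
    // the predicate already consumed the message, so drop its node
    cache.erase(iter);
}

Capturing mbox_end once is safe here because std::list::end() is not invalidated by later insertions, so the loop can keep comparing against it after fetch_more() appends new nodes.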
@@ -31,13 +31,12 @@
 #ifndef SINGLE_READER_QUEUE_HPP
 #define SINGLE_READER_QUEUE_HPP
+#include <list>
 #include <atomic>
 #include <memory>
 #include "cppa/detail/thread.hpp"
-#include "cppa/intrusive/singly_linked_list.hpp"
 namespace cppa { namespace intrusive {
 /**
@@ -60,8 +59,9 @@ class single_reader_queue
     typedef value_type const& const_reference;
     typedef value_type* pointer;
     typedef value_type const* const_pointer;
     typedef std::unique_ptr<value_type> unique_pointer;
-    typedef singly_linked_list<value_type> cache_type;
+    typedef std::list<unique_pointer> cache_type;
     typedef typename cache_type::iterator cache_iterator;
 /**
@@ -233,18 +233,20 @@ class single_reader_queue
         {
             // temporary list to convert LIFO to FIFO order
             cache_type tmp;
+            // public_tail (e) has LIFO order,
+            // but private_head requires FIFO order
             while (e)
             {
                 // next iteration element
                 pointer next = e->next;
                 // insert e to private cache (convert to LIFO order)
-                tmp.push_front(e);
+                tmp.push_front(unique_pointer{e});
                 //m_cache.insert(iter, unique_value_ptr{e});
                 // next iteration
                 e = next;
             }
-            if (iter) *iter = m_cache.before_end();
-            m_cache.splice_after(m_cache.before_end(), std::move(tmp));
+            if (iter) *iter = tmp.begin();
+            m_cache.splice(m_cache.end(), tmp);
             return true;
         }
         // next iteration
@@ -257,9 +259,11 @@ class single_reader_queue
     {
         if (!m_cache.empty() || fetch_new_data())
         {
-            return unique_pointer{m_cache.take_after(m_cache.before_begin())};
+            auto result = std::move(m_cache.front());
+            m_cache.pop_front();
+            return std::move(result);
         }
-        return {};
+        return nullptr;
     }
 };
......
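Editor's note: in the single_reader_queue hunks above, the intrusive singly_linked_list cache becomes a std::list of unique_ptr, so fetch_new_data() now has to take ownership of the raw nodes pulled off the lock-free stack while reversing them from LIFO into FIFO order. A compact sketch of that conversion plus the new pop() pattern, with a hypothetical node type standing in for the real queue_node:

#include <list>
#include <memory>
#include <utility>

// hypothetical intrusively linked node, like queue_node with its 'next' pointer
struct node
{
    node* next;
    int   value;
};

using node_ptr   = std::unique_ptr<node>;
using cache_type = std::list<node_ptr>;

// 'e' is the head of a LIFO chain taken from the queue's public tail;
// append its elements to 'cache' in FIFO order, transferring ownership
void append_fifo(node* e, cache_type& cache)
{
    cache_type tmp;                    // temporary list, as in the diff
    while (e != nullptr)
    {
        node* next = e->next;          // remember the rest of the chain
        tmp.push_front(node_ptr{e});   // push_front reverses the order
        e = next;
    }
    cache.splice(cache.end(), tmp);    // O(1), moves all nodes between lists
}

// the new pop(): move the element out of the front node, then drop the node
node_ptr pop(cache_type& cache)
{
    if (cache.empty()) return nullptr;
    auto result = std::move(cache.front());
    cache.pop_front();
    return result;
}

splice() transfers the nodes without copying and without invalidating iterators into either list, which is what keeps iterators previously handed out by try_fetch_more() usable. In pop(), a plain return result; is already sufficient; the std::move in the diff's return statement is harmless but redundant.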
@@ -40,7 +40,7 @@ namespace cppa {
 abstract_event_based_actor::abstract_event_based_actor()
     : super(abstract_event_based_actor::blocked)
 {
-    m_mailbox_pos = m_mailbox.cache().before_begin();
+    m_mailbox_pos = m_mailbox.cache().end();
 }
 void abstract_event_based_actor::dequeue(behavior&)
@@ -55,6 +55,7 @@ void abstract_event_based_actor::dequeue(partial_function&)
 bool abstract_event_based_actor::handle_message(queue_node& node)
 {
+    CPPA_REQUIRE(m_loop_stack.empty() == false);
     auto& bhvr = *(m_loop_stack.back());
     if (bhvr.timeout().valid())
     {
@@ -88,7 +89,7 @@ bool abstract_event_based_actor::handle_message(queue_node& node)
 void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
 {
     self.set(this);
-    auto done_cb = [&]()
+    auto done_cb = [this, callback]()
     {
         m_state.store(abstract_scheduled_actor::done);
         while (!m_loop_stack.empty()) m_loop_stack.pop_back();
@@ -101,16 +102,20 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
         done_cb();
         return;
     }
-    auto mbox_end = m_mailbox.cache().end();
-    auto rm_fun = [this](queue_node& node) { return handle_message(node); };
+    auto& mbox_cache = m_mailbox.cache();
+    auto mbox_end = mbox_cache.end();
+    auto rm_fun = [this](queue_node_ptr& ptr) { return handle_message(*ptr); };
     try
     {
         for (;;)
         {
-            auto iter = m_mailbox.cache().remove_first(rm_fun, m_mailbox_pos);
-            if (iter == mbox_end)
+            if (m_loop_stack.empty())
+            {
+                done_cb();
+                return;
+            }
+            while (m_mailbox_pos == mbox_end)
             {
-                m_mailbox_pos = m_mailbox.cache().before_end();
                 // try fetch more
                 if (m_mailbox.can_fetch_more() == false)
                 {
@@ -135,18 +140,13 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
                     };
                 }
-                m_mailbox.try_fetch_more();
+                m_mailbox_pos = m_mailbox.try_fetch_more();
            }
-            else if (m_loop_stack.empty())
-            {
-                cleanup(exit_reason::normal);
-                done_cb();
-                return;
-            }
-            else
+            m_mailbox_pos = std::find_if(m_mailbox_pos, mbox_end, rm_fun);
+            if (m_mailbox_pos != mbox_end)
             {
-                // prepare for next receive
-                m_mailbox_pos = m_mailbox.cache().before_begin();
+                mbox_cache.erase(m_mailbox_pos);
+                m_mailbox_pos = mbox_cache.begin();
             }
         }
     }
......
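Editor's note: the resume() rewrite above keeps m_mailbox_pos as a plain cache iterator across scheduler invocations and compares it against an end() iterator captured earlier. That relies on a guarantee of std::list: neither end() nor iterators to remaining elements are invalidated by insert, splice, or erase of other elements. A small self-contained check of that property (illustration only, not part of the commit):

#include <cassert>
#include <iterator>
#include <list>

int main()
{
    std::list<int> cache;
    auto pos = cache.end();        // like m_mailbox_pos right after construction

    cache.push_back(1);            // new messages arrive
    cache.push_back(2);
    assert(pos == cache.end());    // end() is a stable sentinel

    pos = cache.begin();           // point at a concrete element
    cache.push_back(3);            // more arrivals do not invalidate 'pos'
    cache.erase(std::next(pos));   // erasing *other* elements does not either
    assert(*pos == 1);
    return 0;
}

This is why the old before_begin()/before_end() bookkeeping of the intrusive list can be dropped: the saved iterator either still points at an unscanned node, or equals end(), in which case the loop fetches more data.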
@@ -70,13 +70,15 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
 void converted_thread_context::dequeue(partial_function& rules) /*override*/
 {
-    auto rm_fun = [&](queue_node& node) { return dq(node, rules); };
-    auto iter = m_mailbox.cache().remove_first(rm_fun);
-    auto mbox_end = m_mailbox.cache().end();
+    auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, rules); };
+    auto& mbox_cache = m_mailbox.cache();
+    auto mbox_end = mbox_cache.end();
+    auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
     while (iter == mbox_end)
     {
-        iter = m_mailbox.cache().remove_first(rm_fun, m_mailbox.fetch_more());
+        iter = std::find_if(m_mailbox.fetch_more(), mbox_end, rm_fun);
     }
+    mbox_cache.erase(iter);
 }
 void converted_thread_context::dequeue(behavior& rules) /*override*/
@@ -85,22 +87,24 @@ void converted_thread_context::dequeue(behavior& rules) /*override*/
     {
         auto timeout = now();
         timeout += rules.timeout();
-        auto rm_fun = [&](queue_node& node)
+        auto rm_fun = [&](queue_node_ptr& node)
         {
-            return dq(node, rules.get_partial_function());
+            return dq(*node, rules.get_partial_function());
         };
-        auto iter = m_mailbox.cache().remove_first(rm_fun);
-        auto mbox_end = m_mailbox.cache().end();
+        auto& mbox_cache = m_mailbox.cache();
+        auto mbox_end = mbox_cache.end();
+        auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
         while (iter == mbox_end)
         {
-            iter = m_mailbox.try_fetch_more(timeout);
-            if (iter == mbox_end)
+            auto next = m_mailbox.try_fetch_more(timeout);
+            if (next == mbox_end)
             {
                rules.handle_timeout();
                return;
            }
-            iter = m_mailbox.cache().remove_first(rm_fun, iter);
+            iter = std::find_if(next, mbox_end, rm_fun);
         }
+        mbox_cache.erase(iter);
     }
     else
     {
......
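Editor's note: converted_thread_context::dequeue(behavior&) above applies the same scan-and-erase pattern, but bails out through handle_timeout() when try_fetch_more(timeout) yields nothing before the deadline. A sketch of that control flow under the same assumptions; try_fetch_more, matches, and on_timeout are hypothetical stand-ins for the real mailbox call, rm_fun, and rules.handle_timeout():

#include <algorithm>
#include <chrono>
#include <list>

using clock_type = std::chrono::steady_clock;
using cache_type = std::list<int>;

bool matches(int) { return true; }    // stands in for rm_fun / dq(...)
void on_timeout() {}                  // stands in for rules.handle_timeout()

// stand-in for try_fetch_more(timeout): returns an iterator to the first
// newly arrived element, or end() if the deadline passed with nothing new
cache_type::iterator try_fetch_more(cache_type& cache, clock_type::time_point deadline)
{
    if (clock_type::now() >= deadline) return cache.end();
    return cache.insert(cache.end(), 42);   // pretend one message arrived
}

void timed_dequeue(cache_type& cache, std::chrono::milliseconds rel_timeout)
{
    auto deadline = clock_type::now() + rel_timeout;
    auto mbox_end = cache.end();
    auto iter = std::find_if(cache.begin(), mbox_end, matches);
    while (iter == mbox_end)
    {
        auto next = try_fetch_more(cache, deadline);
        if (next == mbox_end)
        {
            on_timeout();                              // deadline passed: run the timeout handler once
            return;
        }
        iter = std::find_if(next, mbox_end, matches);  // scan only the new part
    }
    cache.erase(iter);                                 // found and handled: drop the node
}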
@@ -98,7 +98,7 @@ void yielding_actor::yield_until_not_empty()
 void yielding_actor::dequeue(partial_function& fun)
 {
-    auto rm_fun = [&](queue_node& node) { return dq(node, fun) == dq_done; };
+    auto rm_fun = [&](queue_node_ptr& node) { return dq(*node, fun) == dq_done; };
     dequeue_impl(rm_fun);
 }
@@ -107,9 +107,9 @@ void yielding_actor::dequeue(behavior& bhvr)
     if (bhvr.timeout().valid())
     {
         request_timeout(bhvr.timeout());
-        auto rm_fun = [&](queue_node& node) -> bool
+        auto rm_fun = [&](queue_node_ptr& node) -> bool
         {
-            switch (dq(node, bhvr.get_partial_function()))
+            switch (dq(*node, bhvr.get_partial_function()))
             {
                 case dq_timeout_occured:
                     bhvr.handle_timeout();
......
@@ -145,6 +145,14 @@ int main(int argc, char** argv)
     return 0;
     //*/
+    /*
+    auto nao = remote_actor("192.168.1.148", 12000);
+    send(nao, atom("speak"), "i am an actor! seriously!");
+    return 0;
+    */
     auto args = get_kv_pairs(argc, argv);
     match_each(args)
     (
......