Commit 259c5e65 authored by neverlord

maintenance

parent d967328b
@@ -84,7 +84,7 @@ class abstract_actor : public Base
     };
     typedef intrusive::single_reader_queue<queue_node> mailbox_type;
-    typedef typename mailbox_type::unique_pointer queue_node_ptr;
+    typedef std::unique_ptr<queue_node> queue_node_ptr;
     typedef typename mailbox_type::cache_type mailbox_cache_type;
     typedef typename mailbox_cache_type::iterator queue_node_iterator;
...
@@ -47,15 +47,15 @@ class thread_pool_scheduler : public scheduler
     struct worker;
-    void start();// override
-    void stop();// override
-    void enqueue(abstract_scheduled_actor* what);// override
-    actor_ptr spawn(abstract_event_based_actor* what); //override
-    actor_ptr spawn(scheduled_actor* behavior, scheduling_hint hint); //override
+    void start() /*override*/;
+    void stop() /*override*/;
+    void enqueue(abstract_scheduled_actor* what) /*override*/;
+    actor_ptr spawn(abstract_event_based_actor* what);
+    actor_ptr spawn(scheduled_actor* behavior, scheduling_hint hint);
 private:
...
...@@ -35,7 +35,6 @@ ...@@ -35,7 +35,6 @@
#include <atomic> #include <atomic>
#include <memory> #include <memory>
#include "cppa/config.hpp"
#include "cppa/detail/thread.hpp" #include "cppa/detail/thread.hpp"
namespace cppa { namespace intrusive { namespace cppa { namespace intrusive {
...@@ -61,14 +60,14 @@ class single_reader_queue ...@@ -61,14 +60,14 @@ class single_reader_queue
typedef value_type* pointer; typedef value_type* pointer;
typedef value_type const* const_pointer; typedef value_type const* const_pointer;
typedef std::unique_ptr<value_type> unique_pointer; typedef std::unique_ptr<value_type> unique_value_ptr;
typedef std::list<unique_pointer> cache_type; typedef std::list<unique_value_ptr> cache_type;
typedef typename cache_type::iterator cache_iterator; typedef typename cache_type::iterator cache_iterator;
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
unique_pointer pop() pointer pop()
{ {
wait_for_data(); wait_for_data();
return take_head(); return take_head();
...@@ -77,7 +76,7 @@ class single_reader_queue ...@@ -77,7 +76,7 @@ class single_reader_queue
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
unique_pointer try_pop() pointer try_pop()
{ {
return take_head(); return take_head();
} }
...@@ -86,7 +85,7 @@ class single_reader_queue ...@@ -86,7 +85,7 @@ class single_reader_queue
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
template<typename TimePoint> template<typename TimePoint>
unique_pointer try_pop(TimePoint const& abs_time) pointer try_pop(TimePoint const& abs_time)
{ {
return (timed_wait_for_data(abs_time)) ? take_head() : nullptr; return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
} }
...@@ -241,14 +240,13 @@ class single_reader_queue ...@@ -241,14 +240,13 @@ class single_reader_queue
// next iteration element // next iteration element
pointer next = e->next; pointer next = e->next;
// insert e to private cache (convert to LIFO order) // insert e to private cache (convert to LIFO order)
tmp.push_front(unique_pointer{e}); tmp.push_front(unique_value_ptr{e});
//m_cache.insert(iter, unique_value_ptr{e}); //m_cache.insert(iter, unique_value_ptr{e});
// next iteration // next iteration
e = next; e = next;
} }
CPPA_REQUIRE(tmp.empty() == false);
if (iter) *iter = tmp.begin(); if (iter) *iter = tmp.begin();
m_cache.splice(m_cache.end(), std::move(tmp)); m_cache.splice(m_cache.end(), tmp);
return true; return true;
} }
// next iteration // next iteration
...@@ -257,15 +255,16 @@ class single_reader_queue ...@@ -257,15 +255,16 @@ class single_reader_queue
return false; return false;
} }
unique_pointer take_head() pointer take_head()
{ {
if (!m_cache.empty() || fetch_new_data()) if (!m_cache.empty() || fetch_new_data())
{ {
unique_pointer result = std::move(m_cache.front()); auto result = m_cache.front().release();
m_cache.pop_front(); m_cache.pop_front();
return std::move(result); return result;
//return m_cache.take_after(m_cache.before_begin());
} }
return {}; return nullptr;
} }
}; };
......
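Note: with this change the queue no longer hands out std::unique_ptr; pop(), try_pop(), and take_head() now transfer ownership to the caller as a raw pointer, and every call site re-wraps the result immediately (see mailman_loop, post_office_loop, and time_emitter below). A minimal caller-side sketch, assuming a queue_node type carrying the intrusive next pointer this queue requires:

    intrusive::single_reader_queue<queue_node> mqueue;
    std::unique_ptr<queue_node> job;
    // pop() blocks until an element is available and returns a raw
    // pointer whose ownership passes to the caller; re-wrap it right
    // away so an exception cannot leak the node.
    job.reset(mqueue.pop());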
@@ -39,8 +39,9 @@ namespace cppa {
 abstract_event_based_actor::abstract_event_based_actor()
     : super(abstract_event_based_actor::blocked)
+    , m_mailbox_pos(m_mailbox.cache().end())
 {
-    m_mailbox_pos = m_mailbox.cache().end();
+    //m_mailbox_pos = m_mailbox.cache().end();
 }
 void abstract_event_based_actor::dequeue(behavior&)
@@ -90,7 +91,9 @@ bool abstract_event_based_actor::invoke_from_cache()
 {
     for (auto i = m_mailbox_pos; i != m_mailbox.cache().end(); ++i)
     {
-        if (handle_message(*(*i)))
+        auto& ptr = *i;
+        CPPA_REQUIRE(ptr.get() != nullptr);
+        if (handle_message(*ptr))
         {
             m_mailbox.cache().erase(i);
             return true;
@@ -102,20 +105,7 @@ bool abstract_event_based_actor::invoke_from_cache()
 void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
 {
     self.set(this);
-    auto done_cb = [=]()
-    {
-        m_state.store(abstract_scheduled_actor::done);
-        while (!m_loop_stack.empty()) m_loop_stack.pop_back();
-        on_exit();
-        callback->exec_done();
-    };
     auto& mbox_cache = m_mailbox.cache();
-    auto mbox_end = mbox_cache.end();
-    /*auto rm_fun = [=](queue_node_ptr& ptr) -> bool
-    {
-        CPPA_REQUIRE(ptr.get() != nullptr);
-        return handle_message(*ptr);
-    };*/
     try
     {
         for (;;)
@@ -123,10 +113,13 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
             if (m_loop_stack.empty())
             {
                 cleanup(exit_reason::normal);
-                done_cb();
+                m_state.store(abstract_scheduled_actor::done);
+                m_loop_stack.clear();
+                on_exit();
+                callback->exec_done();
                 return;
             }
-            while (m_mailbox_pos == mbox_end)
+            while (m_mailbox_pos == mbox_cache.end())
             {
                 // try fetch more
                 if (m_mailbox.can_fetch_more() == false)
@@ -166,7 +159,10 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
     {
         cleanup(exit_reason::unhandled_exception);
     }
-    done_cb();
+    m_state.store(abstract_scheduled_actor::done);
+    m_loop_stack.clear();
+    on_exit();
+    callback->exec_done();
 }
 void abstract_event_based_actor::on_exit()
...
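Note: the constructor hunk above moves the m_mailbox_pos assignment into the member initializer list. This is well-defined because base-class subobjects are constructed before any member of the derived class: the super(...) initializer runs first, and m_mailbox lives in the base (abstract_actor, see the mailbox_type typedef above), so m_mailbox.cache().end() refers to a fully constructed list. A reduced single-class sketch of the ordering rule, with hypothetical names:

    class example_actor
    {
        mailbox_type m_mailbox;            // declared first, constructed first
        queue_node_iterator m_mailbox_pos; // safe: m_mailbox already exists here

     public:
        example_actor() : m_mailbox_pos(m_mailbox.cache().end()) { }
    };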
...@@ -144,7 +144,6 @@ void abstract_scheduled_actor::request_timeout(util::duration const& d) ...@@ -144,7 +144,6 @@ void abstract_scheduled_actor::request_timeout(util::duration const& d)
auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result
{ {
CPPA_REQUIRE(msg.cvals().get() != nullptr);
if ( msg.size() == 2 if ( msg.size() == 2
&& msg.type_at(0) == t_atom_ui32_types[0] && msg.type_at(0) == t_atom_ui32_types[0]
&& msg.type_at(1) == t_atom_ui32_types[1]) && msg.type_at(1) == t_atom_ui32_types[1])
...@@ -174,6 +173,7 @@ auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result ...@@ -174,6 +173,7 @@ auto abstract_scheduled_actor::filter_msg(any_tuple const& msg) -> filter_result
auto abstract_scheduled_actor::dq(queue_node& node, auto abstract_scheduled_actor::dq(queue_node& node,
partial_function& fun) -> dq_result partial_function& fun) -> dq_result
{ {
CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
if (node.marked) return dq_indeterminate; if (node.marked) return dq_indeterminate;
switch (filter_msg(node.msg)) switch (filter_msg(node.msg))
{ {
......
...@@ -103,7 +103,7 @@ void mailman_loop() ...@@ -103,7 +103,7 @@ void mailman_loop()
std::map<process_information, native_socket_type> peers; std::map<process_information, native_socket_type> peers;
for (;;) for (;;)
{ {
job = mqueue.pop(); job.reset(mqueue.pop());
if (job->is_send_job()) if (job->is_send_job())
{ {
mailman_send_job& sjob = job->send_job(); mailman_send_job& sjob = job->send_job();
......
...@@ -602,7 +602,7 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle) ...@@ -602,7 +602,7 @@ void post_office_loop(int pipe_read_handle, int pipe_write_handle)
case rd_queue_event: case rd_queue_event:
{ {
DEBUG("rd_queue_event"); DEBUG("rd_queue_event");
std::unique_ptr<post_office_msg> pom = msg_queue.pop(); std::unique_ptr<post_office_msg> pom{msg_queue.pop()};
if (pom->is_add_peer_msg()) if (pom->is_add_peer_msg())
{ {
DEBUG("add_peer_msg"); DEBUG("add_peer_msg");
......
...@@ -141,8 +141,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -141,8 +141,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{ {
if (messages.empty()) if (messages.empty())
{ {
//msg_ptr.reset(queue.pop()); msg_ptr.reset(queue.pop());
msg_ptr = queue.pop();
} }
else else
{ {
...@@ -169,8 +168,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -169,8 +168,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
// wait for next message or next timeout // wait for next message or next timeout
if (it != messages.end()) if (it != messages.end())
{ {
//msg_ptr.reset(queue.try_pop(it->first)); msg_ptr.reset(queue.try_pop(it->first));
msg_ptr = queue.try_pop(it->first);
} }
} }
} }
......
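Note: both call sites above now reset() a unique_ptr from the raw pointer returned by pop()/try_pop(). The timed overload returns nullptr when the deadline passes without data, which lets the emitter tell a timeout apart from a message. A small usage sketch, assuming a std::chrono time point for the TimePoint template parameter:

    auto abs_time = std::chrono::high_resolution_clock::now()
                    + std::chrono::milliseconds(50);
    msg_ptr.reset(queue.try_pop(abs_time)); // nullptr => deadline passed
    if (msg_ptr == nullptr)
    {
        // no message arrived in time: deliver the pending timeout instead
    }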
...@@ -32,7 +32,6 @@ ...@@ -32,7 +32,6 @@
#include <cstddef> #include <cstddef>
#include <iostream> #include <iostream>
#include "cppa/config.hpp"
#include "cppa/abstract_event_based_actor.hpp" #include "cppa/abstract_event_based_actor.hpp"
#include "cppa/detail/invokable.hpp" #include "cppa/detail/invokable.hpp"
...@@ -139,9 +138,9 @@ struct thread_pool_scheduler::worker ...@@ -139,9 +138,9 @@ struct thread_pool_scheduler::worker
{ {
abstract_scheduled_actor* job; abstract_scheduled_actor* job;
handler() : job(nullptr) { } handler() : job(nullptr) { }
bool still_ready() { return true; }
void exec_done() void exec_done()
{ {
CPPA_REQUIRE(job != nullptr);
if (!job->deref()) delete job; if (!job->deref()) delete job;
dec_actor_count(); dec_actor_count();
job = nullptr; job = nullptr;
...@@ -183,7 +182,7 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue, ...@@ -183,7 +182,7 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
abstract_scheduled_actor* dummy) abstract_scheduled_actor* dummy)
{ {
std::vector<worker_ptr> workers; std::vector<worker_ptr> workers;
auto num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 8); size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 8);
for (size_t i = 0; i < num_workers; ++i) for (size_t i = 0; i < num_workers; ++i)
{ {
worker_ptr wptr(new worker(jqueue, dummy)); worker_ptr wptr(new worker(jqueue, dummy));
...@@ -231,15 +230,10 @@ actor_ptr thread_pool_scheduler::spawn_impl(abstract_scheduled_actor* what, ...@@ -231,15 +230,10 @@ actor_ptr thread_pool_scheduler::spawn_impl(abstract_scheduled_actor* what,
actor_ptr thread_pool_scheduler::spawn(abstract_event_based_actor* what) actor_ptr thread_pool_scheduler::spawn(abstract_event_based_actor* what)
{ {
// do NOT push event-based actors to the queue on startup // do NOT push event-based actors to the queue on startup
return spawn_impl(what->attach_to_scheduler(this)); return spawn_impl(what->attach_to_scheduler(this), false);
} }
#ifdef CPPA_DISABLE_CONTEXT_SWITCHING #ifndef CPPA_DISABLE_CONTEXT_SWITCHING
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr, scheduling_hint)
{
return mock_scheduler::spawn(bhvr);
}
#else
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr, actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr,
scheduling_hint hint) scheduling_hint hint)
{ {
...@@ -252,6 +246,11 @@ actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr, ...@@ -252,6 +246,11 @@ actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr,
return spawn_impl(new yielding_actor(bhvr, this)); return spawn_impl(new yielding_actor(bhvr, this));
} }
} }
#else
actor_ptr thread_pool_scheduler::spawn(scheduled_actor* bhvr, scheduling_hint)
{
return mock_scheduler::spawn(bhvr);
}
#endif #endif
} } // namespace cppa::detail } } // namespace cppa::detail
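Note: spawn(abstract_event_based_actor*) now forwards an explicit false to spawn_impl so a freshly attached event-based actor is not pushed onto the job queue before it has anything to process, matching the comment at the call site. The body of spawn_impl is not part of this diff; the following is a hypothetical sketch of the assumed flag semantics, with the parameter name invented for illustration:

    // hypothetical sketch, not the actual spawn_impl body
    actor_ptr thread_pool_scheduler::spawn_impl(abstract_scheduled_actor* what,
                                                bool push_to_queue)
    {
        inc_actor_count();                // balanced by dec_actor_count() in exec_done()
        if (push_to_queue) enqueue(what); // event-based actors skip the initial enqueue
        return actor_ptr(what);
    }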
...@@ -183,9 +183,13 @@ int main(int argc, char** argv) ...@@ -183,9 +183,13 @@ int main(int argc, char** argv)
} }
} }
); );
//print_node_id();
std::cout << std::boolalpha; std::cout << std::boolalpha;
size_t errors = 0; size_t errors = 0;
RUN_TEST(test__spawn);
return 0;
//print_node_id();
RUN_TEST(test__ripemd_160); RUN_TEST(test__ripemd_160);
RUN_TEST(test__primitive_variant); RUN_TEST(test__primitive_variant);
RUN_TEST(test__intrusive_containers); RUN_TEST(test__intrusive_containers);
......