Commit 111c7a7c authored by neverlord's avatar neverlord

new scheduler

parent 657b58b5
...@@ -204,6 +204,7 @@ nobase_library_include_HEADERS = \ ...@@ -204,6 +204,7 @@ nobase_library_include_HEADERS = \
cppa/util/is_one_of.hpp \ cppa/util/is_one_of.hpp \
cppa/util/is_primitive.hpp \ cppa/util/is_primitive.hpp \
cppa/util/pop_back.hpp \ cppa/util/pop_back.hpp \
cppa/util/producer_consumer_list.hpp \
cppa/util/pt_dispatch.hpp \ cppa/util/pt_dispatch.hpp \
cppa/util/pt_token.hpp \ cppa/util/pt_token.hpp \
cppa/util/remove_const_reference.hpp \ cppa/util/remove_const_reference.hpp \
......
...@@ -126,11 +126,9 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master> ...@@ -126,11 +126,9 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master>
actor_ptr next; actor_ptr next;
actor_ptr worker; actor_ptr worker;
behavior init_state; behavior init_state;
int remainig_results;
void new_ring(int ring_size, int initial_token_value) void new_ring(int ring_size, int initial_token_value)
{ {
send(worker, atom("calc"), s_task_n); send(worker, atom("calc"), s_task_n);
++remainig_results;
next = self; next = self;
for (int i = 1; i < ring_size; ++i) for (int i = 1; i < ring_size; ++i)
{ {
...@@ -140,12 +138,11 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master> ...@@ -140,12 +138,11 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master>
} }
fsm_chain_master(actor_ptr msgcollector) : iteration(0), mc(msgcollector) fsm_chain_master(actor_ptr msgcollector) : iteration(0), mc(msgcollector)
{ {
remainig_results = 0;
worker = spawn(new fsm_worker(msgcollector));
init_state = init_state =
( (
on<atom("init"), int, int, int>() >> [=](int rs, int itv, int n) on<atom("init"), int, int, int>() >> [=](int rs, int itv, int n)
{ {
worker = spawn(new fsm_worker(msgcollector));
iteration = 0; iteration = 0;
new_ring(rs, itv); new_ring(rs, itv);
become become
......
...@@ -248,3 +248,4 @@ benchmarks/actor_creation.cpp ...@@ -248,3 +248,4 @@ benchmarks/actor_creation.cpp
benchmarks/mailbox_performance.cpp benchmarks/mailbox_performance.cpp
benchmarks/mixed_case.cpp benchmarks/mixed_case.cpp
cppa/util/default_deallocator.hpp cppa/util/default_deallocator.hpp
cppa/util/producer_consumer_list.hpp
...@@ -129,6 +129,9 @@ class abstract_actor : public Base ...@@ -129,6 +129,9 @@ class abstract_actor : public Base
queue_node* m_ptr; queue_node* m_ptr;
queue_node_deallocator d; queue_node_deallocator d;
queue_node_ptr(queue_node_ptr const&) = delete;
queue_node_ptr& operator=(queue_node_ptr const&) = delete;
public: public:
inline queue_node_ptr(queue_node* ptr = nullptr) : m_ptr(ptr) inline queue_node_ptr(queue_node* ptr = nullptr) : m_ptr(ptr)
...@@ -147,7 +150,7 @@ class abstract_actor : public Base ...@@ -147,7 +150,7 @@ class abstract_actor : public Base
inline queue_node* operator->() { return m_ptr; } inline queue_node* operator->() { return m_ptr; }
queue_node* release() inline queue_node* release()
{ {
auto result = m_ptr; auto result = m_ptr;
m_ptr = nullptr; m_ptr = nullptr;
...@@ -160,6 +163,12 @@ class abstract_actor : public Base ...@@ -160,6 +163,12 @@ class abstract_actor : public Base
m_ptr = ptr; m_ptr = ptr;
} }
inline queue_node_ptr& operator=(queue_node_ptr&& other)
{
reset(other.release());
return *this;
}
inline operator bool() const { return m_ptr != nullptr; } inline operator bool() const { return m_ptr != nullptr; }
}; };
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include "cppa/scheduler.hpp" #include "cppa/scheduler.hpp"
#include "cppa/detail/thread.hpp" #include "cppa/detail/thread.hpp"
#include "cppa/util/producer_consumer_list.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp" #include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa { namespace detail { namespace cppa { namespace detail {
...@@ -58,7 +59,8 @@ class thread_pool_scheduler : public scheduler ...@@ -58,7 +59,8 @@ class thread_pool_scheduler : public scheduler
private: private:
typedef util::single_reader_queue<abstract_scheduled_actor> job_queue; //typedef util::single_reader_queue<abstract_scheduled_actor> job_queue;
typedef util::producer_consumer_list<abstract_scheduled_actor> job_queue;
job_queue m_queue; job_queue m_queue;
scheduled_actor_dummy m_dummy; scheduled_actor_dummy m_dummy;
......
#ifndef PRODUCER_CONSUMER_LIST_HPP
#define PRODUCER_CONSUMER_LIST_HPP
#define CPPA_CACHE_LINE_SIZE 64
#include <atomic>
#include <cassert>
#include "cppa/detail/thread.hpp"
namespace cppa { namespace util {
// T is any type.
//
// A concurrent FIFO queue of owning T* pointers, based on the classic
// two-lock queue with a dummy node: one spinlock serializes producers,
// a second spinlock serializes consumers, so a single push and a single
// pop can proceed in parallel. The queue takes ownership of every
// pointer passed to push_back (node's destructor deletes it).
template<typename T>
class producer_consumer_list
{

 public:

    typedef T* value_ptr;

    // One queue element, padded to a full cache line to avoid false
    // sharing between the producer touching m_last->next and the
    // consumer touching m_first->next.
    // NOTE(review): pad assumes sizeof(value_ptr) + sizeof(std::atomic<node*>)
    // < CPPA_CACHE_LINE_SIZE; a static_assert would make that explicit.
    struct node
    {
        value_ptr value;
        std::atomic<node*> next;
        // owns its value; deleting a node with a non-null value
        // (e.g. in ~producer_consumer_list) frees the element too
        ~node() { delete value; }
        node(value_ptr val) : value(val), next(nullptr) { }
        char pad[CPPA_CACHE_LINE_SIZE - sizeof(value_ptr)- sizeof(std::atomic<node*>)];
    };

 private:

    // accessed by one consumer at a time; always points to the current
    // dummy node (its value was already handed out or is nullptr)
    node* m_first;
    char m_pad1[CPPA_CACHE_LINE_SIZE - sizeof(node*)];

    // accessed by one producer at a time
    node* m_last;
    char m_pad2[CPPA_CACHE_LINE_SIZE - sizeof(node*)];

    // shared spinlocks: m_consumer_lock serializes consumers,
    // m_producer_lock serializes producers
    std::atomic<bool> m_consumer_lock;
    std::atomic<bool> m_producer_lock;

    // Appends an already-allocated node. Spin-waits for producer
    // exclusivity, then publishes the node via m_last->next (an atomic
    // store, so a concurrent consumer sees a fully constructed node)
    // before swinging m_last forward.
    void push_impl(node* tmp)
    {
        // acquire exclusivity
        while (m_producer_lock.exchange(true))
        {
            detail::this_thread::yield();
        }
        // publish & swing last forward
        m_last->next = tmp;
        m_last = tmp;
        // release exclusivity
        m_producer_lock = false;
    }

 public:

    // starts with a single dummy node so first/last are never null
    producer_consumer_list()
    {
        m_first = m_last = new node(nullptr);
        m_consumer_lock = false;
        m_producer_lock = false;
    }

    // NOT thread safe: must not run concurrently with push/pop;
    // deletes all remaining nodes (and, via ~node, their values)
    ~producer_consumer_list()
    {
        while (m_first)
        {
            node* tmp = m_first;
            m_first = tmp->next;
            delete tmp;
        }
    }

    // Enqueues value; the queue takes ownership. value must not be
    // null (nullptr is reserved for the internal dummy node).
    inline void push_back(value_ptr value)
    {
        assert(value != nullptr);
        push_impl(new node(value));
    }

    // Dequeues one element, transferring ownership to the caller;
    // returns nullptr on failure (i.e. when the queue is empty).
    value_ptr try_pop()
    {
        value_ptr result = nullptr;
        // spin for consumer exclusivity
        while (m_consumer_lock.exchange(true))
        {
            detail::this_thread::yield();
        }
        // only one consumer allowed
        node* first = m_first;
        node* next = m_first->next;
        if (next)
        {
            // queue is not empty
            result = next->value; // take it out of the node
            // next becomes the new dummy; clear its value so ~node
            // does not delete the element we just handed out
            next->value = nullptr;
            // swing first forward
            m_first = next;
            // release exclusivity
            m_consumer_lock = false;
            // delete old dummy; safe after releasing the lock because
            // it is already unlinked and only this thread still sees it
            //first->value = nullptr;
            delete first;
            return result;
        }
        else
        {
            // release exclusivity
            m_consumer_lock = false;
            return nullptr;
        }
    }

};
} } // namespace cppa::util
#endif // PRODUCER_CONSUMER_LIST_HPP
...@@ -60,18 +60,13 @@ typedef util::single_reader_queue<thread_pool_scheduler::worker> worker_queue; ...@@ -60,18 +60,13 @@ typedef util::single_reader_queue<thread_pool_scheduler::worker> worker_queue;
struct thread_pool_scheduler::worker struct thread_pool_scheduler::worker
{ {
worker* next; typedef abstract_scheduled_actor* job_ptr;
bool m_done;
job_queue* m_job_queue; job_queue* m_job_queue;
volatile abstract_scheduled_actor* m_job; job_ptr m_dummy;
worker_queue* m_supervisor_queue;
mutex m_mtx;
condition_variable m_cv;
thread m_thread; thread m_thread;
worker(worker_queue* supervisor_queue, job_queue* jq) worker(job_queue* jq, job_ptr dummy) : m_job_queue(jq), m_dummy(dummy)
: next(nullptr), m_done(false), m_job_queue(jq), m_job(nullptr)
, m_supervisor_queue(supervisor_queue)
{ {
} }
...@@ -84,31 +79,70 @@ struct thread_pool_scheduler::worker ...@@ -84,31 +79,70 @@ struct thread_pool_scheduler::worker
worker& operator=(const worker&) = delete; worker& operator=(const worker&) = delete;
void operator()() job_ptr aggressive_polling()
{ {
//typedef decltype(now()) time_type; job_ptr result = nullptr;
// enqueue as idle worker for (int i = 0; i < 3; ++i)
m_supervisor_queue->push_back(this);
util::fiber fself;
struct handler : abstract_scheduled_actor::resume_callback
{ {
abstract_scheduled_actor* job; result = m_job_queue->try_pop();
//time_type timeout; if (result)
//bool reschedule; {
handler() : job(nullptr)//, timeout(now()), reschedule(false) return result;
}
detail::this_thread::yield();
}
return result;
}
job_ptr less_aggressive_polling()
{
job_ptr result = nullptr;
for (int i = 0; i < 10; ++i)
{ {
result = m_job_queue->try_pop();
if (result)
{
return result;
}
# ifdef __APPLE__
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(1);
boost::this_thread::sleep(timeout);
# else
std::sleep_for(std::chrono::milliseconds(1));
# endif
}
return result;
} }
bool still_ready()
job_ptr relaxed_polling()
{
job_ptr result = nullptr;
for (;;)
{ {
/* result = m_job_queue->try_pop();
if (timeout >= now()) if (result)
{ {
reschedule = true; return result;
return false; }
# ifdef __APPLE__
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(10);
boost::this_thread::sleep(timeout);
# else
std::sleep_for(std::chrono::milliseconds(10));
# endif
} }
*/
return true;
} }
void operator()()
{
util::fiber fself;
struct handler : abstract_scheduled_actor::resume_callback
{
abstract_scheduled_actor* job;
handler() : job(nullptr) { }
bool still_ready() { return true; }
void exec_done() void exec_done()
{ {
if (!job->deref()) delete job; if (!job->deref()) delete job;
...@@ -119,31 +153,25 @@ struct thread_pool_scheduler::worker ...@@ -119,31 +153,25 @@ struct thread_pool_scheduler::worker
handler h; handler h;
for (;;) for (;;)
{ {
// lifetime scope of guard (wait for new job) h.job = aggressive_polling();
if (!h.job)
{ {
guard_type guard(m_mtx); h.job = less_aggressive_polling();
while (m_job == nullptr && !m_done) if (!h.job)
{ {
m_cv.wait(guard); h.job = relaxed_polling();
} }
if (m_done) return;
} }
h.job = const_cast<abstract_scheduled_actor*>(m_job); if (h.job == m_dummy)
/*
// run actor up to 300ms
h.reschedule = false;
h.timeout = now();
h.timeout += std::chrono::milliseconds(300);
h.job->resume(&fself, &h);
if (h.reschedule && h.job)
{ {
m_job_queue->push_back(h.job); // dummy of doom received ...
m_job_queue->push_back(h.job); // kill the next guy
return; // and say goodbye
} }
*/ else
{
h.job->resume(&fself, &h); h.job->resume(&fself, &h);
m_job = nullptr; }
CPPA_MEMORY_BARRIER();
m_supervisor_queue->push_back(this);
} }
} }
...@@ -157,85 +185,19 @@ void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w) ...@@ -157,85 +185,19 @@ void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w)
void thread_pool_scheduler::supervisor_loop(job_queue* jqueue, void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
abstract_scheduled_actor* dummy) abstract_scheduled_actor* dummy)
{ {
worker_queue wqueue;
std::vector<worker_ptr> workers; std::vector<worker_ptr> workers;
// init with at least two workers
//size_t num_workers = std::max<size_t>(thread::hardware_concurrency(), 2);
// init with 2 threads per core but no less than 4
size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 4); size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 4);
size_t max_workers = num_workers * 4; for (size_t i = 0; i < num_workers; ++i)
auto new_worker = [&]() -> worker*
{ {
worker_ptr wptr(new worker(&wqueue, jqueue)); worker_ptr wptr(new worker(jqueue, dummy));
wptr->start(); wptr->start();
workers.push_back(std::move(wptr)); workers.push_back(std::move(wptr));
return workers.back().get();
};
for (size_t i = 0; i < num_workers; ++i)
{
new_worker();
}
bool done = false;
// loop
do
{
// fetch next job
abstract_scheduled_actor* job = jqueue->pop();
if (job == dummy)
{
done = true;
}
else
{
/*
// fetch next idle worker
worker* w = nullptr;
if (num_workers < max_workers)
{
w = wqueue.try_pop();
if (!w)
{
// fetch next idle worker (wait up to 500ms)
timeout = now();
timeout += std::chrono::milliseconds(500);
w = wqueue.try_pop(timeout);
// all workers are blocked since 500ms, start a new one
if (!w)
{
w = new_worker();
++num_workers;
}
}
}
else
{
w = wqueue.pop();
}
*/
worker* w = wqueue.pop();
// lifetime scope of guard
{
guard_type guard(w->m_mtx);
w->m_job = job;
w->m_cv.notify_one();
}
}
}
while (!done);
// quit
for (auto& w : workers)
{
guard_type guard(w->m_mtx);
w->m_done = true;
w->m_cv.notify_one();
} }
// wait for workers // wait for workers
for (auto& w : workers) for (auto& w : workers)
{ {
w->m_thread.join(); w->m_thread.join();
} }
// "clear" worker_queue
while (wqueue.try_pop() != nullptr) { }
} }
void thread_pool_scheduler::start() void thread_pool_scheduler::start()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment