Commit 111c7a7c authored by neverlord

new scheduler

parent 657b58b5
@@ -204,6 +204,7 @@ nobase_library_include_HEADERS = \
cppa/util/is_one_of.hpp \
cppa/util/is_primitive.hpp \
cppa/util/pop_back.hpp \
cppa/util/producer_consumer_list.hpp \
cppa/util/pt_dispatch.hpp \
cppa/util/pt_token.hpp \
cppa/util/remove_const_reference.hpp \
......
@@ -126,11 +126,9 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master>
actor_ptr next;
actor_ptr worker;
behavior init_state;
int remainig_results;
void new_ring(int ring_size, int initial_token_value)
{
send(worker, atom("calc"), s_task_n);
++remainig_results;
next = self;
for (int i = 1; i < ring_size; ++i)
{
@@ -140,12 +138,11 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master>
}
fsm_chain_master(actor_ptr msgcollector) : iteration(0), mc(msgcollector)
{
remainig_results = 0;
worker = spawn(new fsm_worker(msgcollector));
init_state =
(
on<atom("init"), int, int, int>() >> [=](int rs, int itv, int n)
{
worker = spawn(new fsm_worker(msgcollector));
iteration = 0;
new_ring(rs, itv);
become
......
@@ -248,3 +248,4 @@ benchmarks/actor_creation.cpp
benchmarks/mailbox_performance.cpp
benchmarks/mixed_case.cpp
cppa/util/default_deallocator.hpp
cppa/util/producer_consumer_list.hpp
@@ -129,6 +129,9 @@ class abstract_actor : public Base
queue_node* m_ptr;
queue_node_deallocator d;
queue_node_ptr(queue_node_ptr const&) = delete;
queue_node_ptr& operator=(queue_node_ptr const&) = delete;
public:
inline queue_node_ptr(queue_node* ptr = nullptr) : m_ptr(ptr)
@@ -147,7 +150,7 @@ class abstract_actor : public Base
inline queue_node* operator->() { return m_ptr; }
queue_node* release()
inline queue_node* release()
{
auto result = m_ptr;
m_ptr = nullptr;
@@ -160,6 +163,12 @@ class abstract_actor : public Base
m_ptr = ptr;
}
inline queue_node_ptr& operator=(queue_node_ptr&& other)
{
reset(other.release());
return *this;
}
inline operator bool() const { return m_ptr != nullptr; }
};
......
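With the copy constructor and copy assignment deleted and a move assignment added, queue_node_ptr is now a move-only owner in the spirit of std::unique_ptr. An illustrative snippet (hypothetical usage, not from the commit; fetch_node is a made-up source of nodes):

    queue_node_ptr a(fetch_node()); // 'a' owns the node
    queue_node_ptr b;
    b = std::move(a);               // reset(other.release()): b takes over, a is left empty
    // queue_node_ptr c(b);         // ill-formed: copying is deleted
    if (b) { /* b still owns the node */ }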
@@ -33,6 +33,7 @@
#include "cppa/scheduler.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/util/producer_consumer_list.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
namespace cppa { namespace detail {
@@ -58,7 +59,8 @@ class thread_pool_scheduler : public scheduler
private:
typedef util::single_reader_queue<abstract_scheduled_actor> job_queue;
//typedef util::single_reader_queue<abstract_scheduled_actor> job_queue;
typedef util::producer_consumer_list<abstract_scheduled_actor> job_queue;
job_queue m_queue;
scheduled_actor_dummy m_dummy;
......
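This typedef swap is the heart of the commit: util::single_reader_queue permits only a single consumer, which forced the old design's supervisor thread to pop every job and hand it to an idle worker, whereas util::producer_consumer_list (added below) supports concurrent consumers, so each worker now polls the shared queue directly. A condensed sketch of the new consumer side (illustrative, not the commit's code):

    // every worker runs a loop of this shape against the one shared job_queue
    void consume(job_queue* q)
    {
        for (;;)
        {
            abstract_scheduled_actor* job = q->try_pop(); // nullptr when empty
            if (!job)
            {
                detail::this_thread::yield(); // the real workers back off in stages, see below
                continue;
            }
            // ... resume the actor ...
        }
    }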
#ifndef PRODUCER_CONSUMER_LIST_HPP
#define PRODUCER_CONSUMER_LIST_HPP
#define CPPA_CACHE_LINE_SIZE 64
#include <atomic>
#include <cassert>
#include "cppa/detail/thread.hpp"
namespace cppa { namespace util {
// T may be any type; elements are stored and owned as T*
template<typename T>
class producer_consumer_list
{
public:
typedef T* value_ptr;
struct node
{
value_ptr value;
std::atomic<node*> next;
~node() { delete value; }
node(value_ptr val) : value(val), next(nullptr) { }
char pad[CPPA_CACHE_LINE_SIZE - sizeof(value_ptr) - sizeof(std::atomic<node*>)];
};
private:
// for one consumer at a time
node* m_first;
char m_pad1[CPPA_CACHE_LINE_SIZE - sizeof(node*)];
// for one producer at a time
node* m_last;
char m_pad2[CPPA_CACHE_LINE_SIZE - sizeof(node*)];
// shared among consumers
std::atomic<bool> m_consumer_lock;
// shared among producers
std::atomic<bool> m_producer_lock;
void push_impl(node* tmp)
{
// acquire exclusivity
while (m_producer_lock.exchange(true))
{
detail::this_thread::yield();
}
// publish & swing last forward
m_last->next = tmp;
m_last = tmp;
// release exclusivity
m_producer_lock = false;
}
public:
producer_consumer_list()
{
m_first = m_last = new node(nullptr);
m_consumer_lock = false;
m_producer_lock = false;
}
~producer_consumer_list()
{
while (m_first)
{
node* tmp = m_first;
m_first = tmp->next;
delete tmp;
}
}
inline void push_back(value_ptr value)
{
assert(value != nullptr);
push_impl(new node(value));
}
// returns nullptr on failure
value_ptr try_pop()
{
value_ptr result = nullptr;
while (m_consumer_lock.exchange(true))
{
detail::this_thread::yield();
}
// only one consumer allowed
node* first = m_first;
node* next = m_first->next;
if (next)
{
// queue is not empty
result = next->value; // take it out of the node
next->value = nullptr;
// swing first forward
m_first = next;
// release exclusivity
m_consumer_lock = false;
// delete old dummy
//first->value = nullptr;
delete first;
return result;
}
else
{
// release exclusivity
m_consumer_lock = false;
return nullptr;
}
}
};
} } // namespace cppa::util
#endif // PRODUCER_CONSUMER_LIST_HPP
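The new header closely resembles the two-lock queue from Herb Sutter's Dr. Dobb's columns on concurrent queues: producers serialize on m_producer_lock, consumers on m_consumer_lock, a permanently allocated dummy node decouples head and tail so producers and consumers never contend on the same node, and the pad arrays keep each hot field on its own cache line to avoid false sharing. A minimal usage sketch (illustrative only):

    #include "cppa/util/producer_consumer_list.hpp"

    struct job { int id; };

    void example()
    {
        cppa::util::producer_consumer_list<job> q;
        q.push_back(new job{42});  // the list owns the pointer while it is enqueued
        if (job* j = q.try_pop())  // returns nullptr if the list is empty
        {
            // ... process *j ...
            delete j;              // ownership transfers to the consumer on pop
        }
    }                              // elements never popped are deleted with the list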
@@ -60,18 +60,13 @@ typedef util::single_reader_queue<thread_pool_scheduler::worker> worker_queue;
struct thread_pool_scheduler::worker
{
worker* next;
bool m_done;
typedef abstract_scheduled_actor* job_ptr;
job_queue* m_job_queue;
volatile abstract_scheduled_actor* m_job;
worker_queue* m_supervisor_queue;
mutex m_mtx;
condition_variable m_cv;
job_ptr m_dummy;
thread m_thread;
worker(worker_queue* supervisor_queue, job_queue* jq)
: next(nullptr), m_done(false), m_job_queue(jq), m_job(nullptr)
, m_supervisor_queue(supervisor_queue)
worker(job_queue* jq, job_ptr dummy) : m_job_queue(jq), m_dummy(dummy)
{
}
@@ -84,31 +79,70 @@ struct thread_pool_scheduler::worker
worker& operator=(const worker&) = delete;
void operator()()
job_ptr aggressive_polling()
{
//typedef decltype(now()) time_type;
// enqueue as idle worker
m_supervisor_queue->push_back(this);
util::fiber fself;
struct handler : abstract_scheduled_actor::resume_callback
job_ptr result = nullptr;
for (int i = 0; i < 3; ++i)
{
abstract_scheduled_actor* job;
//time_type timeout;
//bool reschedule;
handler() : job(nullptr)//, timeout(now()), reschedule(false)
result = m_job_queue->try_pop();
if (result)
{
return result;
}
bool still_ready()
detail::this_thread::yield();
}
return result;
}
job_ptr less_aggressive_polling()
{
job_ptr result = nullptr;
for (int i = 0; i < 10; ++i)
{
result = m_job_queue->try_pop();
if (result)
{
/*
if (timeout >= now())
{
reschedule = true;
return false;
}
*/
return true;
return result;
}
# ifdef __APPLE__
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(1);
boost::this_thread::sleep(timeout);
# else
std::this_thread::sleep_for(std::chrono::milliseconds(1));
# endif
}
return result;
}
job_ptr relaxed_polling()
{
job_ptr result = nullptr;
for (;;)
{
result = m_job_queue->try_pop();
if (result)
{
return result;
}
# ifdef __APPLE__
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(10);
boost::this_thread::sleep(timeout);
# else
std::this_thread::sleep_for(std::chrono::milliseconds(10));
# endif
}
}
void operator()()
{
util::fiber fself;
struct handler : abstract_scheduled_actor::resume_callback
{
abstract_scheduled_actor* job;
handler() : job(nullptr) { }
bool still_ready() { return true; }
void exec_done()
{
if (!job->deref()) delete job;
@@ -119,31 +153,25 @@ struct thread_pool_scheduler::worker
handler h;
for (;;)
{
// lifetime scope of guard (wait for new job)
h.job = aggressive_polling();
if (!h.job)
{
guard_type guard(m_mtx);
while (m_job == nullptr && !m_done)
h.job = less_aggressive_polling();
if (!h.job)
{
m_cv.wait(guard);
h.job = relaxed_polling();
}
if (m_done) return;
}
h.job = const_cast<abstract_scheduled_actor*>(m_job);
/*
// run actor up to 300ms
h.reschedule = false;
h.timeout = now();
h.timeout += std::chrono::milliseconds(300);
h.job->resume(&fself, &h);
if (h.reschedule && h.job)
if (h.job == m_dummy)
{
m_job_queue->push_back(h.job);
// dummy of doom received ...
m_job_queue->push_back(h.job); // kill the next guy
return; // and say goodbye
}
else
{
h.job->resume(&fself, &h);
}
*/
h.job->resume(&fself, &h);
m_job = nullptr;
CPPA_MEMORY_BARRIER();
m_supervisor_queue->push_back(this);
}
}
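The worker now acquires jobs by polling with a three-stage backoff instead of blocking on the removed condition variable: three yield-only attempts (aggressive_polling), then ten attempts separated by 1 ms sleeps (less_aggressive_polling), then an open-ended 10 ms poll (relaxed_polling). The same policy condensed into one hypothetical helper, using plain std threads rather than the commit's detail::this_thread/boost split:

    #include <chrono>
    #include <thread>

    template<typename Queue>
    typename Queue::value_ptr backoff_pop(Queue& q)
    {
        for (int attempt = 0; ; ++attempt)
        {
            if (auto job = q.try_pop()) return job;
            if (attempt < 3)       // stage 1: spin, just give up the time slice
                std::this_thread::yield();
            else if (attempt < 13) // stage 2: ten short naps
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            else                   // stage 3: settle into a 10 ms poll interval
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }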
@@ -157,85 +185,19 @@ void thread_pool_scheduler::worker_loop(thread_pool_scheduler::worker* w)
void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
abstract_scheduled_actor* dummy)
{
worker_queue wqueue;
std::vector<worker_ptr> workers;
// init with at least two workers
//size_t num_workers = std::max<size_t>(thread::hardware_concurrency(), 2);
// init with 2 threads per core but no less than 4
size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 4);
size_t max_workers = num_workers * 4;
auto new_worker = [&]() -> worker*
for (size_t i = 0; i < num_workers; ++i)
{
worker_ptr wptr(new worker(&wqueue, jqueue));
worker_ptr wptr(new worker(jqueue, dummy));
wptr->start();
workers.push_back(std::move(wptr));
return workers.back().get();
};
for (size_t i = 0; i < num_workers; ++i)
{
new_worker();
}
bool done = false;
// loop
do
{
// fetch next job
abstract_scheduled_actor* job = jqueue->pop();
if (job == dummy)
{
done = true;
}
else
{
/*
// fetch next idle worker
worker* w = nullptr;
if (num_workers < max_workers)
{
w = wqueue.try_pop();
if (!w)
{
// fetch next idle worker (wait up to 500ms)
timeout = now();
timeout += std::chrono::milliseconds(500);
w = wqueue.try_pop(timeout);
// all workers are blocked since 500ms, start a new one
if (!w)
{
w = new_worker();
++num_workers;
}
}
}
else
{
w = wqueue.pop();
}
*/
worker* w = wqueue.pop();
// lifetime scope of guard
{
guard_type guard(w->m_mtx);
w->m_job = job;
w->m_cv.notify_one();
}
}
}
while (!done);
// quit
for (auto& w : workers)
{
guard_type guard(w->m_mtx);
w->m_done = true;
w->m_cv.notify_one();
}
// wait for workers
for (auto& w : workers)
{
w->m_thread.join();
}
// "clear" worker_queue
while (wqueue.try_pop() != nullptr) { }
}
void thread_pool_scheduler::start()
......
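Shutdown also changed shape: the old supervisor set each worker's m_done flag and signaled its condition variable, while the new design enqueues a single dummy job, and every worker that pops it pushes it right back for the next worker before returning, so one sentinel cascades through and terminates the whole pool (the "kill the next guy" comment above). The pattern in isolation (hypothetical names, not the commit's code):

    // poison-pill shutdown: one sentinel value terminates N workers in turn
    void worker_loop(job_queue* q, abstract_scheduled_actor* sentinel)
    {
        for (;;)
        {
            abstract_scheduled_actor* job = backoff_pop(*q); // helper sketched earlier
            if (job == sentinel)
            {
                q->push_back(job); // pass the pill on to the next worker
                return;            // then say goodbye
            }
            // ... resume job ...
        }
    }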