Commit b95c305b authored by Dominik Charousset

Use a single queue in the worker to allow stealing

This change allows stealing at all times. It requires the worker to synchronize
its access to the queue. However, this overhead has been measured to be 4-5%
for the actor_creation benchmark which stresses the scheduler heavily by
spawning a million actors computing virtually nothing. In a "real-world"
application, the overhead is probably not even measurable.
parent d8c01bbb
...@@ -17,8 +17,8 @@ ...@@ -17,8 +17,8 @@
* http://www.boost.org/LICENSE_1_0.txt. * * http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/ ******************************************************************************/
#ifndef CAF_PRODUCER_CONSUMER_LIST_HPP #ifndef CAF_DETAIL_DOUBLE_ENDED_QUEUE_HPP
#define CAF_PRODUCER_CONSUMER_LIST_HPP #define CAF_DETAIL_DOUBLE_ENDED_QUEUE_HPP
#include "caf/config.hpp" #include "caf/config.hpp"
...@@ -69,15 +69,15 @@ inline void sleep_for(const chrono::duration<Rep, Period>& rt) { ...@@ -69,15 +69,15 @@ inline void sleep_for(const chrono::duration<Rep, Period>& rt) {
namespace caf { namespace caf {
namespace detail { namespace detail {
/** /*
* A producer-consumer list. * A thread-safe double-ended queue based on http://drdobbs.com/cpp/211601363.
* For implementation details see http://drdobbs.com/cpp/211601363. * This implementation is optimized for FIFO, i.e., it supports fast insertion
* at the end and fast removal from the beginning. As long as the queue is
* only used for FIFO operations, readers do not block writers and vice versa.
*/ */
template <class T> template <class T>
class producer_consumer_list { class double_ended_queue {
public: public:
using value_type = T; using value_type = T;
using size_type = size_t; using size_type = size_t;
using difference_type = ptrdiff_t; using difference_type = ptrdiff_t;
...@@ -87,115 +87,157 @@ class producer_consumer_list { ...@@ -87,115 +87,157 @@ class producer_consumer_list {
using const_pointer = const value_type*; using const_pointer = const value_type*;
class node { class node {
public: public:
pointer value; pointer value;
std::atomic<node*> next; std::atomic<node*> next;
node(pointer val) : value(val), next(nullptr) {
node(pointer val) : value(val), next(nullptr) {} // nop
}
private: private:
static constexpr size_type payload_size = static constexpr size_type payload_size =
sizeof(pointer) + sizeof(std::atomic<node*>); sizeof(pointer) + sizeof(std::atomic<node*>);
static constexpr size_type cline_size = CAF_CACHE_LINE_SIZE; static constexpr size_type cline_size = CAF_CACHE_LINE_SIZE;
static constexpr size_type pad_size = static constexpr size_type pad_size =
(cline_size * ((payload_size / cline_size) + 1)) - payload_size; (cline_size * ((payload_size / cline_size) + 1)) - payload_size;
// avoid false sharing // avoid false sharing
static_assert(pad_size > 0, "invalid padding size calculated");
char pad[pad_size]; char pad[pad_size];
}; };
private:
static_assert(sizeof(node*) < CAF_CACHE_LINE_SIZE, static_assert(sizeof(node*) < CAF_CACHE_LINE_SIZE,
"sizeof(node*) >= CAF_CACHE_LINE_SIZE"); "sizeof(node*) >= CAF_CACHE_LINE_SIZE");
// for one consumer at a time double_ended_queue() {
std::atomic<node*> m_first;
char m_pad1[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// for one producer at a time
std::atomic<node*> m_last;
char m_pad2[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// shared among producers
std::atomic<bool> m_consumer_lock;
std::atomic<bool> m_producer_lock;
public:
producer_consumer_list() {
auto ptr = new node(nullptr); auto ptr = new node(nullptr);
m_first = ptr; m_head = ptr;
m_last = ptr; m_tail = ptr;
m_consumer_lock = false; m_head_lock = false;
m_producer_lock = false; m_tail_lock = false;
} }
~producer_consumer_list() { ~double_ended_queue() {
while (m_first) { while (m_head) {
node* tmp = m_first; node* tmp = m_head;
m_first = tmp->next.load(); m_head = tmp->next.load();
delete tmp; delete tmp;
} }
} }
inline void push_back(pointer value) { // acquires only one lock
assert(value != nullptr); void append(pointer value) {
CAF_REQUIRE(value != nullptr);
node* tmp = new node(value); node* tmp = new node(value);
// acquire exclusivity lock_guard guard(m_tail_lock);
while (m_producer_lock.exchange(true)) {
std::this_thread::yield();
}
// publish & swing last forward // publish & swing last forward
m_last.load()->next = tmp; m_tail.load()->next = tmp;
m_last = tmp; m_tail = tmp;
// release exclusivity
m_producer_lock = false;
} }
// returns nullptr on failure // acquires both locks
pointer try_pop() { void prepend(pointer value) {
pointer result = nullptr; CAF_REQUIRE(value != nullptr);
while (m_consumer_lock.exchange(true)) { node* tmp = new node(value);
std::this_thread::yield(); // acquire both locks since we might touch m_last too
lock_guard guard1(m_head_lock);
lock_guard guard2(m_tail_lock);
auto first = m_head.load();
auto next = first->next.load();
// m_first always points to a dummy with no value,
// hence we put the new element second
tmp->next = next;
first->next = tmp;
// in case the queue is empty, we need to swing last forward
if (m_tail == first) {
m_tail = tmp;
} }
// only one consumer allowed }
node* first = m_first;
node* next = m_first.load()->next; // acquires only one lock, returns nullptr on failure
if (next) { pointer take_head() {
node* first = nullptr;
pointer result = nullptr;
{ // lifetime scope of guard
lock_guard guard(m_head_lock);
first = m_head;
node* next = m_head.load()->next;
if (next == nullptr) {
return nullptr;
}
// queue is not empty // queue is not empty
result = next->value; // take it out of the node result = next->value; // take it out of the node
next->value = nullptr; next->value = nullptr;
// swing first forward // swing first forward
m_first = next; m_head = next;
// release exclusivity
m_consumer_lock = false;
// delete old dummy
// first->value = nullptr;
delete first;
return result;
} else {
// release exclusivity // release exclusivity
m_consumer_lock = false; m_head_lock = false;
return nullptr;
} }
delete first;
return result;
} }
// acquires both locks, returns nullptr on failure
pointer take_tail() {
pointer result = nullptr;
node* last = nullptr;
{ // lifetime scope of guards
lock_guard guard1(m_head_lock);
lock_guard guard2(m_tail_lock);
last = m_tail;
if (m_head == last) {
return nullptr;
}
result = last->value;
m_tail = find_predecessor(last);
CAF_REQUIRE(m_tail != nullptr);
m_tail.load()->next = nullptr;
}
delete last;
return result;
}
// does not lock
bool empty() const { bool empty() const {
// atomically compares first and last pointer without locks // atomically compares first and last pointer without locks
return m_first == m_last; return m_head == m_tail;
}
private:
// precondition: *both* locks acquired
node* find_predecessor(node* what) {
for (auto i = m_head.load(); i != nullptr; i = i->next) {
if (i->next == what) {
return i;
}
}
return nullptr;
} }
// guarded by m_head_lock
std::atomic<node*> m_head;
char m_pad1[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// guarded by m_tail_lock
std::atomic<node*> m_tail;
char m_pad2[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// enforce exclusive access
std::atomic<bool> m_head_lock;
std::atomic<bool> m_tail_lock;
class lock_guard {
public:
lock_guard(std::atomic<bool>& lock) : m_lock(lock) {
while (m_lock.exchange(true)) {
std::this_thread::yield();
}
}
~lock_guard() {
m_lock = false;
}
private:
std::atomic<bool>& m_lock;
};
}; };
} // namespace detail } // namespace detail
} // namespace caf } // namespace caf
#endif // CAF_PRODUCER_CONSUMER_LIST_HPP #endif // CAF_DETAIL_DOUBLE_ENDED_QUEUE_HPP
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include "caf/resumable.hpp" #include "caf/resumable.hpp"
#include "caf/detail/producer_consumer_list.hpp" #include "caf/detail/double_ended_queue.hpp"
namespace caf { namespace caf {
namespace policy { namespace policy {
...@@ -54,43 +54,38 @@ namespace policy { ...@@ -54,43 +54,38 @@ namespace policy {
class work_stealing { class work_stealing {
public: public:
// A thread-safe queue implementation. // A thread-safe queue implementation.
using sync_queue = detail::producer_consumer_list<resumable>; using queue_type = detail::double_ended_queue<resumable>;
// A queue implementation supporting fast push and pop // The coordinator has only a counter for round-robin enqueue to its workers.
// operations on both ends of the queue.
using priv_queue = std::deque<resumable*>;
// The coordinator has no data since our scheduling is decentralized.
struct coordinator_data { struct coordinator_data {
size_t next_worker; std::atomic<size_t> next_worker;
inline coordinator_data() : next_worker(0) { inline coordinator_data() : next_worker(0) {
// nop // nop
} }
}; };
// Holds the job queues of a worker. // Holds the job queue of a worker and a random number generator.
struct worker_data { struct worker_data {
// This queue is exposed to other workers that may attempt to steal jobs // This queue is exposed to other workers that may attempt to steal jobs
// from it and the central scheduling unit can push new jobs to the queue. // from it and the central scheduling unit can push new jobs to the queue.
sync_queue exposed_queue; queue_type queue;
// Internal job queue of a worker (not exposed to others).
priv_queue private_queue;
// needed by our engine // needed by our engine
std::random_device rdevice; std::random_device rdevice;
// needed to generate pseudo random numbers // needed to generate pseudo random numbers
std::default_random_engine rengine; std::default_random_engine rengine;
// initialize random engine
inline worker_data() : rdevice(), rengine(rdevice()) { inline worker_data() : rdevice(), rengine(rdevice()) {
// nop // nop
} }
}; };
// convenience function to access the data field // Convenience function to access the data field.
template <class WorkerOrCoordinator> template <class WorkerOrCoordinator>
auto d(WorkerOrCoordinator* self) -> decltype(self->data()) { auto d(WorkerOrCoordinator* self) -> decltype(self->data()) {
return self->data(); return self->data();
} }
// go on a raid in quest for a shiny new job // Goes on a raid in quest for a shiny new job.
template <class Worker> template <class Worker>
resumable* try_steal(Worker* self) { resumable* try_steal(Worker* self) {
auto p = self->parent(); auto p = self->parent();
...@@ -104,7 +99,8 @@ class work_stealing { ...@@ -104,7 +99,8 @@ class work_stealing {
victim = d(self).rengine() % p->num_workers(); victim = d(self).rengine() % p->num_workers();
} }
while (victim == self->id()); while (victim == self->id());
return d(p->worker_by_id(victim)).exposed_queue.try_pop(); // steal oldest element from the victim's queue
return d(p->worker_by_id(victim)).queue.take_tail();
} }
template <class Coordinator> template <class Coordinator>
...@@ -115,12 +111,12 @@ class work_stealing { ...@@ -115,12 +111,12 @@ class work_stealing {
template <class Worker> template <class Worker>
void external_enqueue(Worker* self, resumable* job) { void external_enqueue(Worker* self, resumable* job) {
d(self).exposed_queue.push_back(job); d(self).queue.append(job);
} }
template <class Worker> template <class Worker>
void internal_enqueue(Worker* self, resumable* job) { void internal_enqueue(Worker* self, resumable* job) {
d(self).private_queue.push_back(job); d(self).queue.prepend(job);
// give others the opportunity to steal from us // give others the opportunity to steal from us
after_resume(self); after_resume(self);
} }
...@@ -129,15 +125,7 @@ class work_stealing { ...@@ -129,15 +125,7 @@ class work_stealing {
void resume_job_later(Worker* self, resumable* job) { void resume_job_later(Worker* self, resumable* job) {
// job has voluntarily released the CPU to let others run instead // job has voluntarily released the CPU to let others run instead
// this means we are going to put this job to the very end of our queue // this means we are going to put this job to the very end of our queue
// by moving everything from the exposed to private queue first and d(self).queue.append(job);
// then enqueue job to the exposed queue
auto next = [&] {
return d(self).exposed_queue.try_pop();
};
for (auto ptr = next(); ptr != nullptr; ptr = next()) {
d(self).private_queue.push_front(ptr);
}
d(self).exposed_queue.push_back(job);
} }
template <class Worker> template <class Worker>
...@@ -164,15 +152,9 @@ class work_stealing { ...@@ -164,15 +152,9 @@ class work_stealing {
{101, 0, 1, std::chrono::microseconds{10000}} {101, 0, 1, std::chrono::microseconds{10000}}
}; };
resumable* job = nullptr; resumable* job = nullptr;
// local poll
if (!d(self).private_queue.empty()) {
job = d(self).private_queue.back();
d(self).private_queue.pop_back();
return job;
}
for (auto& strat : strategies) { for (auto& strat : strategies) {
for (size_t i = 0; i < strat.attempts; i += strat.step_size) { for (size_t i = 0; i < strat.attempts; i += strat.step_size) {
job = d(self).exposed_queue.try_pop(); job = d(self).queue.take_head();
if (job) { if (job) {
return job; return job;
} }
...@@ -192,30 +174,18 @@ class work_stealing { ...@@ -192,30 +174,18 @@ class work_stealing {
} }
template <class Worker> template <class Worker>
void before_shutdown(Worker* self) { void before_shutdown(Worker*) {
// give others the opportunity to steal unfinished jobs // nop
for (auto ptr : d(self).private_queue) {
d(self).exposed_queue.push_back(ptr);
}
d(self).private_queue.clear();
} }
template <class Worker> template <class Worker>
void after_resume(Worker* self) { void after_resume(Worker*) {
// give others the opportunity to steal from us // nop
if (d(self).private_queue.size() > 1 && d(self).exposed_queue.empty()) {
d(self).exposed_queue.push_back(d(self).private_queue.front());
d(self).private_queue.pop_front();
}
} }
template <class Worker, class UnaryFunction> template <class Worker, class UnaryFunction>
void foreach_resumable(Worker* self, UnaryFunction f) { void foreach_resumable(Worker* self, UnaryFunction f) {
for (auto job : d(self).private_queue) { auto next = [&] { return this->d(self).queue.take_head(); };
f(job);
}
d(self).private_queue.clear();
auto next = [&] { return this->d(self).exposed_queue.try_pop(); };
for (auto job = next(); job != nullptr; job = next()) { for (auto job = next(); job != nullptr; job = next()) {
f(job); f(job);
} }
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "caf/execution_unit.hpp" #include "caf/execution_unit.hpp"
#include "caf/detail/logging.hpp" #include "caf/detail/logging.hpp"
#include "caf/detail/producer_consumer_list.hpp" #include "caf/detail/double_ended_queue.hpp"
namespace caf { namespace caf {
namespace scheduler { namespace scheduler {
...@@ -65,7 +65,7 @@ class worker : public execution_unit { ...@@ -65,7 +65,7 @@ class worker : public execution_unit {
using job_ptr = resumable*; using job_ptr = resumable*;
using job_queue = detail::producer_consumer_list<resumable>; using job_queue = detail::double_ended_queue<resumable>;
using policy_data = typename Policy::worker_data; using policy_data = typename Policy::worker_data;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment