Commit b95c305b authored by Dominik Charousset

Use a single queue in the worker to allow stealing

This change allows stealing at all times. It requires the worker to synchronize
its access to the queue. However, this overhead has been measured at 4-5% in the
actor_creation benchmark, which stresses the scheduler heavily by spawning a
million actors that compute virtually nothing. In a "real-world" application,
the overhead is probably not even measurable.
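
For illustration, the access pattern the single queue has to support looks
roughly like the sketch below. It is a minimal stand-in, not the actual
implementation: a single mutex-guarded std::deque instead of
detail::double_ended_queue, int jobs instead of resumable*; only the operation
names (append, prepend, take_head, take_tail) mirror the new interface.

```cpp
#include <deque>
#include <mutex>

// Minimal stand-in for the per-worker queue: a mutex-guarded
// std::deque exposing the four operations the work-stealing
// policy uses after this change.
template <class T>
class worker_queue {
public:
  // externally enqueued and re-scheduled jobs go to the back
  void append(T x) {
    std::lock_guard<std::mutex> guard{mtx_};
    items_.push_back(x);
  }
  // jobs spawned by the worker itself go to the front
  void prepend(T x) {
    std::lock_guard<std::mutex> guard{mtx_};
    items_.push_front(x);
  }
  // the owning worker dequeues from the head
  bool take_head(T& out) {
    std::lock_guard<std::mutex> guard{mtx_};
    if (items_.empty())
      return false;
    out = items_.front();
    items_.pop_front();
    return true;
  }
  // other workers steal from the tail
  bool take_tail(T& out) {
    std::lock_guard<std::mutex> guard{mtx_};
    if (items_.empty())
      return false;
    out = items_.back();
    items_.pop_back();
    return true;
  }
private:
  std::mutex mtx_;
  std::deque<T> items_;
};

int main() {
  worker_queue<int> q;
  q.prepend(1); // internal_enqueue: the worker's own spawns
  q.append(2);  // external_enqueue / resume_job_later
  int job = 0;
  q.take_head(job); // dequeue path of the owning worker -> 1
  q.take_tail(job); // try_steal path of another worker  -> 2
  return job == 2 ? 0 : 1;
}
```

Unlike this sketch, the actual detail::double_ended_queue below guards head and
tail with separate locks, so plain FIFO use (append plus take_head) does not
make readers and writers contend.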
parent d8c01bbb
@@ -17,8 +17,8 @@
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_PRODUCER_CONSUMER_LIST_HPP
#define CAF_PRODUCER_CONSUMER_LIST_HPP
#ifndef CAF_DETAIL_DOUBLE_ENDED_QUEUE_HPP
#define CAF_DETAIL_DOUBLE_ENDED_QUEUE_HPP
#include "caf/config.hpp"
@@ -69,15 +69,15 @@ inline void sleep_for(const chrono::duration<Rep, Period>& rt) {
namespace caf {
namespace detail {
/**
* A producer-consumer list.
* For implementation details see http://drdobbs.com/cpp/211601363.
/*
* A thread-safe double-ended queue based on http://drdobbs.com/cpp/211601363.
* This implementation is optimized for FIFO, i.e., it supports fast insertion
* at the end and fast removal from the beginning. As long as the queue is
* only used for FIFO operations, readers do not block writers and vice versa.
*/
template <class T>
class producer_consumer_list {
class double_ended_queue {
public:
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
@@ -87,115 +87,157 @@ class producer_consumer_list {
using const_pointer = const value_type*;
class node {
public:
pointer value;
std::atomic<node*> next;
node(pointer val) : value(val), next(nullptr) {}
node(pointer val) : value(val), next(nullptr) {
// nop
}
private:
static constexpr size_type payload_size =
sizeof(pointer) + sizeof(std::atomic<node*>);
static constexpr size_type cline_size = CAF_CACHE_LINE_SIZE;
static constexpr size_type pad_size =
(cline_size * ((payload_size / cline_size) + 1)) - payload_size;
// avoid false sharing
static_assert(pad_size > 0, "invalid padding size calculated");
char pad[pad_size];
};
private:
static_assert(sizeof(node*) < CAF_CACHE_LINE_SIZE,
"sizeof(node*) >= CAF_CACHE_LINE_SIZE");
"sizeof(node*) >= CAF_CACHE_LINE_SIZE");
// for one consumer at a time
std::atomic<node*> m_first;
char m_pad1[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// for one producers at a time
std::atomic<node*> m_last;
char m_pad2[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// shared among producers
std::atomic<bool> m_consumer_lock;
std::atomic<bool> m_producer_lock;
public:
producer_consumer_list() {
double_ended_queue() {
auto ptr = new node(nullptr);
m_first = ptr;
m_last = ptr;
m_consumer_lock = false;
m_producer_lock = false;
m_head = ptr;
m_tail = ptr;
m_head_lock = false;
m_tail_lock = false;
}
~producer_consumer_list() {
while (m_first) {
node* tmp = m_first;
m_first = tmp->next.load();
~double_ended_queue() {
while (m_head) {
node* tmp = m_head;
m_head = tmp->next.load();
delete tmp;
}
}
inline void push_back(pointer value) {
assert(value != nullptr);
// acquires only one lock
void append(pointer value) {
CAF_REQUIRE(value != nullptr);
node* tmp = new node(value);
// acquire exclusivity
while (m_producer_lock.exchange(true)) {
std::this_thread::yield();
}
lock_guard guard(m_tail_lock);
// publish & swing last forward
m_last.load()->next = tmp;
m_last = tmp;
// release exclusivity
m_producer_lock = false;
m_tail.load()->next = tmp;
m_tail = tmp;
}
// returns nullptr on failure
pointer try_pop() {
pointer result = nullptr;
while (m_consumer_lock.exchange(true)) {
std::this_thread::yield();
// acquires both locks
void prepend(pointer value) {
CAF_REQUIRE(value != nullptr);
node* tmp = new node(value);
// acquire both locks since we might touch m_last too
lock_guard guard1(m_head_lock);
lock_guard guard2(m_tail_lock);
auto first = m_head.load();
auto next = first->next.load();
// m_head always points to a dummy with no value,
// hence we put the new element second
tmp->next = next;
first->next = tmp;
// in case the queue is empty, we need to swing last forward
if (m_tail == first) {
m_tail = tmp;
}
// only one consumer allowed
node* first = m_first;
node* next = m_first.load()->next;
if (next) {
}
// acquires only one lock, returns nullptr on failure
pointer take_head() {
node* first = nullptr;
pointer result = nullptr;
{ // lifetime scope of guard
lock_guard guard(m_head_lock);
first = m_head;
node* next = m_head.load()->next;
if (next == nullptr) {
return nullptr;
}
// queue is not empty
result = next->value; // take it out of the node
next->value = nullptr;
// swing first forward
m_first = next;
// release exclusivity
m_consumer_lock = false;
// delete old dummy
// first->value = nullptr;
delete first;
return result;
} else {
m_head = next;
// release exclusivity
m_consumer_lock = false;
return nullptr;
m_head_lock = false;
}
delete first;
return result;
}
// acquires both locks, returns nullptr on failure
pointer take_tail() {
pointer result = nullptr;
node* last = nullptr;
{ // lifetime scope of guards
lock_guard guard1(m_head_lock);
lock_guard guard2(m_tail_lock);
last = m_tail;
if (m_head == last) {
return nullptr;
}
result = last->value;
m_tail = find_predecessor(last);
CAF_REQUIRE(m_tail != nullptr);
m_tail.load()->next = nullptr;
}
delete last;
return result;
}
// does not lock
bool empty() const {
// atomically compares first and last pointer without locks
return m_first == m_last;
return m_head == m_tail;
}
private:
// precondition: *both* locks acquired
node* find_predecessor(node* what) {
for (auto i = m_head.load(); i != nullptr; i = i->next) {
if (i->next == what) {
return i;
}
}
return nullptr;
}
// guarded by m_head_lock
std::atomic<node*> m_head;
char m_pad1[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// guarded by m_tail_lock
std::atomic<node*> m_tail;
char m_pad2[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// enforce exclusive access
std::atomic<bool> m_head_lock;
std::atomic<bool> m_tail_lock;
class lock_guard {
public:
lock_guard(std::atomic<bool>& lock) : m_lock(lock) {
while (m_lock.exchange(true)) {
std::this_thread::yield();
}
}
~lock_guard() {
m_lock = false;
}
private:
std::atomic<bool>& m_lock;
};
};
} // namespace detail
} // namespace caf
#endif // CAF_PRODUCER_CONSUMER_LIST_HPP
#endif // CAF_DETAIL_DOUBLE_ENDED_QUEUE_HPP
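
Aside on the locking scheme above: lock_guard is a tiny spin lock (acquire by
spinning on exchange(true), release by writing false), and head and tail each
get their own flag, so the FIFO fast paths, append (tail lock) and take_head
(head lock), never spin on each other; only prepend and take_tail grab both
locks. A standalone model of that idiom, with illustrative names and explicit
acquire/release ordering (the queue itself relies on the sequentially
consistent defaults), might look like this:

```cpp
#include <atomic>
#include <thread>

// Standalone model of the spin-lock idiom used above: acquire by
// spinning on exchange(true), release by storing false.
class spin_guard {
public:
  explicit spin_guard(std::atomic<bool>& flag) : flag_(flag) {
    while (flag_.exchange(true, std::memory_order_acquire)) {
      std::this_thread::yield();
    }
  }
  ~spin_guard() {
    flag_.store(false, std::memory_order_release);
  }
private:
  std::atomic<bool>& flag_;
};

int main() {
  // two independent flags, standing in for m_head_lock and m_tail_lock
  std::atomic<bool> head_lock{false};
  std::atomic<bool> tail_lock{false};
  long appended = 0;
  long taken = 0;
  std::thread producer{[&] {
    for (int i = 0; i < 100000; ++i) {
      spin_guard guard{tail_lock}; // append path: tail lock only
      ++appended;
    }
  }};
  std::thread consumer{[&] {
    for (int i = 0; i < 100000; ++i) {
      spin_guard guard{head_lock}; // take_head path: head lock only
      ++taken;
    }
  }};
  producer.join();
  consumer.join();
  return appended == taken ? 0 : 1;
}
```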
@@ -28,7 +28,7 @@
#include "caf/resumable.hpp"
#include "caf/detail/producer_consumer_list.hpp"
#include "caf/detail/double_ended_queue.hpp"
namespace caf {
namespace policy {
@@ -54,43 +54,38 @@ namespace policy {
class work_stealing {
public:
// A thread-safe queue implementation.
using sync_queue = detail::producer_consumer_list<resumable>;
using queue_type = detail::double_ended_queue<resumable>;
// A queue implementation supporting fast push and pop
// operations on both ends of the queue.
using priv_queue = std::deque<resumable*>;
// The coordinator has no data since our scheduling is decentralized.
// The coordinator has only a counter for round-robin enqueue to its workers.
struct coordinator_data {
size_t next_worker;
std::atomic<size_t> next_worker;
inline coordinator_data() : next_worker(0) {
// nop
}
};
// Holds the job queues of a worker.
// Holds the job queue of a worker and a random number generator.
struct worker_data {
// This queue is exposed to other workers that may attempt to steal jobs
// from it and the central scheduling unit can push new jobs to the queue.
sync_queue exposed_queue;
// Internal job queue of a worker (not exposed to others).
priv_queue private_queue;
queue_type queue;
// needed by our engine
std::random_device rdevice;
// needed to generate pseudo random numbers
std::default_random_engine rengine;
// initialize random engine
inline worker_data() : rdevice(), rengine(rdevice()) {
// nop
}
};
// convenience function to access the data field
// Convenience function to access the data field.
template <class WorkerOrCoordinator>
auto d(WorkerOrCoordinator* self) -> decltype(self->data()) {
return self->data();
}
// go on a raid in quest for a shiny new job
// Goes on a raid in quest for a shiny new job.
template <class Worker>
resumable* try_steal(Worker* self) {
auto p = self->parent();
@@ -104,7 +99,8 @@ class work_stealing {
victim = d(self).rengine() % p->num_workers();
}
while (victim == self->id());
return d(p->worker_by_id(victim)).exposed_queue.try_pop();
// steal oldest element from the victim's queue
return d(p->worker_by_id(victim)).queue.take_tail();
}
template <class Coordinator>
@@ -115,12 +111,12 @@
template <class Worker>
void external_enqueue(Worker* self, resumable* job) {
d(self).exposed_queue.push_back(job);
d(self).queue.append(job);
}
template <class Worker>
void internal_enqueue(Worker* self, resumable* job) {
d(self).private_queue.push_back(job);
d(self).queue.prepend(job);
// give others the opportunity to steal from us
after_resume(self);
}
@@ -129,15 +125,7 @@
void resume_job_later(Worker* self, resumable* job) {
// job has voluntarily released the CPU to let others run instead
// this means we are going to put this job to the very end of our queue
// by moving everything from the exposed to private queue first and
// then enqueue job to the exposed queue
auto next = [&] {
return d(self).exposed_queue.try_pop();
};
for (auto ptr = next(); ptr != nullptr; ptr = next()) {
d(self).private_queue.push_front(ptr);
}
d(self).exposed_queue.push_back(job);
d(self).queue.append(job);
}
template <class Worker>
@@ -164,15 +152,9 @@
{101, 0, 1, std::chrono::microseconds{10000}}
};
resumable* job = nullptr;
// local poll
if (!d(self).private_queue.empty()) {
job = d(self).private_queue.back();
d(self).private_queue.pop_back();
return job;
}
for (auto& strat : strategies) {
for (size_t i = 0; i < strat.attempts; i += strat.step_size) {
job = d(self).exposed_queue.try_pop();
job = d(self).queue.take_head();
if (job) {
return job;
}
@@ -192,30 +174,18 @@
}
template <class Worker>
void before_shutdown(Worker* self) {
// give others the opportunity to steal unfinished jobs
for (auto ptr : d(self).private_queue) {
d(self).exposed_queue.push_back(ptr);
}
d(self).private_queue.clear();
void before_shutdown(Worker*) {
// nop
}
template <class Worker>
void after_resume(Worker* self) {
// give others the opportunity to steal from us
if (d(self).private_queue.size() > 1 && d(self).exposed_queue.empty()) {
d(self).exposed_queue.push_back(d(self).private_queue.front());
d(self).private_queue.pop_front();
}
void after_resume(Worker*) {
// nop
}
template <class Worker, class UnaryFunction>
void foreach_resumable(Worker* self, UnaryFunction f) {
for (auto job : d(self).private_queue) {
f(job);
}
d(self).private_queue.clear();
auto next = [&] { return this->d(self).exposed_queue.try_pop(); };
auto next = [&] { return this->d(self).queue.take_head(); };
for (auto job = next(); job != nullptr; job = next()) {
f(job);
}
......
@@ -25,7 +25,7 @@
#include "caf/execution_unit.hpp"
#include "caf/detail/logging.hpp"
#include "caf/detail/producer_consumer_list.hpp"
#include "caf/detail/double_ended_queue.hpp"
namespace caf {
namespace scheduler {
@@ -65,7 +65,7 @@ class worker : public execution_unit {
using job_ptr = resumable*;
using job_queue = detail::producer_consumer_list<resumable>;
using job_queue = detail::double_ended_queue<resumable>;
using policy_data = typename Policy::worker_data;
......