Commit a63af3ea authored by Dominik Charousset

Re-implement work-stealing queue without spinlocks

parent 8a188d75
@@ -6,55 +6,17 @@
 #include "caf/config.hpp"
 
-#include <atomic>
 #include <cassert>
 #include <chrono>
+#include <condition_variable>
+#include <list>
+#include <mutex>
 #include <thread>
 
-// GCC hack
-#if defined(CAF_GCC) && !defined(_GLIBCXX_USE_SCHED_YIELD)
-#  include <time.h>
-namespace std {
-namespace this_thread {
-namespace {
-inline void yield() noexcept {
-  timespec req;
-  req.tv_sec = 0;
-  req.tv_nsec = 1;
-  nanosleep(&req, nullptr);
-}
-} // namespace
-} // namespace this_thread
-} // namespace std
-#endif
-
-// another GCC hack
-#if defined(CAF_GCC) && !defined(_GLIBCXX_USE_NANOSLEEP)
-#  include <time.h>
-namespace std {
-namespace this_thread {
-namespace {
-template <class Rep, typename Period>
-inline void sleep_for(const chrono::duration<Rep, Period>& rt) {
-  auto sec = chrono::duration_cast<chrono::seconds>(rt);
-  auto nsec = chrono::duration_cast<chrono::nanoseconds>(rt - sec);
-  timespec req;
-  req.tv_sec = sec.count();
-  req.tv_nsec = nsec.count();
-  nanosleep(&req, nullptr);
-}
-} // namespace
-} // namespace this_thread
-} // namespace std
-#endif
-
 namespace caf::detail {
 
 /*
- * A thread-safe double-ended queue based on http://drdobbs.com/cpp/211601363.
- * This implementation is optimized for FIFO, i.e., it supports fast insertion
- * at the end and fast removal from the beginning. As long as the queue is
- * only used for FIFO operations, readers do not block writers and vice versa.
+ * A thread-safe, double-ended queue for work-stealing.
  */
 template <class T>
 class double_ended_queue {
@@ -67,163 +29,82 @@ public:
   using pointer = value_type*;
   using const_pointer = const value_type*;
 
-  class node {
-  public:
-    pointer value;
-    std::atomic<node*> next;
-    explicit node(pointer val) : value(val), next(nullptr) {
-      // nop
-    }
-
-  private:
-    static constexpr size_type payload_size
-      = sizeof(pointer) + sizeof(std::atomic<node*>);
-    static constexpr size_type cline_size = CAF_CACHE_LINE_SIZE;
-    static constexpr size_type pad_size
-      = (cline_size * ((payload_size / cline_size) + 1)) - payload_size;
-    // avoid false sharing
-    static_assert(pad_size > 0, "invalid padding size calculated");
-    char pad[pad_size];
-  };
-
-  using unique_node_ptr = std::unique_ptr<node>;
-
-  static_assert(sizeof(node*) < CAF_CACHE_LINE_SIZE,
-                "sizeof(node*) >= CAF_CACHE_LINE_SIZE");
-
-  double_ended_queue() {
-    head_lock_.clear();
-    tail_lock_.clear();
-    auto ptr = new node(nullptr);
-    head_ = ptr;
-    tail_ = ptr;
-  }
-
-  ~double_ended_queue() {
-    auto ptr = head_.load();
-    while (ptr) {
-      unique_node_ptr tmp{ptr};
-      ptr = tmp->next.load();
-    }
-  }
-
-  // acquires only one lock
-  void append(pointer value) {
-    CAF_ASSERT(value != nullptr);
-    auto* tmp = new node(value);
-    lock_guard guard(tail_lock_);
-    // publish & swing last forward
-    tail_.load()->next = tmp;
-    tail_ = tmp;
-  }
-
-  // acquires both locks
-  void prepend(pointer value) {
-    CAF_ASSERT(value != nullptr);
-    auto* tmp = new node(value);
-    node* first = nullptr;
-    // acquire both locks since we might touch last_ too
-    lock_guard guard1(head_lock_);
-    lock_guard guard2(tail_lock_);
-    first = head_.load();
-    CAF_ASSERT(first != nullptr);
-    auto next = first->next.load();
-    // first_ always points to a dummy with no value,
-    // hence we put the new element second
-    if (next) {
-      CAF_ASSERT(first != tail_);
-      tmp->next = next;
-    } else {
-      // queue is empty
-      CAF_ASSERT(first == tail_);
-      tail_ = tmp;
-    }
-    first->next = tmp;
-  }
-
-  // acquires only one lock, returns nullptr on failure
-  pointer take_head() {
-    unique_node_ptr first;
-    pointer result = nullptr;
-    { // lifetime scope of guard
-      lock_guard guard(head_lock_);
-      first.reset(head_.load());
-      node* next = first->next;
-      if (next == nullptr) {
-        // queue is empty
-        first.release();
-        return nullptr;
-      }
-      // take it out of the node & swing first forward
-      result = next->value;
-      next->value = nullptr;
-      head_ = next;
-    }
-    return result;
-  }
-
-  // acquires both locks, returns nullptr on failure
-  pointer take_tail() {
-    pointer result = nullptr;
-    unique_node_ptr last;
-    { // lifetime scope of guards
-      lock_guard guard1(head_lock_);
-      lock_guard guard2(tail_lock_);
-      CAF_ASSERT(head_ != nullptr);
-      last.reset(tail_.load());
-      if (last.get() == head_.load()) {
-        last.release();
-        return nullptr;
-      }
-      result = last->value;
-      tail_ = find_predecessor(last.get());
-      CAF_ASSERT(tail_ != nullptr);
-      tail_.load()->next = nullptr;
-    }
-    return result;
-  }
-
-  // does not lock
-  bool empty() const {
-    // atomically compares first and last pointer without locks
-    return head_.load() == tail_.load();
-  }
-
-private:
-  // precondition: *both* locks acquired
-  node* find_predecessor(node* what) {
-    for (auto i = head_.load(); i != nullptr; i = i->next) {
-      if (i->next == what) {
-        return i;
-      }
-    }
-    return nullptr;
-  }
-
-  // guarded by head_lock_
-  std::atomic<node*> head_;
-  char pad1_[CAF_CACHE_LINE_SIZE - sizeof(node*)];
-  // guarded by tail_lock_
-  std::atomic<node*> tail_;
-  char pad2_[CAF_CACHE_LINE_SIZE - sizeof(node*)];
-  // enforce exclusive access
-  std::atomic_flag head_lock_;
-  std::atomic_flag tail_lock_;
-
-  class lock_guard {
-  public:
-    explicit lock_guard(std::atomic_flag& lock) : lock_(lock) {
-      while (lock.test_and_set(std::memory_order_acquire)) {
-        std::this_thread::yield();
-      }
-    }
-    ~lock_guard() {
-      lock_.clear(std::memory_order_release);
-    }
-
-  private:
-    std::atomic_flag& lock_;
-  };
+  // -- for the owner ----------------------------------------------------------
+
+  void prepend(pointer value) {
+    CAF_ASSERT(value != nullptr);
+    std::unique_lock guard{mtx_};
+    items_.push_front(value);
+  }
+
+  pointer try_take_head() {
+    std::unique_lock guard{mtx_};
+    if (!items_.empty()) {
+      auto* result = items_.front();
+      items_.pop_front();
+      return result;
+    }
+    return nullptr;
+  }
+
+  template <class Duration>
+  pointer try_take_head(Duration rel_timeout) {
+    auto abs_timeout = std::chrono::system_clock::now() + rel_timeout;
+    std::unique_lock guard{mtx_};
+    while (items_.empty()) {
+      if (cv_.wait_until(guard, abs_timeout) == std::cv_status::timeout) {
+        return nullptr;
+      }
+    }
+    auto* result = items_.front();
+    items_.pop_front();
+    return result;
+  }
+
+  pointer take_head() {
+    std::unique_lock guard{mtx_};
+    while (items_.empty()) {
+      cv_.wait(guard);
+    }
+    auto* result = items_.front();
+    items_.pop_front();
+    return result;
+  }
+
+  // Unsafe, since it does not wake up a currently sleeping worker.
+  void unsafe_append(pointer value) {
+    std::unique_lock guard{mtx_};
+    items_.push_back(value);
+  }
+
+  // -- for others -------------------------------------------------------------
+
+  void append(pointer value) {
+    bool do_notify = false;
+    {
+      std::unique_lock guard{mtx_};
+      do_notify = items_.empty();
+      items_.push_back(value);
+    }
+    if (do_notify) {
+      cv_.notify_one();
+    }
+  }
+
+  pointer try_take_tail() {
+    std::unique_lock guard{mtx_};
+    if (!items_.empty()) {
+      auto* result = items_.back();
+      items_.pop_back();
+      return result;
+    }
+    return nullptr;
+  }
+
+private:
+  std::mutex mtx_;
+  std::condition_variable cv_;
+  std::list<pointer> items_;
 };
 
 } // namespace caf::detail
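For reference, here is a minimal, self-contained sketch (not CAF code) of the pattern the new queue is built on: a std::list guarded by a single std::mutex plus a std::condition_variable, where append() notifies only when the queue transitions from empty to non-empty, the timed try_take_head() lets the owning worker sleep on the condition variable instead of spinning, and try_take_tail() gives thieves a non-blocking grab from the opposite end. The names simple_job_queue and job are made up for illustration.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <list>
#include <mutex>
#include <thread>

struct job {
  int id;
};

class simple_job_queue {
public:
  // Called by other threads; wakes a sleeping owner only if the queue was
  // empty, so the common case pays for at most one notify.
  void append(job* ptr) {
    bool do_notify = false;
    {
      std::unique_lock<std::mutex> guard{mtx_};
      do_notify = items_.empty();
      items_.push_back(ptr);
    }
    if (do_notify)
      cv_.notify_one();
  }

  // Owner-only: non-blocking take from the front.
  job* try_take_head() {
    std::unique_lock<std::mutex> guard{mtx_};
    if (items_.empty())
      return nullptr;
    auto* result = items_.front();
    items_.pop_front();
    return result;
  }

  // Owner-only: blocks until a job arrives or the timeout expires.
  template <class Duration>
  job* try_take_head(Duration rel_timeout) {
    auto abs_timeout = std::chrono::system_clock::now() + rel_timeout;
    std::unique_lock<std::mutex> guard{mtx_};
    while (items_.empty()) {
      if (cv_.wait_until(guard, abs_timeout) == std::cv_status::timeout)
        return nullptr;
    }
    auto* result = items_.front();
    items_.pop_front();
    return result;
  }

  // For thieves: non-blocking take from the opposite end.
  job* try_take_tail() {
    std::unique_lock<std::mutex> guard{mtx_};
    if (items_.empty())
      return nullptr;
    auto* result = items_.back();
    items_.pop_back();
    return result;
  }

private:
  std::mutex mtx_;
  std::condition_variable cv_;
  std::list<job*> items_;
};

int main() {
  simple_job_queue queue;
  std::thread producer{[&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    queue.append(new job{42});
  }};
  // The owner sleeps on the condition variable instead of spinning.
  if (auto* j = queue.try_take_head(std::chrono::seconds(1))) {
    std::cout << "got job " << j->id << '\n';
    delete j;
  }
  producer.join();
}

The remaining hunks adjust the work-stealing scheduler policy to this interface.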
@@ -39,13 +39,6 @@ public:
     timespan sleep_duration;
   };
 
-  // what is needed to implement the waiting strategy.
-  struct wait_strategy {
-    std::mutex lock;
-    std::condition_variable cv;
-    bool sleeping{false};
-  };
-
   // The coordinator has only a counter for round-robin enqueue to its workers.
   struct coordinator_data {
     explicit coordinator_data(scheduler::abstract_coordinator*)
@@ -68,7 +61,6 @@ public:
     std::default_random_engine rengine;
     std::uniform_int_distribution<size_t> uniform;
     std::array<poll_strategy, 3> strategies;
-    wait_strategy waitdata;
   };
 
   // Goes on a raid in quest for a shiny new job.
@@ -84,7 +76,7 @@ public:
     if (victim == self->id())
      victim = p->num_workers() - 1;
     // steal oldest element from the victim's queue
-    return d(p->worker_by_id(victim)).queue.take_tail();
+    return d(p->worker_by_id(victim)).queue.try_take_tail();
   }
 
   template <class Coordinator>
@@ -96,14 +88,6 @@ public:
   template <class Worker>
   void external_enqueue(Worker* self, resumable* job) {
     d(self).queue.append(job);
-    auto& lock = d(self).waitdata.lock;
-    auto& cv = d(self).waitdata.cv;
-    { // guard scope
-      std::unique_lock<std::mutex> guard(lock);
-      // check if the worker is sleeping
-      if (d(self).waitdata.sleeping && !d(self).queue.empty())
-        cv.notify_one();
-    }
   }
 
   template <class Worker>
@@ -115,7 +99,7 @@ public:
   void resume_job_later(Worker* self, resumable* job) {
     // job has voluntarily released the CPU to let others run instead
     // this means we are going to put this job to the very end of our queue
-    d(self).queue.append(job);
+    d(self).queue.unsafe_append(job);
   }
 
   template <class Worker>
@@ -125,67 +109,37 @@ public:
     // polling, then we relax our polling a bit and wait 50 us between
     // dequeue attempts
     auto& strategies = d(self).strategies;
-    resumable* job = nullptr;
+    auto* job = d(self).queue.try_take_head();
+    if (job)
+      return job;
     for (size_t k = 0; k < 2; ++k) { // iterate over the first two strategies
       for (size_t i = 0; i < strategies[k].attempts;
            i += strategies[k].step_size) {
-        job = d(self).queue.take_head();
-        if (job)
-          return job;
         // try to steal every X poll attempts
        if ((i % strategies[k].steal_interval) == 0) {
          job = try_steal(self);
          if (job)
            return job;
        }
-        if (strategies[k].sleep_duration.count() > 0) {
-#ifdef CAF_MSVC
-          // Windows cannot sleep less than 1000 us, so timeout is converted to
-          // 0 inside sleep_for(), but Sleep(0) is dangerous so replace it with
-          // yield()
-          if (strategies[k].sleep_duration.count() < 1000)
-            std::this_thread::yield();
-          else
-            std::this_thread::sleep_for(strategies[k].sleep_duration);
-#else
-          std::this_thread::sleep_for(strategies[k].sleep_duration);
-#endif
-        }
+        // wait for some work to appear
+        job = d(self).queue.try_take_head(strategies[k].sleep_duration);
+        if (job)
+          return job;
      }
    }
     // we assume pretty much nothing is going on so we can relax polling
     // and falling to sleep on a condition variable whose timeout is the one
     // of the relaxed polling strategy
     auto& relaxed = strategies[2];
-    auto& sleeping = d(self).waitdata.sleeping;
-    auto& lock = d(self).waitdata.lock;
-    auto& cv = d(self).waitdata.cv;
-    bool notimeout = true;
-    size_t i = 1;
     do {
-      { // guard scope
-        std::unique_lock<std::mutex> guard(lock);
-        sleeping = true;
-        if (!cv.wait_for(guard, relaxed.sleep_duration,
-                         [&] { return !d(self).queue.empty(); }))
-          notimeout = false;
-        sleeping = false;
-      }
-      if (notimeout) {
-        job = d(self).queue.take_head();
-      } else {
-        notimeout = true;
-        if ((i % relaxed.steal_interval) == 0)
-          job = try_steal(self);
-      }
-      ++i;
+      job = d(self).queue.try_take_head(relaxed.sleep_duration);
    } while (job == nullptr);
    return job;
  }

   template <class Worker, class UnaryFunction>
   void foreach_resumable(Worker* self, UnaryFunction f) {
-    auto next = [&] { return d(self).queue.take_head(); };
+    auto next = [&] { return d(self).queue.try_take_head(); };
    for (auto job = next(); job != nullptr; job = next()) {
      f(job);
    }
...
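The reworked dequeue() keeps the three poll_strategy phases but moves all waiting into the queue itself: the first two phases poll and occasionally steal, and where they previously called sleep_for()/yield() they now block in the timed try_take_head(); the relaxed third phase simply repeats the timed take until a job arrives, so the separate wait_strategy state (lock, cv, sleeping flag) is no longer needed. Below is a rough, free-standing sketch of that structure; dequeue_sketch and the stand-alone poll_strategy struct are illustrative assumptions, not CAF's actual types, and the queue is expected to provide the try_take_head() interface from the sketch above (the steal callable could, for example, wrap try_take_tail() on a victim's queue).

#include <chrono>
#include <cstddef>

// Mirrors the fields the policy's poll_strategy uses in the diff above.
struct poll_strategy {
  std::size_t attempts;
  std::size_t step_size;
  std::size_t steal_interval;
  std::chrono::microseconds sleep_duration;
};

// Queue must provide try_take_head() and try_take_head(timeout) returning a
// pointer (nullptr on failure); StealFn is any callable returning the same
// pointer type.
template <class Queue, class StealFn>
auto dequeue_sketch(Queue& queue, StealFn steal,
                    const poll_strategy (&strategies)[3]) {
  // Fast path: a job is already waiting in our own queue.
  auto* job = queue.try_take_head();
  if (job)
    return job;
  // Phases 1 and 2: aggressive and moderate polling with interleaved steals.
  for (std::size_t k = 0; k < 2; ++k) {
    for (std::size_t i = 0; i < strategies[k].attempts;
         i += strategies[k].step_size) {
      if ((i % strategies[k].steal_interval) == 0) {
        job = steal();
        if (job)
          return job;
      }
      // Instead of sleeping or yielding, block on the queue for a bounded time.
      job = queue.try_take_head(strategies[k].sleep_duration);
      if (job)
        return job;
    }
  }
  // Phase 3: relaxed; keep waiting on the queue's condition variable with the
  // relaxed strategy's timeout until a job shows up.
  do {
    job = queue.try_take_head(strategies[2].sleep_duration);
  } while (job == nullptr);
  return job;
}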