Commit 3fccabd8 authored by Dominik Charousset

Streamline implementation of fork join policy

parent 07d4ef49
@@ -19,9 +19,10 @@
 #ifndef CPPA_POLICY_FORK_JOIN_HPP
 #define CPPA_POLICY_FORK_JOIN_HPP

+#include <list>
 #include <chrono>
-#include <vector>
 #include <thread>
+#include <cstddef>

 #include "cppa/resumable.hpp"
@@ -76,98 +77,76 @@ class fork_join {

     /**
      * @brief A queue implementation supporting fast push and pop
-     *        operations. Note that we do dequeue from the back of the
-     *        queue.
+     *        operations on both ends of the queue.
      */
-    using priv_queue = std::vector<resumable*>;
+    using priv_queue = std::list<resumable*>;

     template<class Worker>
-    inline void external_enqueue(Worker*, resumable* job) {
+    void external_enqueue(Worker*, resumable* job) {
         m_exposed_queue.push_back(job);
     }

     template<class Worker>
-    inline void internal_enqueue(Worker*, resumable* job) {
+    void internal_enqueue(Worker* ptr, resumable* job) {
+        m_exposed_queue.push_back(job);
         // give others the opportunity to steal from us
-        if (m_exposed_queue.empty()) {
-            if (m_private_queue.empty()) {
-                m_exposed_queue.push_back(job);
-            } else {
-                m_exposed_queue.push_back(m_private_queue.front());
-                m_private_queue.erase(m_private_queue.begin());
-                m_private_queue.push_back(job);
-            }
-        } else {
-            m_private_queue.push_back(job);
-        }
+        assert_stealable(ptr);
     }

     template<class Worker>
-    inline resumable* try_external_dequeue(Worker*) {
+    resumable* try_external_dequeue(Worker*) {
         return m_exposed_queue.try_pop();
     }

     template<class Worker>
-    inline resumable* internal_dequeue(Worker* self) {
-        resumable* job;
-        auto local_poll = [&]() -> bool {
-            if (!m_private_queue.empty()) {
-                job = m_private_queue.back();
-                m_private_queue.pop_back();
-                return true;
-            }
-            return false;
-        };
-        auto aggressive_poll = [&]() -> bool {
-            for (int i = 1; i < 101; ++i) {
-                job = m_exposed_queue.try_pop();
-                if (job) {
-                    return true;
-                }
-                // try to steal every 10 poll attempts
-                if ((i % 10) == 0) {
-                    job = self->raid();
-                    if (job) {
-                        return true;
-                    }
-                }
-                std::this_thread::yield();
-            }
-            return false;
-        };
-        auto moderate_poll = [&]() -> bool {
-            for (int i = 1; i < 550; ++i) {
-                job = m_exposed_queue.try_pop();
-                if (job) {
-                    return true;
-                }
-                // try to steal every 5 poll attempts
-                if ((i % 5) == 0) {
-                    job = self->raid();
-                    if (job) {
-                        return true;
-                    }
-                }
-                std::this_thread::sleep_for(std::chrono::microseconds(50));
-            }
-            return false;
-        };
-        auto relaxed_poll = [&]() -> bool {
-            for (;;) {
-                job = m_exposed_queue.try_pop();
-                if (job) {
-                    return true;
-                }
-                // always try to steal at this stage
-                job = self->raid();
-                if (job) {
-                    return true;
-                }
-                std::this_thread::sleep_for(std::chrono::milliseconds(10));
-            }
-        };
-        local_poll() || aggressive_poll() || moderate_poll() || relaxed_poll();
-        return job;
+    resumable* internal_dequeue(Worker* self) {
+        // we wait for new jobs by polling our external queue: first, we
+        // assume an active work load on the machine and perform aggressive
+        // polling, then we relax our polling a bit and wait 50 us between
+        // dequeue attempts, finally we assume pretty much nothing is going
+        // on and poll every 10 ms; this strategy strives to minimize the
+        // downside of "busy waiting", which still performs much better than a
+        // "signalizing" implementation based on mutexes and condition variables
+        struct poll_strategy {
+            size_t attempts;
+            size_t step_size;
+            size_t raid_interval;
+            std::chrono::microseconds sleep_duration;
+        };
+        constexpr poll_strategy strategies[3] = {
+            // aggressive polling (100x) without sleep interval
+            {100, 1, 10, std::chrono::microseconds{0}},
+            // moderate polling (500x) with 50 us sleep interval
+            {500, 1, 5, std::chrono::microseconds{50}},
+            // relaxed polling (infinite attempts) with 10 ms sleep interval
+            {101, 0, 1, std::chrono::microseconds{10000}}
+        };
+        resumable* job = nullptr;
+        // local poll
+        if (!m_private_queue.empty()) {
+            job = m_private_queue.back();
+            m_private_queue.pop_back();
+            return job;
+        }
+        for (auto& strat : strategies) {
+            for (size_t i = 0; i < strat.attempts; i += strat.step_size) {
+                job = m_exposed_queue.try_pop();
+                if (job) {
+                    return job;
+                }
+                // try to steal every X poll attempts
+                if ((i % strat.raid_interval) == 0) {
+                    job = self->raid();
+                    if (job) {
+                        return job;
+                    }
+                }
+                std::this_thread::sleep_for(strat.sleep_duration);
+            }
+        }
+        // unreachable, because the last strategy loops
+        // until a job has been dequeued
+        return nullptr;
     }

     template<class Worker>
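
Note: the hunk above replaces the four hand-written polling lambdas with a single table-driven loop over poll_strategy entries. The following is a rough standalone sketch of that staged back-off, not part of the commit; sync_queue_stub, job, and try_steal are placeholders invented for this example rather than cppa types.

// Standalone sketch (not part of the commit): the staged polling loop from
// internal_dequeue above, exercised against a stand-in job queue.
#include <chrono>
#include <cstddef>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

struct job { int id; };

// minimal thread-safe stand-in for the exposed sync_queue
class sync_queue_stub {
 public:
    void push_back(job* j) {
        std::lock_guard<std::mutex> guard{m_mtx};
        m_items.push_back(j);
    }
    job* try_pop() {
        std::lock_guard<std::mutex> guard{m_mtx};
        if (m_items.empty()) {
            return nullptr;
        }
        auto* j = m_items.front();
        m_items.pop_front();
        return j;
    }
 private:
    std::mutex m_mtx;
    std::deque<job*> m_items;
};

job* try_steal() {
    // placeholder for Worker::raid(); a real worker would pick a victim
    // and pop from that victim's exposed queue
    return nullptr;
}

struct poll_strategy {
    size_t attempts;
    size_t step_size;
    size_t raid_interval;
    std::chrono::microseconds sleep_duration;
};

job* staged_dequeue(sync_queue_stub& q) {
    // same three stages as in the patch: aggressive, moderate, relaxed
    constexpr poll_strategy strategies[3] = {
        {100, 1, 10, std::chrono::microseconds{0}},
        {500, 1, 5, std::chrono::microseconds{50}},
        {101, 0, 1, std::chrono::microseconds{10000}}
    };
    for (auto& strat : strategies) {
        for (size_t i = 0; i < strat.attempts; i += strat.step_size) {
            if (auto* j = q.try_pop()) {
                return j;
            }
            if ((i % strat.raid_interval) == 0) {
                if (auto* j = try_steal()) {
                    return j;
                }
            }
            std::this_thread::sleep_for(strat.sleep_duration);
        }
    }
    return nullptr; // unreachable: the last stage never advances i
}

int main() {
    sync_queue_stub q;
    job j{42};
    // a producer that publishes one job after 5 ms, while the main thread
    // is already polling in the moderate stage
    std::thread producer{[&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
        q.push_back(&j);
    }};
    auto* got = staged_dequeue(q);
    std::cout << "dequeued job " << got->id << std::endl;
    producer.join();
}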
@@ -184,7 +163,7 @@ class fork_join {
         // give others the opportunity to steal from us
         if (m_private_queue.size() > 1 && m_exposed_queue.empty()) {
             m_exposed_queue.push_back(m_private_queue.front());
-            m_private_queue.erase(m_private_queue.begin());
+            m_private_queue.pop_front();
         }
     }
@@ -202,9 +181,8 @@ class fork_join {

  private:

-    // this queue is exposed to others, i.e., other workers
-    // may attempt to steal jobs from it and the central scheduling
-    // unit can push new jobs to the queue
+    // this queue is exposed to other workers that may attempt to steal jobs
+    // from it and the central scheduling unit can push new jobs to the queue
     sync_queue m_exposed_queue;

     // internal job queue
...
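
A side note on the container switch in the first hunk (my reading, not stated in the commit message): the streamlined code now pops jobs from the front of the private queue via pop_front(), which std::list provides in constant time, whereas the previous std::vector had to shift every remaining element with erase(begin()). A minimal comparison using plain int queues as stand-ins:

#include <list>
#include <vector>

int main() {
    std::vector<int> vec_queue{1, 2, 3};
    // old approach: removing the first element shifts all remaining ones, O(n)
    vec_queue.erase(vec_queue.begin());

    std::list<int> list_queue{1, 2, 3};
    // new approach: unlink the first node, O(1)
    list_queue.pop_front();

    // both queues now start at 2
    return vec_queue.front() + list_queue.front() == 4 ? 0 : 1;
}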