Commit fa5f795c authored by Sebastian Woelke

Add cache support for locality guided scheduling

parent 58015d19
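
The new policy and its tuning knobs plug into the regular CAF configuration path. A minimal sketch of a caf-application.ini selecting the scheduler and the two LGS options added in this commit (option names taken from the diff below; the single-quoted atom syntax follows CAF's usual INI convention):

    [scheduler]
    policy='w-stealing'

    [lgs]
    actor-pinning-entity='node'
    w-stealing-entity='cache'

Valid pinning entities are 'pu', 'cache', 'node', and 'system'; valid stealing start entities are 'cache', 'node', and 'system' (see the option descriptions in the diff below).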
......@@ -70,7 +70,7 @@ set (LIBCAF_CORE_SRCS
src/monitorable_actor.cpp
src/node_id.cpp
src/outbound_path.cpp
src/numa_aware_work_stealing.cpp
src/locality_guided_scheduling.cpp
src/parse_ini.cpp
src/pretty_type_name.cpp
src/private_thread.cpp
......
......@@ -264,9 +264,10 @@ public:
size_t work_stealing_relaxed_steal_interval;
size_t work_stealing_relaxed_sleep_duration_us;
// -- config parameters for numa aware work-stealing -------------------------
// -- config parameters for locality guided scheduling (LGS) -----------------
size_t numa_aware_work_stealing_neighborhood_level;
atom_value lgs_actor_pinning_entity;
atom_value lgs_weighted_work_stealing_start_entity;
// -- config parameters for the logger ---------------------------------------
......
......@@ -17,8 +17,8 @@
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_POLICY_NUMA_AWARE_WORK_STEALING_HPP
#define CAF_POLICY_NUMA_AWARE_WORK_STEALING_HPP
#ifndef CAF_POLICY_LOCALITY_GUIDED_SCHEDULING_HPP_
#define CAF_POLICY_LOCALITY_GUIDED_SCHEDULING_HPP_
#include <deque>
#include <chrono>
......@@ -33,15 +33,11 @@
namespace caf {
namespace policy {
#define CALL_CAF_CRITICAL(predicate, msg) \
if (predicate) \
CAF_CRITICAL(msg)
/// Implements scheduling of actors via locality guided work stealing.
/// @extends scheduler_policy
class numa_aware_work_stealing : public work_stealing {
class locality_guided_scheduling : public work_stealing {
public:
~numa_aware_work_stealing();
~locality_guided_scheduling();
struct hwloc_topo_free {
void operator()(hwloc_topology_t p) {
......@@ -68,33 +64,31 @@ public:
}
};
using hwloc_bitmap_wrapper =
using bitmap_wrapper_t =
std::unique_ptr<hwloc_bitmap_s, hwloc_bitmap_free_wrapper>;
static hwloc_bitmap_wrapper hwloc_bitmap_make_wrapper() {
return hwloc_bitmap_wrapper(hwloc_bitmap_alloc());
static bitmap_wrapper_t hwloc_bitmap_make_wrapper() {
return bitmap_wrapper_t(hwloc_bitmap_alloc());
}
using pu_id_t = int;
using node_id_t = int;
using pu_set_t = hwloc_bitmap_wrapper;
using node_set_t = hwloc_bitmap_wrapper;
template <class Worker>
struct coordinator_data {
inline explicit coordinator_data(scheduler::abstract_coordinator*) {
int res;
hwloc_topology_t raw_topo;
res = hwloc_topology_init(&raw_topo);
CALL_CAF_CRITICAL(res == -1, "hwloc_topology_init() failed");
// abort if hwloc_topology_init() failed
CAF_IGNORE_UNUSED(res);
CAF_ASSERT(res != -1);
topo.reset(raw_topo);
res = hwloc_topology_load(topo.get());
CALL_CAF_CRITICAL(res == -1, "hwloc_topology_load() failed");
// abort if hwloc_topology_load() failed
CAF_ASSERT(res != -1);
next_worker = 0;
}
topo_ptr topo;
std::vector<std::unique_ptr<Worker, worker_deleter<Worker>>> workers;
std::map<pu_id_t, Worker*> worker_id_map;
std::map<int, Worker*> worker_id_map;
// used by central enqueue to balance new jobs between workers with a
// round-robin strategy
std::atomic<size_t> next_worker;
......@@ -104,119 +98,269 @@ public:
struct worker_data {
using neighbors_t = std::vector<Worker*>;
using worker_proximity_matrix_t = std::vector<neighbors_t>;
using pu_distance_map_t = std::map<float, bitmap_wrapper_t>;
explicit worker_data(scheduler::abstract_coordinator* p)
: rengine(std::random_device{}())
, strategies(get_poll_strategies(p))
, neighborhood_level(
p->system().config().numa_aware_work_stealing_neighborhood_level) {
, actor_pinning_entity(p->system().config().lgs_actor_pinning_entity)
, wws_start_entity(
p->system().config().lgs_weighted_work_stealing_start_entity)
, start_steal_group_idx(0) {
// nop
}
worker_proximity_matrix_t init_worker_proximity_matrix(Worker* self,
const pu_set_t& current_pu_set) {
auto& cdata = d(self->parent());
auto& topo = cdata.topo;
auto current_node_set = hwloc_bitmap_make_wrapper();
hwloc_cpuset_to_nodeset(topo.get(), current_pu_set.get(),
current_node_set.get());
CALL_CAF_CRITICAL(hwloc_bitmap_iszero(current_node_set.get()),
"Current NUMA node_set is unknown");
auto current_node_id = hwloc_bitmap_first(current_node_set.get());
std::map<float, pu_set_t> dist_map;
worker_proximity_matrix_t result_matrix;
// Distance matrix for NUMA nodes. It is possible to request the distance
// matrix on PU level, which would be a better match for our use case, but
// on all tested hardware it returned a nullptr. Future work?
auto distance_matrix =
hwloc_get_whole_distance_matrix_by_type(topo.get(), HWLOC_OBJ_NUMANODE);
// If the NUMA distance matrix is not available, it is assumed that all
// PUs have the same distance
if (!distance_matrix || !distance_matrix->latency) {
auto allowed_const_pus = hwloc_topology_get_allowed_cpuset(topo.get());
hwloc_bitmap_wrapper allowed_pus;
allowed_pus.reset(hwloc_bitmap_dup(allowed_const_pus));
dist_map.insert(std::make_pair(1.0, std::move(allowed_pus)));
// debug helper (disabled)
bool check_pu_id(hwloc_const_cpuset_t current_pu) {
//auto current_pu_id = hwloc_bitmap_first(current_pu);
//return current_pu_id == 0;
return false;
}
// debug helper (disabled)
void xxx(hwloc_const_cpuset_t current_pu, const std::string& str) {
//if (!check_pu_id(current_pu))
//return;
//std::cout << str << std::endl;
}
// debug helper (disabled)
void xxx(hwloc_const_bitmap_t current_pu, std::map<float, bitmap_wrapper_t>& dist_map) {
//if (!check_pu_id(current_pu))
//return;
//for(auto& e : dist_map) {
//std::cout << "dist: " << e.first << "; pu_set: " << e.second << std::endl;
//}
}
// Recursively collects all PUs that are children of obj, including obj itself
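// Example (hypothetical topology): called with obj = the L2 cache above the
// calling PU, filter_os_idx = the calling PU's os_index, and filter_obj = the
// already visited L1 cache, it adds every PU below that L2 except the caller
// itself and the PUs already covered by the L1 subtree.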
void traverse_hwloc_obj(hwloc_cpuset_t result_pu_set, hwloc_topology_t topo,
const hwloc_obj_t obj, unsigned int filter_os_idx,
const hwloc_obj_t filter_obj) {
if (!obj || obj == filter_obj)
return;
if (obj->type == hwloc_obj_type_t::HWLOC_OBJ_PU
&& obj->os_index != filter_os_idx) {
hwloc_bitmap_set(result_pu_set, obj->os_index);
} else {
auto num_of_dist_objs = distance_matrix->nbobjs;
// relevant row for the current NUMA node in the distance matrix
float* dist_pointer =
&distance_matrix
->latency[num_of_dist_objs
* static_cast<unsigned int>(current_node_id)];
// iterate over all NUMA nodes and classify them in distance levels with
// respect to the current NUMA node
for (node_id_t x = 0; static_cast<unsigned int>(x) < num_of_dist_objs;
++x) {
node_set_t tmp_node_set = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(tmp_node_set.get(), static_cast<unsigned int>(x));
auto tmp_pu_set = hwloc_bitmap_make_wrapper();
hwloc_cpuset_from_nodeset(topo.get(), tmp_pu_set.get(),
tmp_node_set.get());
// you cannot steal from yourself
if (x == current_node_id) {
hwloc_bitmap_andnot(tmp_pu_set.get(), tmp_pu_set.get(),
current_pu_set.get());
hwloc_obj_t child = hwloc_get_next_child(topo, obj, nullptr);
while (child) {
traverse_hwloc_obj(result_pu_set, topo, child, filter_os_idx,
filter_obj);
child = hwloc_get_next_child(topo, obj, child);
}
}
}
// collect the PUs for each cache level
pu_distance_map_t traverse_caches(hwloc_topology_t topo,
hwloc_const_cpuset_t current_pu) {
pu_distance_map_t result_map;
// We need the distance divider to define the distance between PUs sharing
// a cache level. PUs sharing a NUMA node have a distance of 1 by
// definition, and PUs which don't share a NUMA node have a distance of > 1.
// Consequently, the distance between PUs sharing a cache level must be
// smaller than 1. We define the distance between PUs sharing the L1 cache
// as 1 / 100 (the distance_divider); hence, the distance for the L2 cache
// is 2 / 100, and so on. Why 100? It is readable by humans, and at least
// 100 cache levels would be required to collide with NUMA distances, which
// is very unlikely.
const float distance_divider = 100.0;
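// Worked example (hypothetical three-level cache hierarchy): PUs sharing the
// caller's L1 get distance 1 / 100 = 0.01, L2 peers get 0.02, L3 peers get
// 0.03, while NUMA distances start at 1.0; hence, cache groups always sort
// before node groups in the resulting pu_distance_map_t.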
int current_cache_lvl = 1;
hwloc_obj_t last_cache_obj = nullptr;
auto current_cache_obj =
hwloc_get_cache_covering_cpuset(topo, current_pu);
auto current_pu_id = hwloc_bitmap_first(current_pu);
while (current_cache_obj
&& current_cache_obj->type == hwloc_obj_type_t::HWLOC_OBJ_CACHE) {
auto result_pus = hwloc_bitmap_make_wrapper();
traverse_hwloc_obj(result_pus.get(), topo, current_cache_obj, current_pu_id,
last_cache_obj);
if (!hwloc_bitmap_iszero(result_pus.get())) {
result_map.insert(make_pair(current_cache_lvl / distance_divider,
move(result_pus)));
}
++current_cache_lvl;
last_cache_obj = current_cache_obj;
current_cache_obj = current_cache_obj->parent;
}
return result_map;
}
pu_distance_map_t traverse_nodes(hwloc_topology_t topo,
const hwloc_distances_s* node_dist_matrix,
hwloc_const_cpuset_t current_pu,
hwloc_const_cpuset_t current_node) {
pu_distance_map_t result_map;
auto current_node_id = hwloc_bitmap_first(current_node);
auto num_nodes = node_dist_matrix->nbobjs;
// relevant row for the current NUMA node in the distance matrix
float* dist_ptr =
&node_dist_matrix
->latency[num_nodes * static_cast<unsigned int>(current_node_id)];
// iterate over all NUMA nodes and classify them in distance levels with
// respect to the current NUMA node
for (int x = 0; static_cast<unsigned int>(x) < num_nodes; ++x) {
auto tmp_nodes = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(tmp_nodes.get(), static_cast<unsigned int>(x));
auto tmp_pus = hwloc_bitmap_make_wrapper();
hwloc_cpuset_from_nodeset(topo, tmp_pus.get(),
tmp_nodes.get());
// you cannot steal from yourself
if (x == current_node_id) {
hwloc_bitmap_andnot(tmp_pus.get(), tmp_pus.get(),
current_pu);
}
if (hwloc_bitmap_iszero(tmp_pus.get())) {
continue;
}
auto result_map_it = result_map.find(dist_ptr[x]);
if (result_map_it == result_map.end()) {
// create a new distance group
result_map.insert(make_pair(dist_ptr[x], move(tmp_pus)));
} else {
// add PUs to an existing distance group
hwloc_bitmap_or(result_map_it->second.get(),
result_map_it->second.get(), tmp_pus.get());
}
}
return result_map;
}
// Merge the distance maps.
// The PU maps in cache_dists and node_dists must not intersect, because
// they are accumulated later.
// wp_matrix_first_node_idx is set to the first index which represents a
// full NUMA node.
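// Sketch of a merge on a hypothetical two-node machine:
//   cache_dists = {0.01 -> L1 peers, 0.03 -> L3 peers}
//   node_dists  = {1.0 -> local-node PUs, 2.1 -> remote-node PUs}
// After removing the cache-covered PUs from the local node, the merged map
// orders the groups by distance and wp_matrix_first_node_idx points at the
// 1.0 entry (index 2 here), or one entry earlier if the local node became
// empty.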
pu_distance_map_t merge_dist_maps(pu_distance_map_t&& cache_dists,
pu_distance_map_t&& node_dists,
int& wp_matrix_first_node_idx) {
if (!cache_dists.empty() && !node_dists.empty()) {
auto local_node_it = node_dists.begin();
// remove all PUs collected in cache_dists from the local node
auto local_node = local_node_it->second.get();
for (auto& e : cache_dists) {
hwloc_bitmap_andnot(local_node, local_node, e.second.get());
}
wp_matrix_first_node_idx = static_cast<int>(cache_dists.size());
if (hwloc_bitmap_iszero(local_node)) {
node_dists.erase(local_node_it);
--wp_matrix_first_node_idx;
}
cache_dists.insert(make_move_iterator(begin(node_dists)),
make_move_iterator(end(node_dists)));
return move(cache_dists);
} else if (!cache_dists.empty() && node_dists.empty()) {
// CAF cannot tell whether it collected all PUs, because the CPU could have
// two L3 caches while only one of them is represented by cache_dists.
CAF_CRITICAL("caf could not reliably collect all PUs");
} else if (cache_dists.empty() && !node_dists.empty()) {
wp_matrix_first_node_idx = 0;
return move(node_dists);
} else {
// both maps are empty, which happens on a single core machine
wp_matrix_first_node_idx = -1;
return pu_distance_map_t{};
}
}
worker_proximity_matrix_t
init_worker_proximity_matrix(Worker* self,
hwloc_const_cpuset_t current_pu) {
auto& cdata = d(self->parent());
auto topo = cdata.topo.get();
auto current_node = hwloc_bitmap_make_wrapper();
auto current_pu_id = hwloc_bitmap_first(current_pu);
hwloc_cpuset_to_nodeset(topo, current_pu,
current_node.get());
// abort if the current NUMA node is unknown
CAF_ASSERT(!hwloc_bitmap_iszero(current_node.get()));
pu_distance_map_t pu_dists;
worker_proximity_matrix_t result_wp_matrix;
auto node_dist_matrix =
hwloc_get_whole_distance_matrix_by_type(topo, HWLOC_OBJ_NUMANODE);
// If the NUMA distance matrix is unavailable, we still try to exploit
// cache locality
if (!node_dist_matrix || !node_dist_matrix->latency) {
auto cache_dists = traverse_caches(topo, current_pu);
// We have to check whether cache_dists includes all PUs or not.
// If not, we have to add an additional group which includes them.
auto all_pus = hwloc_bitmap_make_wrapper();
const float normalized_numa_node_dist = 1.0;
for (auto& e: cdata.worker_id_map) {
if (e.first != current_pu_id) {
hwloc_bitmap_set(all_pus.get(), e.first);
}
auto dist_it = dist_map.find(dist_pointer[x]);
if (dist_it == dist_map.end())
// create a new distance level
dist_map.insert(
std::make_pair(dist_pointer[x], std::move(tmp_pu_set)));
else
// add PUs to an existing distance level
hwloc_bitmap_or(dist_it->second.get(), dist_it->second.get(),
tmp_pu_set.get());
}
pu_distance_map_t tmp_node_dists;
tmp_node_dists.insert(
make_pair(normalized_numa_node_dist, move(all_pus)));
pu_dists = merge_dist_maps(move(cache_dists), move(tmp_node_dists),
wp_matrix_first_node_idx);
} else {
auto cache_dists = traverse_caches(topo, current_pu);
auto node_dists = traverse_nodes(topo, node_dist_matrix, current_pu,
current_node.get());
pu_dists = merge_dist_maps(move(cache_dists), move(node_dists),
wp_matrix_first_node_idx);
}
// return PU matrix sorted by its distance
result_matrix.reserve(dist_map.size());
for (auto& pu_set_it : dist_map) {
std::vector<Worker*> current_lvl;
xxx(current_pu, pu_dists);
// map PU ids to Worker* groups, sorted by their distance
result_wp_matrix.reserve(pu_dists.size());
for (auto& pu_set_it : pu_dists) {
std::vector<Worker*> current_worker_group;
auto pu_set = pu_set_it.second.get();
for (pu_id_t pu_id = hwloc_bitmap_first(pu_set); pu_id != -1;
for (int pu_id = hwloc_bitmap_first(pu_set); pu_id != -1;
pu_id = hwloc_bitmap_next(pu_set, pu_id)) {
auto worker_id_it = cdata.worker_id_map.find(pu_id);
// if a worker id is not found, fewer workers than available PUs
// have been started
if (worker_id_it != cdata.worker_id_map.end())
current_lvl.emplace_back(worker_id_it->second);
current_worker_group.emplace_back(worker_id_it->second);
}
// current_lvl can be empty if all PUs of a NUMA node are deactivated
if (!current_lvl.empty()) {
// The number of workers in current_lvl must be larger than in the
// previous lvl (if one exists).
// If it is smaller, something is wrong (should not be possible).
// If both have the same size, it is the same lvl (possible when lvls
// are created from different sources)
if (result_matrix.empty()
|| current_lvl.size()
> result_matrix[result_matrix.size() - 1].size()) {
result_matrix.emplace_back(std::move(current_lvl));
}
// current_worker_group can be empty if the PUs of this level are deactivated
if (!current_worker_group.empty()) {
result_wp_matrix.emplace_back(move(current_worker_group));
}
}
// accumulate steal_groups - each group also contains all lower-level
// groups; a worked example follows below
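// e.g., the distance-ordered groups {L2 peers}, {node peers}, {remote PUs}
// become {L2}, {L2 + node}, {L2 + node + remote}, so stealing at a higher
// index always considers all closer workers as well.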
auto last_group_it = result_wp_matrix.begin();
for (auto current_group_it = result_wp_matrix.begin();
current_group_it != result_wp_matrix.end(); ++current_group_it) {
if (current_group_it != result_wp_matrix.begin()) {
std::copy(last_group_it->begin(), last_group_it->end(),
std::back_inserter(*current_group_it));
++last_group_it;
}
}
// accumulate scheduler_lvls - each lvl contains all lower lvls
auto last_lvl_it = result_matrix.begin();
for (auto current_lvl_it = result_matrix.begin();
current_lvl_it != result_matrix.end(); ++current_lvl_it) {
if (current_lvl_it != result_matrix.begin()) {
std::copy(last_lvl_it->begin(), last_lvl_it->end(),
std::back_inserter(*current_lvl_it));
++last_lvl_it;
if (check_pu_id(current_pu)) {
int distance_idx = 0;
std::cout << "wp_matrix_first_node_idx: " << wp_matrix_first_node_idx << std::endl;
for (auto& neighbors : result_wp_matrix) {
std::cout << "result_matix distance_idx: " << distance_idx++ << std::endl;
std::cout << " -- ";
for (auto neighbor : neighbors) {
std::cout << neighbor->to_string() << "; ";
}
std::cout << std::endl;
}
}
return result_matrix;
}
return result_wp_matrix;
}
// This queue is exposed to other workers that may attempt to steal jobs
// from it and the central scheduling unit can push new jobs to the queue.
queue_type queue;
worker_proximity_matrix_t wp_matrix;
// Defines the index in wp_matrix which references the local NUMA-node.
// wp_matrix_first_node_idx is -1 if no neighbors exist (wp_matrix.empty()).
int wp_matrix_first_node_idx;
std::default_random_engine rengine;
std::uniform_int_distribution<size_t> uniform;
std::vector<poll_strategy> strategies;
size_t neighborhood_level;
atom_value actor_pinning_entity;
atom_value wws_start_entity;
size_t start_steal_group_idx;
};
/// Create x workers.
......@@ -228,8 +372,8 @@ public:
auto allowed_pus = hwloc_topology_get_allowed_cpuset(topo.get());
size_t num_allowed_pus =
static_cast<size_t>(hwloc_bitmap_weight(allowed_pus));
CALL_CAF_CRITICAL(num_allowed_pus < num_workers,
"less PUs than worker");
// abort if fewer PUs than workers are available
CAF_ASSERT(num_allowed_pus >= num_workers);
cdata.workers.reserve(num_allowed_pus);
auto pu_set = hwloc_bitmap_make_wrapper();
auto node_set = hwloc_bitmap_make_wrapper();
......@@ -256,49 +400,82 @@ public:
void init_worker_thread(Worker* self) {
auto& wdata = d(self);
auto& cdata = d(self->parent());
auto current_pu_set = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(current_pu_set.get(),
auto current_pu = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(current_pu.get(),
static_cast<unsigned int>(self->id()));
auto res = hwloc_set_cpubind(cdata.topo.get(), current_pu_set.get(),
auto res = hwloc_set_cpubind(cdata.topo.get(), current_pu.get(),
HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_NOMEMBIND);
CALL_CAF_CRITICAL(res == -1, "hwloc_set_cpubind() failed");
wdata.wp_matrix = wdata.init_worker_proximity_matrix(self, current_pu_set);
auto wm_max_idx = wdata.wp_matrix.size() - 1;
if (wdata.neighborhood_level == 0) {
// abort if hwloc_set_cpubind() failed
CAF_IGNORE_UNUSED(res);
CAF_ASSERT(res != -1);
wdata.wp_matrix = wdata.init_worker_proximity_matrix(self, current_pu.get());
auto& node_idx = wdata.wp_matrix_first_node_idx;
auto& wp_matrix = wdata.wp_matrix;
if (wp_matrix.empty()) {
// no neighbors could be found, use the fallback behavior
self->set_all_workers_are_neighbors(true);
} else if (wdata.neighborhood_level <= wm_max_idx) {
self->set_neighbors(
wdata.wp_matrix[wm_max_idx - wdata.neighborhood_level]);
self->set_all_workers_are_neighbors(false);
} else { //neighborhood_level > wm_max_idx
self->set_all_workers_are_neighbors(false);
wdata.xxx(current_pu.get(), "pinnning: wp_matrix.empty(); all are neigbhors");
} else if (wdata.actor_pinning_entity == atom("pu")) {
wdata.xxx(current_pu.get(), "pinning: pu; no workers are neigbors");
self->set_all_workers_are_neighbors(false);
} else if (wdata.actor_pinning_entity == atom("cache")) {
if (wp_matrix.size() == 1) {
wdata.xxx(current_pu.get(), "pinning: cache; all are neigbhors");
self->set_all_workers_are_neighbors(true);
} else {
wdata.xxx(current_pu.get(), "pinning: cache; wp_matrix[0]");
self->set_neighbors(wp_matrix[0]);
self->set_all_workers_are_neighbors(false);
}
} else if (wdata.actor_pinning_entity == atom("node")) {
if (node_idx == static_cast<int>(wp_matrix.size()) - 1) {
wdata.xxx(current_pu.get(), "pinning: node; all are neighbors");
self->set_all_workers_are_neighbors(true);
} else {
wdata.xxx(current_pu.get(), "pinning: node; wp_matrix[node_idx]");
self->set_neighbors(wp_matrix[node_idx]);
self->set_all_workers_are_neighbors(false);
}
} else if (wdata.actor_pinning_entity == atom("system")) {
wdata.xxx(current_pu.get(), "pinning: system; all are neigbors");
self->set_all_workers_are_neighbors(true);
} else {
CAF_CRITICAL("config variable actor_pnning_entity with unsopprted value");
}
if (wdata.wws_start_entity == atom("cache")) {
wdata.xxx(current_pu.get(), "wws: cache; start_steal_group_idx = 0");
wdata.start_steal_group_idx = 0;
} else if (wdata.wws_start_entity == atom("node")) {
wdata.xxx(current_pu.get(), "wws: node; start_steal_group_idx = node_idx");
wdata.start_steal_group_idx = node_idx;
} else if (wdata.wws_start_entity == atom("system")) {
wdata.xxx(current_pu.get(), "wws: system; start_steal_group_idx = wp_matrix.size() - 1");
wdata.start_steal_group_idx = wp_matrix.size() - 1;
} else {
CAF_CRITICAL("config variable wws_start_entity with unsopprted value");
}
}
template <class Worker>
resumable* try_steal(Worker* self, size_t& scheduler_lvl_idx,
resumable* try_steal(Worker* self, size_t& steal_group_idx,
size_t& steal_cnt) {
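// Weighted-stealing sketch: pick a random victim from the current steal
// group; steal_cnt counts the attempts, and after steal_group.size()
// attempts we escalate to the next (more distant, accumulated) group,
// staying at the outermost group once it is reached.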
auto& wdata = d(self);
auto& cdata = d(self->parent());
size_t num_workers = cdata.workers.size();
if (num_workers < 2) {
auto& wp_matrix = wdata.wp_matrix;
if (wp_matrix.empty()) {
// you can't steal from yourself, can you?
return nullptr;
}
auto& wmatrix = wdata.wp_matrix;
auto& scheduler_lvl = wmatrix[scheduler_lvl_idx];
auto& steal_group = wp_matrix[steal_group_idx];
auto res =
scheduler_lvl[wdata.uniform(wdata.rengine) % scheduler_lvl.size()]
steal_group[wdata.uniform(wdata.rengine) % steal_group.size()]
->data()
.queue.take_tail();
++steal_cnt;
if (steal_cnt >= scheduler_lvl.size()) {
if (steal_cnt >= steal_group.size()) {
steal_cnt = 0;
++scheduler_lvl_idx;
if (scheduler_lvl_idx >= wmatrix.size()) {
scheduler_lvl_idx = wmatrix.size() -1;
++steal_group_idx;
if (steal_group_idx >= wp_matrix.size()) {
steal_group_idx = wp_matrix.size() - 1;
}
}
return res;
......@@ -313,7 +490,7 @@ public:
// on and poll every 10 ms; this strategy strives to minimize the
// downside of "busy waiting", which still performs much better than a
// "signalizing" implementation based on mutexes and conition variables
size_t scheduler_lvl_idx = 0;
size_t steal_group_idx = d(self).start_steal_group_idx;
size_t steal_cnt = 0;
auto& strategies = d(self).strategies;
resumable* job = nullptr;
......@@ -324,7 +501,7 @@ public:
return job;
// try to steal every X poll attempts
if ((i % strat.steal_interval) == 0) {
job = try_steal(self, scheduler_lvl_idx, steal_cnt);
job = try_steal(self, steal_group_idx, steal_cnt);
if (job)
return job;
}
......@@ -339,11 +516,11 @@ public:
private:
// -- debug stuff --
friend std::ostream& operator<<(std::ostream& s,
const hwloc_bitmap_wrapper& w);
const bitmap_wrapper_t& w);
};
} // namespace policy
} // namespace caf
#endif // CAF_POLICY_NUMA_AWARE_WORK_STEALING_HPP
#endif // CAF_POLICY_LOCALITY_GUIDED_SCHEDULING_HPP_
......@@ -29,7 +29,7 @@
#include "caf/policy/work_sharing.hpp"
#include "caf/policy/work_stealing.hpp"
#include "caf/policy/numa_aware_work_stealing.hpp"
#include "caf/policy/locality_guided_scheduling.hpp"
#include "caf/scheduler/coordinator.hpp"
#include "caf/scheduler/test_coordinator.hpp"
......@@ -230,7 +230,7 @@ actor_system::actor_system(actor_system_config& cfg)
using test = scheduler::test_coordinator;
using share = scheduler::coordinator<policy::work_sharing>;
using steal = scheduler::coordinator<policy::work_stealing>;
using numa_steal = scheduler::coordinator<policy::numa_aware_work_stealing>;
using weighted_steal = scheduler::coordinator<policy::locality_guided_scheduling>;
using profiled_share = scheduler::profiled_coordinator<policy::profiled<policy::work_sharing>>;
using profiled_steal = scheduler::profiled_coordinator<policy::profiled<policy::work_stealing>>;
// set scheduler only if not explicitly loaded by user
......@@ -239,12 +239,12 @@ actor_system::actor_system(actor_system_config& cfg)
stealing = 0x0001,
sharing = 0x0002,
testing = 0x0003,
numa_stealing = 0x0004,
weighted_stealing = 0x0004,
profiled = 0x0100,
profiled_stealing = 0x0101,
profiled_sharing = 0x0102
};
sched_conf sc = numa_stealing;
sched_conf sc = weighted_stealing;
if (cfg.scheduler_policy == atom("stealing"))
sc = stealing;
......@@ -252,17 +252,17 @@ actor_system::actor_system(actor_system_config& cfg)
sc = sharing;
else if (cfg.scheduler_policy == atom("testing"))
sc = testing;
else if (cfg.scheduler_policy != atom("numa-steal"))
else if (cfg.scheduler_policy != atom("w-stealing"))
std::cerr
<< "[WARNING] " << deep_to_string(cfg.scheduler_policy)
<< " is an unrecognized scheduler pollicy, "
"falling back to 'numa-steal' (i.e. numa aware work-stealing)"
"falling back to 'w-stealing' (i.e. weighted work stealing)"
<< std::endl;
if (cfg.scheduler_enable_profiling)
sc = static_cast<sched_conf>(sc | profiled);
switch (sc) {
default: // any invalid configuration falls back to weighted work stealing
sched.reset(new numa_steal(*this));
sched.reset(new weighted_steal(*this));
break;
case sharing:
sched.reset(new share(*this));
......
......@@ -112,7 +112,7 @@ actor_system_config::actor_system_config()
add_message_type_impl<std::vector<atom_value>>("std::vector<@atom>");
add_message_type_impl<std::vector<message>>("std::vector<@message>");
// (1) hard-coded defaults
scheduler_policy = atom("numa-steal");
scheduler_policy = atom("w-stealing");
scheduler_max_threads = std::max(std::thread::hardware_concurrency(),
unsigned{4});
scheduler_max_throughput = std::numeric_limits<size_t>::max();
......@@ -125,7 +125,8 @@ actor_system_config::actor_system_config()
work_stealing_moderate_sleep_duration_us = 50;
work_stealing_relaxed_steal_interval = 1;
work_stealing_relaxed_sleep_duration_us = 10000;
numa_aware_work_stealing_neighborhood_level = 1;
lgs_actor_pinning_entity = atom("node");
lgs_weighted_work_stealing_start_entity = atom("cache");
logger_file_name = "actor_log_[PID]_[TIMESTAMP]_[NODE].log";
logger_file_format = "%r %c %p %a %t %C %M %F:%L %m%n";
logger_console = atom("none");
......@@ -139,18 +140,20 @@ actor_system_config::actor_system_config()
middleman_detach_multiplexer = true;
// fill our options vector for creating INI and CLI parsers
opt_group{options_, "scheduler"}
.add(scheduler_policy, "policy",
"sets the scheduling policy to either 'stealing' (default) or 'sharing'")
.add(scheduler_max_threads, "max-threads",
"sets a fixed number of worker threads for the scheduler")
.add(scheduler_max_throughput, "max-throughput",
"sets the maximum number of messages an actor consumes before yielding")
.add(scheduler_enable_profiling, "enable-profiling",
"enables or disables profiler output")
.add(scheduler_profiling_ms_resolution, "profiling-ms-resolution",
"sets the rate in ms in which the profiler collects data")
.add(scheduler_profiling_output_file, "profiling-output-file",
"sets the output file for the profiler");
.add(scheduler_policy, "policy",
"sets the scheduling policy to either 'w-stealing' (default), "
"'stealing' or 'sharing'")
.add(scheduler_max_threads, "max-threads",
"sets a fixed number of worker threads for the scheduler")
.add(
scheduler_max_throughput, "max-throughput",
"sets the maximum number of messages an actor consumes before yielding")
.add(scheduler_enable_profiling, "enable-profiling",
"enables or disables profiler output")
.add(scheduler_profiling_ms_resolution, "profiling-ms-resolution",
"sets the rate in ms in which the profiler collects data")
.add(scheduler_profiling_output_file, "profiling-output-file",
"sets the output file for the profiler");
opt_group(options_, "work-stealing")
.add(work_stealing_aggressive_poll_attempts, "aggressive-poll-attempts",
"sets the number of zero-sleep-interval polling attempts")
......@@ -166,9 +169,12 @@ actor_system_config::actor_system_config()
"sets the frequency of steal attempts during relaxed polling")
.add(work_stealing_relaxed_sleep_duration_us, "relaxed-sleep-duration",
"sets the sleep interval between poll attempts during relaxed polling");
opt_group{options_, "numa"}
.add(numa_aware_work_stealing_neighborhood_level, "neighborhood-level",
"defines the neighborhood radius (0=all, 1=next smaller group, 2=...)");
opt_group{options_, "lgs"}
.add(lgs_actor_pinning_entity, "actor-pinning-entity",
"defines the actor pinning entity (pu, cache, node, system)")
.add(
lgs_weighted_work_stealing_start_entity, "w-stealing-entity",
"defines the weighted work stealing start entity (cache, node, system)");
opt_group{options_, "logger"}
.add(logger_file_name, "file-name",
"sets the filesystem path of the log file")
......@@ -436,7 +442,7 @@ actor_system_config& actor_system_config::parse(message& args,
atom("asio")
# endif
}, middleman_network_backend, "middleman.network-backend");
verify_atom_opt({atom("stealing"), atom("sharing"), atom("numa-steal")},
verify_atom_opt({atom("stealing"), atom("sharing"), atom("w-stealing")},
scheduler_policy, "scheduler.policy ");
if (res.opts.count("caf#dump-config") != 0u) {
cli_helptext_printed = true;
......
......@@ -17,18 +17,18 @@
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#include "caf/policy/numa_aware_work_stealing.hpp"
#include "caf/policy/locality_guided_scheduling.hpp"
namespace caf {
namespace policy {
numa_aware_work_stealing::~numa_aware_work_stealing() {
locality_guided_scheduling::~locality_guided_scheduling() {
// nop
}
std::ostream&
operator<<(std::ostream& s,
const numa_aware_work_stealing::hwloc_bitmap_wrapper& w) {
const locality_guided_scheduling::bitmap_wrapper_t& w) {
char* tmp = nullptr;
hwloc_bitmap_asprintf(&tmp, w.get());
s << std::string(tmp);
......