Commit 6b9b6be5 authored by Sebastian Woelke

Implement soft actor pinning

parent ec3d4c47
......@@ -264,6 +264,10 @@ public:
size_t work_stealing_relaxed_steal_interval;
size_t work_stealing_relaxed_sleep_duration_us;
// -- config parameters for numa aware work-stealing -------------------------
size_t numa_aware_work_stealing_neighborhood_level;
// -- config parameters for the logger ---------------------------------------
std::string logger_file_name;
......
......@@ -40,7 +40,10 @@ public:
/// Enqueues `ptr` to the job list of the execution unit.
/// @warning Must only be called from a {@link resumable} currently
/// executed by this execution unit.
virtual void exec_later(resumable* ptr) = 0;
virtual void exec_later(resumable* ptr, bool high_prio = true) = 0;
/// Checks whether `ptr` has high memory locality relative to this execution_unit.
virtual bool is_neighbor(execution_unit* ptr) const = 0;
/// Returns the enclosing actor system.
/// @warning Must be set before the execution unit calls `resume` on an actor.
......
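The extended `execution_unit` interface above adds a priority flag to `exec_later` and a locality query. The following is an illustrative sketch only, showing how a custom unit might implement the two pure virtuals; the class name and the two plain job lists are hypothetical and not part of the commit, the pattern mirrors the dummy_unit in abstract_coordinator further down.

#include <vector>

#include "caf/execution_unit.hpp"
#include "caf/resumable.hpp"

// Hypothetical execution unit: keeps two plain job lists and never reports
// memory locality to any other unit.
class my_execution_unit : public caf::execution_unit {
public:
  explicit my_execution_unit(caf::actor_system* sys) : execution_unit(sys) {
    // nop
  }
  void exec_later(caf::resumable* ptr, bool high_prio = true) override {
    // High-priority jobs go to a private (internal) list, low-priority jobs
    // to a list that other workers could steal from.
    if (high_prio)
      internal_jobs_.push_back(ptr);
    else
      external_jobs_.push_back(ptr);
  }
  bool is_neighbor(caf::execution_unit*) const override {
    // This unit never shares memory locality with another unit.
    return false;
  }
private:
  std::vector<caf::resumable*> internal_jobs_;
  std::vector<caf::resumable*> external_jobs_;
};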
......@@ -102,34 +102,35 @@ public:
template <class Worker>
struct worker_data {
using worker_matrix_t = std::vector<std::vector<Worker*>>;
using neighbors_t = std::vector<Worker*>;
using worker_proximity_matrix_t = std::vector<neighbors_t>;
explicit worker_data(scheduler::abstract_coordinator* p)
: rengine(std::random_device{}())
, strategies(get_poll_strategies(p)) {
, strategies(get_poll_strategies(p))
, neighborhood_level(
p->system().config().numa_aware_work_stealing_neighborhood_level) {
// nop
}
worker_matrix_t init_worker_matrix(Worker* self,
const pu_set_t& current_pu_id_set) {
worker_proximity_matrix_t init_worker_proximity_matrix(Worker* self,
const pu_set_t& current_pu_set) {
auto& cdata = d(self->parent());
auto& topo = cdata.topo;
auto current_node_set = hwloc_bitmap_make_wrapper();
hwloc_cpuset_to_nodeset(topo.get(), current_pu_id_set.get(),
hwloc_cpuset_to_nodeset(topo.get(), current_pu_set.get(),
current_node_set.get());
CALL_CAF_CRITICAL(hwloc_bitmap_iszero(current_node_set.get()),
"Current NUMA node_set is unknown");
auto current_node_id = hwloc_bitmap_first(current_node_set.get());
std::map<float, pu_set_t> dist_map;
worker_matrix_t result_matrix;
// Distance matrix of NUMA nodes.
// It is possible to request the distance matrix on PU level,
// which would be a better match for our usecase
// but on all tested hardware it returned a nullptr, maybe future
// work?
worker_proximity_matrix_t result_matrix;
// Distance matrix for NUMA nodes. It is possible to request the distance
// matrix on PU level, which would be a better match for our use case, but
// on all tested hardware it returned a nullptr. Future work?
auto distance_matrix =
hwloc_get_whole_distance_matrix_by_type(topo.get(), HWLOC_OBJ_NUMANODE);
// if NUMA distance matrix is not available it is assumed that all PUs
// If the NUMA distance matrix is not available, it is assumed that all
// PUs have the same distance.
if (!distance_matrix || !distance_matrix->latency) {
auto allowed_const_pus = hwloc_topology_get_allowed_cpuset(topo.get());
......@@ -145,7 +146,8 @@ public:
* static_cast<unsigned int>(current_node_id)];
// Iterate over all NUMA nodes and classify them into distance levels
// relative to the current NUMA node.
for (node_id_t x = 0; static_cast<unsigned int>(x) < num_of_dist_objs; ++x) {
for (node_id_t x = 0; static_cast<unsigned int>(x) < num_of_dist_objs;
++x) {
node_set_t tmp_node_set = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(tmp_node_set.get(), static_cast<unsigned int>(x));
auto tmp_pu_set = hwloc_bitmap_make_wrapper();
......@@ -154,7 +156,7 @@ public:
// you cannot steal from yourself
if (x == current_node_id) {
hwloc_bitmap_andnot(tmp_pu_set.get(), tmp_pu_set.get(),
current_pu_id_set.get());
current_pu_set.get());
}
auto dist_it = dist_map.find(dist_pointer[x]);
if (dist_it == dist_map.end())
......@@ -175,14 +177,23 @@ public:
for (pu_id_t pu_id = hwloc_bitmap_first(pu_set); pu_id != -1;
pu_id = hwloc_bitmap_next(pu_set, pu_id)) {
auto worker_id_it = cdata.worker_id_map.find(pu_id);
// if worker id is not found less worker than available PUs
// If the worker id is not found, fewer workers than available PUs
// have been started.
if (worker_id_it != cdata.worker_id_map.end())
current_lvl.emplace_back(worker_id_it->second);
}
// current_lvl can be empty if all PUs of a NUMA node are deactivated
if (!current_lvl.empty()) {
result_matrix.emplace_back(std::move(current_lvl));
// The number of workers in current_lvl must be larger than in the
// previous level (if one exists).
// If it is smaller, something is wrong (should not be possible).
// If both have the same size, it is the same level (possible when levels
// are created from different sources).
if (result_matrix.empty()
|| current_lvl.size()
> result_matrix[result_matrix.size() - 1].size()) {
result_matrix.emplace_back(std::move(current_lvl));
}
}
}
// Accumulate scheduler levels: each level also contains all lower (closer) levels.
......@@ -190,20 +201,22 @@ public:
for (auto current_lvl_it = result_matrix.begin();
current_lvl_it != result_matrix.end(); ++current_lvl_it) {
if (current_lvl_it != result_matrix.begin()) {
std::copy(last_lvl_it->begin(), last_lvl_it->end(), std::back_inserter(*current_lvl_it)) ;
std::copy(last_lvl_it->begin(), last_lvl_it->end(),
std::back_inserter(*current_lvl_it));
++last_lvl_it;
}
}
return result_matrix;
}
// This queue is exposed to other workers that may attempt to steal jobs
// from it and the central scheduling unit can push new jobs to the queue.
queue_type queue;
worker_matrix_t worker_matrix;
worker_proximity_matrix_t wp_matrix;
std::default_random_engine rengine;
std::uniform_int_distribution<size_t> uniform;
std::vector<poll_strategy> strategies;
size_t neighborhood_level;
};
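The accumulation step at the end of init_worker_proximity_matrix turns the per-distance levels into cumulative ones, so that stealing at level N also covers all closer levels. A standalone sketch of that idea with plain integers, illustrative only and not part of the policy code:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

int main() {
  // Per-distance levels as built from the hwloc distance map: each level
  // initially holds only the workers at exactly that distance class.
  std::vector<std::vector<int>> levels{{1, 2, 3}, {4, 5}, {6, 7, 8, 9}};
  // Accumulate: every level additionally receives the content of the
  // previous (closer) level, mirroring the std::copy loop above.
  auto last = levels.begin();
  for (auto it = levels.begin(); it != levels.end(); ++it) {
    if (it != levels.begin()) {
      std::copy(last->begin(), last->end(), std::back_inserter(*it));
      ++last;
    }
  }
  // levels[0] == {1,2,3}, levels[1] == {4,5,1,2,3},
  // levels[2] == {6,7,8,9,4,5,1,2,3}
  for (auto& lvl : levels) {
    for (auto x : lvl)
      std::cout << x << ' ';
    std::cout << '\n';
  }
}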
/// Create x workers.
......@@ -243,12 +256,24 @@ public:
void init_worker_thread(Worker* self) {
auto& wdata = d(self);
auto& cdata = d(self->parent());
auto pu_set = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(pu_set.get(), static_cast<unsigned int>(self->id()));
auto res = hwloc_set_cpubind(cdata.topo.get(), pu_set.get(),
auto current_pu_set = hwloc_bitmap_make_wrapper();
hwloc_bitmap_set(current_pu_set.get(),
static_cast<unsigned int>(self->id()));
auto res = hwloc_set_cpubind(cdata.topo.get(), current_pu_set.get(),
HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_NOMEMBIND);
CALL_CAF_CRITICAL(res == -1, "hwloc_set_cpubind() failed");
wdata.worker_matrix = wdata.init_worker_matrix(self, pu_set);
wdata.wp_matrix = wdata.init_worker_proximity_matrix(self, current_pu_set);
auto wm_max_idx = wdata.wp_matrix.size() - 1;
if (wdata.neighborhood_level == 0) {
self->set_all_workers_are_neighbors(true);
} else if (wdata.neighborhood_level <= wm_max_idx) {
self->set_neighbors(
wdata.wp_matrix[wm_max_idx - wdata.neighborhood_level]);
self->set_all_workers_are_neighbors(false);
} else { // neighborhood_level > wm_max_idx, no neighbors are assigned
self->set_all_workers_are_neighbors(false);
}
}
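How neighborhood_level selects a proximity level: with three accumulated levels (wp_matrix indices 0..2, wm_max_idx = 2), level 1 picks wp_matrix[1], level 0 declares every worker a neighbor, and any level above 2 leaves the worker without neighbors. A compact sketch of that mapping, written as a hypothetical free function that is not part of the commit:

#include <cstddef>

// Illustrative mapping from the configured neighborhood_level to the index
// into the accumulated proximity matrix; returns -1 for "all workers are
// neighbors" and -2 for "no neighbors are assigned".
long pick_level(std::size_t neighborhood_level, std::size_t wp_matrix_size) {
  auto wm_max_idx = wp_matrix_size - 1;
  if (neighborhood_level == 0)
    return -1; // all workers are neighbors
  if (neighborhood_level <= wm_max_idx)
    return static_cast<long>(wm_max_idx - neighborhood_level);
  return -2; // level exceeds the matrix
}

With wp_matrix_size = 3 this yields -1, 1, 0, -2 for neighborhood levels 0, 1, 2, 3.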
template <class Worker>
......@@ -262,7 +287,7 @@ public:
// you can't steal from yourself, can you?
return nullptr;
}
auto& wmatrix = wdata.worker_matrix;
auto& wmatrix = wdata.wp_matrix;
auto& scheduler_lvl = wmatrix[scheduler_lvl_idx];
auto res =
scheduler_lvl[wdata.uniform(wdata.rengine) % scheduler_lvl.size()]
......@@ -313,7 +338,8 @@ public:
}
private:
// -- debug stuff --
friend std::ostream& operator <<(std::ostream& s, const hwloc_bitmap_wrapper& w);
friend std::ostream& operator<<(std::ostream& s,
const hwloc_bitmap_wrapper& w);
};
......
......@@ -116,8 +116,8 @@ public:
/// Initialize worker thread.
template <class Worker>
void init_worker_thread(Worker*) {
// nop
void init_worker_thread(Worker* self) {
self->set_all_workers_are_neighbors(true);
}
// Goes on a raid in quest for a shiny new job.
......
......@@ -877,6 +877,9 @@ protected:
exception_handler exception_handler_;
# endif // CAF_NO_EXCEPTIONS
/// Stores the home execution unit.
execution_unit* home_eu_;
/// @endcond
};
......
......@@ -43,11 +43,12 @@ public:
using policy_data = typename Policy::template worker_data<worker<Policy>>;
worker(size_t worker_id, coordinator_ptr worker_parent, size_t throughput)
: execution_unit(&worker_parent->system()),
max_throughput_(throughput),
id_(worker_id),
parent_(worker_parent),
data_(worker_parent) {
: execution_unit(&worker_parent->system())
, max_throughput_(throughput)
, id_(worker_id)
, parent_(worker_parent)
, all_workers_are_neighbors_(true)
, data_(worker_parent) {
// nop
}
......@@ -72,9 +73,13 @@ public:
/// Enqueues a new job to the worker's queue. High-priority jobs are
/// enqueued via the policy's internal enqueue, low-priority jobs via its
/// external enqueue.
/// @warning Must not be called from other threads.
void exec_later(job_ptr job) override {
void exec_later(job_ptr job, bool high_prio = true) override {
CAF_ASSERT(job != nullptr);
policy_.internal_enqueue(this, job);
CAF_LOG_TRACE(CAF_ARG(id()) << CAF_ARG(id_of(job)));
if (high_prio)
policy_.internal_enqueue(this, job);
else
policy_.external_enqueue(this, job);
}
coordinator_ptr parent() {
......@@ -103,6 +108,25 @@ public:
return max_throughput_;
}
bool is_neighbor(execution_unit* ptr) const override {
if (all_workers_are_neighbors_)
return true;
for (auto e : neighbors_) {
if (ptr == e) {
return true;
}
}
return false;
}
void set_neighbors(const std::vector<worker*>& n) {
neighbors_ = n;
}
void set_all_workers_are_neighbors(bool x) {
all_workers_are_neighbors_ = x;
}
private:
void run() {
CAF_SET_LOGGER_SYS(&system());
......@@ -148,6 +172,9 @@ private:
size_t id_;
// pointer to central coordinator
coordinator_ptr parent_;
// other workers with high memory locality (neighbors)
std::vector<worker*> neighbors_;
bool all_workers_are_neighbors_;
// policy-specific data
policy_data data_;
// instance of our policy object
......
......@@ -32,7 +32,10 @@ public:
explicit scoped_execution_unit(actor_system* sys = nullptr);
/// Delegates the resumable to the scheduler of `system()`.
void exec_later(resumable* ptr) override;
void exec_later(resumable* ptr, bool high_prio = true) override;
/// It is assumed that `this` is never in the neighborhood of `ptr`.
bool is_neighbor(execution_unit* ptr) const override;
};
} // namespace caf
......
......@@ -362,9 +362,12 @@ void abstract_coordinator::cleanup_and_release(resumable* ptr) {
dummy_unit(local_actor* job) : execution_unit(&job->home_system()) {
// nop
}
void exec_later(resumable* job) override {
void exec_later(resumable* job, bool) override {
resumables.push_back(job);
}
bool is_neighbor(execution_unit*) const override {
return false;
}
std::vector<resumable*> resumables;
};
switch (ptr->subtype()) {
......
......@@ -192,10 +192,16 @@ public:
// nop
}
void exec_later(resumable*) override {
void exec_later(resumable*, bool) override {
// should not happen in the first place
CAF_LOG_ERROR("actor registry actor called exec_later during shutdown");
}
bool is_neighbor(execution_unit*) const override {
// should not happen in the first place
CAF_LOG_ERROR("actor registry actor called is_neighbor during shutdown");
return false;
}
};
} // namespace <anonymous>
......
......@@ -125,6 +125,7 @@ actor_system_config::actor_system_config()
work_stealing_moderate_sleep_duration_us = 50;
work_stealing_relaxed_steal_interval = 1;
work_stealing_relaxed_sleep_duration_us = 10000;
numa_aware_work_stealing_neighborhood_level = 1;
logger_file_name = "actor_log_[PID]_[TIMESTAMP]_[NODE].log";
logger_file_format = "%r %c %p %a %t %C %M %F:%L %m%n";
logger_console = atom("none");
......@@ -165,6 +166,9 @@ actor_system_config::actor_system_config()
"sets the frequency of steal attempts during relaxed polling")
.add(work_stealing_relaxed_sleep_duration_us, "relaxed-sleep-duration",
"sets the sleep interval between poll attempts during relaxed polling");
opt_group{options_, "numa"}
.add(numa_aware_work_stealing_neighborhood_level, "neighborhood-level",
"defines the neighborhood radius (0=all, 1=next smaller group, 2=...)");
opt_group{options_, "logger"}
.add(logger_file_name, "file-name",
"sets the filesystem path of the log file")
......
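The new default and command-line option wire neighborhood-level into actor_system_config. A hedged usage sketch, setting the value programmatically before the system starts; the config class and the member exist in the hunks above, everything else assumes the usual parse/constructor entry points of the surrounding CAF version.

#include "caf/actor_system.hpp"
#include "caf/actor_system_config.hpp"

int main(int argc, char** argv) {
  caf::actor_system_config cfg;
  // Widen the neighborhood: level 2 treats the next-larger proximity group
  // as local as well; 0 would make every worker a neighbor.
  cfg.numa_aware_work_stealing_neighborhood_level = 2;
  // Command line / ini parsing can still override the value via the
  // "numa" option group registered above.
  cfg.parse(argc, argv);
  caf::actor_system system{cfg};
  return 0;
}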
......@@ -26,7 +26,9 @@ numa_aware_work_stealing::~numa_aware_work_stealing() {
// nop
}
std::ostream& operator<<(std::ostream& s, const numa_aware_work_stealing::hwloc_bitmap_wrapper& w) {
std::ostream&
operator<<(std::ostream& s,
const numa_aware_work_stealing::hwloc_bitmap_wrapper& w) {
char* tmp = nullptr;
hwloc_bitmap_asprintf(&tmp, w.get());
s << std::string(tmp);
......
......@@ -96,16 +96,17 @@ error scheduled_actor::default_exception_handler(pointer ptr,
// -- constructors and destructors ---------------------------------------------
scheduled_actor::scheduled_actor(actor_config& cfg)
: local_actor(cfg),
timeout_id_(0),
default_handler_(print_and_drop),
error_handler_(default_error_handler),
down_handler_(default_down_handler),
exit_handler_(default_exit_handler),
private_thread_(nullptr)
: local_actor(cfg)
, timeout_id_(0)
, default_handler_(print_and_drop)
, error_handler_(default_error_handler)
, down_handler_(default_down_handler)
, exit_handler_(default_exit_handler)
, private_thread_(nullptr)
# ifndef CAF_NO_EXCEPTIONS
, exception_handler_(default_exception_handler)
, exception_handler_(default_exception_handler)
# endif // CAF_NO_EXCEPTIONS
, home_eu_(cfg.host)
{
// nop
}
......@@ -135,10 +136,18 @@ void scheduled_actor::enqueue(mailbox_element_ptr ptr, execution_unit* eu) {
CAF_ASSERT(private_thread_ != nullptr);
private_thread_->resume();
} else {
if (eu != nullptr)
eu->exec_later(this);
else
home_system().scheduler().enqueue(this);
if (eu) {
// msg is received from another scheduled actor
if (eu == home_eu_ || eu->is_neighbor(home_eu_)) {
eu->exec_later(this, true); // internal enqueue
} else {
// `eu` has a high memory distance to the actor's home execution unit
home_eu_->exec_later(this, false); // external enqueue
}
} else {
// msg is received from a non-actor context or from a detached actor
home_eu_->exec_later(this, false); // external enqueue
}
}
break;
}
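In short, the enqueue hunk above implements the "soft pinning" rule from the commit title: stay on the sending scheduler if it is the home unit or one of its neighbors, otherwise fall back to the home unit's stealable queue. A condensed sketch of that decision as a hypothetical helper, not part of the commit:

#include "caf/execution_unit.hpp"

// Illustrative decision helper; `eu` is the unit the message arrives on,
// `home` is the actor's pinned home execution unit.
enum class enqueue_target { sender_internal, home_external };

inline enqueue_target pick_target(caf::execution_unit* eu,
                                  caf::execution_unit* home) {
  // Stay on the sender if it is the home unit itself or a memory-local
  // neighbor of it; otherwise fall back to the home unit's external queue.
  if (eu != nullptr && (eu == home || eu->is_neighbor(home)))
    return enqueue_target::sender_internal; // eu->exec_later(actor, true)
  return enqueue_target::home_external;     // home->exec_later(actor, false)
}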
......@@ -564,6 +573,15 @@ bool scheduled_actor::activate(execution_unit* ctx) {
if (finalize()) {
CAF_LOG_DEBUG("actor_done() returned true right after make_behavior()");
return false;
} else {
CAF_LOG_DEBUG("initialized actor:" << CAF_ARG(name()) << CAF_ARG(ctx));
// Soft pinning: adopt `ctx` as the new home execution unit when the actor
// has no real home yet (dummy unit) or when `ctx` is a neighbor of the
// current home unit.
if (home_eu_ != ctx) {
if (home_eu_ == system().dummy_execution_unit()) {
home_eu_ = ctx;
} else if (home_eu_->is_neighbor(ctx)) {
home_eu_ = ctx;
}
}
}
CAF_LOG_DEBUG("initialized actor:" << CAF_ARG(name()));
}
......
......@@ -29,8 +29,12 @@ scoped_execution_unit::scoped_execution_unit(actor_system* sys)
// nop
}
void scoped_execution_unit::exec_later(resumable* ptr) {
void scoped_execution_unit::exec_later(resumable* ptr, bool) {
system().scheduler().enqueue(ptr);
}
bool scoped_execution_unit::is_neighbor(execution_unit*) const {
return false;
}
} // namespace caf
......@@ -37,10 +37,14 @@ public:
// nop
}
void exec_later(resumable* ptr) override {
void exec_later(resumable* ptr, bool) override {
parent_->jobs.push_back(ptr);
}
bool is_neighbor(execution_unit*) const override {
return false;
}
private:
test_coordinator* parent_;
};
......
......@@ -321,7 +321,9 @@ public:
expected<doorman_ptr> new_tcp_doorman(uint16_t port, const char* in,
bool reuse_addr) override;
void exec_later(resumable* ptr) override;
void exec_later(resumable* ptr, bool high_prio = true) override;
bool is_neighbor(execution_unit*) const override;
explicit default_multiplexer(actor_system* sys);
......
......@@ -188,7 +188,8 @@ public:
}
protected:
void exec_later(resumable* ptr) override;
void exec_later(resumable* ptr, bool high_prio = true) override;
bool is_neighbor(execution_unit*) const override;
private:
using resumable_ptr = intrusive_ptr<resumable>;
......
......@@ -817,7 +817,7 @@ default_multiplexer::~default_multiplexer() {
# endif
}
void default_multiplexer::exec_later(resumable* ptr) {
void default_multiplexer::exec_later(resumable* ptr, bool) {
CAF_ASSERT(ptr);
switch (ptr->subtype()) {
case resumable::io_actor:
......@@ -829,6 +829,10 @@ void default_multiplexer::exec_later(resumable* ptr) {
}
}
bool default_multiplexer::is_neighbor(execution_unit*) const {
return false;
}
scribe_ptr default_multiplexer::new_scribe(native_socket fd) {
CAF_LOG_TRACE("");
return make_counted<scribe_impl>(*this, fd);
......
......@@ -612,7 +612,7 @@ void test_multiplexer::flush_runnables() {
} while (!runnables.empty());
}
void test_multiplexer::exec_later(resumable* ptr) {
void test_multiplexer::exec_later(resumable* ptr, bool) {
CAF_ASSERT(ptr != nullptr);
CAF_LOG_TRACE("");
switch (ptr->subtype()) {
......@@ -642,6 +642,10 @@ void test_multiplexer::exec_later(resumable* ptr) {
}
}
bool test_multiplexer::is_neighbor(execution_unit*) const {
return false;
}
void test_multiplexer::exec(resumable_ptr& ptr) {
CAF_ASSERT(std::this_thread::get_id() == tid_);
CAF_ASSERT(ptr != nullptr);
......