Commit a0a23971 authored by Dominik Charousset

Merge branch 'topic/neverlord/spinlocks'

parents 58b53ab9 a63af3ea
......@@ -132,7 +132,6 @@ caf_add_component(
src/detail/ripemd_160.cpp
src/detail/serialized_size.cpp
src/detail/set_thread_name.cpp
src/detail/shared_spinlock.cpp
src/detail/size_based_credit_controller.cpp
src/detail/stringification_inspector.cpp
src/detail/sync_request_bouncer.cpp
......
......@@ -4,11 +4,7 @@
#pragma once
#include <functional>
#include <memory>
#include "caf/detail/core_export.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/extend.hpp"
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp"
......@@ -17,6 +13,10 @@
#include "caf/mixin/subscriber.hpp"
#include "caf/scheduled_actor.hpp"
#include <functional>
#include <memory>
#include <shared_mutex>
namespace caf {
template <>
......@@ -45,9 +45,6 @@ public:
/// Required by `spawn` for type deduction.
using behavior_type = behavior;
/// A shared lockable.
using lock_type = detail::shared_spinlock;
/// Delegates incoming messages to user-defined event loop.
using enqueue_handler = std::function<void(mailbox_element_ptr)>;
......@@ -92,7 +89,7 @@ private:
on_exit_handler on_exit_;
// guards access to handler_
lock_type lock_;
std::shared_mutex lock_;
};
} // namespace caf
......@@ -4,18 +4,17 @@
#pragma once
#include <functional>
#include <vector>
#include "caf/actor.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/detail/split_join.hpp"
#include "caf/execution_unit.hpp"
#include "caf/locks.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/monitorable_actor.hpp"
#include <functional>
#include <mutex>
#include <vector>
namespace caf {
/// An actor pool is a lightweight abstraction for a set of workers.
......@@ -42,10 +41,11 @@ namespace caf {
/// @experimental
class CAF_CORE_EXPORT actor_pool : public monitorable_actor {
public:
using uplock = upgrade_lock<detail::shared_spinlock>;
using actor_vec = std::vector<actor>;
using factory = std::function<actor()>;
using policy = std::function<void(actor_system&, uplock&, const actor_vec&,
using guard_type = std::unique_lock<std::mutex>;
using policy
= std::function<void(actor_system&, guard_type&, const actor_vec&,
mailbox_element_ptr&, execution_unit*)>;
/// Returns a simple round robin dispatching policy.
......@@ -99,14 +99,13 @@ protected:
void on_cleanup(const error& reason) override;
private:
bool filter(upgrade_lock<detail::shared_spinlock>&,
const strong_actor_ptr& sender, message_id mid, message& msg,
execution_unit* eu);
bool filter(guard_type&, const strong_actor_ptr& sender, message_id mid,
message& msg, execution_unit* eu);
// call without workers_mtx_ held
void quit(execution_unit* host);
detail::shared_spinlock workers_mtx_;
std::mutex workers_mtx_;
std::vector<actor> workers_;
policy policy_;
exit_reason planned_reason_;
......
......@@ -9,7 +9,6 @@
#include "caf/abstract_actor.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/monitorable_actor.hpp"
namespace caf {
......
......@@ -4,23 +4,23 @@
#pragma once
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include "caf/abstract_actor.hpp"
#include "caf/actor.hpp"
#include "caf/actor_cast.hpp"
#include "caf/actor_control_block.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/fwd.hpp"
#include "caf/telemetry/int_gauge.hpp"
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <thread>
#include <unordered_map>
namespace caf {
/// A registry is used to associate actors to IDs or names. This allows a
......@@ -113,11 +113,11 @@ private:
mutable std::mutex running_mtx_;
mutable std::condition_variable running_cv_;
mutable detail::shared_spinlock instances_mtx_;
mutable std::shared_mutex instances_mtx_;
entries entries_;
name_map named_entries_;
mutable detail::shared_spinlock named_entries_mtx_;
mutable std::shared_mutex named_entries_mtx_;
actor_system& system_;
};
......
......@@ -6,55 +6,17 @@
#include "caf/config.hpp"
#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <list>
#include <mutex>
#include <thread>
// GCC hack
#if defined(CAF_GCC) && !defined(_GLIBCXX_USE_SCHED_YIELD)
# include <time.h>
namespace std {
namespace this_thread {
namespace {
inline void yield() noexcept {
timespec req;
req.tv_sec = 0;
req.tv_nsec = 1;
nanosleep(&req, nullptr);
}
} // namespace
} // namespace this_thread
} // namespace std
#endif
// another GCC hack
#if defined(CAF_GCC) && !defined(_GLIBCXX_USE_NANOSLEEP)
# include <time.h>
namespace std {
namespace this_thread {
namespace {
template <class Rep, typename Period>
inline void sleep_for(const chrono::duration<Rep, Period>& rt) {
auto sec = chrono::duration_cast<chrono::seconds>(rt);
auto nsec = chrono::duration_cast<chrono::nanoseconds>(rt - sec);
timespec req;
req.tv_sec = sec.count();
req.tv_nsec = nsec.count();
nanosleep(&req, nullptr);
}
} // namespace
} // namespace this_thread
} // namespace std
#endif
namespace caf::detail {
/*
* A thread-safe double-ended queue based on http://drdobbs.com/cpp/211601363.
* This implementation is optimized for FIFO, i.e., it supports fast insertion
* at the end and fast removal from the beginning. As long as the queue is
* only used for FIFO operations, readers do not block writers and vice versa.
* A thread-safe, double-ended queue for work-stealing.
*/
template <class T>
class double_ended_queue {
......@@ -67,163 +29,82 @@ public:
using pointer = value_type*;
using const_pointer = const value_type*;
class node {
public:
pointer value;
std::atomic<node*> next;
explicit node(pointer val) : value(val), next(nullptr) {
// nop
}
private:
static constexpr size_type payload_size
= sizeof(pointer) + sizeof(std::atomic<node*>);
static constexpr size_type cline_size = CAF_CACHE_LINE_SIZE;
static constexpr size_type pad_size
= (cline_size * ((payload_size / cline_size) + 1)) - payload_size;
// avoid false sharing
static_assert(pad_size > 0, "invalid padding size calculated");
char pad[pad_size];
};
using unique_node_ptr = std::unique_ptr<node>;
static_assert(sizeof(node*) < CAF_CACHE_LINE_SIZE,
"sizeof(node*) >= CAF_CACHE_LINE_SIZE");
double_ended_queue() {
head_lock_.clear();
tail_lock_.clear();
auto ptr = new node(nullptr);
head_ = ptr;
tail_ = ptr;
}
~double_ended_queue() {
auto ptr = head_.load();
while (ptr) {
unique_node_ptr tmp{ptr};
ptr = tmp->next.load();
}
}
// -- for the owner ----------------------------------------------------------
// acquires only one lock
void append(pointer value) {
void prepend(pointer value) {
CAF_ASSERT(value != nullptr);
auto* tmp = new node(value);
lock_guard guard(tail_lock_);
// publish & swing last forward
tail_.load()->next = tmp;
tail_ = tmp;
std::unique_lock guard{mtx_};
items_.push_front(value);
}
// acquires both locks
void prepend(pointer value) {
CAF_ASSERT(value != nullptr);
auto* tmp = new node(value);
node* first = nullptr;
// acquire both locks since we might touch last_ too
lock_guard guard1(head_lock_);
lock_guard guard2(tail_lock_);
first = head_.load();
CAF_ASSERT(first != nullptr);
auto next = first->next.load();
// first_ always points to a dummy with no value,
// hence we put the new element second
if (next) {
CAF_ASSERT(first != tail_);
tmp->next = next;
} else {
// queue is empty
CAF_ASSERT(first == tail_);
tail_ = tmp;
pointer try_take_head() {
std::unique_lock guard{mtx_};
if (!items_.empty()) {
auto* result = items_.front();
items_.pop_front();
return result;
}
first->next = tmp;
return nullptr;
}
// acquires only one lock, returns nullptr on failure
pointer take_head() {
unique_node_ptr first;
pointer result = nullptr;
{ // lifetime scope of guard
lock_guard guard(head_lock_);
first.reset(head_.load());
node* next = first->next;
if (next == nullptr) {
// queue is empty
first.release();
template <class Duration>
pointer try_take_head(Duration rel_timeout) {
auto abs_timeout = std::chrono::system_clock::now() + rel_timeout;
std::unique_lock guard{mtx_};
while (items_.empty()) {
if (cv_.wait_until(guard, abs_timeout) == std::cv_status::timeout) {
return nullptr;
}
// take it out of the node & swing first forward
result = next->value;
next->value = nullptr;
head_ = next;
}
auto* result = items_.front();
items_.pop_front();
return result;
}
// acquires both locks, returns nullptr on failure
pointer take_tail() {
pointer result = nullptr;
unique_node_ptr last;
{ // lifetime scope of guards
lock_guard guard1(head_lock_);
lock_guard guard2(tail_lock_);
CAF_ASSERT(head_ != nullptr);
last.reset(tail_.load());
if (last.get() == head_.load()) {
last.release();
return nullptr;
}
result = last->value;
tail_ = find_predecessor(last.get());
CAF_ASSERT(tail_ != nullptr);
tail_.load()->next = nullptr;
pointer take_head() {
std::unique_lock guard{mtx_};
while (items_.empty()) {
cv_.wait(guard);
}
auto* result = items_.front();
items_.pop_front();
return result;
}
// does not lock
bool empty() const {
// atomically compares first and last pointer without locks
return head_.load() == tail_.load();
// Unsafe, since it does not wake up a currently sleeping worker.
void unsafe_append(pointer value) {
std::unique_lock guard{mtx_};
items_.push_back(value);
}
private:
// precondition: *both* locks acquired
node* find_predecessor(node* what) {
for (auto i = head_.load(); i != nullptr; i = i->next) {
if (i->next == what) {
return i;
// -- for others -------------------------------------------------------------
void append(pointer value) {
bool do_notify = false;
{
std::unique_lock guard{mtx_};
do_notify = items_.empty();
items_.push_back(value);
}
if (do_notify) {
cv_.notify_one();
}
return nullptr;
}
// guarded by head_lock_
std::atomic<node*> head_;
char pad1_[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// guarded by tail_lock_
std::atomic<node*> tail_;
char pad2_[CAF_CACHE_LINE_SIZE - sizeof(node*)];
// enforce exclusive access
std::atomic_flag head_lock_;
std::atomic_flag tail_lock_;
class lock_guard {
public:
explicit lock_guard(std::atomic_flag& lock) : lock_(lock) {
while (lock.test_and_set(std::memory_order_acquire)) {
std::this_thread::yield();
}
pointer try_take_tail() {
std::unique_lock guard{mtx_};
if (!items_.empty()) {
auto* result = items_.back();
items_.pop_back();
return result;
}
~lock_guard() {
lock_.clear(std::memory_order_release);
return nullptr;
}
private:
std::atomic_flag& lock_;
};
private:
std::mutex mtx_;
std::condition_variable cv_;
std::list<pointer> items_;
};
} // namespace caf::detail
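
The rewritten queue replaces the lock-free node chain with a std::list guarded by a single std::mutex plus a std::condition_variable, so external producers only wake a sleeping worker when the queue transitions from empty to non-empty. Below is a minimal, self-contained sketch of that pattern; the names simple_job_queue, queue, and job are illustrative and do not appear in the diff.

// Illustrative analog of the new double_ended_queue logic: a std::list
// guarded by one mutex, with a condition variable that producers signal only
// when the queue goes from empty to non-empty. Not the CAF class itself.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <list>
#include <mutex>
#include <thread>

class simple_job_queue {
public:
  // Called by other threads: wakes a sleeping consumer if the queue was empty.
  void append(int job) {
    bool do_notify = false;
    {
      std::unique_lock guard{mtx_};
      do_notify = items_.empty();
      items_.push_back(job);
    }
    if (do_notify)
      cv_.notify_one();
  }

  // Called by the owning worker: waits up to rel_timeout for a job.
  // Returns true and stores the job in `out` on success, false on timeout.
  template <class Duration>
  bool try_take_head(int& out, Duration rel_timeout) {
    auto abs_timeout = std::chrono::system_clock::now() + rel_timeout;
    std::unique_lock guard{mtx_};
    while (items_.empty())
      if (cv_.wait_until(guard, abs_timeout) == std::cv_status::timeout)
        return false;
    out = items_.front();
    items_.pop_front();
    return true;
  }

private:
  std::mutex mtx_;
  std::condition_variable cv_;
  std::list<int> items_;
};

int main() {
  simple_job_queue queue;
  std::thread producer{[&] { queue.append(42); }};
  int job = 0;
  if (queue.try_take_head(job, std::chrono::milliseconds(100)))
    std::cout << "got job " << job << '\n';
  producer.join();
}
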
// This file is part of CAF, the C++ Actor Framework. See the file LICENSE in
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#pragma once
#include <atomic>
#include <cstddef>
#include "caf/detail/core_export.hpp"
namespace caf::detail {
/// A spinlock implementation providing shared and exclusive locking.
class CAF_CORE_EXPORT shared_spinlock {
public:
shared_spinlock();
void lock();
void unlock();
bool try_lock();
void lock_shared();
void unlock_shared();
bool try_lock_shared();
void lock_upgrade();
void unlock_upgrade();
void unlock_upgrade_and_lock();
void unlock_and_lock_upgrade();
private:
std::atomic<long> flag_;
};
} // namespace caf::detail
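
The commit deletes the hand-rolled shared_spinlock above and switches every user to the standard <shared_mutex> facilities. A minimal sketch of the replacement pattern follows: readers take a std::shared_lock, writers take a std::unique_lock on the same std::shared_mutex. The registry class and its members here are made up for the example and are not part of the diff.

// Illustrative sketch of the shared/exclusive locking pattern used
// throughout this commit with the standard library instead of the spinlock.
#include <map>
#include <mutex>
#include <shared_mutex>
#include <string>

class registry {
public:
  int lookup(const std::string& key) const {
    std::shared_lock guard{mtx_}; // many readers may hold this concurrently
    auto i = entries_.find(key);
    return i != entries_.end() ? i->second : -1;
  }

  void insert(const std::string& key, int value) {
    std::unique_lock guard{mtx_}; // writers get exclusive access
    entries_[key] = value;
  }

private:
  mutable std::shared_mutex mtx_;
  std::map<std::string, int> entries_;
};

int main() {
  registry reg;
  reg.insert("worker", 1);
  return reg.lookup("worker") == 1 ? 0 : 1;
}
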
......@@ -9,9 +9,6 @@
#include "caf/actor.hpp"
#include "caf/actor_system.hpp"
#include "caf/event_based_actor.hpp"
#include "caf/locks.hpp"
#include "caf/detail/shared_spinlock.hpp"
namespace caf::detail {
......@@ -78,8 +75,7 @@ public:
// nop
}
void
operator()(actor_system& sys, upgrade_lock<detail::shared_spinlock>& ulock,
void operator()(actor_system& sys, std::unique_lock<std::shared_mutex>& ulock,
const std::vector<actor>& workers, mailbox_element_ptr& ptr,
execution_unit* host) {
if (!ptr->sender)
......
......@@ -7,7 +7,8 @@
#include "caf/actor.hpp"
#include "caf/actor_proxy.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include <shared_mutex>
namespace caf {
......@@ -32,7 +33,7 @@ private:
bool forward_msg(strong_actor_ptr sender, message_id mid, message msg,
const forwarding_stack* fwd = nullptr);
mutable detail::shared_spinlock broker_mtx_;
mutable std::shared_mutex broker_mtx_;
actor broker_;
};
......
// This file is part of CAF, the C++ Actor Framework. See the file LICENSE in
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#pragma once
#include <mutex>
namespace caf {
template <class Lockable>
using unique_lock = std::unique_lock<Lockable>;
template <class SharedLockable>
class shared_lock {
public:
using lockable = SharedLockable;
explicit shared_lock(lockable& arg) : lockable_(&arg) {
lockable_->lock_shared();
}
~shared_lock() {
unlock();
}
bool owns_lock() const {
return lockable_ != nullptr;
}
void unlock() {
if (lockable_) {
lockable_->unlock_shared();
lockable_ = nullptr;
}
}
lockable* release() {
auto result = lockable_;
lockable_ = nullptr;
return result;
}
private:
lockable* lockable_;
};
template <class SharedLockable>
using upgrade_lock = shared_lock<SharedLockable>;
template <class UpgradeLockable>
class upgrade_to_unique_lock {
public:
using lockable = UpgradeLockable;
template <class LockType>
explicit upgrade_to_unique_lock(LockType& other) {
lockable_ = other.release();
if (lockable_)
lockable_->unlock_upgrade_and_lock();
}
~upgrade_to_unique_lock() {
unlock();
}
bool owns_lock() const {
return lockable_ != nullptr;
}
void unlock() {
if (lockable_) {
lockable_->unlock();
lockable_ = nullptr;
}
}
private:
lockable* lockable_;
};
} // namespace caf
......@@ -22,7 +22,6 @@
#include "caf/detail/pretty_type_name.hpp"
#include "caf/detail/ringbuffer.hpp"
#include "caf/detail/scope_guard.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/fwd.hpp"
#include "caf/intrusive/drr_queue.hpp"
#include "caf/intrusive/fifo_inbox.hpp"
......
......@@ -39,13 +39,6 @@ public:
timespan sleep_duration;
};
// what is needed to implement the waiting strategy.
struct wait_strategy {
std::mutex lock;
std::condition_variable cv;
bool sleeping{false};
};
// The coordinator has only a counter for round-robin enqueue to its workers.
struct coordinator_data {
explicit coordinator_data(scheduler::abstract_coordinator*)
......@@ -68,7 +61,6 @@ public:
std::default_random_engine rengine;
std::uniform_int_distribution<size_t> uniform;
std::array<poll_strategy, 3> strategies;
wait_strategy waitdata;
};
// Goes on a raid in quest for a shiny new job.
......@@ -84,7 +76,7 @@ public:
if (victim == self->id())
victim = p->num_workers() - 1;
// steal oldest element from the victim's queue
return d(p->worker_by_id(victim)).queue.take_tail();
return d(p->worker_by_id(victim)).queue.try_take_tail();
}
template <class Coordinator>
......@@ -96,14 +88,6 @@ public:
template <class Worker>
void external_enqueue(Worker* self, resumable* job) {
d(self).queue.append(job);
auto& lock = d(self).waitdata.lock;
auto& cv = d(self).waitdata.cv;
{ // guard scope
std::unique_lock<std::mutex> guard(lock);
// check if the worker is sleeping
if (d(self).waitdata.sleeping && !d(self).queue.empty())
cv.notify_one();
}
}
template <class Worker>
......@@ -115,7 +99,7 @@ public:
void resume_job_later(Worker* self, resumable* job) {
// job has voluntarily released the CPU to let others run instead
// this means we are going to put this job to the very end of our queue
d(self).queue.append(job);
d(self).queue.unsafe_append(job);
}
template <class Worker>
......@@ -125,67 +109,37 @@ public:
// polling, then we relax our polling a bit and wait 50 us between
// dequeue attempts
auto& strategies = d(self).strategies;
resumable* job = nullptr;
auto* job = d(self).queue.try_take_head();
if (job)
return job;
for (size_t k = 0; k < 2; ++k) { // iterate over the first two strategies
for (size_t i = 0; i < strategies[k].attempts;
i += strategies[k].step_size) {
job = d(self).queue.take_head();
if (job)
return job;
// try to steal every X poll attempts
if ((i % strategies[k].steal_interval) == 0) {
job = try_steal(self);
if (job)
return job;
}
if (strategies[k].sleep_duration.count() > 0) {
#ifdef CAF_MSVC
// Windows cannot sleep less than 1000 us, so timeout is converted to
// 0 inside sleep_for(), but Sleep(0) is dangerous so replace it with
// yield()
if (strategies[k].sleep_duration.count() < 1000)
std::this_thread::yield();
else
std::this_thread::sleep_for(strategies[k].sleep_duration);
#else
std::this_thread::sleep_for(strategies[k].sleep_duration);
#endif
}
// wait for some work to appear
job = d(self).queue.try_take_head(strategies[k].sleep_duration);
if (job)
return job;
}
}
// we assume pretty much nothing is going on so we can relax polling
// and falling to sleep on a condition variable whose timeout is the one
// of the relaxed polling strategy
auto& relaxed = strategies[2];
auto& sleeping = d(self).waitdata.sleeping;
auto& lock = d(self).waitdata.lock;
auto& cv = d(self).waitdata.cv;
bool notimeout = true;
size_t i = 1;
do {
{ // guard scope
std::unique_lock<std::mutex> guard(lock);
sleeping = true;
if (!cv.wait_for(guard, relaxed.sleep_duration,
[&] { return !d(self).queue.empty(); }))
notimeout = false;
sleeping = false;
}
if (notimeout) {
job = d(self).queue.take_head();
} else {
notimeout = true;
if ((i % relaxed.steal_interval) == 0)
job = try_steal(self);
}
++i;
job = d(self).queue.try_take_head(relaxed.sleep_duration);
} while (job == nullptr);
return job;
}
template <class Worker, class UnaryFunction>
void foreach_resumable(Worker* self, UnaryFunction f) {
auto next = [&] { return d(self).queue.take_head(); };
auto next = [&] { return d(self).queue.try_take_head(); };
for (auto job = next(); job != nullptr; job = next()) {
f(job);
}
......
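
The comments in dequeue describe a three-phase strategy: aggressive polling, moderate polling, and a relaxed phase that now simply blocks on the queue's timed try_take_head instead of the removed wait_strategy condition variable. A compressed sketch of that control flow is given below; the poll_strategy fields mirror the diff, but the dequeue helper, its Queue and StealFn parameters, and all behavior details are assumptions for illustration only.

// Illustrative sketch of a three-phase dequeue loop: poll the own queue with
// per-phase timeouts, periodically try to steal, then block on the relaxed
// phase's timeout until a job arrives. Queue is assumed to provide
// try_take_head() and try_take_head(timeout) returning nullptr on failure,
// matching the interface introduced in this commit.
#include <array>
#include <chrono>
#include <cstddef>

struct poll_strategy {
  std::size_t attempts;
  std::size_t step_size;
  std::size_t steal_interval;
  std::chrono::microseconds sleep_duration;
};

template <class Queue, class StealFn>
auto dequeue(Queue& queue, StealFn try_steal,
             const std::array<poll_strategy, 3>& strategies) {
  if (auto* job = queue.try_take_head())
    return job;
  // phases 0 and 1: poll with short timeouts and occasional steal attempts
  for (std::size_t k = 0; k < 2; ++k) {
    for (std::size_t i = 0; i < strategies[k].attempts;
         i += strategies[k].step_size) {
      if (i % strategies[k].steal_interval == 0)
        if (auto* job = try_steal())
          return job;
      if (auto* job = queue.try_take_head(strategies[k].sleep_duration))
        return job;
    }
  }
  // phase 2: keep blocking with the relaxed timeout until a job shows up
  auto job = queue.try_take_head(strategies[2].sleep_duration);
  while (job == nullptr)
    job = queue.try_take_head(strategies[2].sleep_duration);
  return job;
}
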
......@@ -17,7 +17,6 @@
#include "caf/actor_system.hpp"
#include "caf/config.hpp"
#include "caf/default_attachable.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/execution_unit.hpp"
#include "caf/logger.hpp"
#include "caf/mailbox_element.hpp"
......
......@@ -5,7 +5,6 @@
#include "caf/abstract_group.hpp"
#include "caf/actor_cast.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/group.hpp"
#include "caf/group_manager.hpp"
#include "caf/group_module.hpp"
......
......@@ -2,7 +2,6 @@
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#include "caf/locks.hpp"
#include "caf/actor_companion.hpp"
namespace caf {
......@@ -16,7 +15,7 @@ actor_companion::~actor_companion() {
}
void actor_companion::on_enqueue(enqueue_handler handler) {
std::lock_guard<lock_type> guard(lock_);
std::lock_guard guard{lock_};
on_enqueue_ = std::move(handler);
}
......@@ -26,7 +25,7 @@ void actor_companion::on_exit(on_exit_handler handler) {
bool actor_companion::enqueue(mailbox_element_ptr ptr, execution_unit*) {
CAF_ASSERT(ptr);
shared_lock<lock_type> guard(lock_);
std::shared_lock guard{lock_};
if (on_enqueue_) {
on_enqueue_(std::move(ptr));
return true;
......@@ -49,7 +48,7 @@ void actor_companion::launch(execution_unit*, bool, bool hide) {
void actor_companion::on_exit() {
enqueue_handler tmp;
{ // lifetime scope of guard
std::lock_guard<lock_type> guard(lock_);
std::unique_lock guard(lock_);
on_enqueue_.swap(tmp);
}
if (on_exit_)
......
......@@ -7,8 +7,8 @@
#include <atomic>
#include <random>
#include "caf/send.hpp"
#include "caf/default_attachable.hpp"
#include "caf/send.hpp"
#include "caf/detail/sync_request_bouncer.hpp"
......@@ -22,7 +22,7 @@ actor_pool::policy actor_pool::round_robin() {
impl(const impl&) : pos_(0) {
// nop
}
void operator()(actor_system&, uplock& guard, const actor_vec& vec,
void operator()(actor_system&, guard_type& guard, const actor_vec& vec,
mailbox_element_ptr& ptr, execution_unit* host) {
CAF_ASSERT(!vec.empty());
actor selected = vec[pos_++ % vec.size()];
......@@ -36,7 +36,7 @@ actor_pool::policy actor_pool::round_robin() {
namespace {
void broadcast_dispatch(actor_system&, actor_pool::uplock&,
void broadcast_dispatch(actor_system&, actor_pool::guard_type&,
const actor_pool::actor_vec& vec,
mailbox_element_ptr& ptr, execution_unit* host) {
CAF_ASSERT(!vec.empty());
......@@ -59,12 +59,11 @@ actor_pool::policy actor_pool::random() {
impl(const impl&) : rd_() {
// nop
}
void operator()(actor_system&, uplock& guard, const actor_vec& vec,
void operator()(actor_system&, guard_type& guard, const actor_vec& vec,
mailbox_element_ptr& ptr, execution_unit* host) {
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
auto selected =
vec[dis_(rd_, decltype(dis_)::param_type(0, vec.size() - 1))];
unique_guard.unlock();
auto selected
= vec[dis_(rd_, decltype(dis_)::param_type(0, vec.size() - 1))];
guard.unlock();
selected->enqueue(std::move(ptr), host);
}
std::random_device rd_;
......@@ -95,14 +94,15 @@ actor actor_pool::make(execution_unit* eu, size_t num_workers,
auto res_addr = ptr->address();
for (size_t i = 0; i < num_workers; ++i) {
auto worker = fac();
worker->attach(default_attachable::make_monitor(worker.address(), res_addr));
worker->attach(
default_attachable::make_monitor(worker.address(), res_addr));
ptr->workers_.push_back(std::move(worker));
}
return res;
}
bool actor_pool::enqueue(mailbox_element_ptr what, execution_unit* eu) {
upgrade_lock<detail::shared_spinlock> guard{workers_mtx_};
guard_type guard{workers_mtx_};
if (filter(guard, what->sender, what->mid, what->payload, eu))
return false;
policy_(home_system(), guard, workers_, what, eu);
......@@ -110,8 +110,7 @@ bool actor_pool::enqueue(mailbox_element_ptr what, execution_unit* eu) {
}
actor_pool::actor_pool(actor_config& cfg)
: monitorable_actor(cfg),
planned_reason_(exit_reason::normal) {
: monitorable_actor(cfg), planned_reason_(exit_reason::normal) {
register_at_system();
}
......@@ -130,9 +129,8 @@ void actor_pool::on_cleanup(const error& reason) {
CAF_LOG_TERMINATE_EVENT(this, reason);
}
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
const strong_actor_ptr& sender, message_id mid,
message& content, execution_unit* eu) {
bool actor_pool::filter(guard_type& guard, const strong_actor_ptr& sender,
message_id mid, message& content, execution_unit* eu) {
CAF_LOG_TRACE(CAF_ARG(mid) << CAF_ARG(content));
if (auto view = make_const_typed_message_view<exit_msg>(content)) {
// acquire second mutex as well
......@@ -141,9 +139,8 @@ bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
if (cleanup(std::move(reason), eu)) {
// send exit messages *always* to all workers and clear vector afterwards
// but first swap workers_ out of the critical section
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
workers_.swap(workers);
unique_guard.unlock();
guard.unlock();
for (auto& w : workers)
anon_send(w, content);
unregister_from_system();
......@@ -153,7 +150,6 @@ bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
if (auto view = make_const_typed_message_view<down_msg>(content)) {
// remove failed worker from pool
const auto& dm = get<0>(view);
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
auto last = workers_.end();
auto i = std::find(workers_.begin(), workers_.end(), dm.source);
CAF_LOG_DEBUG_IF(i == last, "received down message for an unknown worker");
......@@ -161,7 +157,7 @@ bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
workers_.erase(i);
if (workers_.empty()) {
planned_reason_ = exit_reason::out_of_workers;
unique_guard.unlock();
guard.unlock();
quit(eu);
}
return true;
......@@ -169,15 +165,13 @@ bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
if (auto view
= make_const_typed_message_view<sys_atom, put_atom, actor>(content)) {
const auto& worker = get<2>(view);
worker->attach(default_attachable::make_monitor(worker.address(),
address()));
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
worker->attach(
default_attachable::make_monitor(worker.address(), address()));
workers_.push_back(worker);
return true;
}
if (auto view
= make_const_typed_message_view<sys_atom, delete_atom, actor>(content)) {
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
auto& what = get<2>(view);
auto last = workers_.end();
auto i = std::find(workers_.begin(), last, what);
......@@ -190,7 +184,6 @@ bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
return true;
}
if (content.match_elements<sys_atom, delete_atom>()) {
upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
for (auto& worker : workers_) {
default_attachable::observe_token tk{address(),
default_attachable::monitor};
......@@ -202,8 +195,8 @@ bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
if (content.match_elements<sys_atom, get_atom>()) {
auto cpy = workers_;
guard.unlock();
sender->enqueue(nullptr, mid.response_id(),
make_message(std::move(cpy)), eu);
sender->enqueue(nullptr, mid.response_id(), make_message(std::move(cpy)),
eu);
return true;
}
if (workers_.empty()) {
......
......@@ -12,10 +12,8 @@
#include "caf/actor_system.hpp"
#include "caf/attachable.hpp"
#include "caf/detail/shared_spinlock.hpp"
#include "caf/event_based_actor.hpp"
#include "caf/exit_reason.hpp"
#include "caf/locks.hpp"
#include "caf/logger.hpp"
#include "caf/scoped_actor.hpp"
#include "caf/sec.hpp"
......@@ -25,8 +23,8 @@ namespace caf {
namespace {
using exclusive_guard = unique_lock<detail::shared_spinlock>;
using shared_guard = shared_lock<detail::shared_spinlock>;
using exclusive_guard = std::unique_lock<std::shared_mutex>;
using shared_guard = std::shared_lock<std::shared_mutex>;
} // namespace
......
// This file is part of CAF, the C++ Actor Framework. See the file LICENSE in
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#include "caf/config.hpp"
#include <limits>
#include <thread>
#include "caf/detail/shared_spinlock.hpp"
#include "caf/detail/cas_weak.hpp"
namespace {
inline long min_long() {
return std::numeric_limits<long>::min();
}
} // namespace
namespace caf::detail {
shared_spinlock::shared_spinlock() : flag_(0) {
// nop
}
void shared_spinlock::lock() {
long v = flag_.load();
for (;;) {
if (v != 0) {
v = flag_.load();
} else if (cas_weak(&flag_, &v, min_long())) {
return;
}
// else: next iteration
}
}
void shared_spinlock::lock_upgrade() {
lock_shared();
}
void shared_spinlock::unlock_upgrade() {
unlock_shared();
}
void shared_spinlock::unlock_upgrade_and_lock() {
unlock_shared();
lock();
}
void shared_spinlock::unlock_and_lock_upgrade() {
unlock();
lock_upgrade();
}
void shared_spinlock::unlock() {
flag_.store(0);
}
bool shared_spinlock::try_lock() {
long v = flag_.load();
return (v == 0) ? cas_weak(&flag_, &v, min_long()) : false;
}
void shared_spinlock::lock_shared() {
long v = flag_.load();
for (;;) {
if (v < 0) {
// std::this_thread::yield();
v = flag_.load();
} else if (cas_weak(&flag_, &v, v + 1)) {
return;
}
// else: next iteration
}
}
void shared_spinlock::unlock_shared() {
flag_.fetch_sub(1);
}
bool shared_spinlock::try_lock_shared() {
long v = flag_.load();
return (v >= 0) ? cas_weak(&flag_, &v, v + 1) : false;
}
} // namespace caf::detail
......@@ -2,15 +2,14 @@
// the main distribution directory for license terms and copyright or visit
// https://github.com/actor-framework/actor-framework/blob/master/LICENSE.
#include <utility>
#include "caf/forwarding_actor_proxy.hpp"
#include "caf/locks.hpp"
#include "caf/logger.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/send.hpp"
#include <utility>
namespace caf {
forwarding_actor_proxy::forwarding_actor_proxy(actor_config& cfg, actor dest)
......@@ -30,7 +29,7 @@ bool forwarding_actor_proxy::forward_msg(strong_actor_ptr sender,
if (msg.match_elements<exit_msg>())
unlink_from(msg.get_as<exit_msg>(0).source);
forwarding_stack tmp;
shared_lock<detail::shared_spinlock> guard(broker_mtx_);
std::shared_lock guard{broker_mtx_};
if (broker_)
return broker_->enqueue(nullptr, make_message_id(),
make_message(forward_atom_v, std::move(sender),
......@@ -71,7 +70,7 @@ bool forwarding_actor_proxy::remove_backlink(abstract_actor* x) {
void forwarding_actor_proxy::kill_proxy(execution_unit* ctx, error rsn) {
actor tmp;
{ // lifetime scope of guard
std::unique_lock<detail::shared_spinlock> guard(broker_mtx_);
std::unique_lock guard{broker_mtx_};
broker_.swap(tmp); // manually break cycle
}
cleanup(std::move(rsn), ctx);
......
......@@ -8,7 +8,6 @@
#include "caf/detail/local_group_module.hpp"
#include "caf/event_based_actor.hpp"
#include "caf/group.hpp"
#include "caf/locks.hpp"
#include "caf/message.hpp"
#include "caf/sec.hpp"
#include "caf/serializer.hpp"
......
......@@ -25,7 +25,6 @@
#include "caf/detail/set_thread_name.hpp"
#include "caf/intrusive/task_result.hpp"
#include "caf/local_actor.hpp"
#include "caf/locks.hpp"
#include "caf/message.hpp"
#include "caf/string_algorithms.hpp"
#include "caf/term.hpp"
......