Commit 898b0be2 authored by Dominik Charousset and committed by Dominik Charousset

Flesh out DRR policies and the stream manager

parent 95af3963
...@@ -40,6 +40,7 @@ set (LIBCAF_CORE_SRCS ...@@ -40,6 +40,7 @@ set (LIBCAF_CORE_SRCS
src/decorated_tuple.cpp src/decorated_tuple.cpp
src/default_attachable.cpp src/default_attachable.cpp
src/deserializer.cpp src/deserializer.cpp
src/downstream_messages.cpp
src/duration.cpp src/duration.cpp
src/dynamic_message_data.cpp src/dynamic_message_data.cpp
src/error.cpp src/error.cpp
......
...@@ -32,16 +32,28 @@ ...@@ -32,16 +32,28 @@
#include "caf/is_timeout_or_catch_all.hpp" #include "caf/is_timeout_or_catch_all.hpp"
#include "caf/local_actor.hpp" #include "caf/local_actor.hpp"
#include "caf/mailbox_element.hpp" #include "caf/mailbox_element.hpp"
#include "caf/mailbox_policy.hpp"
#include "caf/none.hpp" #include "caf/none.hpp"
#include "caf/send.hpp" #include "caf/send.hpp"
#include "caf/typed_actor.hpp" #include "caf/typed_actor.hpp"
#include "caf/policy/arg.hpp"
#include "caf/policy/priority_aware.hpp"
#include "caf/policy/downstream_messages.hpp"
#include "caf/policy/normal_messages.hpp"
#include "caf/policy/upstream_messages.hpp"
#include "caf/policy/urgent_messages.hpp"
#include "caf/detail/apply_args.hpp" #include "caf/detail/apply_args.hpp"
#include "caf/detail/blocking_behavior.hpp" #include "caf/detail/blocking_behavior.hpp"
#include "caf/detail/type_list.hpp" #include "caf/detail/type_list.hpp"
#include "caf/detail/type_traits.hpp" #include "caf/detail/type_traits.hpp"
#include "caf/intrusive/drr_cached_queue.hpp"
#include "caf/intrusive/drr_queue.hpp"
#include "caf/intrusive/fifo_inbox.hpp"
#include "caf/intrusive/wdrr_dynamic_multiplexed_queue.hpp"
#include "caf/intrusive/wdrr_fixed_multiplexed_queue.hpp"
#include "caf/mixin/requester.hpp" #include "caf/mixin/requester.hpp"
#include "caf/mixin/sender.hpp" #include "caf/mixin/sender.hpp"
#include "caf/mixin/subscriber.hpp" #include "caf/mixin/subscriber.hpp"
...@@ -67,11 +79,37 @@ class blocking_actor ...@@ -67,11 +79,37 @@ class blocking_actor
mixin::subscriber>, mixin::subscriber>,
public dynamically_typed_actor_base { public dynamically_typed_actor_base {
public: public:
// -- member types ----------------------------------------------------------- // -- nested and member types ------------------------------------------------
/// Base type. /// Base type.
using super = extended_base; using super = extended_base;
/// Stores asynchronous messages with default priority.
using default_queue = intrusive::drr_cached_queue<policy::normal_messages>;
/// Stores asynchronous messages with high priority.
using urgent_queue = intrusive::drr_cached_queue<policy::urgent_messages>;
/// Configures the FIFO inbox with two nested queues:
///
/// 1. Default asynchronous messages
/// 2. High-priority asynchronous messages
struct mailbox_policy {
using deficit_type = size_t;
using mapped_type = mailbox_element;
using unique_pointer = mailbox_element_ptr;
using queue_type =
intrusive::wdrr_fixed_multiplexed_queue<policy::priority_aware,
default_queue, urgent_queue>;
static constexpr size_t default_queue_index = 0;
static constexpr size_t urgent_queue_index = 1;
};
/// A queue optimized for single-reader-many-writers. /// A queue optimized for single-reader-many-writers.
using mailbox_type = intrusive::fifo_inbox<mailbox_policy>; using mailbox_type = intrusive::fifo_inbox<mailbox_policy>;
...@@ -188,12 +226,6 @@ public: ...@@ -188,12 +226,6 @@ public:
message_id mid; message_id mid;
detail::blocking_behavior& bhvr; detail::blocking_behavior& bhvr;
/// Skips all streaming-related messages.
inline intrusive::task_result
operator()(size_t, mailbox_policy::stream_queue&, mailbox_element&) {
return intrusive::task_result::skip;
}
// Dispatches messages with high and normal priority to the same handler. // Dispatches messages with high and normal priority to the same handler.
template <class Queue> template <class Queue>
intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) { intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) {
......
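The default_queue and urgent_queue above are deficit round robin (DRR) queues: each round credits a queue with a quantum, and the queue may only dequeue elements whose task size still fits into the accumulated deficit. The following stand-alone sketch illustrates that discipline with a plain std::deque, treating each element's value as its task size; all names are illustrative and nothing here is CAF code.

#include <cstddef>
#include <deque>
#include <functional>

// Toy DRR queue: elements are plain size_t values, and an element's value
// doubles as its task size.
class toy_drr_queue {
public:
  void push_back(size_t task_size) {
    q_.push_back(task_size);
  }

  // Adds `quantum` to the deficit, then hands elements to `consumer` as long
  // as the next element's task size fits into the remaining deficit.
  size_t new_round(size_t quantum, const std::function<void(size_t)>& consumer) {
    size_t consumed = 0;
    deficit_ += quantum;
    while (!q_.empty() && q_.front() <= deficit_) {
      auto ts = q_.front();
      q_.pop_front();
      deficit_ -= ts;
      consumer(ts);
      ++consumed;
    }
    if (q_.empty())
      deficit_ = 0; // an empty queue must not hoard credit
    return consumed;
  }

private:
  std::deque<size_t> q_;
  size_t deficit_ = 0;
};

Small or cheap tasks drain immediately, while an oversized task simply waits until enough rounds have accumulated sufficient deficit.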
...@@ -58,16 +58,14 @@ public: ...@@ -58,16 +58,14 @@ public:
return this->inbound_paths_.empty(); return this->inbound_paths_.empty();
} }
error handle(inbound_path*, downstream_msg::batch& x) override { void handle(inbound_path*, downstream_msg::batch& x) override {
CAF_LOG_TRACE(CAF_ARG(x)); CAF_LOG_TRACE(CAF_ARG(x));
using vec_type = std::vector<input_type>; using vec_type = std::vector<input_type>;
if (x.xs.match_elements<vec_type>()) { if (x.xs.match_elements<vec_type>()) {
auto& xs = x.xs.get_mutable_as<vec_type>(0); auto& xs = x.xs.get_mutable_as<vec_type>(0);
driver_.process(std::move(xs)); driver_.process(std::move(xs));
return none;
} }
CAF_LOG_ERROR("received unexpected batch type"); CAF_LOG_ERROR("received unexpected batch type (dropped)");
return sec::unexpected_message;
} }
protected: protected:
......
...@@ -70,17 +70,15 @@ public: ...@@ -70,17 +70,15 @@ public:
&& out_.clean(); && out_.clean();
} }
error handle(inbound_path*, downstream_msg::batch& x) override { void handle(inbound_path*, downstream_msg::batch& x) override {
CAF_LOG_TRACE(CAF_ARG(x)); CAF_LOG_TRACE(CAF_ARG(x));
using vec_type = std::vector<output_type>; using vec_type = std::vector<output_type>;
if (x.xs.match_elements<vec_type>()) { if (x.xs.match_elements<vec_type>()) {
auto& xs = x.xs.get_mutable_as<vec_type>(0); auto& xs = x.xs.get_mutable_as<vec_type>(0);
downstream<output_type> ds{out_.buf()}; downstream<output_type> ds{out_.buf()};
driver_.process(std::move(xs), ds); driver_.process(std::move(xs), ds);
return none;
} }
CAF_LOG_ERROR("received unexpected batch type"); CAF_LOG_ERROR("received unexpected batch type (dropped)");
return sec::unexpected_message;
} }
message make_handshake() const override { message make_handshake() const override {
......
...@@ -134,7 +134,7 @@ public: ...@@ -134,7 +134,7 @@ public:
void emit_regular_shutdown(local_actor* self); void emit_regular_shutdown(local_actor* self);
/// Sends a `stream_msg::forced_close` on this path. /// Sends a `stream_msg::forced_close` on this path.
void emit_regular_shutdown(local_actor* self, error reason); void emit_irregular_shutdown(local_actor* self, error reason);
/// Sends a `stream_msg::forced_close` on this path. /// Sends a `stream_msg::forced_close` on this path.
static void emit_irregular_shutdown(local_actor* self, stream_slots slots, static void emit_irregular_shutdown(local_actor* self, stream_slots slots,
......
...@@ -40,8 +40,6 @@ public: ...@@ -40,8 +40,6 @@ public:
// -- member types ---------------------------------------------------------- // -- member types ----------------------------------------------------------
using policy_type = Policy; using policy_type = Policy;
using deleter_type = typename policy_type::deleter_type;
using value_type = typename policy_type::mapped_type; using value_type = typename policy_type::mapped_type;
using node_type = typename value_type::node_type; using node_type = typename value_type::node_type;
...@@ -221,7 +219,7 @@ public: ...@@ -221,7 +219,7 @@ public:
// Fix deficit counter since we didn't actually use it. // Fix deficit counter since we didn't actually use it.
deficit_ += ts; deficit_ += ts;
} else { } else {
deleter_type d; typename unique_pointer::deleter_type d;
d(ptr); d(ptr);
++consumed; ++consumed;
if (!cache_.empty()) if (!cache_.empty())
......
...@@ -49,15 +49,15 @@ public: ...@@ -49,15 +49,15 @@ public:
using queue_type = typename policy_type::queue_type; using queue_type = typename policy_type::queue_type;
using value_type = typename policy_type::mapped_type; using deficit_type = typename policy_type::deficit_type;
using deleter_type = typename policy_type::deleter_type; using value_type = typename policy_type::mapped_type;
using lifo_inbox_type = lifo_inbox<policy_type>; using lifo_inbox_type = lifo_inbox<policy_type>;
using pointer = value_type*; using pointer = value_type*;
using unique_pointer = typename policy_type::unique_pointer; using unique_pointer = typename queue_type::unique_pointer;
using node_pointer = typename value_type::node_pointer; using node_pointer = typename value_type::node_pointer;
...@@ -152,7 +152,7 @@ public: ...@@ -152,7 +152,7 @@ public:
/// Run a new round with `quantum`, dispatching all tasks to `consumer`. /// Run a new round with `quantum`, dispatching all tasks to `consumer`.
template <class F> template <class F>
bool new_round(typename policy_type::deficit_type quantum, F& consumer) { bool new_round(deficit_type quantum, F& consumer) {
fetch_more(); fetch_more();
return queue_.new_round(quantum, consumer); return queue_.new_round(quantum, consumer);
} }
......
...@@ -46,10 +46,10 @@ public: ...@@ -46,10 +46,10 @@ public:
using node_pointer = node_type*; using node_pointer = node_type*;
using deleter_type = typename policy_type::deleter_type;
using unique_pointer = typename policy_type::unique_pointer; using unique_pointer = typename policy_type::unique_pointer;
using deleter_type = typename unique_pointer::deleter_type;
/// Tries to enqueue a new element to the inbox. /// Tries to enqueue a new element to the inbox.
/// @threadsafe /// @threadsafe
inbox_result push_front(pointer new_element) noexcept { inbox_result push_front(pointer new_element) noexcept {
......
...@@ -53,8 +53,6 @@ public: ...@@ -53,8 +53,6 @@ public:
using const_reference = const value_type&; using const_reference = const value_type&;
using deleter_type = typename policy_type::deleter_type;
using unique_pointer = typename policy_type::unique_pointer; using unique_pointer = typename policy_type::unique_pointer;
using task_size_type = typename policy_type::task_size_type; using task_size_type = typename policy_type::task_size_type;
...@@ -312,7 +310,7 @@ public: ...@@ -312,7 +310,7 @@ public:
for (auto i = head_.next; i != &tail_;) { for (auto i = head_.next; i != &tail_;) {
auto ptr = i; auto ptr = i;
i = i->next; i = i->next;
deleter_type d; typename unique_pointer::deleter_type d;
d(promote(ptr)); d(promote(ptr));
} }
} }
......
...@@ -33,8 +33,6 @@ class wdrr_dynamic_multiplexed_queue { ...@@ -33,8 +33,6 @@ class wdrr_dynamic_multiplexed_queue {
public: public:
using policy_type = Policy; using policy_type = Policy;
using deleter_type = typename policy_type::deleter_type;
using deficit_type = typename policy_type::deficit_type; using deficit_type = typename policy_type::deficit_type;
using mapped_type = typename policy_type::mapped_type; using mapped_type = typename policy_type::mapped_type;
...@@ -70,7 +68,7 @@ public: ...@@ -70,7 +68,7 @@ public:
i->second.push_back(ptr); i->second.push_back(ptr);
return true; return true;
} else { } else {
deleter_type d; typename unique_pointer::deleter_type d;
d(ptr); d(ptr);
return false; return false;
} }
...@@ -172,7 +170,7 @@ public: ...@@ -172,7 +170,7 @@ public:
if (i != qs_.end()) { if (i != qs_.end()) {
i->second.lifo_append(ptr); i->second.lifo_append(ptr);
} else { } else {
deleter_type d; typename unique_pointer::deleter_type d;
d(ptr); d(ptr);
} }
} }
......
...@@ -39,9 +39,9 @@ public: ...@@ -39,9 +39,9 @@ public:
using deficit_type = typename policy_type::deficit_type; using deficit_type = typename policy_type::deficit_type;
using mapped_type = typename policy_type::mapped_type; using value_type = typename policy_type::mapped_type;
using pointer = mapped_type*; using pointer = value_type*;
using unique_pointer = typename policy_type::unique_pointer; using unique_pointer = typename policy_type::unique_pointer;
...@@ -68,7 +68,7 @@ public: ...@@ -68,7 +68,7 @@ public:
return policy_; return policy_;
} }
bool push_back(mapped_type* ptr) noexcept { bool push_back(value_type* ptr) noexcept {
return push_back_recursion<0>(policy_.id_of(*ptr), ptr); return push_back_recursion<0>(policy_.id_of(*ptr), ptr);
} }
...@@ -78,7 +78,7 @@ public: ...@@ -78,7 +78,7 @@ public:
template <class... Ts> template <class... Ts>
bool emplace_back(Ts&&... xs) { bool emplace_back(Ts&&... xs) {
return push_back(new mapped_type(std::forward<Ts>(xs)...)); return push_back(new value_type(std::forward<Ts>(xs)...));
} }
/// Run a new round with `quantum`, dispatching all tasks to `consumer`. /// Run a new round with `quantum`, dispatching all tasks to `consumer`.
...@@ -138,13 +138,13 @@ private: ...@@ -138,13 +138,13 @@ private:
template <size_t I> template <size_t I>
detail::enable_if_t<I == num_queues, bool> detail::enable_if_t<I == num_queues, bool>
push_back_recursion(size_t, mapped_type*) noexcept { push_back_recursion(size_t, value_type*) noexcept {
return false; return false;
} }
template <size_t I> template <size_t I>
detail::enable_if_t<I != num_queues, bool> detail::enable_if_t<I != num_queues, bool>
push_back_recursion(size_t pos, mapped_type* ptr) noexcept { push_back_recursion(size_t pos, value_type* ptr) noexcept {
if (pos == I) { if (pos == I) {
auto& q = std::get<I>(qs_); auto& q = std::get<I>(qs_);
return q.push_back(ptr); return q.push_back(ptr);
......
...@@ -400,6 +400,27 @@ public: ...@@ -400,6 +400,27 @@ public:
message_id new_request_id(message_priority mp); message_id new_request_id(message_priority mp);
/// Creates a new path for incoming stream traffic from `sender`.
virtual inbound_path* make_inbound_path(stream_manager_ptr mgr,
stream_slots slots,
strong_actor_ptr sender);
/// Silently closes incoming stream traffic on `slot`.
virtual void erase_inbound_path_later(stream_slot slot);
/// Closes incoming stream traffic on `slot`. Emits a drop message if `reason
/// == none` and a `forced_drop` message otherwise.
virtual void erase_inbound_path_later(stream_slot slot, error reason);
/// Silently closes all inbound paths for `mgr`.
virtual void erase_inbound_paths_later(const stream_manager* mgr);
/// Closes all incoming stream traffic for a manager. Emits a drop message on
/// each path if `reason == none` and a `forced_drop` message on each path
/// otherwise.
virtual void erase_inbound_paths_later(const stream_manager* mgr,
error reason);
protected: protected:
// -- member variables ------------------------------------------------------- // -- member variables -------------------------------------------------------
......
...@@ -17,85 +17,67 @@ ...@@ -17,85 +17,67 @@
* http://www.boost.org/LICENSE_1_0.txt. * * http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/ ******************************************************************************/
#ifndef CAF_MAILBOX_POLICY_HPP #ifndef CAF_POLICY_CATEGORIZED_HPP
#define CAF_MAILBOX_POLICY_HPP #define CAF_POLICY_CATEGORIZED_HPP
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp" #include "caf/mailbox_element.hpp"
#include "caf/message_priority.hpp"
#include "caf/unit.hpp"
#include "caf/intrusive/drr_queue.hpp" #include "caf/policy/downstream_messages.hpp"
#include "caf/intrusive/drr_cached_queue.hpp" #include "caf/policy/normal_messages.hpp"
#include "caf/intrusive/wdrr_fixed_multiplexed_queue.hpp" #include "caf/policy/upstream_messages.hpp"
#include "caf/policy/urgent_messages.hpp"
namespace caf { namespace caf {
namespace policy {
/// Configures a mailbox queue containing four nested queues. /// Configures a cached WDRR fixed multiplexed queue for dispatching to four
class mailbox_policy { /// nested queues (one for each message category type).
class categorized {
public: public:
// -- nested types -----------------------------------------------------------
class default_queue;
// -- member types ----------------------------------------------------------- // -- member types -----------------------------------------------------------
using mapped_type = mailbox_element; using mapped_type = mailbox_element;
using key_type = size_t; using task_size_type = size_t;
using task_size_type = long;
using deficit_type = long; using deficit_type = size_t;
using deleter_type = detail::disposer;
using unique_pointer = mailbox_element_ptr; using unique_pointer = mailbox_element_ptr;
using stream_queue = intrusive::drr_queue<mailbox_policy>; // -- constructors, destructors, and assignment operators --------------------
using high_priority_queue = intrusive::drr_cached_queue<mailbox_policy>;
using queue_type = categorized() = default;
intrusive::wdrr_fixed_multiplexed_queue<mailbox_policy, default_queue,
stream_queue,
stream_queue,
high_priority_queue>;
static constexpr size_t default_queue_index = 0; categorized(const categorized&) = default;
static constexpr size_t high_priority_queue_index = 3; categorized& operator=(const categorized&) = default;
static inline key_type id_of(const mapped_type& x) noexcept { constexpr categorized(unit_t) {
return static_cast<key_type>(x.mid.category()); // nop
} }
static inline task_size_type task_size(const mapped_type&) noexcept { // -- interface required by wdrr_fixed_multiplexed_queue ---------------------
return 1;
}
static inline deficit_type quantum(const default_queue&, template <template <class> class Queue>
deficit_type x) noexcept { static deficit_type quantum(const Queue<urgent_messages>&,
return x; deficit_type x) noexcept {
return x * static_cast<deficit_type>(message_priority::high);
} }
static inline deficit_type quantum(const stream_queue&, template <class Queue>
deficit_type x) noexcept { static deficit_type quantum(const Queue&, deficit_type x) noexcept {
return x; return x;
} }
/// Handle 5 high priority messages for each default messages. static inline size_t id_of(const mailbox_element& x) noexcept {
static inline deficit_type quantum(const high_priority_queue&, return x.mid.category();
deficit_type x) noexcept {
return x * 5;
} }
}; };
class mailbox_policy::default_queue } // namespace policy
: public intrusive::drr_cached_queue<mailbox_policy> {
public:
using super = drr_cached_queue<mailbox_policy>;
using super::super;
};
} // namespace caf } // namespace caf
#endif // CAF_MAILBOX_POLICY_HPP #endif // CAF_POLICY_CATEGORIZED_HPP
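The categorized policy only supplies id_of (mapping a message's category to a queue index) and quantum (a per-queue weight); the surrounding wdrr_fixed_multiplexed_queue performs the actual weighted round robin. The sketch below shows that division of labor with plain containers, a fixed task size of 1 per message, and made-up weights; it is not the CAF implementation.

#include <array>
#include <cstddef>
#include <deque>

// Toy message: the category field selects the target queue.
struct toy_msg {
  size_t category; // e.g. 0 = normal, 1 = upstream, 2 = urgent
};

class toy_fixed_multiplexed_queue {
public:
  // id_of: category -> queue index.
  void push_back(toy_msg x) {
    qs_[x.category % qs_.size()].push_back(x);
  }

  // Serves every queue once, granting queue i up to quantum * weights_[i]
  // messages per round (each message counts as task size 1 here).
  template <class F>
  void new_round(size_t quantum, F& consumer) {
    for (size_t i = 0; i < qs_.size(); ++i) {
      auto credit = quantum * weights_[i];
      auto& q = qs_[i];
      while (credit > 0 && !q.empty()) {
        consumer(i, q.front());
        q.pop_front();
        --credit;
      }
    }
  }

private:
  std::array<std::deque<toy_msg>, 3> qs_;
  std::array<size_t, 3> weights_{{1, 1, 4}}; // give one queue a larger share
};

The real policy expresses its weights via the quantum overloads shown above rather than a lookup table.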
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2017 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_POLICY_DOWNSTREAM_MESSAGES_HPP
#define CAF_POLICY_DOWNSTREAM_MESSAGES_HPP
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/stream_slot.hpp"
#include "caf/unit.hpp"
#include "caf/intrusive/drr_queue.hpp"
namespace caf {
namespace policy {
/// Configures a dynamic WDRR queue for holding downstream messages.
class downstream_messages {
public:
// -- nested types -----------------------------------------------------------
/// Configures a nested DRR queue.
class nested {
public:
// -- member types ---------------------------------------------------------
using mapped_type = mailbox_element;
using task_size_type = size_t;
using deficit_type = size_t;
using unique_pointer = mailbox_element_ptr;
static task_size_type task_size(const mailbox_element& x) noexcept;
// -- constructors, destructors, and assignment operators ------------------
template <class T>
nested(T&& x) : handler(std::forward<T>(x)) {
// nop
}
nested() = default;
nested(nested&&) = default;
nested& operator=(nested&&) = default;
nested(const nested&) = delete;
nested& operator=(const nested&) = delete;
// -- member variables -----------------------------------------------------
std::unique_ptr<inbound_path> handler;
};
// -- member types -----------------------------------------------------------
using mapped_type = mailbox_element;
using task_size_type = size_t;
using deficit_type = size_t;
using unique_pointer = mailbox_element_ptr;
using key_type = stream_slot;
using nested_queue_type = intrusive::drr_queue<nested>;
using queue_map_type = std::map<key_type, nested_queue_type>;
// -- required functions for wdrr_dynamic_multiplexed_queue ------------------
static key_type id_of(mailbox_element& x) noexcept;
static bool enabled(const nested_queue_type& q) noexcept;
static inline deficit_type quantum(const nested_queue_type&,
deficit_type x) noexcept {
return x;
}
// -- constructors, destructors, and assignment operators --------------------
downstream_messages() = default;
downstream_messages(const downstream_messages&) = default;
downstream_messages& operator=(const downstream_messages&) = default;
constexpr downstream_messages(unit_t) {
// nop
}
// -- required functions for drr_queue ---------------------------------------
static inline task_size_type task_size(const mailbox_element&) noexcept {
return 1;
}
};
} // namespace policy
} // namespace caf
#endif // CAF_POLICY_DOWNSTREAM_MESSAGES_HPP
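Compared to the fixed policies, downstream_messages provides the two hooks a wdrr_dynamic_multiplexed_queue needs: id_of extracts the receiver slot so that every inbound path gets its own nested queue, and enabled lets the multiplexer skip slots whose stream manager reports congestion. Below is a hedged sketch of that shape using standard containers; the slot and congestion names mirror the policy above, everything else is illustrative.

#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <map>

using toy_slot = std::uint16_t;

// Toy batch: carries its destination slot and its element count, which also
// serves as its task size.
struct toy_batch {
  toy_slot receiver_slot;
  size_t num_elements;
};

class toy_dynamic_multiplexed_queue {
public:
  // Creates the per-slot queue on demand and appends the batch to it.
  void push_back(toy_batch x) {
    queues_[x.receiver_slot].push_back(x);
  }

  // Visits every non-congested slot once; congested slots keep their batches
  // buffered until a later round (the role of policy::enabled).
  void new_round(const std::function<bool(toy_slot)>& congested,
                 const std::function<void(toy_slot, const toy_batch&)>& consumer) {
    for (auto& kvp : queues_) {
      if (congested(kvp.first))
        continue;
      while (!kvp.second.empty()) {
        consumer(kvp.first, kvp.second.front());
        kvp.second.pop_front();
      }
    }
  }

private:
  std::map<toy_slot, std::deque<toy_batch>> queues_;
};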
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2017 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_POLICY_NORMAL_MESSAGES_HPP
#define CAF_POLICY_NORMAL_MESSAGES_HPP
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/unit.hpp"
namespace caf {
namespace policy {
/// Configures a cached DRR queue for holding asynchronous messages with
/// default priority.
class normal_messages {
public:
// -- member types -----------------------------------------------------------
using mapped_type = mailbox_element;
using task_size_type = size_t;
using deficit_type = size_t;
using unique_pointer = mailbox_element_ptr;
// -- constructors, destructors, and assignment operators --------------------
normal_messages() = default;
normal_messages(const normal_messages&) = default;
normal_messages& operator=(const normal_messages&) = default;
constexpr normal_messages(unit_t) {
// nop
}
// -- interface required by drr_queue ----------------------------------------
static inline task_size_type task_size(const mailbox_element&) noexcept {
return 1;
}
};
} // namespace policy
} // namespace caf
#endif // CAF_POLICY_NORMAL_MESSAGES_HPP
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2017 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_POLICY_PRIORITY_AWARE_HPP
#define CAF_POLICY_PRIORITY_AWARE_HPP
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/message_priority.hpp"
#include "caf/unit.hpp"
#include "caf/policy/downstream_messages.hpp"
#include "caf/policy/normal_messages.hpp"
#include "caf/policy/upstream_messages.hpp"
#include "caf/policy/urgent_messages.hpp"
namespace caf {
namespace policy {
/// Configures a cached WDRR fixed multiplexed queue for dispatching to two
/// nested queues (one for each message priority).
class priority_aware {
public:
// -- member types -----------------------------------------------------------
using mapped_type = mailbox_element;
using task_size_type = size_t;
using deficit_type = size_t;
using unique_pointer = mailbox_element_ptr;
// -- constructors, destructors, and assignment operators --------------------
priority_aware() = default;
priority_aware(const priority_aware&) = default;
priority_aware& operator=(const priority_aware&) = default;
constexpr priority_aware(unit_t) {
// nop
}
// -- interface required by wdrr_fixed_multiplexed_queue ---------------------
template <template <class> class Queue>
static deficit_type quantum(const Queue<urgent_messages>&,
deficit_type x) noexcept {
return x * static_cast<deficit_type>(message_priority::high);
}
template <class Queue>
static deficit_type quantum(const Queue&, deficit_type x) noexcept {
return x;
}
static inline size_t id_of(const mailbox_element& x) noexcept {
return x.mid.category() != message_id::urgent_message_category ? 0u : 1u;
}
};
} // namespace policy
} // namespace caf
#endif // CAF_POLICY_PRIORITY_AWARE_HPP
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2017 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_POLICY_UPSTREAM_MESSAGES_HPP
#define CAF_POLICY_UPSTREAM_MESSAGES_HPP
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/unit.hpp"
namespace caf {
namespace policy {
/// Configures a DRR queue for holding upstream messages.
class upstream_messages {
public:
// -- member types -----------------------------------------------------------
using mapped_type = mailbox_element;
using task_size_type = size_t;
using deficit_type = size_t;
using unique_pointer = mailbox_element_ptr;
// -- constructors, destructors, and assignment operators --------------------
upstream_messages() = default;
upstream_messages(const upstream_messages&) = default;
upstream_messages& operator=(const upstream_messages&) = default;
constexpr upstream_messages(unit_t) {
// nop
}
// -- interface required by drr_queue ----------------------------------------
static inline task_size_type task_size(const mailbox_element&) noexcept {
return 1;
}
};
} // namespace policy
} // namespace caf
#endif // CAF_POLICY_UPSTREAM_MESSAGES_HPP
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2017 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#ifndef CAF_POLICY_URGENT_MESSAGES_HPP
#define CAF_POLICY_URGENT_MESSAGES_HPP
#include "caf/fwd.hpp"
#include "caf/mailbox_element.hpp"
#include "caf/unit.hpp"
namespace caf {
namespace policy {
/// Configures a cached DRR queue for holding asynchronous messages with
/// high priority.
class urgent_messages {
public:
// -- member types -----------------------------------------------------------
using mapped_type = mailbox_element;
using task_size_type = size_t;
using deficit_type = size_t;
using unique_pointer = mailbox_element_ptr;
// -- constructors, destructors, and assignment operators --------------------
urgent_messages() = default;
urgent_messages(const urgent_messages&) = default;
urgent_messages& operator=(const urgent_messages&) = default;
constexpr urgent_messages(unit_t) {
// nop
}
// -- interface required by drr_queue ----------------------------------------
static inline task_size_type task_size(const mailbox_element&) noexcept {
return 1;
}
};
} // namespace policy
} // namespace caf
#endif // CAF_POLICY_URGENT_MESSAGES_HPP
...@@ -38,7 +38,6 @@ ...@@ -38,7 +38,6 @@
#include "caf/invoke_message_result.hpp" #include "caf/invoke_message_result.hpp"
#include "caf/local_actor.hpp" #include "caf/local_actor.hpp"
#include "caf/logger.hpp" #include "caf/logger.hpp"
#include "caf/mailbox_policy.hpp"
#include "caf/no_stages.hpp" #include "caf/no_stages.hpp"
#include "caf/response_handle.hpp" #include "caf/response_handle.hpp"
#include "caf/scheduled_actor.hpp" #include "caf/scheduled_actor.hpp"
...@@ -50,6 +49,11 @@ ...@@ -50,6 +49,11 @@
#include "caf/to_string.hpp" #include "caf/to_string.hpp"
#include "caf/policy/arg.hpp" #include "caf/policy/arg.hpp"
#include "caf/policy/categorized.hpp"
#include "caf/policy/downstream_messages.hpp"
#include "caf/policy/normal_messages.hpp"
#include "caf/policy/upstream_messages.hpp"
#include "caf/policy/urgent_messages.hpp"
#include "caf/detail/behavior_stack.hpp" #include "caf/detail/behavior_stack.hpp"
#include "caf/detail/stream_sink_driver_impl.hpp" #include "caf/detail/stream_sink_driver_impl.hpp"
...@@ -59,7 +63,11 @@ ...@@ -59,7 +63,11 @@
#include "caf/detail/stream_stage_driver_impl.hpp" #include "caf/detail/stream_stage_driver_impl.hpp"
#include "caf/detail/unordered_flat_map.hpp" #include "caf/detail/unordered_flat_map.hpp"
#include "caf/intrusive/drr_cached_queue.hpp"
#include "caf/intrusive/drr_queue.hpp"
#include "caf/intrusive/fifo_inbox.hpp" #include "caf/intrusive/fifo_inbox.hpp"
#include "caf/intrusive/wdrr_dynamic_multiplexed_queue.hpp"
#include "caf/intrusive/wdrr_fixed_multiplexed_queue.hpp"
#include "caf/mixin/behavior_changer.hpp" #include "caf/mixin/behavior_changer.hpp"
#include "caf/mixin/requester.hpp" #include "caf/mixin/requester.hpp"
...@@ -92,11 +100,75 @@ result<message> drop(scheduled_actor*, message_view&); ...@@ -92,11 +100,75 @@ result<message> drop(scheduled_actor*, message_view&);
/// @extends local_actor /// @extends local_actor
class scheduled_actor : public local_actor, public resumable { class scheduled_actor : public local_actor, public resumable {
public: public:
// -- member types ----------------------------------------------------------- // -- nested enums -----------------------------------------------------------
/// Categorizes incoming messages.
enum class message_category {
/// Denotes an expired and thus obsolete timeout.
expired_timeout,
/// Triggers the currently active timeout.
timeout,
/// Triggers the current behavior.
ordinary,
/// Triggers handlers for system messages such as `exit_msg` or `down_msg`.
internal
};
// Base type. /// Result of one-shot activations.
enum class activation_result {
/// Actor is still alive and handled the activation message.
success,
/// Actor handled the activation message and terminated.
terminated,
/// Actor skipped the activation message.
skipped,
/// Actor dropped the activation message.
dropped
};
// -- nested and member types ------------------------------------------------
/// Base type.
using super = local_actor; using super = local_actor;
/// Stores asynchronous messages with default priority.
using default_queue = intrusive::drr_cached_queue<policy::normal_messages>;
/// Stores asynchronous messages with high priority.
using urgent_queue = intrusive::drr_cached_queue<policy::urgent_messages>;
/// Stores upstream messages.
using upstream_queue = intrusive::drr_queue<policy::upstream_messages>;
/// Stores downstream messages.
using downstream_queue = intrusive::drr_queue<policy::downstream_messages>;
/// Configures the FIFO inbox with four nested queues:
///
/// 1. Default asynchronous messages
/// 2. High-priority asynchronous messages
/// 3. Upstream messages
/// 4. Downstream messages
///
/// The queue for downstream messages is in turn composed of nested queues,
/// one for each active input slot.
struct mailbox_policy {
using deficit_type = size_t;
using mapped_type = mailbox_element;
using unique_pointer = mailbox_element_ptr;
using queue_type =
intrusive::wdrr_fixed_multiplexed_queue<policy::categorized,
default_queue, upstream_queue,
downstream_queue, urgent_queue>;
static constexpr size_t default_queue_index = 0;
static constexpr size_t urgent_queue_index = 3;
};
/// A queue optimized for single-reader-many-writers. /// A queue optimized for single-reader-many-writers.
using mailbox_type = intrusive::fifo_inbox<mailbox_policy>; using mailbox_type = intrusive::fifo_inbox<mailbox_policy>;
...@@ -130,55 +202,29 @@ public: ...@@ -130,55 +202,29 @@ public:
using exception_handler = std::function<error (pointer, std::exception_ptr&)>; using exception_handler = std::function<error (pointer, std::exception_ptr&)>;
# endif // CAF_NO_EXCEPTIONS # endif // CAF_NO_EXCEPTIONS
// -- nested enums ----------------------------------------------------------- /// Consumes messages from the mailbox.
/// @cond PRIVATE
/// Categorizes incoming messages.
enum class message_category {
/// Denotes an expired and thus obsolete timeout.
expired_timeout,
/// Triggers the currently active timeout.
timeout,
/// Triggers the current behavior.
ordinary,
/// Triggers handlers for system messages such as `exit_msg` or `down_msg`.
internal
};
/// Result of one-shot activations.
enum class activation_result {
/// Actor is still alive and handled the activation message.
success,
/// Actor handled the activation message and terminated.
terminated,
/// Actor skipped the activation message.
skipped,
/// Actor dropped the activation message.
dropped
};
/// @endcond
// -- nested classes ---------------------------------------------------------
struct mailbox_visitor { struct mailbox_visitor {
scheduled_actor* self; scheduled_actor* self;
resume_result& result; resume_result& result;
size_t& handled_msgs; size_t& handled_msgs;
size_t max_throughput; size_t max_throughput;
/// Skips all streaming-related messages. /// Consumes upstream messages.
intrusive::task_result operator()(size_t, mailbox_policy::stream_queue&, intrusive::task_result operator()(size_t, upstream_queue&,
mailbox_element&);
/// Consumes downstream messages.
intrusive::task_result operator()(size_t, downstream_queue&,
mailbox_element&); mailbox_element&);
// Dispatches messages with high and normal priority to the same handler. // Dispatches asynchronous messages with high and normal priority to the
// same handler.
template <class Queue> template <class Queue>
intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) { intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) {
return (*this)(x); return (*this)(x);
} }
// Consumes `x`. // Consumes asynchronous messages.
intrusive::task_result operator()(mailbox_element& x); intrusive::task_result operator()(mailbox_element& x);
}; };
...@@ -882,6 +928,20 @@ public: ...@@ -882,6 +928,20 @@ public:
return mailbox_; return mailbox_;
} }
// -- inbound_path management ------------------------------------------------
inbound_path* make_inbound_path(stream_manager_ptr mgr, stream_slots slots,
strong_actor_ptr sender) override;
void erase_inbound_path_later(stream_slot slot) override;
void erase_inbound_path_later(stream_slot slot, error reason) override;
void erase_inbound_paths_later(const stream_manager* mgr) override;
void erase_inbound_paths_later(const stream_manager* mgr,
error reason) override;
protected: protected:
/// @cond PRIVATE /// @cond PRIVATE
......
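The mailbox_visitor above relies on ordinary overload resolution: the dedicated operator() overloads handle the upstream and downstream queues, while the function template catches both asynchronous queues and forwards them to one shared handler. A small stand-alone example of this dispatch pattern with stand-in queue types (not CAF code):

#include <iostream>

struct upstream_q {};
struct downstream_q {};
struct default_q {};
struct urgent_q {};

struct toy_visitor {
  // Exact (non-template) overloads win for the stream-related queues.
  void operator()(upstream_q&) { std::cout << "upstream handler\n"; }
  void operator()(downstream_q&) { std::cout << "downstream handler\n"; }
  // Every other queue type, i.e., both asynchronous priorities, falls back
  // to this template and shares a single handler.
  template <class Queue>
  void operator()(Queue&) { std::cout << "async handler\n"; }
};

int main() {
  toy_visitor f;
  upstream_q up;
  downstream_q down;
  default_q normal;
  urgent_q urgent;
  f(up);     // prints "upstream handler"
  f(down);   // prints "downstream handler"
  f(normal); // prints "async handler"
  f(urgent); // prints "async handler"
}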
...@@ -59,19 +59,19 @@ public: ...@@ -59,19 +59,19 @@ public:
response_promise result_cb); response_promise result_cb);
*/ */
virtual error handle(inbound_path* from, downstream_msg::batch& x); virtual void handle(inbound_path* from, downstream_msg::batch& x);
virtual error handle(inbound_path* from, downstream_msg::close& x); virtual void handle(inbound_path* from, downstream_msg::close& x);
virtual error handle(inbound_path* from, downstream_msg::forced_close& x); virtual void handle(inbound_path* from, downstream_msg::forced_close& x);
virtual error handle(stream_slots, upstream_msg::ack_open& x); virtual void handle(stream_slots, upstream_msg::ack_open& x);
virtual error handle(outbound_path* from, upstream_msg::ack_batch& x); virtual void handle(stream_slots slots, upstream_msg::ack_batch& x);
virtual error handle(outbound_path* from, upstream_msg::drop& x); virtual void handle(stream_slots slots, upstream_msg::drop& x);
virtual error handle(outbound_path* from, upstream_msg::forced_drop& x); virtual void handle(stream_slots slots, upstream_msg::forced_drop& x);
/// Closes the stream when the parent terminates with default exit reason or /// Closes the stream when the parent terminates with default exit reason or
/// the stream reached its end. /// the stream reached its end.
......
...@@ -30,12 +30,6 @@ ...@@ -30,12 +30,6 @@
namespace caf { namespace caf {
namespace {
constexpr auto mpol = mailbox_policy{};
} // namespace <anonymous>
blocking_actor::receive_cond::~receive_cond() { blocking_actor::receive_cond::~receive_cond() {
// nop // nop
} }
...@@ -58,7 +52,7 @@ bool blocking_actor::accept_one_cond::post() { ...@@ -58,7 +52,7 @@ bool blocking_actor::accept_one_cond::post() {
blocking_actor::blocking_actor(actor_config& cfg) blocking_actor::blocking_actor(actor_config& cfg)
: super(cfg.add_flag(local_actor::is_blocking_flag)), : super(cfg.add_flag(local_actor::is_blocking_flag)),
mailbox_(mpol, mpol, mpol, mpol, mpol) { mailbox_(unit, unit, unit) {
// nop // nop
} }
...@@ -269,7 +263,7 @@ mailbox_element_ptr blocking_actor::dequeue() { ...@@ -269,7 +263,7 @@ mailbox_element_ptr blocking_actor::dequeue() {
auto& q1 = get<mailbox_policy::default_queue_index>(qs); auto& q1 = get<mailbox_policy::default_queue_index>(qs);
auto ptr = q1.take_front(); auto ptr = q1.take_front();
if (ptr == nullptr) { if (ptr == nullptr) {
auto& q2 = get<mailbox_policy::high_priority_queue_index>(qs); auto& q2 = get<mailbox_policy::urgent_queue_index>(qs);
ptr = q2.take_front(); ptr = q2.take_front();
} }
return ptr; return ptr;
......
/******************************************************************************
* ____ _ _____ *
* / ___| / \ | ___| C++ *
* | | / _ \ | |_ Actor *
* | |___ / ___ \| _| Framework *
* \____/_/ \_|_| *
* *
* Copyright (C) 2011 - 2017 *
* Dominik Charousset <dominik.charousset (at) haw-hamburg.de> *
* *
* Distributed under the terms and conditions of the BSD 3-Clause License or *
* (at your option) under the terms and conditions of the Boost Software *
* License 1.0. See accompanying files LICENSE and LICENSE_ALTERNATIVE. *
* *
* If you did not receive a copy of the license files, see *
* http://opensource.org/licenses/BSD-3-Clause and *
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#include "caf/policy/downstream_messages.hpp"
#include "caf/downstream_msg.hpp"
#include "caf/inbound_path.hpp"
namespace caf {
namespace policy {
namespace {
class task_size_calculator {
public:
using size_type = downstream_messages::nested::task_size_type;
inline size_type operator()(const downstream_msg::batch& x) const noexcept {
CAF_ASSERT(x.xs_size > 0);
return static_cast<size_type>(x.xs_size);
}
template <class T>
size_type operator()(const T&) const noexcept {
return 1;
}
};
} // namespace <anonymous>
auto downstream_messages::nested::task_size(const mailbox_element& x) noexcept
-> task_size_type {
task_size_calculator f;
return visit(f, x.content().get_as<downstream_msg>(0).content);
}
auto downstream_messages::id_of(mailbox_element& x) noexcept -> key_type {
return x.content().get_as<downstream_msg>(0).slots.receiver;
}
bool downstream_messages::enabled(const nested_queue_type& q) noexcept {
return !q.policy().handler->mgr->congested();
}
} // namespace policy
} // namespace caf
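Because nested::task_size reports the number of elements in a batch rather than a flat 1, the deficit spent per round corresponds to stream elements instead of mailbox messages. As a hedged arithmetic example for a round that starts with zero deficit and uniform batch sizes (names made up for illustration):

#include <cstddef>

// With uniform batches, a round starting at zero deficit admits
// quantum / batch_size whole batches and carries the remainder over.
constexpr size_t toy_leftover_deficit(size_t quantum, size_t batch_size) {
  return batch_size == 0 ? quantum : quantum % batch_size;
}

static_assert(toy_leftover_deficit(100, 40) == 20,
              "two 40-element batches fit; 20 units of deficit carry over");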
...@@ -111,7 +111,7 @@ void inbound_path::emit_regular_shutdown(local_actor* self) { ...@@ -111,7 +111,7 @@ void inbound_path::emit_regular_shutdown(local_actor* self) {
unsafe_send_as(self, hdl, make<upstream_msg::drop>(slots, self->address())); unsafe_send_as(self, hdl, make<upstream_msg::drop>(slots, self->address()));
} }
void inbound_path::emit_regular_shutdown(local_actor* self, error reason) { void inbound_path::emit_irregular_shutdown(local_actor* self, error reason) {
unsafe_send_as(self, hdl, unsafe_send_as(self, hdl,
make<upstream_msg::forced_drop>( make<upstream_msg::forced_drop>(
slots.invert(), self->address(), std::move(reason))); slots.invert(), self->address(), std::move(reason)));
......
...@@ -125,4 +125,25 @@ bool local_actor::cleanup(error&& fail_state, execution_unit* host) { ...@@ -125,4 +125,25 @@ bool local_actor::cleanup(error&& fail_state, execution_unit* host) {
return true; return true;
} }
inbound_path* local_actor::make_inbound_path(stream_manager_ptr, stream_slots,
strong_actor_ptr) {
return nullptr;
}
void local_actor::erase_inbound_path_later(stream_slot) {
// nop
}
void local_actor::erase_inbound_path_later(stream_slot, error) {
// nop
}
void local_actor::erase_inbound_paths_later(const stream_manager*) {
// nop
}
void local_actor::erase_inbound_paths_later(const stream_manager*, error) {
// nop
}
} // namespace caf } // namespace caf
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include "caf/actor_ostream.hpp" #include "caf/actor_ostream.hpp"
#include "caf/config.hpp" #include "caf/config.hpp"
#include "caf/inbound_path.hpp"
#include "caf/to_string.hpp" #include "caf/to_string.hpp"
#include "caf/scheduler/abstract_coordinator.hpp" #include "caf/scheduler/abstract_coordinator.hpp"
...@@ -30,12 +31,6 @@ ...@@ -30,12 +31,6 @@
namespace caf { namespace caf {
namespace {
constexpr auto mpol = mailbox_policy{};
} // namespace <anonymous>
// -- related free functions --------------------------------------------------- // -- related free functions ---------------------------------------------------
result<message> reflect(scheduled_actor*, message_view& x) { result<message> reflect(scheduled_actor*, message_view& x) {
...@@ -103,7 +98,7 @@ error scheduled_actor::default_exception_handler(pointer ptr, ...@@ -103,7 +98,7 @@ error scheduled_actor::default_exception_handler(pointer ptr,
scheduled_actor::scheduled_actor(actor_config& cfg) scheduled_actor::scheduled_actor(actor_config& cfg)
: super(cfg), : super(cfg),
mailbox_(mpol, mpol, mpol, mpol, mpol), mailbox_(unit, unit, unit, unit, unit),
timeout_id_(0), timeout_id_(0),
default_handler_(print_and_drop), default_handler_(print_and_drop),
error_handler_(default_error_handler), error_handler_(default_error_handler),
...@@ -234,10 +229,18 @@ void scheduled_actor::intrusive_ptr_release_impl() { ...@@ -234,10 +229,18 @@ void scheduled_actor::intrusive_ptr_release_impl() {
intrusive_ptr_release(ctrl()); intrusive_ptr_release(ctrl());
} }
intrusive::task_result scheduled_actor::mailbox_visitor:: intrusive::task_result
operator()(size_t, mailbox_policy::stream_queue&, mailbox_element&) { scheduled_actor::mailbox_visitor:: operator()(size_t, upstream_queue&,
mailbox_element&) {
// TODO: implement me
return intrusive::task_result::stop;
}
intrusive::task_result
scheduled_actor::mailbox_visitor:: operator()(size_t, downstream_queue&,
mailbox_element&) {
// TODO: implement me // TODO: implement me
return intrusive::task_result::resume; return intrusive::task_result::stop;
} }
intrusive::task_result intrusive::task_result
...@@ -660,15 +663,85 @@ bool scheduled_actor::finalize() { ...@@ -660,15 +663,85 @@ bool scheduled_actor::finalize() {
void scheduled_actor::push_to_cache(mailbox_element_ptr ptr) { void scheduled_actor::push_to_cache(mailbox_element_ptr ptr) {
using namespace intrusive; using namespace intrusive;
auto& p = mailbox_.queue().policy(); auto& p = mailbox_.queue().policy();
auto ts = p.task_size(*ptr);
auto& qs = mailbox_.queue().queues(); auto& qs = mailbox_.queue().queues();
drr_cached_queue<mailbox_policy>* q; // TODO: use generic lambda to avoid code duplication when switching to C++14
if (p.id_of(*ptr) == mailbox_policy::default_queue_index) if (p.id_of(*ptr) == mailbox_policy::default_queue_index) {
q = &std::get<mailbox_policy::default_queue_index>(qs); auto& q = std::get<mailbox_policy::default_queue_index>(qs);
else q.inc_total_task_size(q.policy().task_size(*ptr));
q = &std::get<mailbox_policy::high_priority_queue_index>(qs); q.cache().push_back(ptr.release());
q->inc_total_task_size(ts); } else {
q->cache().push_back(ptr.release()); auto& q = std::get<mailbox_policy::urgent_queue_index>(qs);
q.inc_total_task_size(q.policy().task_size(*ptr));
q.cache().push_back(ptr.release());
}
}
inbound_path* scheduled_actor::make_inbound_path(stream_manager_ptr mgr,
stream_slots slots,
strong_actor_ptr sender) {
/*
auto res = get<2>(mailbox_.queues()).queues().emplace(slots.receiver, nullptr);
if (!res.second)
return nullptr;
auto path = new inbound_path(std::move(mgr), slots, std::move(sender));
res.first->second.policy().handler.reset(path);
return path;
*/
}
void scheduled_actor::erase_inbound_path_later(stream_slot slot) {
/*
get<2>(mailbox_.queues()).erase_later(slot);
*/
}
void scheduled_actor::erase_inbound_path_later(stream_slot slot, error reason) {
/*
using fn = void (*)(local_actor*, inbound_path&, error&);
fn regular = [](local_actor* self, inbound_path& in, error&) {
in.emit_regular_shutdown(self);
};
fn irregular = [](local_actor* self, inbound_path& in, error& rsn) {
in.emit_irregular_shutdown(self, rsn);
};
auto f = reason == none ? regular : irregular;
for (auto& kvp : get<2>(mbox.queues()).queues()) {
auto& path = kvp.second.policy().handler;
if (path != nullptr && path->mgr == mgr) {
f(this, *path, reason);
erase_inbound_path_later(kvp.first);
}
}
*/
}
void scheduled_actor::erase_inbound_paths_later(const stream_manager* mgr) {
/*
for (auto& kvp : get<2>(mailbox_.queues()).queues())
if (kvp.second.policy().handler->mgr == mgr)
erase_inbound_path_later(kvp.first);
*/
}
void scheduled_actor::erase_inbound_paths_later(const stream_manager* mgr,
error reason) {
/*
using fn = void (*)(local_actor*, inbound_path&, error&);
fn regular = [](local_actor* self, inbound_path& in, error&) {
in.emit_regular_shutdown(self);
};
fn irregular = [](local_actor* self, inbound_path& in, error& rsn) {
in.emit_irregular_shutdown(self, rsn);
};
auto f = reason == none ? regular : irregular;
for (auto& kvp : get<2>(mbox.queues()).queues()) {
auto& path = kvp.second.policy().handler;
if (path != nullptr && path->mgr == mgr) {
f(this, *path, reason);
erase_inbound_path_later(kvp.first);
}
}
*/
} }
stream_slot scheduled_actor::next_slot() { stream_slot scheduled_actor::next_slot() {
...@@ -683,69 +756,4 @@ stream_slot scheduled_actor::next_slot() { ...@@ -683,69 +756,4 @@ stream_slot scheduled_actor::next_slot() {
return result; return result;
} }
/*
bool scheduled_actor::handle_stream_msg(mailbox_element& x,
behavior* active_behavior) {
CAF_LOG_TRACE(CAF_ARG(x));
CAF_ASSERT(x.content().match_elements<stream_msg>());
auto& sm = x.content().get_mutable_as<stream_msg>(0);
if (sm.sender == nullptr) {
CAF_LOG_ERROR("received a stream_msg with invalid sender field");
return false;
}
stream_msg_visitor f{this, sm, active_behavior};
auto result = visit(f, sm.content);
if (streams_.empty() && !has_behavior())
quit(exit_reason::normal);
return result;
}
bool scheduled_actor::add_source(const stream_manager_ptr& mgr,
const stream_id& sid,
strong_actor_ptr source_ptr,
strong_actor_ptr original_stage,
stream_priority prio,
response_promise result_cb) {
CAF_LOG_TRACE(CAF_ARG(mgr) << CAF_ARG(sid) << CAF_ARG(source_ptr)
<< CAF_ARG(original_stage) << CAF_ARG(prio)
<< CAF_ARG(result_cb));
CAF_ASSERT(mgr != nullptr);
if (!source_ptr) {
CAF_LOG_ERROR("cannot add invalid source");
return false;
}
if (!sid.valid()) {
CAF_LOG_ERROR("cannot add source with invalid stream ID");
return false;
}
return mgr->add_source(sid, std::move(source_ptr),
std::move(original_stage), prio,
std::move(result_cb));
}
bool scheduled_actor::add_source(const stream_manager_ptr& mgr,
const stream_id& sid,
response_promise result_cb) {
CAF_LOG_TRACE(CAF_ARG(mgr) << CAF_ARG(sid));
CAF_ASSERT(mgr != nullptr);
CAF_ASSERT(current_mailbox_element() != nullptr);
if (!current_mailbox_element()->content().match_elements<stream_msg>()) {
CAF_LOG_ERROR("scheduled_actor::add_source called outside "
"a stream_msg handler");
return false;
}
auto& sm = current_mailbox_element()->content().get_mutable_as<stream_msg>(0);
if (!holds_alternative<stream_msg::open>(sm.content)) {
CAF_LOG_ERROR("scheduled_actor::add_source called outside "
"a stream_msg::open handler");
return false;
}
auto& opn = get<stream_msg::open>(sm.content);
auto source_ptr = std::move(opn.prev_stage);
return mgr->add_source(sid, std::move(source_ptr),
std::move(opn.original_stage), opn.priority,
std::move(result_cb));
}
*/
} // namespace caf } // namespace caf
...@@ -44,49 +44,53 @@ stream_manager::~stream_manager() { ...@@ -44,49 +44,53 @@ stream_manager::~stream_manager() {
// nop // nop
} }
error stream_manager::handle(inbound_path*, downstream_msg::batch&) { void stream_manager::handle(inbound_path*, downstream_msg::batch&) {
return none; CAF_LOG_WARNING("unimplemented base handler for batches called");
} }
error stream_manager::handle(inbound_path* from, downstream_msg::close&) { void stream_manager::handle(inbound_path*, downstream_msg::close&) {
out().take_path(from->slots); // nop
return none;
} }
error stream_manager::handle(inbound_path*, downstream_msg::forced_close&) { void stream_manager::handle(inbound_path*, downstream_msg::forced_close& x) {
return none; abort(std::move(x.reason));
} }
error stream_manager::handle(stream_slots slots, upstream_msg::ack_open& x) { void stream_manager::handle(stream_slots slots, upstream_msg::ack_open& x) {
auto path = out().add_path(slots.invert(), x.rebind_to); auto path = out().add_path(slots.invert(), x.rebind_to);
path->open_credit = x.initial_demand; path->open_credit = x.initial_demand;
path->desired_batch_size = x.desired_batch_size; path->desired_batch_size = x.desired_batch_size;
--pending_handshakes_; --pending_handshakes_;
generate_messages();
push(); push();
return none;
} }
error stream_manager::handle(outbound_path*, upstream_msg::ack_batch&) { void stream_manager::handle(stream_slots slots, upstream_msg::ack_batch& x) {
return none; auto path = out().path(slots.invert());
if (path != nullptr) {
path->open_credit += x.new_capacity;
path->desired_batch_size = x.desired_batch_size;
path->next_ack_id = x.acknowledged_id + 1;
push();
}
} }
error stream_manager::handle(outbound_path*, upstream_msg::drop&) { void stream_manager::handle(stream_slots slots, upstream_msg::drop&) {
return none; out().take_path(slots.invert());
} }
error stream_manager::handle(outbound_path*, upstream_msg::forced_drop&) { void stream_manager::handle(stream_slots slots, upstream_msg::forced_drop& x) {
return none; if (out().path(slots.invert()) != nullptr)
abort(std::move(x.reason));
} }
void stream_manager::close() { void stream_manager::close() {
out().close(); out().close();
// TODO: abort all input pahts as well self_->erase_inbound_paths_later(this);
} }
void stream_manager::abort(error reason) { void stream_manager::abort(error reason) {
out().abort(std::move(reason)); out().abort(std::move(reason));
// TODO: abort all input pahts as well self_->erase_inbound_paths_later(this, std::move(reason));
} }
void stream_manager::push() { void stream_manager::push() {
......
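The reworked ack_batch handler implements the credit cycle: each acknowledgment adds new_capacity to the path's open credit and refreshes the desired batch size, after which push() may emit buffered elements until the credit runs out. A stand-alone sketch of that bookkeeping with made-up types (not the CAF implementation):

#include <algorithm>
#include <cstddef>
#include <vector>

struct toy_outbound_path {
  size_t open_credit = 0;
  size_t desired_batch_size = 1;
};

// Emits buffered elements in chunks of at most desired_batch_size while open
// credit remains; returns the number of elements sent.
inline size_t toy_push(toy_outbound_path& path, std::vector<int>& buf) {
  size_t sent = 0;
  while (path.open_credit > 0 && !buf.empty()) {
    auto chunk = std::min({path.open_credit, path.desired_batch_size, buf.size()});
    buf.erase(buf.begin(), buf.begin() + static_cast<std::ptrdiff_t>(chunk));
    path.open_credit -= chunk;
    sent += chunk;
  }
  return sent;
}

// Mirrors the credit bookkeeping of handle(stream_slots, ack_batch&) above.
inline void toy_handle_ack_batch(toy_outbound_path& path, size_t new_capacity,
                                 size_t desired_batch_size) {
  path.open_credit += new_capacity;
  path.desired_batch_size = desired_batch_size;
}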
...@@ -55,6 +55,11 @@ ...@@ -55,6 +55,11 @@
#include "caf/variant.hpp" #include "caf/variant.hpp"
#include "caf/policy/arg.hpp" #include "caf/policy/arg.hpp"
#include "caf/policy/categorized.hpp"
#include "caf/policy/downstream_messages.hpp"
#include "caf/policy/normal_messages.hpp"
#include "caf/policy/upstream_messages.hpp"
#include "caf/policy/urgent_messages.hpp"
#include "caf/mixin/sender.hpp" #include "caf/mixin/sender.hpp"
...@@ -126,103 +131,19 @@ const char* name_of(const actor_addr& x) { ...@@ -126,103 +131,19 @@ const char* name_of(const actor_addr& x) {
return name_of(actor_cast<strong_actor_ptr>(x)); return name_of(actor_cast<strong_actor_ptr>(x));
} }
// -- policies and queues ------------------------------------------------------ // -- queues -------------------------------------------------------------------
struct policy_base { using default_queue = drr_queue<policy::normal_messages>;
using mapped_type = mailbox_element;
using task_size_type = size_t; using dmsg_queue = wdrr_dynamic_multiplexed_queue<policy::downstream_messages>;
using deficit_type = size_t; using umsg_queue = drr_queue<policy::upstream_messages>;
using deleter_type = detail::disposer; using urgent_queue = drr_queue<policy::urgent_messages>;
using unique_pointer = mailbox_element_ptr; using mboxqueue =
}; wdrr_fixed_multiplexed_queue<policy::categorized, default_queue, umsg_queue,
dmsg_queue, urgent_queue>;
struct default_queue_policy : policy_base {
static inline task_size_type task_size(const mailbox_element&) {
return 1;
}
};
using default_queue = drr_queue<default_queue_policy>;
struct umsg_queue_policy : policy_base {
stream_manager* mgr;
umsg_queue_policy(stream_manager* ptr) : mgr(ptr) {
// nop
}
static inline task_size_type task_size(const mailbox_element&) {
return 1;
}
};
using umsg_queue = drr_queue<umsg_queue_policy>;
struct inner_dmsg_queue_policy : policy_base {
using key_type = stream_slot;
task_size_type task_size(const mailbox_element& x) {
return visit(*this, x.content().get_as<downstream_msg>(0).content);
}
task_size_type operator()(const downstream_msg::batch& x) const {
CAF_ASSERT(x.xs_size > 0);
return static_cast<task_size_type>(x.xs_size);
}
template <class T>
task_size_type operator()(const T&) const {
return 1;
}
inner_dmsg_queue_policy(std::unique_ptr<inbound_path> ptr)
: handler(std::move(ptr)) {
// nop
}
std::unique_ptr<inbound_path> handler;
};
using inner_dmsg_queue = drr_queue<inner_dmsg_queue_policy>;
struct dmsg_queue_policy : policy_base {
using key_type = stream_slot;
using queue_map_type = std::map<stream_slot, inner_dmsg_queue>;
key_type id_of(mailbox_element& x) {
return x.content().get_as<downstream_msg>(0).slots.receiver;
}
template <class Queue>
static inline bool enabled(const Queue& q) {
return !q.policy().handler->mgr->congested();
}
template <class Queue>
deficit_type quantum(const Queue&, deficit_type x) {
return x;
}
};
using dmsg_queue = wdrr_dynamic_multiplexed_queue<dmsg_queue_policy>;
struct mboxpolicy : policy_base {
template <class Queue>
deficit_type quantum(const Queue&, deficit_type x) {
return x;
}
size_t id_of(const mailbox_element& x) {
return x.mid.category();
}
};
using mboxqueue = wdrr_fixed_multiplexed_queue<mboxpolicy, default_queue,
umsg_queue, dmsg_queue,
default_queue>;
// -- entity and mailbox visitor ----------------------------------------------- // -- entity and mailbox visitor -----------------------------------------------
...@@ -256,8 +177,7 @@ public: ...@@ -256,8 +177,7 @@ public:
entity(actor_config& cfg, const char* cstr_name, time_point* global_time, entity(actor_config& cfg, const char* cstr_name, time_point* global_time,
duration_type credit_interval, duration_type force_batches_interval) duration_type credit_interval, duration_type force_batches_interval)
: super(cfg), : super(cfg),
mbox(mboxpolicy{}, default_queue_policy{}, nullptr, dmsg_queue_policy{}, mbox(unit, unit, unit, unit, unit),
default_queue_policy{}),
name_(cstr_name), name_(cstr_name),
next_slot_(static_cast<stream_slot>(id())), next_slot_(static_cast<stream_slot>(id())),
global_time_(global_time), global_time_(global_time),
...@@ -398,11 +318,9 @@ public: ...@@ -398,11 +318,9 @@ public:
} }
managers_.emplace(id, mgr); managers_.emplace(id, mgr);
// Create a new queue in the mailbox for incoming traffic. // Create a new queue in the mailbox for incoming traffic.
auto ip = new inbound_path(mgr, id, hs.prev_stage); auto path = make_inbound_path(mgr, id, std::move(hs.prev_stage));
get<2>(mbox.queues()) CAF_REQUIRE_NOT_EQUAL(path, nullptr);
.queues() path->emit_ack_open(this, actor_cast<actor_addr>(hs.original_stage));
.emplace(slot, std::unique_ptr<inbound_path>{ip});
ip->emit_ack_open(this, actor_cast<actor_addr>(hs.original_stage));
} }
void operator()(stream_slots slots, actor_addr& sender, void operator()(stream_slots slots, actor_addr& sender,
...@@ -418,32 +336,16 @@ public: ...@@ -418,32 +336,16 @@ public:
pending_managers_.erase(i); pending_managers_.erase(i);
CAF_REQUIRE(res.second); CAF_REQUIRE(res.second);
res.first->second->handle(slots, x); res.first->second->handle(slots, x);
/*
auto to = actor_cast<strong_actor_ptr>(sender);
CAF_REQUIRE_NOT_EQUAL(to, nullptr);
auto out = i->second->out().add_path(slots.invert(), to);
i->second->handle(out, x);
i->second->generate_messages();
i->second->push();
*/
} }
void operator()(stream_slots input_slots, actor_addr& sender, void operator()(stream_slots slots, actor_addr& sender,
upstream_msg::ack_batch& x) { upstream_msg::ack_batch& x) {
TRACE(name_, ack_batch, CAF_ARG(input_slots), TRACE(name_, ack_batch, CAF_ARG(slots),
CAF_ARG2("sender", name_of(sender)), CAF_ARG(x)); CAF_ARG2("sender", name_of(sender)), CAF_ARG(x));
// Get the manager for that stream. // Get the manager for that stream.
auto i = managers_.find(input_slots); auto i = managers_.find(slots);
CAF_REQUIRE_NOT_EQUAL(i, managers_.end()); CAF_REQUIRE_NOT_EQUAL(i, managers_.end());
auto to = actor_cast<strong_actor_ptr>(sender); i->second->handle(slots, x);
CAF_REQUIRE_NOT_EQUAL(to, nullptr);
auto out = i->second->out().path(input_slots.invert());
CAF_REQUIRE_NOT_EQUAL(out, nullptr);
out->open_credit += x.new_capacity;
out->desired_batch_size = x.desired_batch_size;
out->next_ack_id = x.acknowledged_id + 1;
i->second->generate_messages();
i->second->push();
if (i->second->done()) { if (i->second->done()) {
CAF_MESSAGE(name_ << " is done sending batches"); CAF_MESSAGE(name_ << " is done sending batches");
i->second->close(); i->second->close();
...@@ -473,6 +375,36 @@ public: ...@@ -473,6 +375,36 @@ public:
tick_emitter_.update(now(), f); tick_emitter_.update(now(), f);
} }
inbound_path* make_inbound_path(stream_manager_ptr mgr, stream_slots slots,
strong_actor_ptr sender) override {
auto res = get<2>(mbox.queues()).queues().emplace(slots.receiver, nullptr);
if (!res.second)
return nullptr;
auto path = new inbound_path(std::move(mgr), slots, std::move(sender));
res.first->second.policy().handler.reset(path);
return path;
}
void erase_inbound_path_later(stream_slot slot) override {
get<2>(mbox.queues()).erase_later(slot);
}
void erase_inbound_path_later(stream_slot, error) override {
CAF_FAIL("unexpected function call");
}
void erase_inbound_paths_later(const stream_manager* mgr) override {
for (auto& kvp : get<2>(mbox.queues()).queues()) {
auto& path = kvp.second.policy().handler;
if (path != nullptr && path->mgr == mgr)
erase_inbound_path_later(kvp.first);
}
}
void erase_inbound_paths_later(const stream_manager*, error) override {
CAF_FAIL("unexpected function call");
}
time_point now() { time_point now() {
return global_time_ == nullptr ? clock_type::now() : *global_time_; return global_time_ == nullptr ? clock_type::now() : *global_time_;
} }
...@@ -515,10 +447,9 @@ struct msg_visitor { ...@@ -515,10 +447,9 @@ struct msg_visitor {
return intrusive::task_result::resume; return intrusive::task_result::resume;
} }
result_type operator()(is_urgent_async, default_queue& q, result_type operator()(is_urgent_async, urgent_queue&, mailbox_element&) {
mailbox_element& x) { CAF_FAIL("unexpected function call");
is_default_async token; return intrusive::task_result::stop;
return (*this)(token, q, x);
} }
result_type operator()(is_umsg, umsg_queue&, mailbox_element& x) { result_type operator()(is_umsg, umsg_queue&, mailbox_element& x) {
...@@ -543,7 +474,8 @@ struct msg_visitor { ...@@ -543,7 +474,8 @@ struct msg_visitor {
} }
result_type operator()(is_dmsg, dmsg_queue& qs, stream_slot, result_type operator()(is_dmsg, dmsg_queue& qs, stream_slot,
inner_dmsg_queue& q, mailbox_element& x) { policy::downstream_messages::nested_queue_type& q,
mailbox_element& x) {
CAF_REQUIRE(x.content().type_token() == make_type_token<downstream_msg>()); CAF_REQUIRE(x.content().type_token() == make_type_token<downstream_msg>());
auto inptr = q.policy().handler.get(); auto inptr = q.policy().handler.get();
if (inptr == nullptr) if (inptr == nullptr)
......