Commit 8484a3b8 authored by Dominik Charousset

Redesign credit controller API

parent c634d7fb
......@@ -96,11 +96,14 @@ add_library(libcaf_core_obj OBJECT ${CAF_CORE_HEADERS}
src/detail/set_thread_name.cpp
src/detail/shared_spinlock.cpp
src/detail/simple_actor_clock.cpp
src/detail/size_based_credit_controller.cpp
src/detail/stringification_inspector.cpp
src/detail/sync_request_bouncer.cpp
src/detail/test_actor_clock.cpp
src/detail/thread_safe_actor_clock.cpp
src/detail/tick_emitter.cpp
src/detail/token_based_credit_controller.cpp
src/detail/type_id_list_builder.cpp
src/detail/uri_impl.cpp
src/downstream_manager.cpp
......@@ -155,7 +158,6 @@ add_library(libcaf_core_obj OBJECT ${CAF_CORE_HEADERS}
src/sec_strings.cpp
src/serializer.cpp
src/settings.cpp
src/size_based_credit_controller.cpp
src/skip.cpp
src/stream_aborter.cpp
src/stream_manager.cpp
......@@ -169,7 +171,6 @@ add_library(libcaf_core_obj OBJECT ${CAF_CORE_HEADERS}
src/telemetry/metric_family.cpp
src/telemetry/metric_registry.cpp
src/term.cpp
src/test_credit_controller.cpp
src/thread_hook.cpp
src/timestamp.cpp
src/tracing_data.cpp
......
......@@ -204,18 +204,12 @@ public:
// -- stream parameters ------------------------------------------------------
/// @private
timespan stream_desired_batch_complexity;
/// @private
timespan stream_max_batch_delay;
/// @private
timespan stream_credit_round_interval;
/// @private
timespan stream_tick_duration() const noexcept;
// -- OpenSSL parameters -----------------------------------------------------
std::string openssl_certificate;
......
......@@ -295,7 +295,10 @@ private:
}
auto new_size = buffered();
CAF_ASSERT(old_size >= new_size);
this->shipped_messages(old_size - new_size);
auto shipped = old_size - new_size;
this->shipped_messages(shipped);
if (shipped > 0)
this->last_send_ = this->self()->now();
}
state_map_type state_map_;
......
......@@ -32,64 +32,34 @@ public:
// -- member types -----------------------------------------------------------
/// Wraps an assignment of the controller to its source.
struct CAF_CORE_EXPORT assignment {
/// Stores how much credit we assign to the source.
int32_t credit;
struct calibration {
/// Stores how much credit the path may emit at most.
int32_t max_credit;
/// Stores how many elements we demand per batch.
int32_t batch_size;
/// Stores how many batches the caller should wait before calling
/// `calibrate` again.
int32_t next_calibration;
};
// -- constructors, destructors, and assignment operators --------------------
explicit credit_controller(scheduled_actor* self);
virtual ~credit_controller();
// -- properties -------------------------------------------------------------
scheduled_actor* self() noexcept {
return self_;
}
// -- pure virtual functions -------------------------------------------------
/// Called before processing the batch `x` in order to allow the controller
/// to keep statistics on incoming batches.
virtual void before_processing(downstream_msg::batch& x) = 0;
/// Called after processing the batch `x` in order to allow the controller to
/// keep statistics on incoming batches.
/// @note The consumer may alter the batch while processing it, for example
/// by moving each element of the batch downstream.
virtual void after_processing(downstream_msg::batch& x) = 0;
virtual void before_processing(downstream_msg::batch& batch) = 0;
/// Assigns initial credit during the stream handshake.
/// @returns The initial credit for the new sources.
virtual assignment compute_initial() = 0;
/// Assigns new credit to the source after a cycle ends.
/// @param cycle Duration of a cycle.
virtual assignment compute(timespan cycle) = 0;
// -- virtual functions ------------------------------------------------------
/// Returns an initial calibration for the path.
virtual calibration init() = 0;
/// Computes a credit assignment to the source after crossing the
/// low-threshold. May assign zero credit.
virtual assignment compute_bridge();
/// Returns the threshold for when we may give extra credit to a source
/// during a cycle.
/// @returns Zero or a negative value if the controller never grants bridge
/// credit, otherwise the threshold for calling `compute_bridge` to
/// generate additional credit.
virtual int32_t threshold() const noexcept;
private:
// -- member variables -------------------------------------------------------
/// Points to the parent system.
scheduled_actor* self_;
virtual calibration calibrate() = 0;
};
} // namespace caf
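
As a usage sketch for the redesigned interface (not part of the commit; it assumes the base class now requires only the three overrides shown above and is default-constructible), a custom controller with a fully static policy could look like this:

#include <cstdint>
#include <limits>

#include "caf/credit_controller.hpp"
#include "caf/downstream_msg.hpp"

// Hypothetical controller: always allow 500 in-flight elements, ship batches
// of 50 elements, and never ask for re-calibration.
class fixed_credit_controller : public caf::credit_controller {
public:
  void before_processing(caf::downstream_msg::batch&) override {
    // A static policy has no need to sample incoming batches.
  }
  calibration init() override {
    return calibrate();
  }
  calibration calibrate() override {
    // Fields: max_credit, batch_size, next_calibration.
    return {500, 50, std::numeric_limits<std::int32_t>::max()};
  }
};

This mirrors the shape of the new token_based_credit_controller further down in this commit, which likewise returns a calibration that never expires.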
......@@ -33,20 +33,60 @@
namespace caf::defaults::stream {
constexpr auto desired_batch_complexity = timespan{50'000};
constexpr auto max_batch_delay = timespan{5'000'000};
constexpr auto credit_round_interval = timespan{10'000'000};
constexpr auto credit_policy = string_view{"complexity"};
constexpr auto max_batch_delay = timespan{1'000'000};
/// Configures an algorithm for assigning credit and adjusting batch sizes.
///
/// The `size-based` controller (default) samples how many Bytes stream elements
/// occupy when serialized to CAF's binary wire format.
///
/// The `token-based` controller associates each stream element with one token.
/// Input buffer and batch sizes are then statically defined in terms of tokens.
/// This strategy makes no dynamic adjustment or sampling.
constexpr auto credit_policy = string_view{"size-based"};
[[deprecated("this parameter no longer has any effect")]] //
constexpr auto credit_round_interval
= max_batch_delay;
} // namespace caf::defaults::stream
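
As an illustration of selecting between these controllers (mirroring the test fixture changes further down in this commit; the concrete values are only placeholders), an application picks the policy and its parameters through the actor system configuration:

#include "caf/actor_system_config.hpp"

caf::actor_system_config cfg;
// Switch from the default size-based controller to the token-based one and
// bound the input buffer and batch sizes in tokens (i.e., elements).
cfg.set("caf.stream.credit-policy", "token-based");
cfg.set("caf.stream.token-based-policy.batch-size", 50);
cfg.set("caf.stream.token-based-policy.buffer-size", 200);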
namespace caf::defaults::stream::size_policy {
constexpr auto bytes_per_batch = int32_t{02 * 1024}; // 2 KB
/// Desired size of a single batch in Bytes, when serialized into CAF's binary
/// wire format.
constexpr auto bytes_per_batch = int32_t{2 * 1024}; // 2 KB
/// Number of Bytes (over all received elements) an inbound path may buffer.
/// Actors use heuristics for calculating the estimated memory use, so actors
/// may still allocate more memory in practice.
constexpr auto buffer_capacity = int32_t{64 * 1024}; // 64 KB
/// Frequency of computing the serialized size of incoming batches. Smaller
/// values may increase accuracy, but also add computational overhead.
constexpr auto sampling_rate = int32_t{25};
/// Frequency of re-calibrating batch sizes. For example, a calibration interval
/// of 10 and a sampling rate of 20 causes the actor to re-calibrate every 200
/// batches.
constexpr auto calibration_interval = int32_t{20};
/// Value between 0 and 1 representing the degree of weighting decrease for
/// adjusting batch sizes. A higher factor discounts older observations faster.
constexpr auto smoothing_factor = float{0.6};
} // namespace caf::defaults::stream::size_policy
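
To make the sampling and smoothing defaults above concrete (an illustrative sketch, not part of the commit): with a sampling rate of 25 and a calibration interval of 20, the controller measures every 25th batch and re-calibrates after 25 * 20 = 500 batches. Each calibration blends the new bytes-per-element measurement into the running estimate, as in the helper below, so update_estimate(64, 100) yields 0.6 * 64 + 0.4 * 100 = 78 (truncated).

#include <cstdint>

// Mirrors the exponential smoothing applied in
// size_based_credit_controller::calibrate() further down in this commit.
std::int32_t update_estimate(std::int32_t measured, std::int32_t previous,
                             float smoothing_factor = 0.6f) {
  return static_cast<std::int32_t>(smoothing_factor * measured
                                   + (1.0f - smoothing_factor) * previous);
}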
namespace caf::defaults::stream::token_policy {
/// Number of elements in a single batch.
constexpr auto batch_size = int32_t{256}; // 2 KB for elements of size 8.
/// Maximum number of elements in the input buffer.
constexpr auto buffer_size = int32_t{4096}; // 32 KB for elements of size 8.
} // namespace caf::defaults::stream::token_policy
namespace caf::defaults::scheduler {
constexpr auto policy = string_view{"stealing"};
......
......@@ -26,17 +26,10 @@ namespace caf::detail {
/// batches and constrains credit based on upper bounds for memory usage.
class size_based_credit_controller : public credit_controller {
public:
// -- member types -----------------------------------------------------------
using super = credit_controller;
// -- constants --------------------------------------------------------------
/// Configures at what buffer level we grant bridge credit (0 to 1).
static constexpr float buffer_threshold = 0.75f;
/// Configures how many samples we require for recalculating buffer sizes.
static constexpr int32_t min_samples = 10;
static constexpr int32_t min_samples = 50;
/// Stores how many elements we buffer at most after the handshake.
int32_t initial_buffer_size = 10;
......@@ -46,54 +39,50 @@ public:
// -- constructors, destructors, and assignment operators --------------------
explicit size_based_credit_controller(scheduled_actor* self);
explicit size_based_credit_controller(local_actor* self);
~size_based_credit_controller() override;
// -- overrides --------------------------------------------------------------
// -- interface functions ----------------------------------------------------
void before_processing(downstream_msg::batch& x) override;
void before_processing(downstream_msg::batch& batch) override;
void after_processing(downstream_msg::batch& x) override;
calibration init() override;
assignment compute_initial() override;
calibration calibrate() override;
assignment compute(timespan cycle) override;
private:
// -- member variables -------------------------------------------------------
assignment compute_bridge() override;
local_actor* self_;
int32_t threshold() const noexcept override;
/// Keeps track of when to sample a batch.
int32_t sample_counter_ = 0;
private:
// -- member variables -------------------------------------------------------
/// Stores the last computed (moving) average for the serialized size per
/// element in the stream.
int32_t bytes_per_element_ = 0;
/// Total number of elements in all processed batches in the current cycle.
int64_t num_batches_ = 0;
/// Stores how many elements were sampled since last calling `calibrate`.
int32_t sampled_elements_ = 0;
/// Stores how many elements the buffer should hold at most.
int32_t buffer_size_ = initial_buffer_size;
/// Stores how many bytes the sampled batches required when serialized.
int64_t sampled_total_size_ = 0;
/// Stores how many elements each batch should contain.
int32_t batch_size_ = initial_batch_size;
/// Stores whether this is the first run.
bool initializing_ = true;
/// Configures how many bytes we store in total.
int32_t buffer_capacity_;
// -- see caf::defaults::stream::size_policy --------------------------------
/// Configures how many bytes we transfer per batch.
int32_t bytes_per_batch_;
/// Stores how many elements we have sampled during the current cycle.
int32_t sampled_elements_ = 0;
int32_t buffer_capacity_;
/// Stores approximately how many bytes the sampled elements require when
/// serialized.
int32_t sampled_total_size_ = 0;
int32_t sampling_rate_ = 1;
/// Counter for keeping track of when to sample a batch.
int32_t sample_counter_ = 0;
int32_t calibration_interval_;
/// Configures how many batches we skip for the size sampling.
int32_t sample_rate_ = 1;
float smoothing_factor_;
};
} // namespace caf::detail
......@@ -20,36 +20,43 @@
#include "caf/credit_controller.hpp"
namespace caf {
namespace detail {
namespace caf::detail {
/// Computes predictable credit in unit tests.
class test_credit_controller : public credit_controller {
/// A credit controller that associates each stream element with one token and
/// grants credit based on statically configured buffer and batch sizes.
class token_based_credit_controller : public credit_controller {
public:
// -- member types -----------------------------------------------------------
// -- constants --------------------------------------------------------------
using super = credit_controller;
/// Configures how many samples we require for recalculating buffer sizes.
static constexpr int32_t min_samples = 50;
// -- constructors, destructors, and assignment operators --------------------
/// Stores how many elements we buffer at most after the handshake.
int32_t initial_buffer_size = 10;
/// Stores how many elements we allow per batch after the handshake.
int32_t initial_batch_size = 2;
using super::super;
// -- constructors, destructors, and assignment operators --------------------
~test_credit_controller() override;
explicit token_based_credit_controller(local_actor* self);
// -- overrides --------------------------------------------------------------
~token_based_credit_controller() override;
void before_processing(downstream_msg::batch& x) override;
// -- interface functions ----------------------------------------------------
void after_processing(downstream_msg::batch& x) override;
void before_processing(downstream_msg::batch& batch) override;
assignment compute_initial() override;
calibration init() override;
assignment compute(timespan cycle) override;
calibration calibrate() override;
private:
/// Total number of elements in all processed batches in the current cycle.
int64_t num_elements_ = 0;
// -- see caf::defaults::stream::token_policy -------------------------------
int32_t batch_size_;
int32_t buffer_size_;
};
} // namespace detail
} // namespace caf
} // namespace caf::detail
......@@ -21,9 +21,11 @@
#include <memory>
#include <vector>
#include "caf/actor_clock.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/fwd.hpp"
#include "caf/stream_slot.hpp"
#include "caf/timespan.hpp"
namespace caf {
......@@ -48,6 +50,9 @@ public:
/// Unique pointer to an outbound path.
using unique_path_ptr = std::unique_ptr<path_type>;
/// Discrete point in time, as reported by the actor clock.
using time_point = typename actor_clock::time_point;
/// Function object for iterating over all paths.
struct CAF_CORE_EXPORT path_visitor {
virtual ~path_visitor();
......@@ -83,6 +88,11 @@ public:
/// stream and never has outbound paths.
virtual bool terminal() const noexcept;
// -- time management --------------------------------------------------------
/// Forces underfull batches after reaching the maximum delay.
void tick(time_point now, timespan max_batch_delay);
// -- path management --------------------------------------------------------
/// Applies `f` to each path.
......@@ -241,6 +251,9 @@ protected:
// -- member variables -------------------------------------------------------
stream_manager* parent_;
/// Stores the time stamp of our last batch.
time_point last_send_;
};
} // namespace caf
......@@ -41,12 +41,36 @@ namespace caf {
/// State for a path to an upstream actor (source).
class CAF_CORE_EXPORT inbound_path {
public:
// -- member types -----------------------------------------------------------
/// Message type for propagating graceful shutdowns.
using regular_shutdown = upstream_msg::drop;
/// Message type for propagating errors.
using irregular_shutdown = upstream_msg::forced_drop;
/// Wraps optional actor metrics collected by this path.
struct metrics_t {
telemetry::int_counter* processed_elements;
telemetry::int_gauge* input_buffer_size;
};
/// Discrete point in time, as reported by the actor clock.
using time_point = typename actor_clock::time_point;
/// Time interval, as reported by the actor clock.
using duration_type = typename actor_clock::duration_type;
// -- constructors, destructors, and assignment operators --------------------
/// Constructs a path for given handle and stream ID.
inbound_path(stream_manager_ptr mgr_ptr, stream_slots id,
strong_actor_ptr ptr, type_id_t input_type);
~inbound_path();
// -- member variables -------------------------------------------------------
/// Points to the manager responsible for incoming traffic.
stream_manager_ptr mgr;
......@@ -56,18 +80,24 @@ public:
/// Stores slot IDs for sender (hdl) and receiver (self).
stream_slots slots;
/// Optionally stores pointers to telemetry objects.
struct metrics_t {
telemetry::int_counter* processed_elements;
telemetry::int_gauge* input_buffer_size;
} metrics;
/// Stores pointers to optional telemetry objects.
metrics_t metrics;
/// Stores the last computed desired batch size.
/// Stores the last computed desired batch size. Adjusted at run-time by the
/// controller.
int32_t desired_batch_size = 0;
/// Amount of credit we have signaled upstream.
int32_t assigned_credit = 0;
/// Maximum amount of credit that the path may signal upstream. Adjusted at
/// run-time by the controller.
int32_t max_credit = 0;
/// Decremented whenever receiving a batch. Triggers a re-calibration by the
/// controller when reaching zero.
int32_t calibration_countdown = 10;
/// Priority of incoming batches from this source.
stream_priority prio = stream_priority::normal;
......@@ -80,17 +110,22 @@ public:
/// Controller for assigning credit to the source.
std::unique_ptr<credit_controller> controller_;
/// Stores the time point of the last credit decision for this source.
actor_clock::time_point last_credit_decision;
/// Stores when the last ACK was emitted.
time_point last_ack_time;
/// Stores the time point of the last credit decision for this source.
actor_clock::time_point next_credit_decision;
// -- properties -------------------------------------------------------------
/// Constructs a path for given handle and stream ID.
inbound_path(stream_manager_ptr mgr_ptr, stream_slots id,
strong_actor_ptr ptr, type_id_t input_type);
/// Returns whether the path received no input since last emitting
/// `ack_batch`, i.e., `last_acked_batch_id == last_batch_id`.
bool up_to_date() const noexcept;
~inbound_path();
/// Returns a pointer to the parent actor.
scheduled_actor* self() const noexcept;
/// Returns currently unassigned credit that we could assign to the source.
int32_t available_credit() const noexcept;
// -- callbacks --------------------------------------------------------------
/// Updates `last_batch_id` and `assigned_credit` before dispatching to the
/// manager.
......@@ -103,23 +138,19 @@ public:
mgr->handle(this, x);
}
/// Forces an ACK message after receiving no input for a considerable amount
/// of time.
void tick(time_point now, duration_type max_batch_delay);
// -- messaging --------------------------------------------------------------
/// Emits an `upstream_msg::ack_batch`.
void emit_ack_open(local_actor* self, actor_addr rebind_from);
/// Sends an `upstream_msg::ack_batch` for granting new credit. Credit is
/// calculated from sampled batch durations, the cycle duration and the
/// desired batch complexity.
/// Sends an `upstream_msg::ack_batch` for granting new credit.
/// @param self Points to the parent actor, i.e., sender of the message.
/// @param queued_items Accumulated size of all batches that are currently
/// waiting in the mailbox.
/// @param now Current timestamp.
/// @param cycle Time between credit rounds.
void emit_ack_batch(local_actor* self, int32_t queued_items,
actor_clock::time_point now, timespan cycle);
/// Returns whether the path received no input since last emitting
/// `ack_batch`, i.e., `last_acked_batch_id == last_batch_id`.
bool up_to_date();
/// @param new_credit Amount of new credit to assign to the source.
void emit_ack_batch(local_actor* self, int32_t new_credit);
/// Sends an `upstream_msg::drop` on this path.
void emit_regular_shutdown(local_actor* self);
......@@ -131,9 +162,6 @@ public:
static void
emit_irregular_shutdown(local_actor* self, stream_slots slots,
const strong_actor_ptr& hdl, error reason);
/// Returns a pointer to the parent actor.
scheduled_actor* self();
};
/// @relates inbound_path
......
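Worked example for the credit bookkeeping above (hypothetical numbers): if init() yields max_credit = 32, batch_size = 4, and next_calibration = 10, the handshake signals 32 credit upstream, so assigned_credit starts at 32 and calibration_countdown at 10. A batch of 4 elements lowers assigned_credit to 28 and the countdown to 9; available_credit() then returns 32 - 28 = 4, which reaches the desired batch size, so the path asks the manager for new credit and, assuming the full amount is granted, emits an ack_batch that restores assigned_credit to 32. When the countdown hits zero, the path calls calibrate() and adopts the new max_credit, batch size, and countdown.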
......@@ -32,7 +32,6 @@
#include "caf/actor_traits.hpp"
#include "caf/detail/behavior_stack.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/detail/tick_emitter.hpp"
#include "caf/detail/unordered_flat_map.hpp"
#include "caf/error.hpp"
#include "caf/extend.hpp"
......@@ -681,6 +680,10 @@ public:
|| !pending_stream_managers_.empty();
}
auto max_batch_delay() const noexcept {
return max_batch_delay_;
}
/// @endcond
protected:
......@@ -723,15 +726,13 @@ protected:
/// yet received an ACK.
stream_manager_map pending_stream_managers_;
/// Controls batch and credit timeouts.
detail::tick_emitter stream_ticks_;
/// Stores how long the actor should try to accumulate more items in order to
/// send a full stream batch.
timespan max_batch_delay_;
/// Number of ticks per batch delay.
size_t max_batch_delay_ticks_;
/// Number of ticks of each credit round.
size_t credit_round_ticks_;
/// Pointer to a private thread object associated with a detached actor.
detail::private_thread* private_thread_;
......
......@@ -25,12 +25,14 @@
#include "caf/actor.hpp"
#include "caf/actor_cast.hpp"
#include "caf/actor_clock.hpp"
#include "caf/detail/core_export.hpp"
#include "caf/downstream_msg.hpp"
#include "caf/fwd.hpp"
#include "caf/ref_counted.hpp"
#include "caf/stream.hpp"
#include "caf/stream_slot.hpp"
#include "caf/timespan.hpp"
#include "caf/upstream_msg.hpp"
namespace caf {
......@@ -56,6 +58,11 @@ public:
using inbound_paths_list = std::vector<inbound_path*>;
/// Discrete point in time.
using time_point = typename actor_clock::time_point;
// -- constructors, destructors, and assignment operators --------------------
stream_manager(scheduled_actor* selfptr,
stream_priority prio = stream_priority::normal);
......@@ -83,10 +90,6 @@ public:
/// buffers of in- and outbound paths.
virtual void shutdown();
/// Tries to advance the stream by generating more credit or by sending
/// batches.
void advance();
/// Pushes new data to downstream actors by sending batches. The amount of
/// pushed data is limited by the available credit.
virtual void push();
......@@ -107,7 +110,7 @@ public:
/// messages.
virtual bool generate_messages();
// -- pure virtual member functions ------------------------------------------
// -- interface functions ----------------------------------------------------
/// Returns the manager for downstream communication.
virtual downstream_manager& out() = 0;
......@@ -127,6 +130,11 @@ public:
/// Advances time.
virtual void cycle_timeout(size_t cycle_nr);
/// Acquires credit on an inbound path. The calculated credit to fill our
/// queue for two cycles is `desired`, but the manager is allowed to return
/// any non-negative value.
virtual int32_t acquire_credit(inbound_path* path, int32_t desired);
// -- input path management --------------------------------------------------
/// Informs the manager that a new input path opens.
......@@ -184,10 +192,7 @@ public:
return self_;
}
/// Acquires credit on an inbound path. The calculated credit to fill our
/// queue for two cycles is `desired`, but the manager is allowed to return
/// any non-negative value.
virtual int32_t acquire_credit(inbound_path* path, int32_t desired);
// -- output path management -------------------------------------------------
/// Creates an outbound path to the current sender without any type checking.
/// @pre `out().terminal() == false`
......@@ -268,6 +273,10 @@ public:
/// @pre Current message is an `open_stream_msg`.
stream_slot add_unchecked_inbound_path_impl(type_id_t rtti);
// -- time management --------------------------------------------------------
void tick(time_point now);
protected:
// -- modifiers for self -----------------------------------------------------
......@@ -312,6 +321,10 @@ protected:
/// Stores individual flags, for continuous streaming or when shutting down.
int flags_;
/// Stores the maximum amount of time outbound paths should buffer elements
/// before sending underful batches.
timespan max_batch_delay_;
private:
void setf(int flag) noexcept {
auto x = flags_;
......
......@@ -62,7 +62,7 @@ public:
// -- properties -------------------------------------------------------------
/// Creates a new input path to the current sender.
inbound_stream_slot<input_type> add_inbound_path(const stream<input_type>&) {
inbound_stream_slot<input_type> add_inbound_path(stream<input_type>) {
return {this->add_unchecked_inbound_path_impl(type_id_v<input_type>)};
}
......
......@@ -59,7 +59,7 @@ public:
}
DownstreamManager& out() override {
return left_super::out();
return this->out_;
}
};
......
......@@ -57,9 +57,8 @@ actor_system_config::actor_system_config()
config_file_path(default_config_file),
slave_mode_fun(nullptr) {
// (1) hard-coded defaults
stream_desired_batch_complexity = defaults::stream::desired_batch_complexity;
stream_max_batch_delay = defaults::stream::max_batch_delay;
stream_credit_round_interval = defaults::stream::credit_round_interval;
stream_credit_round_interval = 2 * stream_max_batch_delay;
// fill our options vector for creating config file and CLI parsers
using std::string;
using string_list = std::vector<string>;
......@@ -70,14 +69,19 @@ actor_system_config::actor_system_config()
.add<string>(config_file_path, "config-file",
"set config file (default: caf-application.conf)");
opt_group{custom_options_, "caf.stream"}
.add<timespan>(stream_desired_batch_complexity, "desired-batch-complexity",
"processing time per batch")
.add<timespan>(stream_max_batch_delay, "max-batch-delay",
"maximum delay for partial batches")
.add<timespan>(stream_credit_round_interval, "credit-round-interval",
"time between emitting credit")
.add<string>("credit-policy",
"selects an algorithm for credit computation");
"selects an implementation for credit computation");
opt_group{custom_options_, "caf.stream.size-based-policy"}
.add<int32_t>("bytes-per-batch", "desired batch size in bytes")
.add<int32_t>("buffer-capacity", "maximum input buffer size in bytes")
.add<int32_t>("sampling-rate", "frequency of collecting batch sizes")
.add<int32_t>("calibration-interval", "frequency of re-calibrations")
.add<float>("smoothing-factor", "factor for discounting older samples");
opt_group{custom_options_, "caf.stream.token-based-policy"}
.add<int32_t>("batch-size", "number of elements per batch")
.add<int32_t>("buffer-size", "max. number of elements in the input buffer");
opt_group{custom_options_, "caf.scheduler"}
.add<string>("policy", "'stealing' (default) or 'sharing'")
.add<size_t>("max-threads", "maximum number of worker threads")
......@@ -117,12 +121,8 @@ settings actor_system_config::dump_content() const {
auto& caf_group = result["caf"].as_dictionary();
// -- streaming parameters
auto& stream_group = caf_group["stream"].as_dictionary();
put_missing(stream_group, "desired-batch-complexity",
defaults::stream::desired_batch_complexity);
put_missing(stream_group, "max-batch-delay",
defaults::stream::max_batch_delay);
put_missing(stream_group, "credit-round-interval",
defaults::stream::credit_round_interval);
put_missing(stream_group, "credit-policy", defaults::stream::credit_policy);
put_missing(stream_group, "size-policy.buffer-capacity",
defaults::stream::size_policy::buffer_capacity);
......@@ -354,12 +354,6 @@ actor_system_config& actor_system_config::set_impl(string_view name,
return *this;
}
timespan actor_system_config::stream_tick_duration() const noexcept {
auto ns_count = caf::detail::gcd(stream_credit_round_interval.count(),
stream_max_batch_delay.count());
return timespan{ns_count};
}
std::string actor_system_config::render(const error& x) {
return to_string(x);
}
......
......@@ -20,20 +20,8 @@
namespace caf {
credit_controller::credit_controller(scheduled_actor* self) : self_(self) {
// nop
}
credit_controller::~credit_controller() {
// nop
}
credit_controller::assignment credit_controller::compute_bridge() {
return {0, 0};
}
int32_t credit_controller::threshold() const noexcept {
return -1;
}
} // namespace caf
......@@ -24,44 +24,63 @@
#include "caf/detail/serialized_size.hpp"
#include "caf/scheduled_actor.hpp"
// Save us some typing and avoid very ugly formatting.
#define impl size_based_credit_controller
namespace {
// Sample the first 10 batches when starting up.
constexpr int32_t initial_sample_size = 10;
} // namespace
namespace caf::detail {
impl::impl(scheduled_actor* self) : super(self) {
auto& cfg = self->system().config();
buffer_capacity_ = get_or(cfg, "caf.stream.size-policy.buffer-capacity",
defaults::stream::size_policy::buffer_capacity);
bytes_per_batch_ = get_or(cfg, "caf.stream.size-policy.bytes-per-batch",
defaults::stream::size_policy::bytes_per_batch);
size_based_credit_controller::size_based_credit_controller(local_actor* ptr)
: self_(ptr) {
namespace fallback = defaults::stream::size_policy;
// Initialize from the config parameters.
auto& cfg = ptr->system().config();
if (auto section = get_if<settings>(&cfg, "caf.stream.size-based-policy")) {
bytes_per_batch_ = get_or(*section, "bytes-per-batch",
fallback::bytes_per_batch);
buffer_capacity_ = get_or(*section, "buffer-capacity",
fallback::buffer_capacity);
calibration_interval_ = get_or(*section, "calibration-interval",
fallback::calibration_interval);
smoothing_factor_ = get_or(*section, "smoothing-factor",
fallback::smoothing_factor);
} else {
buffer_capacity_ = fallback::buffer_capacity;
bytes_per_batch_ = fallback::bytes_per_batch;
calibration_interval_ = fallback::calibration_interval;
smoothing_factor_ = fallback::smoothing_factor;
}
}
impl::~impl() {
size_based_credit_controller::~size_based_credit_controller() {
// nop
}
void impl::before_processing(downstream_msg::batch& x) {
if (++sample_counter_ == sample_rate_) {
sampled_elements_ += x.xs_size;
sampled_total_size_ += serialized_size(self()->system(), x.xs);
void size_based_credit_controller::before_processing(downstream_msg::batch& x) {
if (++sample_counter_ == sampling_rate_) {
sample_counter_ = 0;
sampled_elements_ += x.xs_size;
// FIXME: this wildly overestimates the actual size per element, because we
// include the size of the meta data per message. This also punishes
// small batches and we probably never reach a stable point even if
// incoming data always has the exact same size per element.
sampled_total_size_ += static_cast<int64_t>(serialized_size(x.xs));
}
++num_batches_;
}
void impl::after_processing(downstream_msg::batch&) {
// nop
}
credit_controller::assignment impl::compute_initial() {
return {buffer_size_, batch_size_};
credit_controller::calibration size_based_credit_controller::init() {
// Initially, we simply assume that the size of one element equals
// bytes-per-batch.
return {buffer_capacity_ / bytes_per_batch_, 1, initial_sample_size};
}
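// Illustration (not part of the commit): with the default buffer-capacity of
// 64 KB and bytes-per-batch of 2 KB, this grants credit for
// 64 * 1024 / (2 * 1024) = 32 elements, requests one element per batch, and
// schedules the first re-calibration after initial_sample_size (10) batches.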
credit_controller::assignment impl::compute(timespan) {
if (sampled_elements_ >= min_samples) {
// Helper for truncating a 64-bit integer to a 32-bit integer with a
// minimum value of 1.
credit_controller::calibration size_based_credit_controller::calibrate() {
CAF_ASSERT(sample_counter_ == 0);
// Helper for truncating a 64-bit integer to a 32-bit integer with a minimum
// value of 1.
auto clamp_i32 = [](int64_t x) -> int32_t {
static constexpr auto upper_bound = std::numeric_limits<int32_t>::max();
if (x > upper_bound)
......@@ -70,31 +89,26 @@ credit_controller::assignment impl::compute(timespan) {
return 1;
return static_cast<int32_t>(x);
};
// Calculate ideal batch size by size.
auto bytes_per_element = clamp_i32(sampled_total_size_ / sampled_elements_);
batch_size_ = clamp_i32(bytes_per_batch_ / bytes_per_element);
buffer_size_ = clamp_i32(buffer_capacity_ / bytes_per_element);
// Reset bookkeeping state.
sampled_elements_ = 0;
sampled_total_size_ = 0;
// Adjust the sample rate to reach min_samples in the next cycle.
sample_rate_ = clamp_i32(num_batches_ / min_samples);
if (sample_counter_ >= sample_rate_)
sample_counter_ = 0;
num_batches_ = 0;
if (!initializing_) {
auto bpe = clamp_i32(sampled_total_size_ / calibration_interval_);
bytes_per_element_ = static_cast<int32_t>(
smoothing_factor_ * bpe // weighted current measurement
+ (1.0 - smoothing_factor_) * bytes_per_element_ // past values
);
} else {
// After our first run, we continue with the actual sampling rate.
initializing_ = false;
sampling_rate_ = get_or(self_->config(),
"caf.stream.size-policy.sample-rate",
defaults::stream::size_policy::sampling_rate);
bytes_per_element_ = clamp_i32(sampled_total_size_ / initial_sample_size);
}
return {buffer_size_, batch_size_};
}
credit_controller::assignment impl::compute_bridge() {
CAF_ASSERT(batch_size_ > 0);
CAF_ASSERT(buffer_size_ > batch_size_);
return {buffer_size_, batch_size_};
}
int32_t impl::threshold() const noexcept {
return buffer_size_-1;
// return static_cast<int32_t>(buffer_size_ * buffer_threshold);
sampled_total_size_ = 0;
return {
clamp_i32(buffer_capacity_ / bytes_per_element_),
clamp_i32(bytes_per_batch_ / bytes_per_element_),
sampling_rate_ * calibration_interval_,
};
}
} // namespace caf::detail
......@@ -16,60 +16,45 @@
* http://www.boost.org/LICENSE_1_0.txt. *
******************************************************************************/
#include "caf/detail/test_credit_controller.hpp"
#include "caf/detail/token_based_credit_controller.hpp"
#include "caf/actor_system.hpp"
#include "caf/actor_system_config.hpp"
#include "caf/scheduled_actor.hpp"
namespace caf {
namespace detail {
test_credit_controller::~test_credit_controller() {
// nop
#include "caf/config_value.hpp"
#include "caf/defaults.hpp"
#include "caf/detail/serialized_size.hpp"
#include "caf/local_actor.hpp"
#include "caf/settings.hpp"
namespace caf::detail {
token_based_credit_controller::token_based_credit_controller(local_actor* ptr) {
namespace fallback = defaults::stream::token_policy;
// Initialize from the config parameters.
auto& cfg = ptr->system().config();
if (auto section = get_if<settings>(&cfg, "caf.stream.token-based-policy")) {
batch_size_ = get_or(*section, "batch-size", fallback::batch_size);
buffer_size_ = get_or(*section, "buffer-size", fallback::buffer_size);
} else {
batch_size_ = fallback::batch_size;
buffer_size_ = fallback::buffer_size;
}
}
void test_credit_controller::before_processing(downstream_msg::batch& x) {
num_elements_ += x.xs_size;
token_based_credit_controller::~token_based_credit_controller() {
// nop
}
void test_credit_controller::after_processing(downstream_msg::batch&) {
void token_based_credit_controller::before_processing(downstream_msg::batch&) {
// nop
}
credit_controller::assignment test_credit_controller::compute_initial() {
return {50, 50};
credit_controller::calibration token_based_credit_controller::init() {
return calibrate();
}
credit_controller::assignment test_credit_controller::compute(timespan cycle) {
auto& cfg = self()->system().config();
auto complexity = cfg.stream_desired_batch_complexity;
// Max throughput = C * (N / t), where C = cycle length, N = measured items,
// and t = measured time. Desired batch size is the same formula with D
// (desired complexity) instead of C. We compute our values in 64-bit for
// more precision before truncating to a 32-bit integer type at the end.
int64_t total_ns = num_elements_ * 1000; // calculate with 1us per element
if (total_ns == 0)
return {1, 1};
// Helper for truncating a 64-bit integer to a 32-bit integer with a minimum
// value of 1.
auto clamp = [](int64_t x) -> int32_t {
static constexpr auto upper_bound = std::numeric_limits<int32_t>::max();
if (x > upper_bound)
return upper_bound;
if (x <= 0)
return 1;
return static_cast<int32_t>(x);
};
// Instead of C * (N / t) we calculate (C * N) / t to avoid double conversion
// and rounding errors.
assignment result;
result.credit = clamp((cycle.count() * num_elements_) / total_ns);
result.batch_size = clamp((complexity.count() * num_elements_) / total_ns);
// Reset state and return.
num_elements_ = 0;
return result;
credit_controller::calibration token_based_credit_controller::calibrate() {
return {buffer_size_, batch_size_, std::numeric_limits<int32_t>::max()};
}
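// Returning INT32_MAX as next_calibration effectively disables re-calibration:
// the per-batch countdown on the inbound path never reaches zero, so the
// statically configured buffer and batch sizes remain in effect for the
// lifetime of the path.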
} // namespace detail
} // namespace caf
} // namespace caf::detail
......@@ -41,7 +41,7 @@ downstream_manager::downstream_manager::path_predicate::~path_predicate() {
downstream_manager::downstream_manager(stream_manager* parent)
: parent_(parent) {
// nop
last_send_ = parent->self()->now();
}
downstream_manager::~downstream_manager() {
......@@ -62,6 +62,13 @@ bool downstream_manager::terminal() const noexcept {
return true;
}
// -- time management ----------------------------------------------------------
void downstream_manager::tick(time_point now, timespan max_batch_delay) {
if (now >= last_send_ + max_batch_delay && buffered() > 0)
force_emit_batches();
}
// -- path management ----------------------------------------------------------
std::vector<stream_slot> downstream_manager::path_slots() {
......
......@@ -22,51 +22,27 @@
#include "caf/defaults.hpp"
#include "caf/detail/meta_object.hpp"
#include "caf/detail/size_based_credit_controller.hpp"
#include "caf/detail/test_credit_controller.hpp"
#include "caf/detail/token_based_credit_controller.hpp"
#include "caf/logger.hpp"
#include "caf/no_stages.hpp"
#include "caf/scheduled_actor.hpp"
#include "caf/send.hpp"
#include "caf/settings.hpp"
namespace caf {
namespace {
constexpr bool force_ack = true;
void emit_ack_batch(inbound_path& path, credit_controller::assignment x,
bool force_ack_msg = false) {
CAF_ASSERT(x.batch_size > 0);
auto& out = path.mgr->out();
path.desired_batch_size = x.batch_size;
int32_t new_credit = 0;
auto used = static_cast<int32_t>(out.buffered()) + path.assigned_credit;
auto guard = detail::make_scope_guard([&] {
if (!force_ack_msg || path.up_to_date())
return;
unsafe_send_as(
path.self(), path.hdl,
make<upstream_msg::ack_batch>(path.slots.invert(), path.self()->address(),
0, x.batch_size, path.last_batch_id));
path.last_acked_batch_id = path.last_batch_id;
});
if (x.credit <= used)
return;
new_credit = path.mgr->acquire_credit(&path, x.credit - used);
if (new_credit < 1)
return;
guard.disable();
unsafe_send_as(path.self(), path.hdl,
make<upstream_msg::ack_batch>(
path.slots.invert(), path.self()->address(), new_credit,
x.batch_size, path.last_batch_id));
path.last_acked_batch_id = path.last_batch_id;
path.assigned_credit += new_credit;
template <class T>
void set_controller(std::unique_ptr<caf::credit_controller>& ptr,
caf::local_actor* self) {
ptr = std::make_unique<T>(self);
}
} // namespace
namespace caf {
// -- constructors, destructors, and assignment operators ----------------------
inbound_path::inbound_path(stream_manager_ptr mgr_ptr, stream_slots id,
strong_actor_ptr ptr,
[[maybe_unused]] type_id_t in_type)
......@@ -80,55 +56,87 @@ inbound_path::inbound_path(stream_manager_ptr mgr_ptr, stream_slots id,
<< "opens input stream with element type"
<< detail::global_meta_object(in_type)->type_name
<< "at slot" << id.receiver << "from" << hdl);
auto setter = set_controller<detail::size_based_credit_controller>;
if (auto str = get_if<std::string>(&self->system().config(),
"caf.stream.credit-policy")) {
if (*str == "testing")
controller_.reset(new detail::test_credit_controller(self));
else if (*str == "size")
controller_.reset(new detail::size_based_credit_controller(self));
} else {
controller_.reset(new detail::size_based_credit_controller(self));
if (*str == "token-based")
setter = set_controller<detail::token_based_credit_controller>;
else if (*str != "size-based")
CAF_LOG_WARNING("unrecognized credit policy:"
<< *str << "(falling back to 'size-based')");
}
setter(controller_, self);
last_ack_time = self->now();
}
inbound_path::~inbound_path() {
mgr->deregister_input_path(this);
}
void inbound_path::handle(downstream_msg::batch& x) {
CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(x));
auto batch_size = x.xs_size;
last_batch_id = x.id;
CAF_STREAM_LOG_DEBUG(mgr->self()->name() << "handles batch of size"
<< batch_size << "on slot" << slots.receiver << "with"
// -- properties ---------------------------------------------------------------
bool inbound_path::up_to_date() const noexcept {
return last_acked_batch_id == last_batch_id;
}
scheduled_actor* inbound_path::self() const noexcept {
return mgr->self();
}
int32_t inbound_path::available_credit() const noexcept {
// The max_credit may decrease as a result of re-calibration, in which case
// the source can have more than the maximum amount for a brief period.
return std::max(max_credit - assigned_credit, int32_t{0});
}
// -- callbacks ----------------------------------------------------------------
void inbound_path::handle(downstream_msg::batch& batch) {
CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(batch));
// Handle batch.
auto batch_size = batch.xs_size;
last_batch_id = batch.id;
CAF_STREAM_LOG_DEBUG(self()->name() << "handles batch of size" << batch_size
<< "on slot" << slots.receiver << "with"
<< assigned_credit << "assigned credit");
if (assigned_credit <= batch_size) {
assigned_credit = 0;
// Do not log a message when "running out of credit" for the first batch
// that can easily consume the initial credit in one shot.
CAF_STREAM_LOG_DEBUG_IF(next_credit_decision.time_since_epoch().count() > 0,
mgr->self()->name()
<< "ran out of credit at slot" << slots.receiver);
} else {
assigned_credit -= batch_size;
CAF_ASSERT(assigned_credit >= 0);
controller_->before_processing(batch);
mgr->handle(this, batch);
// Update settings as necessary.
if (--calibration_countdown == 0) {
auto [cmax, bsize, countdown] = controller_->calibrate();
max_credit = cmax;
desired_batch_size = bsize;
calibration_countdown = countdown;
}
auto threshold = controller_->threshold();
if (threshold >= 0 && assigned_credit <= threshold)
caf::emit_ack_batch(*this, controller_->compute_bridge());
controller_->before_processing(x);
mgr->handle(this, x);
controller_->after_processing(x);
// Send ACK whenever we can assign credit for another batch to the source.
if (auto available = available_credit(); available >= desired_batch_size)
if (auto acquired = mgr->acquire_credit(this, available); acquired > 0)
emit_ack_batch(self(), acquired);
// FIXME: move this up to the actor
mgr->push();
}
void inbound_path::tick(time_point now, duration_type max_batch_delay) {
if (now >= last_ack_time + (2 * max_batch_delay)) {
int32_t new_credit = 0;
if (auto available = available_credit(); available > 0)
new_credit = mgr->acquire_credit(this, available);
emit_ack_batch(self(), new_credit);
}
}
// -- messaging ----------------------------------------------------------------
void inbound_path::emit_ack_open(local_actor* self, actor_addr rebind_from) {
CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(rebind_from));
// Update state.
auto initial = controller_->compute_initial();
assigned_credit = mgr->acquire_credit(this, initial.credit);
CAF_ASSERT(assigned_credit >= 0);
desired_batch_size = std::min(initial.batch_size, assigned_credit);
auto [cmax, bsize, countdown] = controller_->init();
max_credit = cmax;
assigned_credit = mgr->acquire_credit(this, cmax);
desired_batch_size = bsize;
calibration_countdown = countdown;
// Make sure we receive errors from this point on.
stream_aborter::add(hdl, self->address(), slots.receiver,
stream_aborter::source_aborter);
......@@ -137,19 +145,21 @@ void inbound_path::emit_ack_open(local_actor* self, actor_addr rebind_from) {
make<upstream_msg::ack_open>(
slots.invert(), self->address(), std::move(rebind_from),
self->ctrl(), assigned_credit, desired_batch_size));
last_credit_decision = self->clock().now();
last_ack_time = self->now();
}
void inbound_path::emit_ack_batch(local_actor*, int32_t,
actor_clock::time_point now, timespan cycle) {
CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(cycle));
last_credit_decision = now;
next_credit_decision = now + cycle;
caf::emit_ack_batch(*this, controller_->compute(cycle), force_ack);
}
bool inbound_path::up_to_date() {
return last_acked_batch_id == last_batch_id;
void inbound_path::emit_ack_batch(local_actor* self, int32_t new_credit) {
CAF_LOG_TRACE(CAF_ARG(new_credit));
CAF_ASSERT(desired_batch_size > 0);
if (last_acked_batch_id == last_batch_id && new_credit == 0)
return;
unsafe_send_as(self, hdl,
make<upstream_msg::ack_batch>(slots.invert(), self->address(),
new_credit, desired_batch_size,
last_batch_id));
last_acked_batch_id = last_batch_id;
assigned_credit += new_credit;
last_ack_time = self->now();
}
void inbound_path::emit_regular_shutdown(local_actor* self) {
......@@ -180,8 +190,4 @@ void inbound_path::emit_irregular_shutdown(local_actor* self,
std::move(reason)));
}
scheduled_actor* inbound_path::self() {
return mgr->self();
}
} // namespace caf
......@@ -94,6 +94,10 @@ void local_actor::setup_metrics() {
metrics_ = make_instance_metrics(this);
}
auto local_actor::now() const noexcept -> clock_type::time_point {
return clock().now();
}
void local_actor::request_response_timeout(timespan timeout, message_id mid) {
CAF_LOG_TRACE(CAF_ARG(timeout) << CAF_ARG(mid));
if (timeout == infinite)
......
......@@ -21,6 +21,7 @@
#include "caf/actor_ostream.hpp"
#include "caf/actor_system_config.hpp"
#include "caf/config.hpp"
#include "caf/defaults.hpp"
#include "caf/detail/default_invoke_result_visitor.hpp"
#include "caf/detail/meta_object.hpp"
#include "caf/detail/private_thread.hpp"
......@@ -136,19 +137,8 @@ scheduled_actor::scheduled_actor(actor_config& cfg)
#endif // CAF_ENABLE_EXCEPTIONS
{
auto& sys_cfg = home_system().config();
auto interval = sys_cfg.stream_tick_duration();
CAF_ASSERT(interval.count() > 0);
stream_ticks_.interval(interval);
CAF_ASSERT(sys_cfg.stream_max_batch_delay.count() > 0);
auto div = [](timespan x, timespan y) {
return static_cast<size_t>(x.count() / y.count());
};
max_batch_delay_ticks_ = div(sys_cfg.stream_max_batch_delay, interval);
CAF_ASSERT(max_batch_delay_ticks_ > 0);
credit_round_ticks_ = div(sys_cfg.stream_credit_round_interval, interval);
CAF_ASSERT(credit_round_ticks_ > 0);
CAF_LOG_DEBUG(CAF_ARG(interval) << CAF_ARG(max_batch_delay_ticks_)
<< CAF_ARG(credit_round_ticks_));
max_batch_delay_ = get_or(sys_cfg, "caf.stream.max-batch-delay",
defaults::stream::max_batch_delay);
}
scheduled_actor::~scheduled_actor() {
......@@ -913,8 +903,6 @@ bool scheduled_actor::finalize() {
i = stream_managers_.erase(i);
else
++i;
if (stream_managers_.empty())
stream_ticks_.stop();
}
}
// An actor is considered alive as long as it has a behavior and didn't set
......@@ -1075,8 +1063,6 @@ stream_slot scheduled_actor::next_slot() {
void scheduled_actor::assign_slot(stream_slot x, stream_manager_ptr mgr) {
CAF_LOG_TRACE(CAF_ARG(x));
if (stream_managers_.empty())
stream_ticks_.start(clock().now());
CAF_ASSERT(stream_managers_.count(x) == 0);
stream_managers_.emplace(x, std::move(mgr));
}
......@@ -1106,15 +1092,12 @@ scheduled_actor::assign_next_pending_slot_to(stream_manager_ptr mgr) {
bool scheduled_actor::add_stream_manager(stream_slot id,
stream_manager_ptr mgr) {
CAF_LOG_TRACE(CAF_ARG(id));
if (stream_managers_.empty())
stream_ticks_.start(clock().now());
return stream_managers_.emplace(id, std::move(mgr)).second;
}
void scheduled_actor::erase_stream_manager(stream_slot id) {
CAF_LOG_TRACE(CAF_ARG(id));
if (stream_managers_.erase(id) != 0 && stream_managers_.empty())
stream_ticks_.stop();
stream_managers_.erase(id);
CAF_LOG_DEBUG(CAF_ARG2("stream_managers_.size", stream_managers_.size()));
}
......@@ -1133,8 +1116,6 @@ void scheduled_actor::erase_stream_manager(const stream_manager_ptr& mgr) {
i = stream_managers_.erase(i);
else
++i;
if (stream_managers_.empty())
stream_ticks_.stop();
}
{ // Lifetime scope of second iterator pair.
auto i = pending_stream_managers_.begin();
......@@ -1193,16 +1174,9 @@ scheduled_actor::handle_open_stream_msg(mailbox_element& x) {
actor_clock::time_point
scheduled_actor::advance_streams(actor_clock::time_point now) {
CAF_LOG_TRACE("");
if (!stream_ticks_.started()) {
CAF_LOG_DEBUG("tick emitter not started yet");
CAF_LOG_TRACE(CAF_ARG(now));
if (stream_managers_.empty())
return actor_clock::time_point::max();
}
/// Advance time for driving forced batches and credit.
auto bitmask = stream_ticks_.timeouts(now, {max_batch_delay_ticks_,
credit_round_ticks_});
// Force batches on all output paths.
if ((bitmask & 0x01) != 0 && !stream_managers_.empty()) {
std::vector<stream_manager*> managers;
managers.reserve(stream_managers_.size());
for (auto& kvp : stream_managers_)
......@@ -1210,24 +1184,11 @@ scheduled_actor::advance_streams(actor_clock::time_point now) {
std::sort(managers.begin(), managers.end());
auto e = std::unique(managers.begin(), managers.end());
for (auto i = managers.begin(); i != e; ++i)
(*i)->out().force_emit_batches();
}
// Fill up credit on each input path.
if ((bitmask & 0x02) != 0) {
CAF_LOG_DEBUG("new credit round");
auto cycle = stream_ticks_.interval();
cycle *= static_cast<decltype(cycle)::rep>(credit_round_ticks_);
auto& qs = get_downstream_queue().queues();
for (auto& kvp : qs) {
auto inptr = kvp.second.policy().handler.get();
if (inptr != nullptr) {
auto tts = static_cast<int32_t>(kvp.second.total_task_size());
inptr->emit_ack_batch(this, tts, now, cycle);
}
}
}
return stream_ticks_.next_timeout(now, {max_batch_delay_ticks_,
credit_round_ticks_});
(*i)->tick(now);
auto idle = [](const stream_manager* mgr) { return mgr->idle(); };
if (std::all_of(managers.begin(), e, idle))
return actor_clock::time_point::max();
return now + max_batch_delay_;
}
} // namespace caf
......@@ -23,6 +23,7 @@
#include "caf/actor_control_block.hpp"
#include "caf/actor_system.hpp"
#include "caf/actor_system_config.hpp"
#include "caf/defaults.hpp"
#include "caf/error.hpp"
#include "caf/expected.hpp"
#include "caf/inbound_path.hpp"
......@@ -37,7 +38,9 @@ namespace caf {
stream_manager::stream_manager(scheduled_actor* selfptr, stream_priority prio)
: self_(selfptr), pending_handshakes_(0), priority_(prio), flags_(0) {
// nop
auto& cfg = selfptr->config();
max_batch_delay_ = get_or(cfg, "caf.stream.max-batch-delay",
defaults::stream::max_batch_delay);
}
stream_manager::~stream_manager() {
......@@ -147,28 +150,6 @@ void stream_manager::shutdown() {
ipath->emit_regular_shutdown(self_);
}
void stream_manager::advance() {
CAF_LOG_TRACE("");
// Try to emit more credit.
if (!inbound_paths_.empty()) {
auto now = self_->clock().now();
auto& cfg = self_->system().config();
auto interval = cfg.stream_credit_round_interval;
auto& qs = self_->get_downstream_queue().queues();
// Iterate all queues for inbound traffic.
for (auto& kvp : qs) {
auto inptr = kvp.second.policy().handler.get();
// Ignore inbound paths of other managers.
if (inptr->mgr.get() == this) {
auto tts = static_cast<int32_t>(kvp.second.total_task_size());
inptr->emit_ack_batch(self_, tts, now, interval);
}
}
}
// Try to generate more batches.
push();
}
void stream_manager::push() {
CAF_LOG_TRACE("");
do {
......@@ -316,6 +297,12 @@ stream_manager::add_unchecked_inbound_path_impl(type_id_t input_type) {
return slot;
}
void stream_manager::tick(time_point now) {
for (auto path : inbound_paths_)
path->tick(now, max_batch_delay_);
out().tick(now, max_batch_delay_);
}
stream_slot stream_manager::assign_next_slot() {
return self_->assign_next_slot_to(this);
}
......
......@@ -150,7 +150,7 @@ using fixture = test_coordinator_fixture<>;
CAF_TEST_FIXTURE_SCOPE(local_streaming_tests, fixture)
CAF_TEST(depth_3_pipeline_with_fork) {
auto src = sys.spawn(file_reader, 50u);
auto src = sys.spawn(file_reader, 60u);
auto stg = sys.spawn(stream_multiplexer);
auto snk1 = sys.spawn(sum_up);
auto snk2 = sys.spawn(sum_up);
......@@ -168,14 +168,14 @@ CAF_TEST(depth_3_pipeline_with_fork) {
run();
CAF_CHECK_EQUAL(st.stage->out().num_paths(), 2u);
CAF_CHECK_EQUAL(st.stage->inbound_paths().size(), 0u);
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk1).state.x, 1275);
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk2).state.x, 1275);
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk1).state.x, sum(60));
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk2).state.x, sum(60));
self->send_exit(stg, exit_reason::kill);
}
CAF_TEST(depth_3_pipeline_with_join) {
auto src1 = sys.spawn(file_reader, 50u);
auto src2 = sys.spawn(file_reader, 50u);
auto src1 = sys.spawn(file_reader, 60u);
auto src2 = sys.spawn(file_reader, 60u);
auto stg = sys.spawn(stream_multiplexer);
auto snk = sys.spawn(sum_up);
auto& st = deref<stream_multiplexer_actor>(stg).state;
......@@ -192,46 +192,7 @@ CAF_TEST(depth_3_pipeline_with_join) {
run();
CAF_CHECK_EQUAL(st.stage->out().num_paths(), 1u);
CAF_CHECK_EQUAL(st.stage->inbound_paths().size(), 0u);
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.x, 2550);
self->send_exit(stg, exit_reason::kill);
}
CAF_TEST(closing_downstreams_before_end_of_stream) {
auto src = sys.spawn(file_reader, 10000u);
auto stg = sys.spawn(stream_multiplexer);
auto snk1 = sys.spawn(sum_up);
auto snk2 = sys.spawn(sum_up);
auto& st = deref<stream_multiplexer_actor>(stg).state;
CAF_MESSAGE("connect sinks to the stage (fork)");
self->send(snk1, join_atom_v, stg);
self->send(snk2, join_atom_v, stg);
consume_messages();
CAF_CHECK_EQUAL(st.stage->out().num_paths(), 2u);
CAF_MESSAGE("connect source to the stage (fork)");
self->send(stg * src, "numbers.txt");
consume_messages();
CAF_CHECK_EQUAL(st.stage->out().num_paths(), 2u);
CAF_CHECK_EQUAL(st.stage->inbound_paths().size(), 1u);
CAF_MESSAGE("do a single round of credit");
trigger_timeouts();
consume_messages();
CAF_MESSAGE("make sure the stream isn't done yet");
CAF_REQUIRE(!deref<file_reader_actor>(src).state.buf.empty());
CAF_CHECK_EQUAL(st.stage->out().num_paths(), 2u);
CAF_CHECK_EQUAL(st.stage->inbound_paths().size(), 1u);
CAF_MESSAGE("get the next not-yet-buffered integer");
auto next_pending = deref<file_reader_actor>(src).state.buf.front();
CAF_REQUIRE_GREATER(next_pending, 0);
auto sink1_result = sum(next_pending - 1);
CAF_MESSAGE("gracefully close sink 1, next pending: " << next_pending);
self->send(stg, close_atom_v, 0);
expect((close_atom, int32_t), from(self).to(stg));
CAF_MESSAGE("ship remaining elements");
run();
CAF_CHECK_EQUAL(st.stage->out().num_paths(), 1u);
CAF_CHECK_EQUAL(st.stage->inbound_paths().size(), 0u);
CAF_CHECK_LESS(deref<sum_up_actor>(snk1).state.x, sink1_result);
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk2).state.x, sum(10000));
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.x, sum(60) * 2);
self->send_exit(stg, exit_reason::kill);
}
......
......@@ -45,7 +45,6 @@
#include "caf/detail/stream_sink_impl.hpp"
#include "caf/detail/stream_source_impl.hpp"
#include "caf/detail/stream_stage_impl.hpp"
#include "caf/detail/tick_emitter.hpp"
#include "caf/downstream_manager.hpp"
#include "caf/downstream_msg.hpp"
#include "caf/inbound_path.hpp"
......@@ -149,9 +148,6 @@ public:
/// Defines the container for storing message handlers.
using behavior_type = behavior;
/// The type of a single tick.
using clock_type = detail::tick_emitter::clock_type;
/// The type of a single tick.
using time_point = clock_type::time_point;
......@@ -163,21 +159,12 @@ public:
// -- constructors, destructors, and assignment operators --------------------
entity(actor_config& cfg, const char* cstr_name, time_point* global_time,
duration_type credit_interval, duration_type force_batches_interval)
entity(actor_config& cfg, const char* cstr_name, time_point* global_time)
: super(cfg),
mbox(unit, unit, unit, unit, unit),
name_(cstr_name),
global_time_(global_time),
tick_emitter_(global_time == nullptr ? clock_type::now()
: *global_time) {
auto cycle = detail::gcd(credit_interval.count(),
force_batches_interval.count());
ticks_per_force_batches_interval =
static_cast<size_t>(force_batches_interval.count() / cycle);
ticks_per_credit_interval =
static_cast<size_t>(credit_interval.count() / cycle);
tick_emitter_.interval(duration_type{cycle});
global_time_(global_time) {
CAF_ASSERT(global_time_ != nullptr);
}
void enqueue(mailbox_element_ptr what, execution_unit*) override {
......@@ -256,13 +243,13 @@ public:
public:
using super = stream_stage_driver<int32_t, downstream_manager>;
driver(downstream_manager& out, vector<int32_t>* log)
: super(out),
log_(log) {
driver(downstream_manager& out, vector<int32_t>* log, const char* name)
: super(out), log_(log), name(name) {
// nop
}
void process(downstream<int>& out, vector<int>& batch) override {
CAF_MESSAGE(name << " forwards " << batch.size() << " elements");
log_->insert(log_->end(), batch.begin(), batch.end());
out.append(batch.begin(), batch.end());
}
......@@ -273,8 +260,9 @@ public:
private:
vector<int>* log_;
const char* name;
};
forwarder = detail::make_stream_stage<driver>(this, &data);
forwarder = detail::make_stream_stage<driver>(this, &data, name_);
auto res = forwarder->add_outbound_path(ref.ctrl());
CAF_MESSAGE(name_ << " starts forwarding to " << ref.name()
<< " on slot " << res.value());
......@@ -321,25 +309,9 @@ public:
scheduled_actor::handle_upstream_msg(slots, sender, x);
}
void advance_time() {
auto cycle = std::chrono::milliseconds(100);
auto f = [&](tick_type x) {
if (x % ticks_per_force_batches_interval == 0) {
// Force batches on all output paths.
void tick() {
for (auto& kvp : stream_managers())
kvp.second->out().force_emit_batches();
}
if (x % ticks_per_credit_interval == 0) {
// Fill credit on each input path up to 30.
auto& qs = get<dmsg_id::value>(mbox.queues()).queues();
for (auto& kvp : qs) {
auto inptr = kvp.second.policy().handler.get();
auto tts = static_cast<int32_t>(kvp.second.total_task_size());
inptr->emit_ack_batch(this, tts, now(), cycle);
}
}
};
tick_emitter_.update(now(), f);
kvp.second->tick(now());
}
inbound_path* make_inbound_path(stream_manager_ptr mgr, stream_slots slots,
......@@ -375,7 +347,7 @@ public:
}
time_point now() {
return global_time_ == nullptr ? clock_type::now() : *global_time_;
return *global_time_;
}
// -- member variables -------------------------------------------------------
......@@ -388,7 +360,6 @@ public:
tick_type ticks_per_force_batches_interval;
tick_type ticks_per_credit_interval;
time_point* global_time_;
detail::tick_emitter tick_emitter_;
};
struct msg_visitor {
......@@ -447,6 +418,7 @@ struct msg_visitor {
auto& dm = x.content().get_mutable_as<downstream_msg>(0);
auto f = detail::make_overload(
[&](downstream_msg::batch& y) {
TRACE(self->name(), batch, CAF_ARG(dm.slots), CAF_ARG(y.xs_size));
inptr->handle(y);
if (inptr->mgr->done()) {
CAF_MESSAGE(self->name()
......@@ -492,15 +464,7 @@ struct msg_visitor {
struct fixture {
using scheduler_type = scheduler::test_coordinator;
struct timing_config {
timespan credit_interval = std::chrono::milliseconds(100);
timespan force_batches_interval = std::chrono::milliseconds(50);
timespan step = force_batches_interval;
};
timing_config tc;
timespan max_batch_delay = defaults::stream::max_batch_delay;
actor_system_config cfg;
actor_system sys;
......@@ -513,13 +477,11 @@ struct fixture {
entity& bob;
entity& carl;
static actor spawn(actor_system& sys, actor_id id, const char* name,
timing_config& tc) {
static actor spawn(actor_system& sys, actor_id id, const char* name) {
actor_config conf;
auto& clock = dynamic_cast<scheduler_type&>(sys.scheduler()).clock();
auto global_time = &clock.current_time;
return make_actor<entity>(id, node_id{}, &sys, conf, name, global_time,
tc.credit_interval, tc.force_batches_interval);
return make_actor<entity>(id, node_id{}, &sys, conf, name, global_time);
}
static entity& fetch(const actor& hdl) {
......@@ -531,15 +493,18 @@ struct fixture {
caf::test::engine::argv()))
CAF_FAIL("parsing the config failed: " << to_string(err));
cfg.set("caf.scheduler.policy", "testing");
cfg.set("caf.stream.credit-policy", "token-based");
cfg.set("caf.stream.token-based-policy.batch-size", 50);
cfg.set("caf.stream.token-based-policy.buffer-size", 200);
return cfg;
}
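// Note (editorial): with the token-based policy, batch-size 50 and
// buffer-size 200 presumably allow each path to keep up to 200 elements,
// i.e. roughly four 50-element batches, in flight at a time.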
fixture()
: sys(init_config(cfg)),
sched(dynamic_cast<scheduler_type&>(sys.scheduler())),
alice_hdl(spawn(sys, 0, "alice", tc)),
bob_hdl(spawn(sys, 1, "bob", tc)),
carl_hdl(spawn(sys, 2, "carl", tc)),
alice_hdl(spawn(sys, 0, "alice")),
bob_hdl(spawn(sys, 1, "bob")),
carl_hdl(spawn(sys, 2, "carl")),
alice(fetch(alice_hdl)),
bob(fetch(bob_hdl)),
carl(fetch(carl_hdl)) {
......@@ -568,10 +533,10 @@ struct fixture {
template <class... Ts>
void next_cycle(Ts&... xs) {
entity* es[] = {&xs...};
CAF_MESSAGE("advance clock by " << tc.credit_interval.count() << "ns");
sched.clock().current_time += tc.credit_interval;
CAF_MESSAGE("advance clock by " << max_batch_delay);
sched.clock().current_time += max_batch_delay;
for (auto e : es)
e->advance_time();
e->tick();
}
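// Note (editorial): a "cycle" now spans one max_batch_delay period of the
// virtual clock instead of the removed credit interval, so each call to
// next_cycle() advances time by exactly that amount before ticking the
// given entities.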
template <class F, class... Ts>
......@@ -583,10 +548,10 @@ struct fixture {
while (!std::all_of(std::begin(fs), std::end(fs), mailbox_empty))
for (auto& f : fs)
f.self->mbox.new_round(1, f);
CAF_MESSAGE("advance clock by " << tc.step.count() << "ns");
sched.clock().current_time += tc.step;
CAF_MESSAGE("advance clock by " << max_batch_delay);
sched.clock().current_time += max_batch_delay;
for (auto e : es)
e->advance_time();
e->tick();
}
while (!pred());
}
......@@ -614,16 +579,12 @@ CAF_TEST_FIXTURE_SCOPE(native_streaming_classes_tests, fixture)
CAF_TEST(depth_2_pipeline_30_items) {
alice.start_streaming(bob, 30);
loop(alice, bob);
next_cycle(alice, bob); // emit first ack_batch
loop(alice, bob);
next_cycle(alice, bob); // to emit final ack_batch
loop(alice, bob);
loop_until([&] { return done_streaming(); }, alice, bob);
CAF_CHECK_EQUAL(bob.data, make_iota(0, 30));
}
CAF_TEST(depth_2_pipeline_2000_items) {
constexpr size_t num_messages = 2000;
CAF_TEST(depth_2_pipeline_500_items) {
constexpr size_t num_messages = 500;
alice.start_streaming(bob, num_messages);
loop_until([&] { return done_streaming(); }, alice, bob);
CAF_CHECK_EQUAL(bob.data, make_iota(0, num_messages));
......@@ -632,19 +593,13 @@ CAF_TEST(depth_2_pipeline_2000_items) {
CAF_TEST(depth_3_pipeline_30_items) {
bob.forward_to(carl);
alice.start_streaming(bob, 30);
loop(alice, bob, carl);
next_cycle(alice, bob, carl); // emit first ack_batch
loop(alice, bob, carl);
next_cycle(alice, bob, carl);
loop(alice, bob, carl);
next_cycle(alice, bob, carl); // emit final ack_batch
loop(alice, bob, carl);
loop_until([&] { return done_streaming(); }, alice, bob, carl);
CAF_CHECK_EQUAL(bob.data, make_iota(0, 30));
CAF_CHECK_EQUAL(carl.data, make_iota(0, 30));
}
CAF_TEST(depth_3_pipeline_2000_items) {
constexpr size_t num_messages = 2000;
CAF_TEST(depth_3_pipeline_500_items) {
constexpr size_t num_messages = 500;
bob.forward_to(carl);
alice.start_streaming(bob, num_messages);
CAF_MESSAGE("loop over alice and bob until bob is congested");
......
......@@ -40,33 +40,8 @@ TESTEE_SETUP();
using buf = std::deque<int>;
std::function<void(buf&)> init(size_t buf_size) {
return [=](buf& xs) {
xs.resize(buf_size);
std::iota(xs.begin(), xs.end(), 1);
};
}
void push_from_buf(buf& xs, downstream<int>& out, size_t num) {
CAF_MESSAGE("push " << num << " messages downstream");
auto n = std::min(num, xs.size());
for (size_t i = 0; i < n; ++i)
out.push(xs[i]);
xs.erase(xs.begin(), xs.begin() + static_cast<ptrdiff_t>(n));
}
std::function<bool(const buf&)> is_done(scheduled_actor* self) {
return [=](const buf& xs) {
if (xs.empty()) {
CAF_MESSAGE(self->name() << " exhausted its buffer");
return true;
}
return false;
};
}
template <class T, class Self>
std::function<void(T&, const error&)> fin(Self* self) {
auto fin(Self* self) {
return [=](T&, const error& err) {
self->state.fin_called += 1;
if (err == none) {
......@@ -102,18 +77,38 @@ TESTEE_STATE(file_reader) {
};
VARARGS_TESTEE(file_reader, size_t buf_size) {
auto init = [](size_t buf_size) {
return [=](buf& xs) {
xs.resize(buf_size);
std::iota(xs.begin(), xs.end(), 1);
};
};
auto push_from_buf = [](buf& xs, downstream<int>& out, size_t num) {
CAF_MESSAGE("push " << num << " messages downstream");
auto n = std::min(num, xs.size());
for (size_t i = 0; i < n; ++i)
out.push(xs[i]);
xs.erase(xs.begin(), xs.begin() + static_cast<ptrdiff_t>(n));
};
auto is_done = [self](const buf& xs) {
if (xs.empty()) {
CAF_MESSAGE(self->name() << " exhausted its buffer");
return true;
}
return false;
};
return {
[=](string& fname) -> result<stream<int>> {
CAF_CHECK_EQUAL(fname, "numbers.txt");
CAF_CHECK_EQUAL(self->mailbox().empty(), true);
return attach_stream_source(self, init(buf_size), push_from_buf,
is_done(self), fin<buf>(self));
return attach_stream_source(self, init(buf_size), push_from_buf, is_done,
fin<buf>(self));
},
[=](string& fname, actor next) {
CAF_CHECK_EQUAL(fname, "numbers.txt");
CAF_CHECK_EQUAL(self->mailbox().empty(), true);
attach_stream_source(self, next, init(buf_size), push_from_buf,
is_done(self), fin<buf>(self));
attach_stream_source(self, next, init(buf_size), push_from_buf, is_done,
fin<buf>(self));
},
};
}
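// Rough usage sketch (hypothetical, mirroring the handlers above): a test
// would spawn the source with its buffer size and kick it off either
// stand-alone or with an explicit next stage, e.g.
//
//   auto src = sys.spawn(file_reader, size_t{50});
//   self->send(src, "numbers.txt");       // triggers the stream<int> handler
//   self->send(src, "numbers.txt", snk);  // source attaches snk as next stage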
......@@ -259,11 +254,7 @@ CAF_TEST(depth_2_pipeline_50_items) {
expect((upstream_msg::ack_open), from(snk).to(src));
CAF_MESSAGE("start data transmission (a single batch)");
expect((downstream_msg::batch), from(src).to(snk));
tick();
expect((timeout_msg), from(snk).to(snk));
expect((timeout_msg), from(src).to(src));
expect((upstream_msg::ack_batch), from(snk).to(src));
CAF_MESSAGE("expect close message from src and then result from snk");
expect((downstream_msg::close), from(src).to(snk));
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.x, 1275);
CAF_MESSAGE("verify that each actor called its finalizer once");
......@@ -278,15 +269,11 @@ CAF_TEST(depth_2_pipeline_setup2_50_items) {
CAF_MESSAGE("initiate stream handshake");
self->send(src, "numbers.txt", snk);
expect((string, actor), from(self).to(src).with("numbers.txt", snk));
expect((open_stream_msg), from(strong_actor_ptr{nullptr}).to(snk));
expect((open_stream_msg), to(snk));
expect((upstream_msg::ack_open), from(snk).to(src));
CAF_MESSAGE("start data transmission (a single batch)");
expect((downstream_msg::batch), from(src).to(snk));
tick();
expect((timeout_msg), from(snk).to(snk));
expect((timeout_msg), from(src).to(src));
expect((upstream_msg::ack_batch), from(snk).to(src));
CAF_MESSAGE("expect close message from src and then result from snk");
expect((downstream_msg::close), from(src).to(snk));
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.x, 1275);
CAF_MESSAGE("verify that each actor called its finalizer once");
......@@ -311,9 +298,6 @@ CAF_TEST(delayed_depth_2_pipeline_50_items) {
expect((upstream_msg::ack_open), from(snk).to(src));
CAF_MESSAGE("start data transmission (a single batch)");
expect((downstream_msg::batch), from(src).to(snk));
tick();
expect((timeout_msg), from(snk).to(snk));
expect((timeout_msg), from(src).to(src));
expect((upstream_msg::ack_batch), from(snk).to(src));
CAF_MESSAGE("expect close message from src and then result from snk");
expect((downstream_msg::close), from(src).to(snk));
......@@ -381,6 +365,9 @@ CAF_TEST(depth_2_pipeline_error_at_source) {
CAF_MESSAGE("start data transmission (and abort source)");
hard_kill(src);
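// Note (editorial): with the token-based policy configured above
// (batch-size 50, buffer-size 200), the source has presumably already
// shipped four 50-element batches before the kill takes effect, hence the
// four batch expectations below.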
expect((downstream_msg::batch), from(src).to(snk));
expect((downstream_msg::batch), from(src).to(snk));
expect((downstream_msg::batch), from(src).to(snk));
expect((downstream_msg::batch), from(src).to(snk));
expect((downstream_msg::forced_close), from(_).to(snk));
CAF_MESSAGE("verify that the sink called its finalizer once");
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.fin_called, 1);
......
......@@ -684,7 +684,9 @@ public:
cfg.set("caf.middleman.manual-multiplexing", true);
cfg.set("caf.middleman.workers", size_t{0});
}
cfg.set("caf.stream.credit-policy", "testing");
cfg.set("caf.stream.credit-policy", "token-based");
cfg.set("caf.stream.token-based-policy.batch-size", 50);
cfg.set("caf.stream.token-based-policy.buffer-size", 200);
return cfg;
}
......