Commit b23bf288 authored by Dominik Charousset's avatar Dominik Charousset

Allow actors to consume all upstream msgs at once

parent 8ec58ee2
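The first hunk below adds a quantum policy specialization that returns the queue's total task size, so a single deficit-round-robin (DRR) round drains the entire upstream queue. A minimal standalone sketch of that idea follows; drr_queue and consume_all_policy are made-up names for illustration only, not CAF's actual queue implementation.

// Illustrative sketch (not CAF code): a DRR queue whose policy can grant
// "consume everything" by returning the queue's total task size as quantum.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>

struct consume_all_policy {
  using deficit_type = int64_t;
  // Every task costs one unit here; a real policy could weigh tasks.
  static deficit_type task_size(int) noexcept { return 1; }
  // Mirrors the patch: grant enough deficit to drain the whole queue in a
  // single round instead of using the default quantum.
  template <class Queue>
  static deficit_type quantum(const Queue& q, deficit_type) noexcept {
    return q.total_task_size();
  }
};

template <class Policy>
class drr_queue {
public:
  using deficit_type = typename Policy::deficit_type;
  void push(int x) { items_.push_back(x); }
  deficit_type total_task_size() const {
    deficit_type sum = 0;
    for (auto& x : items_)
      sum += Policy::task_size(x);
    return sum;
  }
  // One DRR round: add the granted quantum to the deficit, then consume
  // items as long as the deficit covers their cost.
  template <class F>
  size_t new_round(deficit_type default_quantum, F consumer) {
    deficit_ += Policy::quantum(*this, default_quantum);
    size_t consumed = 0;
    while (!items_.empty() && deficit_ >= Policy::task_size(items_.front())) {
      deficit_ -= Policy::task_size(items_.front());
      consumer(items_.front());
      items_.pop_front();
      ++consumed;
    }
    return consumed;
  }

private:
  std::deque<int> items_;
  deficit_type deficit_ = 0;
};

int main() {
  drr_queue<consume_all_policy> q;
  for (int i = 0; i < 10; ++i)
    q.push(i);
  // Even with a tiny default quantum, the consume-all policy drains the queue.
  auto n = q.new_round(1, [](int x) { std::cout << x << ' '; });
  std::cout << "\nconsumed " << n << " items\n";
}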
...@@ -67,6 +67,14 @@ public:
return x + x;
}
template <template <class> class Queue>
static deficit_type
quantum(const Queue<upstream_messages>& q, deficit_type) noexcept {
// Allow actors to consume *all* upstream messages. They are lightweight by
// design and require little processing.
return q.total_task_size();
}
template <class Queue>
static deficit_type quantum(const Queue&, deficit_type x) noexcept {
return x;
...
...@@ -200,52 +200,6 @@ public:
using exception_handler = std::function<error(pointer, std::exception_ptr&)>;
#endif // CAF_ENABLE_EXCEPTIONS
/// Consumes messages from the mailbox.
struct mailbox_visitor {
scheduled_actor* self;
size_t& handled_msgs;
size_t max_throughput;
bool collect_metrics;
/// Consumes upstream messages.
intrusive::task_result operator()(size_t, upstream_queue&,
mailbox_element&);
/// Consumes downstream messages.
intrusive::task_result
operator()(size_t, downstream_queue&, stream_slot slot,
policy::downstream_messages::nested_queue_type&,
mailbox_element&);
// Dispatches asynchronous messages with high and normal priority to the
// same handler.
template <class Queue>
intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) {
return (*this)(x);
}
// Consumes asynchronous messages.
intrusive::task_result operator()(mailbox_element& x);
template <class F>
intrusive::task_result run(mailbox_element& x, F body) {
if (collect_metrics) {
auto t0 = std::chrono::steady_clock::now();
auto mbox_time = x.seconds_until(t0);
auto res = body();
if (res != intrusive::task_result::skip) {
auto& builtins = self->builtin_metrics();
telemetry::timer::observe(builtins.processing_time, t0);
builtins.mailbox_time->observe(mbox_time);
builtins.mailbox_size->dec();
}
return res;
} else {
return body();
}
}
};
// -- static helper functions ------------------------------------------------
static void default_error_handler(pointer ptr, error& x);
...@@ -517,6 +471,9 @@ public:
/// Pushes `ptr` to the cache of the default queue.
void push_to_cache(mailbox_element_ptr ptr);
/// Returns the queue of the mailbox that stores high priority messages.
urgent_queue& get_urgent_queue();
/// Returns the default queue of the mailbox that stores ordinary messages.
normal_queue& get_normal_queue();
...@@ -526,9 +483,6 @@ public:
/// Returns the queue of the mailbox that stores `downstream_msg` messages.
downstream_queue& get_downstream_queue();
/// Returns the queue of the mailbox that stores high priority messages.
urgent_queue& get_urgent_queue();
// -- inbound_path management ------------------------------------------------
/// Creates a new path for incoming stream traffic from `sender`.
...@@ -569,8 +523,7 @@ public:
return;
}
CAF_LOG_INFO("no manager found:" << CAF_ARG(slots));
// TODO: replace with `if constexpr` when switching to C++17
if (std::is_same<T, upstream_msg::ack_batch>::value) {
if constexpr (std::is_same<T, upstream_msg::ack_batch>::value) {
// Make sure the other actor does not falsely believe us a source.
inbound_path::emit_irregular_shutdown(this, slots, current_sender(),
sec::invalid_upstream);
...@@ -657,6 +610,12 @@ public:
/// Removes the stream manager mapped to `id` in `O(log n)`.
void erase_pending_stream_manager(stream_slot id);
/// Moves a pending stream manager to the list of active stream managers.
/// @returns `true` and a pointer to the moved stream manager on success,
/// `false` and `nullptr` otherwise.
[[nodiscard]] std::pair<bool, stream_manager*>
ack_pending_stream_manager(stream_slot id);
/// Removes all entries for `mgr` in `O(n)`.
void erase_stream_manager(const stream_manager_ptr& mgr);
...@@ -683,6 +642,10 @@ public:
return max_batch_delay_;
}
/// Collects all active stream managers in `result`, dropping duplicates.
void active_stream_managers(std::vector<stream_manager*>& result);

/// Returns all active stream managers, dropping duplicates.
std::vector<stream_manager*> active_stream_managers();
/// @endcond
protected:
...@@ -746,7 +709,23 @@ protected:
exception_handler exception_handler_;
#endif // CAF_ENABLE_EXCEPTIONS
/// @endcond
private:
template <class F>
intrusive::task_result run_with_metrics(mailbox_element& x, F body) {
if (metrics_.mailbox_time) {
auto t0 = std::chrono::steady_clock::now();
auto mbox_time = x.seconds_until(t0);
auto res = body();
if (res != intrusive::task_result::skip) {
telemetry::timer::observe(metrics_.processing_time, t0);
metrics_.mailbox_time->observe(mbox_time);
metrics_.mailbox_size->dec();
}
return res;
} else {
return body();
}
}
};
} // namespace caf
...@@ -113,8 +113,6 @@ void inbound_path::handle(downstream_msg::batch& batch) {
if (auto available = available_credit(); available >= desired_batch_size)
if (auto acquired = mgr->acquire_credit(this, available); acquired > 0)
emit_ack_batch(self(), acquired);
// FIXME: move this up to the actor
mgr->push();
}
void inbound_path::tick(time_point now, duration_type max_batch_delay) {
...
...@@ -278,129 +278,17 @@ void scheduled_actor::intrusive_ptr_release_impl() {
intrusive_ptr_release(ctrl());
}
namespace {
// TODO: replace with generic lambda when switching to C++14
struct upstream_msg_visitor {
scheduled_actor* selfptr;
upstream_msg& um;
template <class T>
void operator()(T& x) {
selfptr->handle_upstream_msg(um.slots, um.sender, x);
}
};
} // namespace
intrusive::task_result
scheduled_actor::mailbox_visitor::operator()(size_t, upstream_queue&,
mailbox_element& x) {
CAF_ASSERT(x.content().match_elements<upstream_msg>());
return run(x, [&] {
self->current_mailbox_element(&x);
CAF_LOG_RECEIVE_EVENT((&x));
CAF_BEFORE_PROCESSING(self, x);
auto& um = x.content().get_mutable_as<upstream_msg>(0);
auto f = [&](auto& content) {
self->handle_upstream_msg(um.slots, um.sender, content);
};
visit(f, um.content);
CAF_AFTER_PROCESSING(self, invoke_message_result::consumed);
return ++handled_msgs < max_throughput ? intrusive::task_result::resume
: intrusive::task_result::stop_all;
});
}
intrusive::task_result scheduled_actor::mailbox_visitor::operator()(
size_t, downstream_queue& qs, stream_slot,
policy::downstream_messages::nested_queue_type& q, mailbox_element& x) {
CAF_LOG_TRACE(CAF_ARG(x) << CAF_ARG(handled_msgs));
return run(x, [&, this] {
self->current_mailbox_element(&x);
CAF_LOG_RECEIVE_EVENT((&x));
CAF_BEFORE_PROCESSING(self, x);
CAF_ASSERT(x.content().match_elements<downstream_msg>());
auto& dm = x.content().get_mutable_as<downstream_msg>(0);
auto f = [&, this](auto& content) {
using content_type = std::decay_t<decltype(content)>;
auto& inptr = q.policy().handler;
if (inptr == nullptr)
return intrusive::task_result::stop;
if (auto processed_elements = inptr->metrics.processed_elements) {
auto num_elements = q.policy().task_size(content);
auto input_buffer_size = inptr->metrics.input_buffer_size;
CAF_ASSERT(input_buffer_size != nullptr);
processed_elements->inc(num_elements);
input_buffer_size->dec(num_elements);
}
// Hold onto a strong reference since we might reset `inptr`.
auto mgr = stream_manager_ptr{inptr->mgr};
inptr->handle(content);
// The sender slot can be 0. This is the case for forced_close or
// forced_drop messages from stream aborters.
CAF_ASSERT(inptr->slots == dm.slots
|| (dm.slots.sender == 0
&& dm.slots.receiver == inptr->slots.receiver));
if constexpr (std::is_same<content_type, downstream_msg::close>::value
|| std::is_same<content_type,
downstream_msg::forced_close>::value) {
if (auto input_buffer_size = inptr->metrics.input_buffer_size)
input_buffer_size->dec(q.total_task_size());
inptr.reset();
qs.erase_later(dm.slots.receiver);
self->erase_stream_manager(dm.slots.receiver);
if (mgr->done()) {
CAF_LOG_DEBUG("path is done receiving and closes its manager");
self->erase_stream_manager(mgr);
mgr->stop();
}
return intrusive::task_result::stop;
} else if (mgr->done()) {
CAF_LOG_DEBUG("path is done receiving and closes its manager");
self->erase_stream_manager(mgr);
mgr->stop();
return intrusive::task_result::stop;
}
return intrusive::task_result::resume;
};
auto res = visit(f, dm.content);
CAF_AFTER_PROCESSING(self, invoke_message_result::consumed);
return ++handled_msgs < max_throughput ? res
: intrusive::task_result::stop_all;
});
}
intrusive::task_result
scheduled_actor::mailbox_visitor::operator()(mailbox_element& x) {
CAF_LOG_TRACE(CAF_ARG(x) << CAF_ARG(handled_msgs));
return run(x, [&, this] {
switch (self->reactivate(x)) {
case activation_result::terminated:
return intrusive::task_result::stop;
case activation_result::success:
return ++handled_msgs < max_throughput
? intrusive::task_result::resume
: intrusive::task_result::stop_all;
case activation_result::skipped:
return intrusive::task_result::skip;
default:
return intrusive::task_result::resume;
}
});
}
resumable::resume_result scheduled_actor::resume(execution_unit* ctx,
size_t max_throughput) {
CAF_PUSH_AID(id());
CAF_LOG_TRACE(CAF_ARG(max_throughput));
if (!activate(ctx))
return resumable::done;
size_t handled_msgs = 0;
size_t consumed = 0;
actor_clock::time_point tout{actor_clock::duration_type{0}};
auto reset_timeouts_if_needed = [&] {
// Set a new receive timeout if we called our behavior at least once.
if (handled_msgs > 0)
if (consumed > 0)
set_receive_timeout();
// Set a new stream timeout.
if (!stream_managers_.empty()) {
...@@ -410,30 +298,153 @@ resumable::resume_result scheduled_actor::resume(execution_unit* ctx,
set_stream_timeout(tout);
}
};
mailbox_visitor f{this, handled_msgs, max_throughput,
getf(abstract_actor::collects_metrics_flag)};
// Callback for handling urgent and normal messages.
auto handle_async = [this, max_throughput, &consumed](mailbox_element& x) {
return run_with_metrics(x, [this, max_throughput, &consumed, &x] {
switch (reactivate(x)) {
case activation_result::terminated:
return intrusive::task_result::stop;
case activation_result::success:
return ++consumed < max_throughput ? intrusive::task_result::resume
: intrusive::task_result::stop_all;
case activation_result::skipped:
return intrusive::task_result::skip;
default:
return intrusive::task_result::resume;
}
});
};
// Callback for handling upstream messages (e.g., ACKs).
auto handle_umsg = [this, max_throughput, &consumed](mailbox_element& x) {
return run_with_metrics(x, [this, max_throughput, &consumed, &x] {
current_mailbox_element(&x);
CAF_LOG_RECEIVE_EVENT((&x));
CAF_BEFORE_PROCESSING(this, x);
CAF_ASSERT(x.content().match_elements<upstream_msg>());
auto& um = x.content().get_mutable_as<upstream_msg>(0);
auto f = [&](auto& content) {
handle_upstream_msg(um.slots, um.sender, content);
};
visit(f, um.content);
CAF_AFTER_PROCESSING(this, invoke_message_result::consumed);
return ++consumed < max_throughput ? intrusive::task_result::resume
: intrusive::task_result::stop_all;
});
};
// Callback for handling downstream messages (e.g., batches).
auto handle_dmsg = [this, &consumed, max_throughput](stream_slot, auto& q,
mailbox_element& x) {
return run_with_metrics(x, [this, max_throughput, &consumed, &q, &x] {
current_mailbox_element(&x);
CAF_LOG_RECEIVE_EVENT((&x));
CAF_BEFORE_PROCESSING(this, x);
CAF_ASSERT(x.content().match_elements<downstream_msg>());
auto& dm = x.content().get_mutable_as<downstream_msg>(0);
auto f = [&, this](auto& content) {
using content_type = std::decay_t<decltype(content)>;
auto& inptr = q.policy().handler;
if (inptr == nullptr)
return intrusive::task_result::stop;
if (auto processed_elements = inptr->metrics.processed_elements) {
auto num_elements = q.policy().task_size(content);
auto input_buffer_size = inptr->metrics.input_buffer_size;
CAF_ASSERT(input_buffer_size != nullptr);
processed_elements->inc(num_elements);
input_buffer_size->dec(num_elements);
}
// Hold onto a strong reference since we might reset `inptr`.
auto mgr = stream_manager_ptr{inptr->mgr};
inptr->handle(content);
// The sender slot can be 0. This is the case for forced_close or
// forced_drop messages from stream aborters.
CAF_ASSERT(inptr->slots == dm.slots
|| (dm.slots.sender == 0
&& dm.slots.receiver == inptr->slots.receiver));
if constexpr (std::is_same<content_type, downstream_msg::close>::value
|| std::is_same<content_type,
downstream_msg::forced_close>::value) {
if (auto input_buffer_size = inptr->metrics.input_buffer_size)
input_buffer_size->dec(q.total_task_size());
inptr.reset();
get_downstream_queue().erase_later(dm.slots.receiver);
erase_stream_manager(dm.slots.receiver);
if (mgr->done()) {
CAF_LOG_DEBUG("path is done receiving and closes its manager");
erase_stream_manager(mgr);
mgr->stop();
}
return intrusive::task_result::stop;
} else if (mgr->done()) {
CAF_LOG_DEBUG("path is done receiving and closes its manager");
erase_stream_manager(mgr);
mgr->stop();
return intrusive::task_result::stop;
}
return intrusive::task_result::resume;
};
auto res = visit(f, dm.content);
CAF_AFTER_PROCESSING(this, invoke_message_result::consumed);
return ++consumed < max_throughput ? res
: intrusive::task_result::stop_all;
});
};
std::vector<stream_manager*> managers;
mailbox_element_ptr ptr;
// Timeout for calling `advance_streams`.
while (handled_msgs < max_throughput) {
while (consumed < max_throughput) {
CAF_LOG_DEBUG("start new DRR round");
mailbox_.fetch_more();
auto prev = consumed; // Caches the value before processing more.
// TODO: maybe replace '3' with configurable / adaptive value?
// Dispatch on the different message categories in our mailbox.
auto consumed = mailbox_.new_round(3, f).consumed_items;
if (consumed == 0) {
static constexpr size_t quantum = 3;
// Dispatch urgent and normal (asynchronous) messages.
get_urgent_queue().new_round(quantum * 3, handle_async);
get_normal_queue().new_round(quantum, handle_async);
// Consume all upstream messages. They are lightweight by design and ACKs
// come with new credit, allowing us to advance stream traffic.
if (auto tts = get_upstream_queue().total_task_size(); tts > 0) {
get_upstream_queue().new_round(tts, handle_umsg);
// After processing ACKs, we may have new credit that enables us to ship
// some batches from our output buffers. Draining the output buffers here
// may also re-enable inbound paths.
active_stream_managers(managers);
for (auto mgr : managers)
mgr->push();
}
// Note: a quantum of 1 means "1 batch" for this queue.
if (get_downstream_queue().new_round(quantum, handle_dmsg).consumed_items
> 0) {
do {
// Processing batches enables stages to push more data downstream. This
// in turn may allow the stage to process more batches again; hence the
// loop. By not giving additional quanta, we simply allow the stage to
// consume what it was allowed to in the first place.
active_stream_managers(managers);
for (auto mgr : managers)
mgr->push();
} while (
consumed < max_throughput
&& get_downstream_queue().new_round(0, handle_dmsg).consumed_items > 0);
}
// Update metrics or try returning if the actor consumed nothing.
auto delta = consumed - prev;
CAF_LOG_DEBUG("consumed" << delta << "messages this round");
if (delta > 0) {
auto signed_val = static_cast<int64_t>(delta);
home_system().base_metrics().processed_messages->inc(signed_val);
} else {
reset_timeouts_if_needed();
if (mailbox().try_block())
return resumable::awaiting_message;
} else {
auto signed_val = static_cast<int64_t>(consumed);
home_system().base_metrics().processed_messages->inc(signed_val);
CAF_LOG_DEBUG("mailbox().try_block() returned false");
}
// Check whether the visitor left the actor without behavior.
if (finalize()) {
CAF_LOG_DEBUG("allow stream managers to send batches");
active_stream_managers(managers);
for (auto mgr : managers)
mgr->push();
CAF_LOG_DEBUG("check for shutdown or advance streams");
if (finalize())
return resumable::done;
}
// Advance streams, i.e., try to generating credit or to emit batches.
auto now = clock().now();
if (now >= tout)
if (auto now = clock().now(); now >= tout)
tout = advance_streams(now);
}
CAF_LOG_DEBUG("max throughput reached"); CAF_LOG_DEBUG("max throughput reached");
...@@ -934,6 +945,10 @@ void scheduled_actor::push_to_cache(mailbox_element_ptr ptr) {
}
}
scheduled_actor::urgent_queue& scheduled_actor::get_urgent_queue() {
return get<urgent_queue_index>(mailbox_.queue().queues());
}
scheduled_actor::normal_queue& scheduled_actor::get_normal_queue() {
return get<normal_queue_index>(mailbox_.queue().queues());
}
...@@ -946,10 +961,6 @@ scheduled_actor::downstream_queue& scheduled_actor::get_downstream_queue() {
return get<downstream_queue_index>(mailbox_.queue().queues());
}
scheduled_actor::urgent_queue& scheduled_actor::get_urgent_queue() {
return get<urgent_queue_index>(mailbox_.queue().queues());
}
bool scheduled_actor::add_inbound_path(type_id_t,
std::unique_ptr<inbound_path> path) {
static constexpr size_t queue_index = downstream_queue_index;
...@@ -1020,20 +1031,12 @@ void scheduled_actor::handle_upstream_msg(stream_slots slots,
CAF_IGNORE_UNUSED(sender);
CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(sender) << CAF_ARG(x));
CAF_ASSERT(sender == x.rebind_to);
// Get the manager for that stream, move it from `pending_managers_` to
// `managers_`, and handle `x`.
auto i = pending_stream_managers_.find(slots.receiver);
if (i == pending_stream_managers_.end()) {
if (auto [moved, ptr] = ack_pending_stream_manager(slots.receiver); moved) {
CAF_ASSERT(ptr != nullptr);
ptr->handle(slots, x);
} else {
CAF_LOG_WARNING("found no corresponding manager for received ack_open");
return;
}
auto ptr = std::move(i->second);
pending_stream_managers_.erase(i);
if (!add_stream_manager(slots.receiver, ptr)) {
CAF_LOG_WARNING("unable to add stream manager after receiving ack_open");
return;
}
ptr->handle(slots, x);
}
uint64_t scheduled_actor::set_timeout(std::string type,
...@@ -1102,6 +1105,19 @@ void scheduled_actor::erase_pending_stream_manager(stream_slot id) {
pending_stream_managers_.erase(id);
}
std::pair<bool, stream_manager*>
scheduled_actor::ack_pending_stream_manager(stream_slot id) {
CAF_LOG_TRACE(CAF_ARG(id));
if (auto i = pending_stream_managers_.find(id);
i != pending_stream_managers_.end()) {
auto ptr = std::move(i->second);
auto raw_ptr = ptr.get();
pending_stream_managers_.erase(i);
return {add_stream_manager(id, std::move(ptr)), raw_ptr};
}
return {false, nullptr};
}
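The caller in handle_upstream_msg above consumes this helper via structured bindings. A self-contained sketch of the same pending-to-active transfer, using hypothetical manager/map names rather than CAF's types:

// Standalone sketch (not CAF): move an entry from the pending map into the
// active map and hand back a success flag plus a non-owning pointer.
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct manager {
  std::string name;
};

std::map<int, std::shared_ptr<manager>> pending;
std::map<int, std::shared_ptr<manager>> active;

std::pair<bool, manager*> ack_pending(int slot) {
  if (auto i = pending.find(slot); i != pending.end()) {
    auto ptr = std::move(i->second);
    auto raw = ptr.get();
    pending.erase(i);
    // emplace fails if the slot is already active; the flag mirrors that.
    auto added = active.emplace(slot, std::move(ptr)).second;
    return {added, raw};
  }
  return {false, nullptr};
}

int main() {
  pending[42] = std::make_shared<manager>(manager{"source"});
  if (auto [moved, ptr] = ack_pending(42); moved)
    std::cout << "activated " << ptr->name << '\n';
  else
    std::cout << "no pending manager for slot 42\n";
}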
void scheduled_actor::erase_stream_manager(const stream_manager_ptr& mgr) {
CAF_LOG_TRACE("");
if (!stream_managers_.empty()) {
...@@ -1173,18 +1189,31 @@ scheduled_actor::advance_streams(actor_clock::time_point now) {
CAF_LOG_TRACE(CAF_ARG(now));
if (stream_managers_.empty())
return actor_clock::time_point::max();
std::vector<stream_manager*> managers;
managers.reserve(stream_managers_.size());
for (auto& kvp : stream_managers_)
managers.emplace_back(kvp.second.get());
std::sort(managers.begin(), managers.end());
auto e = std::unique(managers.begin(), managers.end());
for (auto i = managers.begin(); i != e; ++i)
(*i)->tick(now);
auto managers = active_stream_managers();
for (auto ptr : managers)
ptr->tick(now);
auto idle = [](const stream_manager* mgr) { return mgr->idle(); };
if (std::all_of(managers.begin(), e, idle))
if (std::all_of(managers.begin(), managers.end(), idle))
return actor_clock::time_point::max();
return now + max_batch_delay_;
}
void scheduled_actor::active_stream_managers(std::vector<stream_manager*>& xs) {
xs.clear();
if (stream_managers_.empty())
return;
xs.reserve(stream_managers_.size());
for (auto& kvp : stream_managers_)
xs.emplace_back(kvp.second.get());
std::sort(xs.begin(), xs.end());
auto e = std::unique(xs.begin(), xs.end());
xs.erase(e, xs.end());
}
std::vector<stream_manager*> scheduled_actor::active_stream_managers() {
std::vector<stream_manager*> result;
active_stream_managers(result);
return result;
}
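For reference, a small standalone sketch (illustrative types only, not CAF) of the collect-then-deduplicate pattern used above: several slots may map to the same manager, so the helper sorts and uniques the collected pointers before handing them out.

#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <vector>

struct manager { int id; };

int main() {
  auto a = std::make_shared<manager>(manager{1});
  auto b = std::make_shared<manager>(manager{2});
  // Two slots share manager `a`.
  std::map<int, std::shared_ptr<manager>> slots{{10, a}, {11, a}, {12, b}};
  std::vector<manager*> xs;
  xs.reserve(slots.size());
  for (auto& kvp : slots)
    xs.emplace_back(kvp.second.get());
  std::sort(xs.begin(), xs.end());
  xs.erase(std::unique(xs.begin(), xs.end()), xs.end());
  std::cout << "unique managers: " << xs.size() << '\n'; // prints 2
}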
} // namespace caf
...@@ -92,15 +92,13 @@ bool stream_manager::handle(stream_slots slots, upstream_msg::ack_open& x) {
CAF_ASSERT(ptr->open_credit >= 0);
ptr->set_desired_batch_size(x.desired_batch_size);
--pending_handshakes_;
push();
return true;
}
void stream_manager::handle(stream_slots slots, upstream_msg::ack_batch& x) {
CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(x));
CAF_ASSERT(x.desired_batch_size > 0);
auto path = out().path(slots.receiver);
if (path != nullptr) {
if (auto path = out().path(slots.receiver); path != nullptr) {
path->open_credit += x.new_capacity;
CAF_ASSERT(path->open_credit >= 0);
path->set_desired_batch_size(x.desired_batch_size);
...@@ -108,7 +106,6 @@ void stream_manager::handle(stream_slots slots, upstream_msg::ack_batch& x) {
// Gravefully remove path after receiving its final ACK.
if (path->closing && out().clean(slots.receiver))
out().remove_path(slots.receiver, none, false);
push();
}
}
...@@ -300,9 +297,11 @@ stream_manager::add_unchecked_inbound_path_impl(type_id_t input_type,
}
void stream_manager::tick(time_point now) {
for (auto path : inbound_paths_)
path->tick(now, max_batch_delay_);
out().tick(now, max_batch_delay_);
do {
out().tick(now, max_batch_delay_);
for (auto path : inbound_paths_)
path->tick(now, max_batch_delay_);
} while (generate_messages());
}
stream_slot stream_manager::assign_next_slot() {
...
...@@ -49,37 +49,39 @@ TESTEE_STATE(file_reader) {
};
VARARGS_TESTEE(file_reader, size_t buf_size) {
return {[=](string& fname) -> result<stream<int32_t>, string> {
CAF_CHECK_EQUAL(fname, "numbers.txt");
CAF_CHECK_EQUAL(self->mailbox().empty(), true);
return attach_stream_source(
self,
// forward file name in handshake to next stage
std::forward_as_tuple(std::move(fname)),
// initialize state
[=](unit_t&) {
auto& xs = self->state.buf;
xs.resize(buf_size);
std::iota(xs.begin(), xs.end(), 1);
},
// get next element
[=](unit_t&, downstream<int32_t>& out, size_t num) {
auto& xs = self->state.buf;
CAF_MESSAGE("push " << num << " messages downstream");
auto n = std::min(num, xs.size());
for (size_t i = 0; i < n; ++i)
out.push(xs[i]);
xs.erase(xs.begin(), xs.begin() + static_cast<ptrdiff_t>(n));
},
// check whether we reached the end
[=](const unit_t&) {
if (self->state.buf.empty()) {
CAF_MESSAGE(self->name() << " is done");
return true;
}
return false;
});
}};
return {
[=](string& fname) -> result<stream<int32_t>, string> {
CAF_CHECK_EQUAL(fname, "numbers.txt");
CAF_CHECK_EQUAL(self->mailbox().empty(), true);
return attach_stream_source(
self,
// forward file name in handshake to next stage
std::forward_as_tuple(std::move(fname)),
// initialize state
[=](unit_t&) {
auto& xs = self->state.buf;
xs.resize(buf_size);
std::iota(xs.begin(), xs.end(), 1);
},
// get next element
[=](unit_t&, downstream<int32_t>& out, size_t num) {
auto& xs = self->state.buf;
CAF_MESSAGE("push " << num << " messages downstream");
auto n = std::min(num, xs.size());
for (size_t i = 0; i < n; ++i)
out.push(xs[i]);
xs.erase(xs.begin(), xs.begin() + static_cast<ptrdiff_t>(n));
},
// check whether we reached the end
[=](const unit_t&) {
if (self->state.buf.empty()) {
CAF_MESSAGE(self->name() << " is done");
return true;
}
return false;
});
},
};
}
TESTEE_STATE(sum_up) {
...@@ -87,26 +89,28 @@ TESTEE_STATE(sum_up) {
};
TESTEE(sum_up) {
return {[=](stream<int32_t>& in, const string& fname) {
CAF_CHECK_EQUAL(fname, "numbers.txt");
using int_ptr = int32_t*;
return attach_stream_sink(
self,
// input stream
in,
// initialize state
[=](int_ptr& x) { x = &self->state.x; },
// processing step
[](int_ptr& x, int32_t y) { *x += y; },
// cleanup
[=](int_ptr&, const error&) {
CAF_MESSAGE(self->name() << " is done");
});
},
[=](join_atom atm, actor src) {
CAF_MESSAGE(self->name() << " joins a stream");
self->send(self * src, atm);
}};
return {
[=](stream<int32_t>& in, const string& fname) {
CAF_CHECK_EQUAL(fname, "numbers.txt");
using int_ptr = int32_t*;
return attach_stream_sink(
self,
// input stream
in,
// initialize state
[=](int_ptr& x) { x = &self->state.x; },
// processing step
[](int_ptr& x, int32_t y) { *x += y; },
// cleanup
[=](int_ptr&, const error&) {
CAF_MESSAGE(self->name() << " is done");
});
},
[=](join_atom atm, actor src) {
CAF_MESSAGE(self->name() << " joins a stream");
self->send(self * src, atm);
},
};
}
TESTEE_STATE(stream_multiplexer) {
...
...@@ -314,7 +314,7 @@ public:
kvp.second->tick(now());
}
virtual bool add_inbound_path(type_id_t input_type,
virtual bool add_inbound_path(type_id_t,
std::unique_ptr<inbound_path> path) override {
using policy_type = policy::downstream_messages::nested;
auto res = get<dmsg_id::value>(mbox.queues())
...@@ -348,6 +348,13 @@ public:
return *global_time_;
}
void push() {
if (forwarder)
forwarder->push();
for (auto mgr : active_stream_managers())
mgr->push();
}
// -- member variables -------------------------------------------------------
mboxqueue mbox;
...@@ -402,6 +409,7 @@ struct msg_visitor {
);
visit(f, um.content);
self->current_mailbox_element(nullptr);
self->push();
return intrusive::task_result::resume;
}
...
...@@ -394,12 +394,6 @@ CAF_TEST(depth_3_pipeline_50_items) {
auto src = sys.spawn(file_reader, 50u);
auto stg = sys.spawn(filter);
auto snk = sys.spawn(sum_up);
auto next_cycle = [&] {
tick();
allow((timeout_msg), from(snk).to(snk));
allow((timeout_msg), from(stg).to(stg));
allow((timeout_msg), from(src).to(src));
};
CAF_MESSAGE(CAF_ARG(self) << CAF_ARG(src) << CAF_ARG(stg) << CAF_ARG(snk));
CAF_MESSAGE("initiate stream handshake");
self->send(snk * stg * src, "numbers.txt");
...@@ -412,17 +406,9 @@ CAF_TEST(depth_3_pipeline_50_items) {
expect((downstream_msg::batch), from(src).to(stg));
CAF_MESSAGE("the stage should delay its first batch since its underfull");
disallow((downstream_msg::batch), from(stg).to(snk));
next_cycle();
CAF_MESSAGE("the source shuts down and the stage sends the final batch");
expect((upstream_msg::ack_batch), from(stg).to(src));
expect((downstream_msg::close), from(src).to(stg));
expect((downstream_msg::batch), from(stg).to(snk));
next_cycle();
CAF_MESSAGE("the stage shuts down and the sink produces its final result");
expect((upstream_msg::ack_batch), from(snk).to(stg));
expect((downstream_msg::close), from(stg).to(snk));
CAF_MESSAGE("after running the pipeline the sink received all batches");
run();
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.x, 625);
CAF_MESSAGE("verify that each actor called its finalizer once");
CAF_CHECK_EQUAL(deref<file_reader_actor>(src).state.fin_called, 1);
CAF_CHECK_EQUAL(deref<filter_actor>(stg).state.fin_called, 1);
CAF_CHECK_EQUAL(deref<sum_up_actor>(snk).state.fin_called, 1);
...