Commit efbe5827 authored by Dominik Charousset

Fix excessive buffering in stream stages

Because it does not consider how many items are currently buffered at a
stage, CAF happily hands out excessive credit to upstream stages.
Subtracting the number of currently cached elements from the maximum
capacity we communicate upstream yields the correct behavior.
parent 16730fc6
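
To make the effect concrete, here is a minimal, self-contained sketch (not CAF code; the numbers are made up and the variable names merely mirror the diff) that contrasts the credit computed before and after this change for a stage that still has 25 elements cached:

// Hedged illustration only: plain integers, no CAF types involved.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  // Assumed state of one inbound path at the start of a credit round.
  int32_t max_capacity = 30;    // min(x.max_throughput * 2, out.max_capacity())
  int32_t buffered = 25;        // elements currently cached at this stage
  int32_t assigned_credit = 0;  // credit already granted upstream
  // Guard against overflow on `assigned_credit`, as in the original code.
  int32_t max_new_credit = std::numeric_limits<int32_t>::max() - assigned_credit;
  // Before: buffered elements are ignored, so the stage keeps requesting
  // data although 25 elements are still waiting to be processed.
  int32_t old_credit = std::min(std::max(max_capacity - assigned_credit, 0),
                                max_new_credit);
  // After: subtract the buffered elements before granting new credit.
  int32_t new_credit = std::min(std::max(max_capacity - buffered - assigned_credit, 0),
                                max_new_credit);
  std::cout << "old credit: " << old_credit << '\n'   // prints 30
            << "new credit: " << new_credit << '\n';  // prints 5
  return 0;
}
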
@@ -144,10 +144,10 @@ public:
   /// @param self Points to the parent actor, i.e., sender of the message.
   /// @param queued_items Accumulated size of all batches that are currently
   ///                     waiting in the mailbox.
   /// @param now Current timestamp.
   /// @param cycle Time between credit rounds.
   /// @param desired_batch_complexity Desired processing time per batch.
   void emit_ack_batch(local_actor* self, int32_t queued_items,
-                      int32_t max_downstream_capacity,
                       actor_clock::time_point now, timespan cycle,
                       timespan desired_batch_complexity);

@@ -133,11 +133,9 @@ void inbound_path::emit_ack_open(local_actor* self, actor_addr rebind_from) {
 }

 void inbound_path::emit_ack_batch(local_actor* self, int32_t queued_items,
-                                  int32_t max_downstream_capacity,
                                   actor_clock::time_point now, timespan cycle,
                                   timespan complexity) {
-  CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(queued_items)
-                << CAF_ARG(max_downstream_capacity) << CAF_ARG(cycle)
+  CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(queued_items) << CAF_ARG(cycle)
                 << CAF_ARG(complexity));
   CAF_IGNORE_UNUSED(queued_items);
   // Update timestamps.

@@ -147,22 +145,28 @@ void inbound_path::emit_ack_batch(local_actor* self, int32_t queued_items,
   // the downstream capacity.
   auto x = stats.calculate(cycle, complexity);
   auto stats_guard = detail::make_scope_guard([&] { stats.reset(); });
-  auto max_capacity = std::min(x.max_throughput * 2, max_downstream_capacity);
+  auto& out = mgr->out();
+  auto max_capacity = std::min(x.max_throughput * 2, out.max_capacity());
   CAF_ASSERT(max_capacity > 0);
   // Protect against overflow on `assigned_credit`.
   auto max_new_credit = std::numeric_limits<int32_t>::max() - assigned_credit;
   // Compute the amount of credit we grant in this round.
-  auto credit = std::min(std::max(max_capacity - assigned_credit, 0),
+  auto credit = std::min(std::max(max_capacity
+                                    - static_cast<int32_t>(out.buffered())
+                                    - assigned_credit,
+                                  0),
                          max_new_credit);
   CAF_ASSERT(credit >= 0);
   // The manager can restrict or adjust the amount of credit.
   credit = std::min(mgr->acquire_credit(this, credit), max_new_credit);
-  CAF_STREAM_LOG_DEBUG(mgr->self()->name() << "grants" << credit
-                       << "new credit at slot" << slots.receiver
-                       << "after receiving" << stats.num_elements
-                       << "elements that took" << stats.processing_time
+  CAF_STREAM_LOG_DEBUG(mgr->self()->name()
+                       << "grants" << credit << "new credit at slot"
+                       << slots.receiver << "after receiving"
+                       << stats.num_elements << "elements that took"
+                       << stats.processing_time
                        << CAF_ARG2("max_throughput", x.max_throughput)
-                       << CAF_ARG(max_downstream_capacity)
+                       << CAF_ARG2("max_downstream_capacity",
+                                   out.max_capacity())
                        << CAF_ARG(assigned_credit));
   if (credit == 0 && up_to_date())
     return;

@@ -1156,8 +1156,7 @@ scheduled_actor::advance_streams(actor_clock::time_point now) {
     for (auto& kvp : qs) {
       auto inptr = kvp.second.policy().handler.get();
       auto bs = static_cast<int32_t>(kvp.second.total_task_size());
-      inptr->emit_ack_batch(this, bs, inptr->mgr->out().max_capacity(),
-                            now, cycle, bc);
+      inptr->emit_ack_batch(this, bs, now, cycle, bc);
     }
   }
   return stream_ticks_.next_timeout(now, {max_batch_delay_ticks_,

@@ -161,8 +161,7 @@ void stream_manager::advance() {
       // Ignore inbound paths of other managers.
       if (inptr->mgr.get() == this) {
         auto bs = static_cast<int32_t>(kvp.second.total_task_size());
-        inptr->emit_ack_batch(self_, bs, out().max_capacity(), now, interval,
-                              bc);
+        inptr->emit_ack_batch(self_, bs, now, interval, bc);
       }
     }
   }

@@ -342,7 +342,7 @@ public:
     for (auto& kvp : qs) {
       auto inptr = kvp.second.policy().handler.get();
       auto bs = static_cast<int32_t>(kvp.second.total_task_size());
-      inptr->emit_ack_batch(this, bs, 30, now(), cycle,
+      inptr->emit_ack_batch(this, bs, now(), cycle,
                             desired_batch_complexity);
     }
   }