Commit e60feb3d authored and committed by Dominik Charousset

Communicate desired batch size upstream

parent 1d9b8d02
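In short: each downstream path now announces how many elements it would like to receive per batch, the hint travels upstream in `ack_open` and `ack_batch`, and the scatterers cap their chunk sizes with it instead of filling batches up to the full credit. The standalone sketch below models the new emit loop outside of CAF; all names and types in it are illustrative stand-ins rather than the actual CAF classes.

```cpp
#include <algorithm>
#include <deque>
#include <vector>

// Illustrative stand-in for an outbound path: the credit granted by the
// downstream actor plus the batch-size hint it announced.
struct path {
  long open_credit;
  long desired_batch_size;
};

// Models the broadcast emit loop: every batch carries at most
// min(min_credit, min_desired_batch_size) elements, and the loop keeps
// emitting until the buffer or the slowest path's credit runs out.
void emit_batches(std::deque<int>& buf, std::vector<path>& paths) {
  if (paths.empty())
    return;
  for (;;) {
    long credit = paths.front().open_credit;
    long hint = paths.front().desired_batch_size;
    for (auto& p : paths) {
      credit = std::min(credit, p.open_credit);
      hint = std::min(hint, p.desired_batch_size);
    }
    auto csize = std::min({credit, hint, static_cast<long>(buf.size())});
    if (csize <= 0)
      return;
    // "Ship" buf[0, csize) to every path and consume the credit.
    for (auto& p : paths)
      p.open_credit -= csize;
    buf.erase(buf.begin(), buf.begin() + csize);
  }
}
```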
......@@ -40,15 +40,19 @@ public:
void emit_batches() override {
CAF_LOG_TRACE("");
auto chunk = this->get_chunk(this->min_credit());
auto csize = static_cast<long>(chunk.size());
CAF_LOG_TRACE(CAF_ARG(chunk));
if (csize == 0)
return;
auto wrapped_chunk = make_message(std::move(chunk));
for (auto& x : this->paths_) {
CAF_ASSERT(x->open_credit >= csize);
x->emit_batch(csize, wrapped_chunk);
auto hint = this->min_desired_batch_size();
auto next_chunk = [&] {
// TODO: this iterates paths_ every time again even though we could
// easily keep track of remaining credit
return this->get_chunk(std::min(this->min_credit(), hint));
};
for (auto chunk = next_chunk(); !chunk.empty(); chunk = next_chunk()) {
auto csize = static_cast<long>(chunk.size());
auto wrapped_chunk = make_message(std::move(chunk));
for (auto& x : this->paths_) {
CAF_ASSERT(x->open_credit >= csize);
x->emit_batch(csize, wrapped_chunk);
}
}
}
};
......
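The TODO above notes that `get_chunk(std::min(this->min_credit(), hint))` re-scans `paths_` on every pass through the loop. A possible follow-up, not part of this commit, would compute the minimum credit once and track the remainder locally; a sketch using the same member functions that appear in the hunk above:

```cpp
// Hypothetical variant of emit_batches() addressing the TODO: compute the
// minimum credit once and decrement it locally instead of re-scanning
// paths_ for every chunk.
void emit_batches() override {
  CAF_LOG_TRACE("");
  auto remaining = this->min_credit();
  auto hint = this->min_desired_batch_size();
  while (remaining > 0) {
    auto chunk = this->get_chunk(std::min(remaining, hint));
    auto csize = static_cast<long>(chunk.size());
    if (csize == 0)
      return;
    remaining -= csize;
    auto wrapped_chunk = make_message(std::move(chunk));
    for (auto& x : this->paths_) {
      CAF_ASSERT(x->open_credit >= csize);
      x->emit_batch(csize, wrapped_chunk);
    }
  }
}
```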
......@@ -52,14 +52,20 @@ public:
this->fan_out();
for (auto& kvp : this->lanes_) {
auto& l = kvp.second;
auto chunk = super::get_chunk(l.buf, super::min_credit(l.paths));
auto csize = static_cast<long>(chunk.size());
if (csize == 0)
continue;
auto wrapped_chunk = make_message(std::move(chunk));
for (auto& x : l.paths) {
CAF_ASSERT(x->open_credit >= csize);
x->emit_batch(csize, wrapped_chunk);
auto hint = super::min_desired_batch_size(l.paths);
auto next_chunk = [&] {
// TODO: this iterates paths_ every time again even though we could
// easily keep track of remaining credit
return super::get_chunk(l.buf,
std::min(super::min_credit(l.paths), hint));
};
for (auto chunk = next_chunk(); !chunk.empty(); chunk = next_chunk()) {
auto csize = static_cast<long>(chunk.size());
auto wrapped_chunk = make_message(std::move(chunk));
for (auto& x : l.paths) {
CAF_ASSERT(x->open_credit >= csize);
x->emit_batch(csize, wrapped_chunk);
}
}
}
}
......
......@@ -35,17 +35,11 @@ public:
}
using super::min_batch_size;
using super::max_batch_size;
using super::min_buffer_size;
long min_batch_size() const override {
return 1;
}
long max_batch_size() const override {
return 5;
}
long min_buffer_size() const override {
return 5;
}
......
......@@ -111,7 +111,7 @@ public:
path_ptr confirm_path(const stream_id& sid, const actor_addr& from,
strong_actor_ptr to, long initial_demand,
bool redeployable) override {
long desired_batch_size, bool redeployable) override {
CAF_LOG_TRACE(CAF_ARG(sid) << CAF_ARG(from) << CAF_ARG(to)
<< CAF_ARG(initial_demand) << CAF_ARG(redeployable));
return first_hit([&](pointer ptr) -> outbound_path* {
......@@ -119,7 +119,8 @@ public:
// this will trigger forced_close messages.
if (ptr->find(sid, from) == nullptr)
return nullptr;
return ptr->confirm_path(sid, from, to, initial_demand, redeployable);
return ptr->confirm_path(sid, from, to, initial_demand,
desired_batch_size, redeployable);
});
}
......@@ -208,10 +209,6 @@ public:
return main_stream().min_batch_size();
}
long max_batch_size() const override {
return main_stream().max_batch_size();
}
long min_buffer_size() const override {
return main_stream().min_buffer_size();
}
......@@ -224,14 +221,6 @@ public:
main_stream().min_batch_size(x);
}
void max_batch_size(long x) override {
main_stream().max_batch_size(x);
}
void min_buffer_size(long x) override {
main_stream().min_buffer_size(x);
}
void max_batch_delay(duration x) override {
main_stream().max_batch_delay(x);
}
......
......@@ -65,6 +65,9 @@ public:
/// Amount of credit we have signaled upstream.
long assigned_credit;
/// Ideal size for individual batches.
long desired_batch_size;
/// Stores whether the source actor is failsafe, i.e., allows the runtime to
/// redeploy it on failure.
bool redeployable;
......
......@@ -38,7 +38,7 @@ public:
path_ptr confirm_path(const stream_id& sid, const actor_addr& from,
strong_actor_ptr to, long initial_demand,
bool redeployable) override;
long desired_batch_size, bool redeployable) override;
bool remove_path(const stream_id& sid, const actor_addr& x,
error reason, bool silent) override;
......@@ -67,18 +67,12 @@ public:
long min_batch_size() const override;
long max_batch_size() const override;
long min_buffer_size() const override;
duration max_batch_delay() const override;
void min_batch_size(long x) override;
void max_batch_size(long x) override;
void min_buffer_size(long x) override;
void max_batch_delay(duration x) override;
};
......
......@@ -67,6 +67,9 @@ public:
/// Currently available credit for this path.
long open_credit;
/// Batch size configured by the downstream actor.
long desired_batch_size;
/// Stores whether the downstream actor is failsafe, i.e., allows the runtime
/// to redeploy it on failure. If this field is set to `false` then
/// `unacknowledged_batches` is unused.
......
......@@ -53,11 +53,14 @@ public:
auto& l = kvp.second;
super::sort_by_credit(l.paths);
for (auto& x : l.paths) {
auto chunk = super::get_chunk(l.buf, x->open_credit);
auto csize = static_cast<long>(chunk.size());
if (csize == 0)
break;
x->emit_batch(csize, make_message(std::move(chunk)));
auto hint = x->desired_batch_size;
auto next_chunk = [&] {
return super::get_chunk(l.buf, std::min(x->open_credit, hint));
};
for (auto chunk = next_chunk(); !chunk.empty(); chunk = next_chunk()) {
auto csize = static_cast<long>(chunk.size());
x->emit_batch(csize, make_message(std::move(chunk)));
}
}
}
}
......
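The emit loops above differ in how they aggregate the hint: the broadcasting variants send the same chunk to every path and therefore size it by the minimum credit and minimum hint across all paths, while the per-path variant directly above sizes each chunk by that path's own `open_credit` and `desired_batch_size`. A small worked example with invented numbers:

```cpp
// Three downstream paths (all values invented for illustration).
long open_credit[]        = {30, 80, 200};
long desired_batch_size[] = {10, 50, 100};

// Broadcasting scatterer: identical chunks for every path, so
//   chunk size = min(min_credit, min_desired_batch_size)
//              = min(30, 10) = 10 elements per batch.

// Per-path scatterer (hunk directly above): each path gets its own chunk,
//   path 0: min(30, 10)   = 10 elements per batch
//   path 1: min(80, 50)   = 50 elements per batch
//   path 2: min(200, 100) = 100 elements per batch
```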
......@@ -62,7 +62,7 @@ public:
/// @pre `hdl != nullptr`
virtual error ack_open(const stream_id& sid, const actor_addr& rebind_from,
strong_actor_ptr rebind_to, long initial_demand,
bool redeployable);
long desired_batch_size, bool redeployable);
/// Handles `stream_msg::batch` messages.
/// @param hdl Handle to the sender.
......@@ -80,7 +80,8 @@ public:
/// @param cumulative_batch_id Id of last handled batch.
/// @pre `hdl != nullptr`
virtual error ack_batch(const stream_id& sid, const actor_addr& hdl,
long new_demand, int64_t cumulative_batch_id);
long new_demand, long desired_batch_size,
int64_t cumulative_batch_id);
/// Handles `stream_msg::close` messages.
/// @param hdl Handle to the sender.
......
......@@ -70,6 +70,8 @@ struct stream_msg : tag::boxing_type {
strong_actor_ptr rebind_to;
/// Grants credit to the source.
int32_t initial_demand;
/// Desired size of individual batches.
int32_t desired_batch_size;
/// Tells the upstream whether rebindings can occur on this path.
bool redeployable;
};
......@@ -93,6 +95,8 @@ struct stream_msg : tag::boxing_type {
using outer_type = stream_msg;
/// Newly available credit.
int32_t new_capacity;
/// Desired size of individual batches for the next cycle.
int32_t desired_batch_size;
/// Cumulative ack ID.
int64_t acknowledged_id;
};
......@@ -195,7 +199,7 @@ typename Inspector::result_type inspect(Inspector& f, stream_msg::open& x) {
template <class Inspector>
typename Inspector::result_type inspect(Inspector& f, stream_msg::ack_open& x) {
return f(meta::type_name("ack_open"), x.rebind_from, x.rebind_to,
x.initial_demand, x.redeployable);
x.initial_demand, x.desired_batch_size, x.redeployable);
}
template <class Inspector>
......@@ -206,7 +210,8 @@ typename Inspector::result_type inspect(Inspector& f, stream_msg::batch& x) {
template <class Inspector>
typename Inspector::result_type inspect(Inspector& f,
stream_msg::ack_batch& x) {
return f(meta::type_name("ack_batch"), x.new_capacity, x.acknowledged_id);
return f(meta::type_name("ack_batch"), x.new_capacity, x.desired_batch_size,
x.acknowledged_id);
}
template <class Inspector>
......
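With the two new `int32_t` fields in place, every acknowledgement carries the sink's current hint, so the source learns it during the handshake and can pick up changes on every credit cycle. A rough timeline of the exchange, with invented field values (only the field names shown in the hunks above are real):

```cpp
// sink   -> source  ack_open{rebind_from, rebind_to,
//                            initial_demand     = 50,
//                            desired_batch_size = 50,
//                            redeployable       = false}
// source -> sink    batch with 50 elements, id = 0      (sized by the hint)
// sink   -> source  ack_batch{new_capacity       = 50,
//                             desired_batch_size = 25,  // sink lowered it
//                             acknowledged_id    = 0}
// source -> sink    batch with 25 elements, id = 1      (new hint applied)
```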
......@@ -56,7 +56,7 @@ public:
/// Adds a path to a sink and initiates the handshake.
virtual path_ptr confirm_path(const stream_id& sid, const actor_addr& from,
strong_actor_ptr to, long initial_demand,
bool redeployable) = 0;
long desired_batch_size, bool redeployable) = 0;
/// Removes a path from the scatterer.
virtual bool remove_path(const stream_id& sid, const actor_addr& x,
......@@ -104,15 +104,12 @@ public:
virtual long buffered() const = 0;
/// Minimum amount of messages required to emit a batch. A value of 0
/// disables batch delays.
/// disables batch accumulation.
virtual long min_batch_size() const = 0;
/// Maximum amount of messages to put into a single batch. Causes the actor
/// to split a buffer into more batches if necessary.
virtual long max_batch_size() const = 0;
/// Minimum amount of messages we wish to store at the actor in order to emit
/// new batches immediately when receiving new downstream demand.
/// new batches immediately when receiving new downstream demand. Usually
/// dynamically adjusted based on the output rate.
virtual long min_buffer_size() const = 0;
/// Forces the actor to emit a batch even if the minimum batch size was not
......@@ -123,14 +120,6 @@ public:
/// disables batch delays.
virtual void min_batch_size(long x) = 0;
/// Maximum amount of messages to put into a single batch. Causes the actor
/// to split a buffer into more batches if necessary.
virtual void max_batch_size(long x) = 0;
/// Minimum amount of messages we wish to store at the actor in order to emit
/// new batches immediately when receiving new downstream demand.
virtual void min_buffer_size(long x) = 0;
/// Forces the actor to emit a batch even if the minimum batch size was not
/// reached.
virtual void max_batch_delay(duration x) = 0;
......
......@@ -67,13 +67,45 @@ public:
return !xs.empty()
? fold_credit(xs, std::numeric_limits<long>::max(),
[](long x, long y) { return std::min(x, y); })
: 0;
: 0l;
}
/// Returns the maximum number of credit in `xs`.
template <class PathContainer>
static long max_credit(const PathContainer& xs) {
return fold_credit(xs, 0l, [](long x, long y) { return std::max(x, y); });
}
}
/// Folds `paths()` by extracting the `desired_batch_size` from each path.
template <class PathContainer, class F>
static long fold_desired_batch_size(const PathContainer& xs, long x0, F f) {
auto g = [f](long x, const typename PathContainer::value_type& y) {
return f(x, y->desired_batch_size);
};
return super::fold(xs, x0, std::move(g));
}
/// Returns the total number (sum) of all desired batch sizes in `xs`.
template <class PathContainer>
static long total_desired_batch_size(const PathContainer& xs) {
return fold_desired_batch_size(xs, 0l,
[](long x, long y) { return x + y; });
}
/// Returns the minimum of all desired batch sizes in `xs`.
template <class PathContainer>
static long min_desired_batch_size(const PathContainer& xs) {
return !xs.empty() ? fold_desired_batch_size(
xs, std::numeric_limits<long>::max(),
[](long x, long y) { return std::min(x, y); })
: 0l;
}
/// Returns the maximum of all desired batch sizes in `xs`.
template <class PathContainer>
static long max_desired_batch_size(const PathContainer& xs) {
return fold_desired_batch_size(xs, 0l,
[](long x, long y) { return std::max(x, y); });
}
// -- convenience functions for children classes -----------------------------
......@@ -86,6 +118,15 @@ public:
/// Returns the maximum number of credit in `paths()`.
long max_credit() const;
/// Returns the total number (sum) of all desired batch sizes in `paths()`.
long total_desired_batch_size() const;
/// Returns the minimum of all desired batch sizes in `paths()`.
long min_desired_batch_size() const;
/// Returns the maximum of all desired batch sizes in `paths()`.
long max_desired_batch_size() const;
// -- overridden functions ---------------------------------------------------
void close() override;
......@@ -98,30 +139,22 @@ public:
path_ptr confirm_path(const stream_id& sid, const actor_addr& from,
strong_actor_ptr to, long initial_demand,
bool redeployable) override;
long desired_batch_size, bool redeployable) override;
bool paths_clean() const override;
long min_batch_size() const override;
long max_batch_size() const override;
long min_buffer_size() const override;
duration max_batch_delay() const override;
void min_batch_size(long x) override;
void max_batch_size(long x) override;
void min_buffer_size(long x) override;
void max_batch_delay(duration x) override;
protected:
long min_batch_size_;
long max_batch_size_;
long min_buffer_size_;
duration max_batch_delay_;
};
......
......@@ -39,7 +39,7 @@ public:
path_ptr confirm_path(const stream_id& sid, const actor_addr& from,
strong_actor_ptr to, long initial_demand,
bool redeployable) override;
long desired_batch_size, bool redeployable) override;
bool remove_path(const stream_id& sid, const actor_addr& x,
error reason, bool silent) override;
......@@ -70,18 +70,12 @@ public:
long min_batch_size() const override;
long max_batch_size() const override;
long min_buffer_size() const override;
duration max_batch_delay() const override;
void min_batch_size(long x) override;
void max_batch_size(long x) override;
void min_buffer_size(long x) override;
void max_batch_delay(duration x) override;
};
......
......@@ -34,6 +34,7 @@ inbound_path::inbound_path(local_actor* selfptr, const stream_id& id,
last_acked_batch_id(0),
last_batch_id(0),
assigned_credit(0),
desired_batch_size(50), // TODO: at least put default in some header
redeployable(false) {
// nop
}
......@@ -60,20 +61,23 @@ void inbound_path::emit_ack_open(actor_addr rebind_from,
<< CAF_ARG(is_redeployable));
assigned_credit = initial_demand;
redeployable = is_redeployable;
auto batch_size = static_cast<int32_t>(desired_batch_size);
unsafe_send_as(self, hdl,
make<stream_msg::ack_open>(
sid, self->address(), std::move(rebind_from), self->ctrl(),
static_cast<int32_t>(initial_demand), is_redeployable));
static_cast<int32_t>(initial_demand), batch_size,
is_redeployable));
}
void inbound_path::emit_ack_batch(long new_demand) {
CAF_LOG_TRACE(CAF_ARG(new_demand));
last_acked_batch_id = last_batch_id;
assigned_credit += new_demand;
auto batch_size = static_cast<int32_t>(desired_batch_size);
unsafe_send_as(self, hdl,
make<stream_msg::ack_batch>(sid, self->address(),
static_cast<int32_t>(new_demand),
last_batch_id));
batch_size, last_batch_id));
}
void inbound_path::emit_irregular_shutdown(local_actor* self,
......
......@@ -37,7 +37,7 @@ invalid_stream_scatterer::add_path(const stream_id&, strong_actor_ptr,
stream_scatterer::path_ptr
invalid_stream_scatterer::confirm_path(const stream_id&, const actor_addr&,
strong_actor_ptr, long, bool) {
strong_actor_ptr, long, long, bool) {
CAF_LOG_ERROR("invalid_stream_scatterer::confirm_path called");
return nullptr;
}
......@@ -97,10 +97,6 @@ long invalid_stream_scatterer::min_batch_size() const {
return 0;
}
long invalid_stream_scatterer::max_batch_size() const {
return 0;
}
long invalid_stream_scatterer::min_buffer_size() const {
return 0;
}
......@@ -113,14 +109,6 @@ void invalid_stream_scatterer::min_batch_size(long) {
// nop
}
void invalid_stream_scatterer::max_batch_size(long) {
// nop
}
void invalid_stream_scatterer::min_buffer_size(long) {
// nop
}
void invalid_stream_scatterer::max_batch_delay(duration) {
// nop
}
......
......@@ -32,6 +32,7 @@ outbound_path::outbound_path(local_actor* selfptr, const stream_id& id,
hdl(std::move(ptr)),
next_batch_id(0),
open_credit(0),
desired_batch_size(0),
redeployable(false),
next_ack_id(0) {
// nop
......
......@@ -55,7 +55,7 @@ error stream_manager::open(const stream_id& sid, strong_actor_ptr hdl,
error stream_manager::ack_open(const stream_id& sid,
const actor_addr& rebind_from,
strong_actor_ptr rebind_to, long initial_demand,
bool redeployable) {
long desired_batch_size, bool redeployable) {
CAF_LOG_TRACE(CAF_ARG(sid) << CAF_ARG(rebind_from) << CAF_ARG(rebind_to)
<< CAF_ARG(initial_demand) << CAF_ARG(redeployable));
if (rebind_from == nullptr)
......@@ -67,7 +67,8 @@ error stream_manager::ack_open(const stream_id& sid,
return sec::invalid_downstream;
}
auto ptr = out().confirm_path(sid, rebind_from, std::move(rebind_to),
initial_demand, redeployable);
initial_demand, desired_batch_size,
redeployable);
if (ptr == nullptr)
return sec::invalid_downstream;
downstream_demand(ptr, initial_demand);
......@@ -102,12 +103,14 @@ error stream_manager::batch(const stream_id& sid, const actor_addr& hdl,
}
error stream_manager::ack_batch(const stream_id& sid, const actor_addr& hdl,
long demand, int64_t) {
long demand, long batch_size, int64_t) {
CAF_LOG_TRACE(CAF_ARG(sid) << CAF_ARG(hdl) << CAF_ARG(demand));
auto ptr = out().find(sid, hdl);
if (ptr == nullptr)
return sec::invalid_downstream;
ptr->open_credit += demand;
if (ptr->desired_batch_size != batch_size)
ptr->desired_batch_size = batch_size;
downstream_demand(ptr, demand);
return none;
}
......
......@@ -102,7 +102,8 @@ auto stream_msg_visitor::operator()(stream_msg::ack_open& x) -> result_type {
CAF_LOG_TRACE(CAF_ARG(x));
return invoke([&](stream_manager_ptr& mgr) {
return mgr->ack_open(sid_, x.rebind_from, std::move(x.rebind_to),
x.initial_demand, x.redeployable);
x.initial_demand, x.desired_batch_size,
x.redeployable);
});
}
......@@ -116,7 +117,7 @@ auto stream_msg_visitor::operator()(stream_msg::batch& x) -> result_type {
auto stream_msg_visitor::operator()(stream_msg::ack_batch& x) -> result_type {
CAF_LOG_TRACE(CAF_ARG(x));
return invoke([&](stream_manager_ptr& mgr) {
return mgr->ack_batch(sid_, sender_, static_cast<long>(x.new_capacity),
return mgr->ack_batch(sid_, sender_, x.new_capacity, x.desired_batch_size,
x.acknowledged_id);
});
}
......
......@@ -26,8 +26,6 @@ namespace caf {
stream_scatterer_impl::stream_scatterer_impl(local_actor* selfptr)
: super(selfptr),
min_batch_size_(1),
max_batch_size_(150),
min_buffer_size_(50),
max_batch_delay_(infinite) {
// nop
}
......@@ -54,12 +52,12 @@ stream_scatterer_impl::add_path(const stream_id& sid, strong_actor_ptr origin,
return ptr;
}
stream_scatterer::path_ptr
stream_scatterer_impl::confirm_path(const stream_id& sid,
const actor_addr& from, strong_actor_ptr to,
long initial_demand, bool redeployable) {
stream_scatterer::path_ptr stream_scatterer_impl::confirm_path(
const stream_id& sid, const actor_addr& from, strong_actor_ptr to,
long initial_demand, long desired_batch_size, bool redeployable) {
CAF_LOG_TRACE(CAF_ARG(sid) << CAF_ARG(from) << CAF_ARG(to)
<< CAF_ARG(initial_demand) << CAF_ARG(redeployable));
<< CAF_ARG(initial_demand) << CAF_ARG(desired_batch_size)
<< CAF_ARG(redeployable));
auto ptr = find(sid, from);
if (ptr == nullptr) {
CAF_LOG_WARNING("cannot confirm unknown path");
......@@ -71,6 +69,8 @@ stream_scatterer_impl::confirm_path(const stream_id& sid,
ptr->hdl = std::move(to);
ptr->redeployable = redeployable;
ptr->open_credit += initial_demand;
if (ptr->desired_batch_size != desired_batch_size)
ptr->desired_batch_size = desired_batch_size;
return ptr;
}
......@@ -108,16 +108,24 @@ long stream_scatterer_impl::max_credit() const {
return max_credit(paths_);
}
long stream_scatterer_impl::min_batch_size() const {
return min_batch_size_;
long stream_scatterer_impl::total_desired_batch_size() const {
return total_desired_batch_size(paths_);
}
long stream_scatterer_impl::min_desired_batch_size() const {
return min_desired_batch_size(paths_);
}
long stream_scatterer_impl::max_desired_batch_size() const {
return max_desired_batch_size(paths_);
}
long stream_scatterer_impl::max_batch_size() const {
return max_batch_size_;
long stream_scatterer_impl::min_batch_size() const {
return min_batch_size_;
}
long stream_scatterer_impl::min_buffer_size() const {
return min_buffer_size_;
return 50; // TODO: at least place the default in a header
}
duration stream_scatterer_impl::max_batch_delay() const {
......@@ -128,14 +136,6 @@ void stream_scatterer_impl::min_batch_size(long x) {
min_batch_size_ = x;
}
void stream_scatterer_impl::max_batch_size(long x) {
max_batch_size_ = x;
}
void stream_scatterer_impl::min_buffer_size(long x) {
min_buffer_size_ = x;
}
void stream_scatterer_impl::max_batch_delay(duration x) {
max_batch_delay_ = std::move(x);
}
......
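The two TODOs above, the hard-coded 50 in the `inbound_path` constructor and the hard-coded 50 returned by `min_buffer_size()`, ask for the same cleanup: hoist the magic number into a shared header. Purely as an illustration of that suggestion (neither the header nor the constant names exist at this commit):

```cpp
// Hypothetical caf/stream_defaults.hpp sketching what the TODOs ask for.
namespace caf {
namespace defaults {

constexpr long desired_batch_size = 50; // initial hint sent by inbound paths
constexpr long min_buffer_size = 50;    // fallback used by the scatterer

} // namespace defaults
} // namespace caf
```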
......@@ -42,7 +42,7 @@ terminal_stream_scatterer::add_path(const stream_id&, strong_actor_ptr,
stream_scatterer::path_ptr
terminal_stream_scatterer::confirm_path(const stream_id&, const actor_addr&,
strong_actor_ptr, long, bool) {
strong_actor_ptr, long, long, bool) {
CAF_LOG_ERROR("terminal_stream_scatterer::confirm_path called");
return nullptr;
}
......@@ -107,10 +107,6 @@ long terminal_stream_scatterer::min_batch_size() const {
return 0;
}
long terminal_stream_scatterer::max_batch_size() const {
return 0;
}
long terminal_stream_scatterer::min_buffer_size() const {
return 0;
}
......@@ -123,14 +119,6 @@ void terminal_stream_scatterer::min_batch_size(long) {
// nop
}
void terminal_stream_scatterer::max_batch_size(long) {
// nop
}
void terminal_stream_scatterer::min_buffer_size(long) {
// nop
}
void terminal_stream_scatterer::max_batch_delay(duration) {
// nop
}
......
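The test updates below are mechanical: `ack_open` and `ack_batch` each gained one field, so every `with()` clause gets an extra wildcard in the position of the new `desired_batch_size`, for example:

```cpp
// before: matches new_capacity and acknowledged_id
expect((stream_msg::ack_batch), from(d1).to(splitter).with(3, 0));
// after: extra wildcard for the desired_batch_size in between
expect((stream_msg::ack_batch), from(d1).to(splitter).with(3, _, 0));
```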
......@@ -216,7 +216,7 @@ CAF_TEST(fork_setup) {
expect((stream_msg::open),
from(_).to(d1).with(_, splitter, _, _, _, false));
expect((stream_msg::ack_open),
from(d1).to(splitter).with(_, _, 5, _, false));
from(d1).to(splitter).with(_, _, 5, _, _, false));
CAF_MESSAGE("spawn second sink");
auto d2 = sys.spawn(storage, splitter, filter_type{"key2"});
sched.run_once();
......@@ -225,7 +225,7 @@ CAF_TEST(fork_setup) {
expect((stream_msg::open),
from(_).to(d2).with(_, splitter, _, _, _, false));
expect((stream_msg::ack_open),
from(d2).to(splitter).with(_, _, 5, _, false));
from(d2).to(splitter).with(_, _, 5, _, _, false));
CAF_MESSAGE("spawn source");
auto src = sys.spawn(nores_streamer, splitter);
sched.run_once();
......@@ -233,7 +233,7 @@ CAF_TEST(fork_setup) {
expect((stream_msg::open),
from(_).to(splitter).with(_, src, _, _, _, false));
expect((stream_msg::ack_open),
from(splitter).to(src).with(_, _, 5, _, false));
from(splitter).to(src).with(_, _, 5, _, _, false));
// First batch.
expect((stream_msg::batch),
from(src).to(splitter)
......@@ -250,9 +250,9 @@ CAF_TEST(fork_setup) {
expect((stream_msg::batch),
from(splitter).to(d1)
.with(3, batch{{"key1", "a"}, {"key1", "b"}, {"key1", "c"}}, 0));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, 0));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(3, 0));
expect((stream_msg::ack_batch), from(splitter).to(src).with(5, 0));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, _, 0));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(3, _, 0));
expect((stream_msg::ack_batch), from(splitter).to(src).with(5, _, 0));
// Second batch.
expect((stream_msg::batch),
from(src).to(splitter)
......@@ -261,9 +261,9 @@ CAF_TEST(fork_setup) {
from(splitter).to(d1).with(1, batch{{"key1", "d"}}, 1));
expect((stream_msg::batch),
from(splitter).to(d2).with(2, batch{{"key2", "c"}, {"key2", "d"}}, 1));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(1, 1));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, 1));
expect((stream_msg::ack_batch), from(splitter).to(src).with(3, 1));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(1, _, 1));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, _, 1));
expect((stream_msg::ack_batch), from(splitter).to(src).with(3, _, 1));
// Source is done, splitter remains open.
expect((stream_msg::close), from(src).to(splitter).with());
CAF_REQUIRE(!sched.has_job());
......@@ -291,7 +291,7 @@ CAF_TEST(fork_setup) {
expect((stream_msg::open),
from(_).to(splitter).with(_, src2, _, _, _, false));
expect((stream_msg::ack_open),
from(splitter).to(src2).with(_, _, 5, _, false));
from(splitter).to(src2).with(_, _, 5, _, _, false));
// First batch.
expect((stream_msg::batch),
from(src2).to(splitter)
......@@ -308,9 +308,9 @@ CAF_TEST(fork_setup) {
expect((stream_msg::batch),
from(splitter).to(d1)
.with(3, batch{{"key1", "a"}, {"key1", "b"}, {"key1", "c"}}, 2));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, 2));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(3, 2));
expect((stream_msg::ack_batch), from(splitter).to(src2).with(5, 0));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, _, 2));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(3, _, 2));
expect((stream_msg::ack_batch), from(splitter).to(src2).with(5, _, 0));
// Second batch.
expect((stream_msg::batch),
from(src2).to(splitter)
......@@ -319,9 +319,9 @@ CAF_TEST(fork_setup) {
from(splitter).to(d1).with(1, batch{{"key1", "d"}}, 3));
expect((stream_msg::batch),
from(splitter).to(d2).with(2, batch{{"key2", "c"}, {"key2", "d"}}, 3));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(1, 3));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, 3));
expect((stream_msg::ack_batch), from(splitter).to(src2).with(3, 1));
expect((stream_msg::ack_batch), from(d1).to(splitter).with(1, _, 3));
expect((stream_msg::ack_batch), from(d2).to(splitter).with(2, _, 3));
expect((stream_msg::ack_batch), from(splitter).to(src2).with(3, _, 1));
// Source is done, splitter remains open.
expect((stream_msg::close), from(src2).to(splitter).with());
CAF_REQUIRE(!sched.has_job());
......