Commit ada14c6f authored by Dominik Charousset

Refine bounded buffer API, add logging and tests

parent 2b9fbe1a
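At a glance, a sketch of the consumer-side API change based on the diff below (illustrative only; `buf`, `demand`, and `obs` are placeholders, and the policy objects are assumed to live in caf::async as the calls further down suggest): the callback-based `consume` becomes an observer-based `pull` that also reports how many items it consumed.

// Before this commit (removed): policy object plus separate callbacks.
bool done = buf->consume(caf::async::delay_errors, demand,
                         [](caf::span<const int> items) { /* process items */ },
                         [](const caf::error& what) { /* handle the error */ });
// After this commit (added): policy object plus an observer that provides
// on_next, on_complete, and on_error.
auto [pull_again, consumed] = buf->pull(caf::async::delay_errors, demand, obs);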
......@@ -587,34 +587,6 @@ public:
return res;
}
/// Creates a new, cooperatively scheduled `flow::coordinator`. The returned
/// coordinator is constructed but has not been added to the scheduler yet to
/// allow the caller to set up flows.
/// @returns A pointer to the new coordinator and a function object that the
/// caller must invoke to launch the coordinator. After the
/// coordinator has started running, the caller *must not* access the
/// pointer again.
template <class Impl, spawn_options = no_spawn_options, class... Ts>
auto make_flow_coordinator(Ts&&... xs) {
static_assert(std::is_base_of_v<scheduled_actor, Impl>,
"make_flow_coordinator only supports scheduled actors ATM");
CAF_SET_LOGGER_SYS(this);
actor_config cfg{dummy_execution_unit(), nullptr};
auto res = make_actor<Impl>(next_actor_id(), node(), this, cfg,
std::forward<Ts>(xs)...);
auto ptr = static_cast<Impl*>(actor_cast<abstract_actor*>(res));
#ifdef CAF_ENABLE_ACTOR_PROFILER
profiler_add_actor(*ptr, cfg.parent);
#endif
auto launch = [res, host{cfg.host}] {
// Note: we pass `res` to this lambda instead of `ptr` to keep a strong
// reference to the actor.
static_cast<Impl*>(actor_cast<abstract_actor*>(res))
->launch(host, false, false);
};
return std::make_tuple(ptr, launch);
}
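For context, a hypothetical usage of the removed helper, following its doc comment above; `sys`, `my_coordinator`, and `set_up_flows` are made-up names for the sketch.

// Hypothetical usage of the (removed) make_flow_coordinator helper.
auto [self, launch] = sys.make_flow_coordinator<my_coordinator>();
self->set_up_flows(); // safe: the coordinator is not scheduled yet
launch();             // after this call, `self` must not be accessed again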
/// Creates a new, cooperatively scheduled actor. The returned actor is
/// constructed but has not been added to the scheduler yet to allow the
/// caller to set up any additional logic on the actor before it starts.
......
......@@ -40,14 +40,6 @@ struct delay_errors_t {
/// @relates delay_errors_t
constexpr auto delay_errors = delay_errors_t{};
/// Policy type for having `consume` treat errors as ordinary shutdowns.
struct ignore_errors_t {
static constexpr bool calls_on_error = false;
};
/// @relates ignore_errors_t
constexpr auto ignore_errors = ignore_errors_t{};
/// A bounded buffer for transmitting events from one producer to one consumer.
template <class T>
class bounded_buffer : public ref_counted {
......@@ -68,6 +60,7 @@ public:
/// Appends to the buffer and calls `on_producer_wakeup` on the consumer if
/// the buffer becomes non-empty.
/// @returns the remaining capacity after inserting the items.
size_t push(span<const T> items) {
std::unique_lock guard{mtx_};
CAF_ASSERT(producer_ != nullptr);
......@@ -84,37 +77,30 @@ public:
return push(make_span(&item, 1));
}
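A brief producer-side sketch (not part of the diff; `BufferPtr` and the element type are placeholders) showing how the remaining capacity returned by push() can serve as a back-pressure hint.

// Illustrative producer step: push a chunk and treat the returned remaining
// capacity as a simple back-pressure signal.
template <class BufferPtr>
void produce_chunk(BufferPtr& buf, caf::span<const int> chunk) {
  if (buf->push(chunk) == 0) {
    // Buffer is full; wait for on_consumer_demand before pushing more items.
  }
}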
/// Consumes up to `demand` items from the buffer with `on_next`, ignoring any
/// errors set by the producer.
/// @tparam Policy Either `instant_error_t` or `delay_error_t`. Instant
/// error propagation requires also passing `on_error` handler.
/// Delaying errors without passing an `on_error` handler
/// effectively suppresses errors.
/// @returns `true` if no more elements are available, `false` otherwise.
template <class Policy, class OnNext, class OnError = unit_t>
bool
consume(Policy, size_t demand, OnNext on_next, OnError on_error = OnError{}) {
/// Consumes up to `demand` items from the buffer.
/// @tparam Policy Either `prioritize_errors_t` (report a pending error before
///                consuming buffered items) or `delay_errors_t` (deliver
///                buffered items first and report the error afterwards).
/// @returns a pair whose first element is `true` if the consumer may call
///          `pull` again and `false` otherwise, and whose second element is
///          the number of consumed items. When returning `false`, the
///          function has called `on_complete` or `on_error` on the observer.
template <class Policy, class Observer>
std::pair<bool, size_t> pull(Policy, size_t demand, Observer& dst) {
static constexpr size_t local_buf_size = 16;
if constexpr (Policy::calls_on_error)
static_assert(!std::is_same_v<OnError, unit_t>,
"Policy requires an on_error handler");
else
static_assert(std::is_same_v<OnError, unit_t>,
"Policy prohibits an on_error handler");
T local_buf[local_buf_size];
std::unique_lock guard{mtx_};
CAF_ASSERT(demand > 0);
CAF_ASSERT(consumer_ != nullptr);
if constexpr (std::is_same_v<Policy, prioritize_errors_t>) {
if (err_) {
on_error(err_);
consumer_ = nullptr;
return true;
dst.on_error(err_);
return {false, 0};
}
}
auto next_n = [this, &demand] {
return std::min({local_buf_size, demand, size()});
};
size_t consumed = 0;
for (auto n = next_n(); n > 0; n = next_n()) {
auto first = buf_ + rd_pos_;
std::move(first, first + n, local_buf);
......@@ -123,19 +109,20 @@ public:
shift_elements();
signal_demand(n);
guard.unlock();
on_next(make_span(local_buf, n));
dst.on_next(make_span(local_buf, n));
demand -= n;
consumed += n;
guard.lock();
}
if (!empty() || !closed_) {
return false;
return {true, consumed};
} else {
if constexpr (std::is_same_v<Policy, delay_errors_t>) {
if (err_)
on_error(err_);
}
consumer_ = nullptr;
return true;
if (err_)
dst.on_error(err_);
else
dst.on_complete();
return {false, consumed};
}
}
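A minimal consumer-side sketch of the new pull API (illustrative only; the observer type, the demand of 32, and the delay_errors policy choice are assumptions, as are the exact header paths):

#include <vector>

#include "caf/error.hpp"
#include "caf/span.hpp"

// Any type with on_next/on_complete/on_error works as the observer for pull().
struct sketch_observer {
  std::vector<int> items;
  bool finished = false;
  void on_next(caf::span<const int> xs) {
    items.insert(items.end(), xs.begin(), xs.end());
  }
  void on_complete() {
    finished = true;
  }
  void on_error(const caf::error&) {
    finished = true;
  }
};

// One drain step, e.g. triggered by the consumer's on_producer_wakeup.
template <class BufferPtr>
void drain_once(BufferPtr& buf, sketch_observer& obs) {
  if (!buf->pull(caf::async::delay_errors, 32, obs).first) {
    // pull() already delivered on_complete or on_error; drop the buffer.
    buf = nullptr;
  }
}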
......@@ -145,34 +132,50 @@ public:
return !empty();
}
/// Checks whether there is data available or whether the producer has closed
/// or aborted the flow.
bool has_consumer_event() const noexcept {
std::unique_lock guard{mtx_};
return !empty() || closed_;
}
/// Returns how many items are currently available.
size_t available() const noexcept {
std::unique_lock guard{mtx_};
return size();
}
/// Closes the buffer by request of the producer.
void close() {
std::unique_lock guard{mtx_};
CAF_ASSERT(producer_ != nullptr);
closed_ = true;
producer_ = nullptr;
if (empty() && consumer_)
consumer_->on_producer_wakeup();
if (producer_) {
closed_ = true;
producer_ = nullptr;
if (empty() && consumer_)
consumer_->on_producer_wakeup();
}
}
/// Closes the buffer and signals an error by request of the producer.
void abort(error reason) {
std::unique_lock guard{mtx_};
closed_ = true;
err_ = std::move(reason);
producer_ = nullptr;
if (empty() && consumer_) {
consumer_->on_producer_wakeup();
consumer_ = nullptr;
if (producer_) {
closed_ = true;
err_ = std::move(reason);
producer_ = nullptr;
if (empty() && consumer_)
consumer_->on_producer_wakeup();
}
}
/// Closes the buffer by request of the consumer.
void cancel() {
std::unique_lock guard{mtx_};
if (producer_)
producer_->on_consumer_cancel();
consumer_ = nullptr;
if (consumer_) {
consumer_ = nullptr;
if (producer_)
producer_->on_consumer_cancel();
}
}
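The three termination paths above in a compact sketch (illustrative only; `BufferPtr` is a placeholder for the intrusive pointer type): the producer ends the stream with close() or abort(), the consumer opts out with cancel().

// Producer side: close() ends the stream normally, abort() ends it with an
// error. Consumer side: cancel() tells the producer to stop.
template <class BufferPtr>
void finish_producing(BufferPtr& buf, const caf::error& maybe_err) {
  if (maybe_err)
    buf->abort(maybe_err); // consumer eventually observes on_error
  else
    buf->close();          // consumer eventually observes on_complete
}

template <class BufferPtr>
void stop_consuming(BufferPtr& buf) {
  buf->cancel(); // producer receives on_consumer_cancel
}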
void set_consumer(consumer_ptr consumer) {
......
......@@ -55,6 +55,8 @@ public:
virtual void on_cancel(observer_impl<T>* sink) = 0;
observable as_observable() noexcept;
protected:
disposable do_subscribe(observer_impl<T>* snk);
};
......@@ -267,11 +269,17 @@ private:
intrusive_ptr<impl> pimpl_;
};
template <class T>
observable<T> observable<T>::impl::as_observable() noexcept {
return observable<T>{intrusive_ptr{this}};
}
template <class T>
disposable observable<T>::impl::do_subscribe(observer_impl<T>* snk) {
auto ptr = make_counted<sub_impl>(ctx(), this, snk);
snk->on_subscribe(subscription{ptr});
return disposable{std::move(ptr)};
snk->on_subscribe(subscription{make_counted<sub_impl>(ctx(), this, snk)});
// Note: we do NOT return the subscription here, because it remains private
// to the observer. Outside code must call dispose() on the observer instead.
return disposable{intrusive_ptr<typename disposable::impl>{snk}};
}
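From the caller's perspective, the note above means the returned handle disposes the observer rather than the subscription; a hedged sketch (placeholders `some_observable` and `my_observer`, assuming subscribe() forwards to do_subscribe as elsewhere in the diff):

// Illustrative only: the disposable returned by subscribe() refers to the
// observer implementation, so disposing it shuts down the observer rather
// than cancelling the subscription directly.
auto hdl = some_observable.subscribe(my_observer);
// ... later ...
hdl.dispose();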
template <class T>
......@@ -353,17 +361,9 @@ public:
return this;
}
observer<In> as_observer() noexcept {
return observer<In>{as_observer_ptr()};
}
observable_impl<In>* as_observable_ptr() noexcept {
return this;
}
observable<In> as_observable() noexcept {
return observable<In>{as_observable_ptr()};
}
};
explicit processor(intrusive_ptr<impl> pimpl) noexcept
......@@ -522,6 +522,7 @@ public:
// -- implementation of disposable::impl -------------------------------------
void dispose() override {
CAF_LOG_TRACE("");
if (!completed_) {
completed_ = true;
buf_.clear();
......@@ -551,6 +552,7 @@ public:
}
void on_request(observer_impl<T>* sink, size_t n) override {
CAF_LOG_TRACE(CAF_ARG(n));
if (auto i = find(sink); i != outputs_.end()) {
i->demand += n;
update_max_demand();
......@@ -559,6 +561,7 @@ public:
}
void on_cancel(observer_impl<T>* sink) override {
CAF_LOG_TRACE("");
if (auto i = find(sink); i != outputs_.end()) {
outputs_.erase(i);
if (outputs_.empty()) {
......@@ -605,19 +608,25 @@ public:
/// Stops the source, but allows observers to still consume buffered data.
virtual void shutdown() {
CAF_LOG_TRACE("");
if (!completed_) {
completed_ = true;
if (done()) {
CAF_LOG_DEBUG("observable done, call on_complete on" << outputs_.size()
<< "outputs");
for (auto& out : outputs_)
out.sink.on_complete();
outputs_.clear();
do_on_complete();
} else {
CAF_LOG_DEBUG("not done yet, delay on_complete calls");
}
}
}
/// Stops the source and drops any remaining data.
virtual void abort(const error& reason) {
CAF_LOG_TRACE(CAF_ARG(reason));
if (!completed_) {
completed_ = true;
for (auto& out : outputs_)
......@@ -629,8 +638,10 @@ public:
/// Tries to push data from the buffer downstream.
void try_push() {
CAF_LOG_TRACE("");
if (!batch_.empty()) {
// Can only be true if a sink calls try_push in on_next.
// Shortcuts nested calls to try_push. Can only be true if a sink calls
// try_push in on_next.
return;
}
size_t batch_size = std::min(desired_capacity_, defaults::flow::batch_size);
......@@ -752,6 +763,12 @@ public:
// nop
}
// -- disambiguation ---------------------------------------------------------
observable<Out> as_observable() noexcept {
return super::as_observable();
}
// -- implementation of disposable::impl -------------------------------------
coordinator* ctx() const noexcept override {
......@@ -1062,7 +1079,7 @@ template <class T>
template <class OnNext>
disposable observable<T>::for_each(OnNext on_next) {
auto obs = make_observer(std::move(on_next));
subscribe(obs);
subscribe(observer<T>{obs});
return std::move(obs).as_disposable();
}
......@@ -1112,6 +1129,7 @@ public:
}
void on_complete() override {
CAF_LOG_TRACE("");
if (sub) {
sub = nullptr;
parent->forwarder_completed(this);
......@@ -1120,6 +1138,7 @@ public:
}
void on_error(const error& what) override {
CAF_LOG_TRACE(CAF_ARG(what));
if (sub) {
sub = nullptr;
parent->forwarder_failed(this, what);
......@@ -1128,6 +1147,7 @@ public:
}
void on_subscribe(subscription new_sub) override {
CAF_LOG_TRACE("");
if (!sub) {
sub = std::move(new_sub);
parent->forwarder_subscribed(this, sub);
......@@ -1137,11 +1157,13 @@ public:
}
void on_next(span<const T> items) override {
CAF_LOG_TRACE(CAF_ARG2("items.size", items.size()));
if (parent)
parent->on_batch(async::make_batch(items), this);
}
void dispose() override {
CAF_LOG_TRACE("");
on_complete();
}
......@@ -1167,6 +1189,7 @@ public:
}
disposable add(observable<T> source, intrusive_ptr<forwarder> fwd) {
CAF_LOG_TRACE("");
forwarders_.emplace_back(fwd);
return source.subscribe(observer<T>{std::move(fwd)});
}
......@@ -1182,6 +1205,7 @@ public:
}
void dispose() override {
CAF_LOG_TRACE("");
inputs_.clear();
std::vector<fwd_ptr> fwds;
fwds.swap(forwarders_);
......@@ -1191,6 +1215,7 @@ public:
}
void cancel_inputs() {
CAF_LOG_TRACE("");
if (!this->completed_) {
std::vector<fwd_ptr> fwds;
fwds.swap(forwarders_);
......@@ -1209,26 +1234,34 @@ public:
}
void delay_error(bool value) {
CAF_LOG_TRACE(CAF_ARG(value));
flags_.delay_error = value;
}
void shutdown_on_last_complete(bool value) {
CAF_LOG_TRACE(CAF_ARG(value));
flags_.shutdown_on_last_complete = value;
if (value && done())
this->shutdown();
if (value && forwarders_.empty()) {
if (delayed_error_)
this->abort(delayed_error_);
else
this->shutdown();
}
}
void on_error(const error& reason) {
void on_error(const error& what) {
CAF_LOG_TRACE(CAF_ARG(what));
if (!flags_.delay_error) {
abort(reason);
abort(what);
return;
}
if (!delayed_error_)
delayed_error_ = reason;
delayed_error_ = what;
}
protected:
void abort(const error& reason) override {
CAF_LOG_TRACE(CAF_ARG(reason));
super::abort(reason);
inputs_.clear();
forwarders_.clear();
......@@ -1238,6 +1271,7 @@ private:
using fwd_ptr = intrusive_ptr<forwarder>;
void pull(size_t n) override {
CAF_LOG_TRACE(CAF_ARG(n));
while (n > 0 && !inputs_.empty()) {
auto& input = inputs_[0];
auto m = std::min(input.buf.size() - input.offset, n);
......@@ -1256,30 +1290,35 @@ private:
}
void on_batch(async::batch buf, fwd_ptr src) {
CAF_LOG_TRACE("");
inputs_.emplace_back(buf, src);
this->try_push();
}
void forwarder_subscribed(forwarder* ptr, subscription& sub) {
CAF_LOG_TRACE("");
if (!flags_.concat_mode || (!forwarders_.empty() && forwarders_[0] == ptr))
sub.request(defaults::flow::buffer_size);
}
void forwarder_failed(forwarder* ptr, const error& reason) {
void forwarder_failed(forwarder* ptr, const error& what) {
CAF_LOG_TRACE(CAF_ARG(what));
if (!flags_.delay_error) {
abort(reason);
abort(what);
return;
}
if (!delayed_error_)
delayed_error_ = reason;
delayed_error_ = what;
forwarder_completed(ptr);
}
void forwarder_completed(forwarder* ptr) {
CAF_LOG_TRACE("");
auto is_ptr = [ptr](auto& x) { return x == ptr; };
auto i = std::find_if(forwarders_.begin(), forwarders_.end(), is_ptr);
if (i != forwarders_.end()) {
forwarders_.erase(i);
CAF_LOG_DEBUG(forwarders_.size() << "forwarders remain");
if (forwarders_.empty()) {
if (flags_.shutdown_on_last_complete) {
if (delayed_error_)
......@@ -1472,6 +1511,8 @@ public:
}
~observable_buffer_impl() {
if (buf_)
buf_->cancel();
this->ctx()->deref_coordinator();
}
......@@ -1535,7 +1576,7 @@ public:
}
}
// -- implementation of consumer ---------------------------------------------
// -- implementation of async::consumer: these may get called concurrently ---
void on_producer_ready() override {
// nop
......@@ -1563,33 +1604,35 @@ protected:
private:
void pull() {
CAF_LOG_TRACE("");
if (!buf_ || pulling_ || !dst_ || demand_ == 0)
if (!buf_ || pulling_ || !dst_)
return;
pulling_ = true;
auto fin = buf_->consume(
async::prioritize_errors, demand_,
[this](span<const value_type> items) {
struct decorator {
size_t* demand;
typename observer<value_type>::impl* dst;
void on_next(span<const value_type> items) {
CAF_LOG_TRACE(CAF_ARG(items));
CAF_ASSERT(!items.empty());
CAF_ASSERT(demand_ >= items.empty());
demand_ -= items.size();
dst_.on_next(items);
},
[this](const error& what) {
if (dst_) {
dst_.on_error(what);
dst_ = nullptr;
}
buf_ = nullptr;
});
pulling_ = false;
if (fin && buf_) {
buf_ = nullptr;
if (dst_) {
dst_.on_complete();
dst_ = nullptr;
CAF_ASSERT(*demand >= items.size());
*demand -= items.size();
CAF_LOG_DEBUG("got" << items.size() << "items");
dst->on_next(items);
}
void on_complete() {
CAF_LOG_TRACE("");
dst->on_complete();
}
void on_error(const error& what) {
CAF_LOG_TRACE(CAF_ARG(what));
dst->on_error(what);
}
};
decorator dst{&demand_, dst_.ptr()};
if (!buf_->pull(async::prioritize_errors, demand_, dst).first) {
buf_ = nullptr;
dst_ = nullptr;
}
pulling_ = false;
}
intrusive_ptr<observable_buffer_impl> strong_ptr() {
......
......@@ -38,7 +38,7 @@ public:
virtual void on_error(const error& what) = 0;
observer as_observer() {
observer as_observer() noexcept {
return observer{intrusive_ptr<impl>(this)};
}
};
......@@ -148,36 +148,38 @@ class buffer_writer_impl : public ref_counted,
public observer_impl<typename Buffer::value_type>,
public async::producer {
public:
// -- member types -----------------------------------------------------------
using buffer_ptr = intrusive_ptr<Buffer>;
using value_type = typename Buffer::value_type;
// -- friends ----------------------------------------------------------------
CAF_INTRUSIVE_PTR_FRIENDS(buffer_writer_impl)
// -- constructors, destructors, and assignment operators --------------------
buffer_writer_impl(coordinator* ctx, buffer_ptr buf)
: ctx_(ctx), buf_(std::move(buf)) {
CAF_ASSERT(ctx_ != nullptr);
CAF_ASSERT(buf_ != nullptr);
}
void on_consumer_ready() override {
// nop
~buffer_writer_impl() {
if (buf_)
buf_->close();
}
void on_consumer_cancel() override {
// -- implementation of disposable::impl -------------------------------------
void dispose() override {
CAF_LOG_TRACE("");
ctx_->schedule_fn([ptr{strong_ptr()}] {
CAF_LOG_TRACE("");
ptr->on_cancel();
});
on_complete();
}
void on_consumer_demand(size_t demand) override {
CAF_LOG_TRACE(CAF_ARG(demand));
ctx_->schedule_fn([ptr{strong_ptr()}, demand] { //
CAF_LOG_TRACE(CAF_ARG(demand));
ptr->on_demand(demand);
});
bool disposed() const noexcept override {
return buf_ == nullptr;
}
void ref_disposable() const noexcept final {
......@@ -188,13 +190,7 @@ public:
this->deref();
}
void ref_producer() const noexcept final {
this->ref();
}
void deref_producer() const noexcept final {
this->deref();
}
// -- implementation of observer<T>::impl ------------------------------------
void on_next(span<const value_type> items) override {
CAF_LOG_TRACE(CAF_ARG(items));
......@@ -227,18 +223,39 @@ public:
sub_ = std::move(sub);
sub_.request(buf_->capacity());
} else {
CAF_LOG_DEBUG("already have a subscription");
CAF_LOG_DEBUG("already have a subscription or buffer no longer valid");
sub.cancel();
}
}
void dispose() override {
// -- implementation of async::producer: these may get called concurrently ---
void on_consumer_ready() override {
// nop
}
void on_consumer_cancel() override {
CAF_LOG_TRACE("");
on_complete();
ctx_->schedule_fn([ptr{strong_ptr()}] {
CAF_LOG_TRACE("");
ptr->on_cancel();
});
}
bool disposed() const noexcept override {
return buf_ == nullptr;
void on_consumer_demand(size_t demand) override {
CAF_LOG_TRACE(CAF_ARG(demand));
ctx_->schedule_fn([ptr{strong_ptr()}, demand] { //
CAF_LOG_TRACE(CAF_ARG(demand));
ptr->on_demand(demand);
});
}
void ref_producer() const noexcept final {
this->ref();
}
void deref_producer() const noexcept final {
this->deref();
}
private:
......
......@@ -233,6 +233,8 @@ bool scheduled_actor::cleanup(error&& fail_state, execution_unit* host) {
get_downstream_queue().cleanup();
// Cancel any active flow.
while (!watched_disposables_.empty()) {
CAF_LOG_DEBUG("clean up" << watched_disposables_.size()
<< "remaining disposables");
for (auto& ptr : watched_disposables_)
ptr.dispose();
watched_disposables_.clear();
......@@ -1243,6 +1245,7 @@ void scheduled_actor::run_actions() {
}
void scheduled_actor::update_watched_disposables() {
CAF_LOG_TRACE("");
auto disposed = [](auto& hdl) { return hdl.disposed(); };
auto& xs = watched_disposables_;
if (auto e = std::remove_if(xs.begin(), xs.end(), disposed); e != xs.end()) {
......
......@@ -27,13 +27,16 @@ SCENARIO("merge operators combine inputs") {
GIVEN("two observables") {
WHEN("merging them to a single publisher") {
THEN("the observer receives the output of both sources") {
auto on_complete_called = false;
auto outputs = std::vector<int>{};
auto r1 = ctx->make_observable().repeat(11).take(113);
auto r2 = ctx->make_observable().repeat(22).take(223);
flow::merge(std::move(r1), std::move(r2)).for_each([&outputs](int x) {
outputs.emplace_back(x);
});
flow::merge(std::move(r1), std::move(r2))
.for_each([&outputs](int x) { outputs.emplace_back(x); },
[](const error& err) { FAIL("unexpected error:" << err); },
[&on_complete_called] { on_complete_called = true; });
ctx->run();
CHECK(on_complete_called);
if (CHECK_EQ(outputs.size(), 336u)) {
std::sort(outputs.begin(), outputs.end());
CHECK(std::all_of(outputs.begin(), outputs.begin() + 113,
......@@ -46,4 +49,36 @@ SCENARIO("merge operators combine inputs") {
}
}
SCENARIO("mergers can delay shutdown") {
GIVEN("a merger with two inputs and shutdown_on_last_complete set to false") {
WHEN("both inputs completed") {
THEN("the merger only closes after enabling shutdown_on_last_complete") {
auto on_complete_called = false;
auto outputs = std::vector<int>{};
auto merger = make_counted<flow::merger_impl<int>>(ctx.get());
merger->shutdown_on_last_complete(false);
merger->add(ctx->make_observable().repeat(11).take(113));
merger->add(ctx->make_observable().repeat(22).take(223));
merger //
->as_observable()
.for_each([&outputs](int x) { outputs.emplace_back(x); },
[](const error& err) { FAIL("unexpected error:" << err); },
[&on_complete_called] { on_complete_called = true; });
ctx->run();
CHECK(!on_complete_called);
if (CHECK_EQ(outputs.size(), 336u)) {
std::sort(outputs.begin(), outputs.end());
CHECK(std::all_of(outputs.begin(), outputs.begin() + 113,
[](int x) { return x == 11; }));
CHECK(std::all_of(outputs.begin() + 113, outputs.end(),
[](int x) { return x == 22; }));
}
merger->shutdown_on_last_complete(true);
ctx->run();
CHECK(on_complete_called);
}
}
}
}
END_FIXTURE_SCOPE()