Commit 3e1a34ff authored by Dominik Charousset's avatar Dominik Charousset Committed by Dominik Charousset

Pass index and queue from WDRR queue to consumer

parent 70289e0d
......@@ -174,6 +174,29 @@ public:
}
};
/// Consumes mailbox elements for `blocking_actor::receive_impl`, visiting
/// each queue of the multiplexed mailbox. Aggregate-initialized as
/// `mailbox_visitor{self, done, rcc, mid, bhvr}`; keep member order intact.
struct mailbox_visitor {
// The actor whose mailbox is being drained.
blocking_actor* self;
// Set to `true` once the receive loop's pre/post condition signals completion.
bool& done;
// User-supplied condition controlling the surrounding receive loop.
receive_cond& rcc;
// Expected message ID; when valid, non-matching elements are not consumed.
message_id mid;
// Behavior used to dispatch on consumed mailbox elements.
detail::blocking_behavior& bhvr;
/// Skips all streaming-related messages.
inline intrusive::task_result
operator()(size_t, mailbox_policy::stream_queue&, mailbox_element&) {
return intrusive::task_result::skip;
}
// Dispatches messages with high and normal priority to the same handler.
// The queue index and queue reference are ignored; only the element matters.
template <class Queue>
intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) {
return (*this)(x);
}
// Consumes `x`; defined out-of-line in blocking_actor.cpp.
intrusive::task_result operator()(mailbox_element& x);
};
// -- constructors and destructors -------------------------------------------
blocking_actor(actor_config& cfg);
......
......@@ -21,17 +21,10 @@
#include <cstdint>
#include "caf/error.hpp"
#include "caf/fwd.hpp"
#include "caf/exit_reason.hpp"
namespace caf {
class actor_addr;
class message_id;
class local_actor;
class mailbox_element;
} // namespace caf
#include "caf/intrusive/task_result.hpp"
namespace caf {
namespace detail {
......@@ -41,6 +34,13 @@ struct sync_request_bouncer {
explicit sync_request_bouncer(error r);
void operator()(const strong_actor_ptr& sender, const message_id& mid) const;
void operator()(const mailbox_element& e) const;
/// Adapter overload for multiplexed-queue visitation: ignores the queue
/// index and the queue itself, bounces the element via the unary overload,
/// and tells the queue to keep going.
template <class IndexType, class QueueType>
intrusive::task_result operator()(const IndexType&, const QueueType&,
                                  const mailbox_element& element) const {
  (*this)(element);
  return intrusive::task_result::resume;
}
};
} // namespace detail
......
......@@ -132,6 +132,14 @@ enum class atom_value : uint64_t;
using actor_id = uint64_t;
// -- intrusive containers -----------------------------------------------------
namespace intrusive {
enum class task_result;
} // namespace intrusive
// -- marker classes for mixins ------------------------------------------------
namespace mixin {
......
......@@ -45,7 +45,7 @@ public:
// -- constructors, destructors, and assignment operators --------------------
drr_queue(const policy_type& p) : super(p), deficit_(0) {
drr_queue(policy_type p) : super(std::move(p)), deficit_(0) {
// nop
}
......
......@@ -61,7 +61,7 @@ public:
// -- constructors, destructors, and assignment operators -------------------
task_queue(const policy_type& p) : old_last_(nullptr), policy_(p) {
task_queue(policy_type p) : old_last_(nullptr), policy_(std::move(p)) {
init();
}
......
......@@ -21,6 +21,7 @@
#define CAF_INTRUSIVE_WDRR_FIXED_MULTIPLEXED_QUEUE_HPP
#include <tuple>
#include <type_traits>
#include <utility>
#include "caf/detail/type_traits.hpp"
......@@ -46,6 +47,9 @@ public:
using task_size_type = typename policy_type::task_size_type;
template <size_t I>
using index = std::integral_constant<size_t, I>;
static constexpr size_t num_queues = sizeof...(Qs) + 1;
template <class... Ps>
......@@ -80,8 +84,7 @@ public:
/// Run a new round with `quantum`, dispatching all tasks to `consumer`.
/// @returns `true` if at least one item was consumed, `false` otherwise.
template <class F>
bool new_round(long quantum,
F& f) noexcept(noexcept(f(std::declval<mapped_type&>()))) {
bool new_round(long quantum, F& f) {
return new_round_recursion<0>(quantum, f) != 0;
}
......@@ -148,10 +151,13 @@ private:
template <size_t I, class F>
detail::enable_if_t<I != num_queues, int>
new_round_recursion(deficit_type quantum,
F& f) noexcept(noexcept(f(std::declval<mapped_type&>()))) {
new_round_recursion(deficit_type quantum, F& f) {
auto& q = std::get<I>(qs_);
if (q.new_round(policy_.quantum(q, quantum), f))
auto g = [&](mapped_type& x) {
index<I> id;
return f(id, q, x);
};
if (q.new_round(policy_.quantum(q, quantum), g))
return 1 + new_round_recursion<I + 1>(quantum, f);
return 0 + new_round_recursion<I + 1>(quantum, f);
}
......
......@@ -144,6 +144,28 @@ public:
/// @endcond
// -- nested classes ---------------------------------------------------------
/// Consumes mailbox elements during `scheduled_actor::resume`, visiting each
/// queue of the multiplexed mailbox. Aggregate-initialized as
/// `mailbox_visitor{self, result, handled_msgs, max_throughput}`; keep member
/// order intact.
struct mailbox_visitor {
// The actor currently being resumed.
scheduled_actor* self;
// Out-parameter: set to `resume_result::done` when the actor terminates.
resume_result& result;
// Number of messages handled so far in this resume round; compared against
// `max_throughput` by the caller's consumption loop.
size_t& handled_msgs;
// Upper bound on messages to handle before yielding back to the scheduler.
size_t max_throughput;
/// Handles elements of the stream queue. NOTE(review): the out-of-line
/// definition is currently a TODO stub returning `task_result::resume`,
/// i.e., it does not actually skip streaming messages yet.
intrusive::task_result operator()(size_t, mailbox_policy::stream_queue&,
mailbox_element&);
// Dispatches messages with high and normal priority to the same handler.
// The queue index and queue reference are ignored; only the element matters.
template <class Queue>
intrusive::task_result operator()(size_t, Queue&, mailbox_element& x) {
return (*this)(x);
}
// Consumes `x`; defined out-of-line in scheduled_actor.cpp.
intrusive::task_result operator()(mailbox_element& x);
};
// -- static helper functions ------------------------------------------------
static void default_error_handler(pointer ptr, error& x);
......
......@@ -148,12 +148,10 @@ void blocking_actor::fail_state(error err) {
fail_state_ = std::move(err);
}
void blocking_actor::receive_impl(receive_cond& rcc,
message_id mid,
detail::blocking_behavior& bhvr) {
CAF_LOG_TRACE(CAF_ARG(mid));
detail::default_invoke_result_visitor<blocking_actor> visitor{this};
bool done = false;
intrusive::task_result
blocking_actor::mailbox_visitor:: operator()(mailbox_element& x) {
CAF_LOG_TRACE(CAF_ARG(x));
CAF_LOG_RECEIVE_EVENT((&x));
auto check_if_done = [&]() -> intrusive::task_result {
// Stop consuming items when reaching the end of the user-defined receive
// loop either via post or pre condition.
......@@ -162,9 +160,6 @@ void blocking_actor::receive_impl(receive_cond& rcc,
done = true;
return intrusive::task_result::stop;
};
// Our mailbox element consumer for the mailbox.
auto f = [&](mailbox_element& x) -> intrusive::task_result {
CAF_LOG_RECEIVE_EVENT((&x));
// Skip messages that don't match our message ID.
if (mid.valid()) {
if (mid != x.mid) {
......@@ -177,18 +172,21 @@ void blocking_actor::receive_impl(receive_cond& rcc,
}
// Automatically unlink from actors after receiving an exit.
if (x.content().match_elements<exit_msg>())
unlink_from(x.content().get_as<exit_msg>(0).source);
self->unlink_from(x.content().get_as<exit_msg>(0).source);
// Blocking actors can nest receives => push/pop `current_element_`
auto prev_element = current_element_;
current_element_ = &x;
auto g = detail::make_scope_guard([&] { current_element_ = prev_element; });
auto prev_element = self->current_element_;
self->current_element_ = &x;
auto g = detail::make_scope_guard([&] {
self->current_element_ = prev_element;
});
// Dispatch on x.
detail::default_invoke_result_visitor<blocking_actor> visitor{self};
switch (bhvr.nested(visitor, x.content())) {
default:
return check_if_done();
case match_case::no_match:
{ // Blocking actors can have fallback handlers for catch-all rules.
auto sres = bhvr.fallback(*current_element_);
auto sres = bhvr.fallback(*self->current_element_);
if (sres.flag != rt_skip) {
visitor.visit(sres);
CAF_LOG_FINALIZE_EVENT();
......@@ -202,7 +200,7 @@ void blocking_actor::receive_impl(receive_cond& rcc,
x.move_content_to_message());
mailbox_element_view<error> tmp{std::move(x.sender), x.mid,
std::move(x.stages), err};
current_element_ = &tmp;
self->current_element_ = &tmp;
bhvr.nested(tmp.content());
CAF_LOG_FINALIZE_EVENT();
return check_if_done();
......@@ -212,8 +210,16 @@ void blocking_actor::receive_impl(receive_cond& rcc,
CAF_LOG_SKIP_EVENT();
return intrusive::task_result::skip;
}
};
}
void blocking_actor::receive_impl(receive_cond& rcc,
message_id mid,
detail::blocking_behavior& bhvr) {
CAF_LOG_TRACE(CAF_ARG(mid));
// Set to `true` by the visitor when done.
bool done = false;
// Make sure each receive sees all mailbox elements.
mailbox_visitor f{this, done, rcc, mid, bhvr};
mailbox().flush_cache();
// Check pre-condition once before entering the message consumption loop. The
// consumer performs any future check on pre and post conditions via
......
......@@ -130,11 +130,7 @@ bool local_actor::cleanup(error&& fail_state, execution_unit* host) {
mailbox_.close();
// TODO: messages that are stuck in the cache can get lost
detail::sync_request_bouncer bounce{fail_state};
auto f = [&](mailbox_element& x) {
bounce(x);
return intrusive::task_result::resume;
};
while (mailbox_.queue().new_round(1000, f))
while (mailbox_.queue().new_round(1000, bounce))
; // nop
}
// tell registry we're done
......
......@@ -218,19 +218,15 @@ void scheduled_actor::intrusive_ptr_release_impl() {
intrusive_ptr_release(ctrl());
}
resumable::resume_result
scheduled_actor::resume(execution_unit* ctx, size_t max_throughput) {
CAF_PUSH_AID(id());
if (!activate(ctx))
return resume_result::done;
size_t handled_msgs = 0;
auto reset_timeout_if_needed = [&] {
if (handled_msgs > 0 && !bhvr_stack_.empty())
request_timeout(bhvr_stack_.back().timeout());
};
auto result = resume_result::awaiting_message;
auto f = [&](mailbox_element& x) -> intrusive::task_result {
switch (reactivate(x)) {
/// Visits elements of the stream queue. Streaming support is not implemented
/// yet: this stub consumes every element unconditionally by returning
/// `resume` (it does not skip, despite the declaration's original comment).
intrusive::task_result scheduled_actor::mailbox_visitor::
operator()(size_t, mailbox_policy::stream_queue&, mailbox_element&) {
// TODO: implement me
return intrusive::task_result::resume;
}
intrusive::task_result
scheduled_actor::mailbox_visitor::operator()(mailbox_element& x) {
switch (self->reactivate(x)) {
case activation_result::terminated:
result = resume_result::done;
return intrusive::task_result::stop;
......@@ -243,7 +239,20 @@ scheduled_actor::resume(execution_unit* ctx, size_t max_throughput) {
default:
return intrusive::task_result::resume;
}
}
resumable::resume_result
scheduled_actor::resume(execution_unit* ctx, size_t max_throughput) {
CAF_PUSH_AID(id());
if (!activate(ctx))
return resume_result::done;
size_t handled_msgs = 0;
auto reset_timeout_if_needed = [&] {
if (handled_msgs > 0 && !bhvr_stack_.empty())
request_timeout(bhvr_stack_.back().timeout());
};
auto result = resume_result::awaiting_message;
mailbox_visitor f{this, result, handled_msgs, max_throughput};
mailbox_element_ptr ptr;
while (handled_msgs < max_throughput) {
if (!mailbox_.new_round(3, f)) {
......@@ -407,15 +416,15 @@ invoke_message_result scheduled_actor::consume(mailbox_element& x) {
auto ordinary_invoke = [](ptr_t, behavior& f, mailbox_element& in) -> bool {
return f(in.content()) != none;
};
auto stream_invoke = [](ptr_t, behavior&, mailbox_element&) -> bool {
/*
auto stream_invoke = [](ptr_t, behavior&, mailbox_element&) -> bool {
// The only legal stream message in a response is `stream_open`.
auto& var = in.content().get_as<stream_msg>(0).content;
if (holds_alternative<stream_msg::open>(var))
return p->handle_stream_msg(in, &f);
*/
return false;
};
*/
auto select_invoke_fun = [&]() -> fun_t {
return ordinary_invoke;
/*
......
......@@ -90,6 +90,19 @@ using queue_type = wdrr_fixed_multiplexed_queue<inode_policy,
nested_queue_type,
nested_queue_type>;
/// Visitor for `queue_type::new_round` that records each consumed element as
/// `<queue-index>:<value>`, comma-separated, e.g., "0:3,1:1,2:2".
struct fetch_helper {
  std::string result;

  /// Appends `I:x` to `result`; invoked once per consumed element with the
  /// compile-time index of the nested queue it came from.
  template <size_t I, class Queue>
  void operator()(std::integral_constant<size_t, I>, const Queue&, inode& x) {
    if (!result.empty())
      result += ',';
    result += to_string(I);
    result += ':';
    result += to_string(x);
  } // fixed: removed stray ';' after the member function body
};
struct fixture {
inode_policy policy;
queue_type queue{policy, policy, policy, policy};
......@@ -104,16 +117,17 @@ struct fixture {
fill(q, xs...);
}
std::string seq;
std::function<void (inode&)> f;
fixture() {
f = [&](inode& x) {
if (!seq.empty())
seq += ',';
seq += to_string(x);
std::string fetch(int quantum) {
std::string result;
auto f = [&](size_t id, drr_queue<inode_policy>&, inode& x) {
if (!result.empty())
result += ',';
result += to_string(id);
result += ':';
result += to_string(x);
};
queue.new_round(quantum, f);
return result;
}
};
......@@ -128,21 +142,22 @@ CAF_TEST(default_constructed) {
CAF_TEST(new_round) {
fill(queue, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12);
// Allow f to consume 2 items per nested queue.
fetch_helper f;
auto round_result = queue.new_round(2, f);
CAF_CHECK_EQUAL(round_result, true);
CAF_CHECK_EQUAL(seq, "3,6,1,4,2,5");
CAF_CHECK_EQUAL(f.result, "0:3,0:6,1:1,1:4,2:2,2:5");
CAF_REQUIRE_EQUAL(queue.empty(), false);
// Allow f to consume one more item from each queue.
seq.clear();
f.result.clear();
round_result = queue.new_round(1, f);
CAF_CHECK_EQUAL(round_result, true);
CAF_CHECK_EQUAL(seq, "9,7,8");
CAF_CHECK_EQUAL(f.result, "0:9,1:7,2:8");
CAF_REQUIRE_EQUAL(queue.empty(), false);
// Allow f to consume the remainder, i.e., 12.
seq.clear();
f.result.clear();
round_result = queue.new_round(1000, f);
CAF_CHECK_EQUAL(round_result, true);
CAF_CHECK_EQUAL(seq, "12");
CAF_CHECK_EQUAL(f.result, "0:12");
CAF_REQUIRE_EQUAL(queue.empty(), true);
}
......@@ -150,21 +165,13 @@ CAF_TEST(priorities) {
queue.policy().enable_priorities = true;
fill(queue, 1, 2, 3, 4, 5, 6, 7, 8, 9);
// Allow f to consume 2 items from the high priority and 1 item otherwise.
auto round_result = queue.new_round(1, f);
CAF_CHECK_EQUAL(round_result, true);
CAF_CHECK_EQUAL(seq, "3,6,1,2");
CAF_CHECK_EQUAL(fetch(1), "0:3,0:6,1:1,2:2");
CAF_REQUIRE_EQUAL(queue.empty(), false);
// Drain the high-priority queue with one item left per other queue.
seq.clear();
round_result = queue.new_round(1, f);
CAF_CHECK_EQUAL(round_result, true);
CAF_CHECK_EQUAL(seq, "9,4,5");
CAF_CHECK_EQUAL(fetch(1), "0:9,1:4,2:5");
CAF_REQUIRE_EQUAL(queue.empty(), false);
// Drain queue.
seq.clear();
round_result = queue.new_round(1000, f);
CAF_CHECK_EQUAL(round_result, true);
CAF_CHECK_EQUAL(seq, "7,8");
CAF_CHECK_EQUAL(fetch(1000), "1:7,2:8");
CAF_REQUIRE_EQUAL(queue.empty(), true);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment