Commit 406f2f1e authored by Dominik Charousset

Base pattern matching on type-erased tuples

parent fa5ad15f
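
The change routes all pattern matching through `type_erased_tuple*` instead of `const message&`. As orientation, here is a minimal sketch of the interface shape the new code relies on, using only the member functions that appear in this diff (`size()`, `matches()`, `get()`, `shared()`); the class below is an illustrative stand-in, not CAF's actual `caf::type_erased_tuple`.

```cpp
#include <cstddef>
#include <cstdint>
#include <typeinfo>

// Illustrative stand-in for the interface used by the new matching code.
// Member names are taken from calls visible in this diff; the exact
// signatures and semantics are assumptions for the sketch.
class erased_tuple_sketch {
public:
  virtual ~erased_tuple_sketch() = default;
  // number of type-erased elements
  virtual std::size_t size() const = 0;
  // does element `pos` match the given builtin type number / RTTI entry?
  virtual bool matches(std::size_t pos, std::uint16_t typenr,
                       const std::type_info* type) const = 0;
  // type-erased read access to element `pos`
  virtual const void* get(std::size_t pos) const = 0;
  // is the underlying storage shared (copy-on-write)?
  virtual bool shared() const = 0;
};
```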
@@ -38,12 +38,14 @@ struct meta_element {
   atom_value v;
   uint16_t typenr;
   const std::type_info* type;
-  bool (*fun)(const meta_element&, const message&, size_t, void**);
+  bool (*fun)(const meta_element&, const type_erased_tuple*, size_t, void**);
 };
-bool match_element(const meta_element&, const message&, size_t, void**);
+bool match_element(const meta_element&, const type_erased_tuple*,
+                   size_t, void**);
-bool match_atom_constant(const meta_element&, const message&, size_t, void**);
+bool match_atom_constant(const meta_element&, const type_erased_tuple*,
+                         size_t, void**);
 template <class T, uint16_t TN = detail::type_nr<T>::value>
 struct meta_element_factory {
@@ -78,7 +80,7 @@ struct meta_elements<type_list<Ts...>> {
   }
 };
-bool try_match(const message& msg, const meta_element* pattern_begin,
+bool try_match(const type_erased_tuple* xs, const meta_element* pattern_begin,
                size_t pattern_size, void** out);
 } // namespace detail
......
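For context on the header above: each `meta_element` stores a `fun` pointer that checks one tuple position and, on success, writes a type-erased pointer to the matched value into an output slot, which is how `try_match` fills its `out` array. Below is a compact, self-contained analogue of this table-driven scheme, using `std::vector<std::any>` as a stand-in for the type-erased tuple; the names and types here are invented for illustration and are not CAF code.

```cpp
#include <any>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

using tuple_t = std::vector<std::any>;

// One table entry per pattern position: a plain function pointer performs
// the check and stores a pointer to the matched value on success.
struct matcher {
  bool (*fun)(const tuple_t&, std::size_t, const void**);
};

template <class T>
bool match_type(const tuple_t& xs, std::size_t pos, const void** out) {
  if (auto* ptr = std::any_cast<T>(&xs[pos])) {
    *out = ptr; // expose the element in type-erased form
    return true;
  }
  return false;
}

// Exact-arity match, mirroring the shape of the new try_match loop.
bool try_match(const tuple_t& xs, const matcher* pattern, std::size_t ps,
               const void** out) {
  if (xs.size() != ps)
    return false;
  for (std::size_t i = 0; i < ps; ++i)
    if (!pattern[i].fun(xs, i, &out[i]))
      return false;
  return true;
}

int main() {
  tuple_t xs{std::string{"hello"}, 42};
  matcher pattern[] = {{match_type<std::string>}, {match_type<int>}};
  const void* out[2] = {nullptr, nullptr};
  std::cout << std::boolalpha << try_match(xs, pattern, 2, out) << '\n'; // true
}
```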
@@ -55,7 +55,7 @@ public:
   virtual ~match_case();
-  virtual result invoke(detail::invoke_result_visitor&, message&) = 0;
+  virtual result invoke(detail::invoke_result_visitor&, type_erased_tuple*) = 0;
   inline uint32_t type_token() const {
     return token_;
@@ -65,18 +65,6 @@ private:
   uint32_t token_;
 };
-struct match_case_zipper {
-  template <class F, typename T>
-  auto operator()(const F& fun, T& arg) -> decltype(fun(arg)) const {
-    return fun(arg);
-  }
-  // forward everything as reference if no guard/transformation is set
-  template <class T>
-  auto operator()(const unit_t&, T& arg) const -> decltype(std::ref(arg)) {
-    return std::ref(arg);
-  }
-};
 template <class T>
 T&& unopt(T&& v,
           typename std::enable_if<std::is_rvalue_reference<T&&>::value>::type* = 0) {
@@ -188,21 +176,24 @@ public:
     // nop
   }
-  match_case::result invoke(detail::invoke_result_visitor& f, message& msg) override {
-    intermediate_tuple it{msg.shared()};
+  match_case::result invoke(detail::invoke_result_visitor& f,
+                            type_erased_tuple* xs) override {
+    intermediate_tuple it{xs ? xs->shared() : false};
     detail::meta_elements<pattern> ms;
+    message tmp;
     // check if try_match() reports success
-    if (! detail::try_match(msg, ms.arr.data(), ms.arr.size(), it.data))
+    if (! detail::try_match(xs, ms.arr.data(), ms.arr.size(), it.data))
       return match_case::no_match;
     // detach msg before invoking fun_ if needed
     if (is_manipulator && it.shared_access) {
-      msg.force_unshare();
+      tmp = message::from(xs);
+      tmp.force_unshare();
       it.shared_access = false;
       // update pointers in our intermediate tuple
-      for (size_t i = 0; i < msg.size(); ++i) {
+      for (size_t i = 0; i < tmp.size(); ++i) {
         // msg is guaranteed to be detached, hence we don't need to
         // check this condition over and over again via get_mutable
-        it[i] = const_cast<void*>(msg.at(i));
+        it[i] = const_cast<void*>(tmp.at(i));
       }
     }
     lfinvoker<std::is_same<result_type, void>::value, F> fun{fun_};
......
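The reworked `invoke` above only detaches when the case is a manipulator and the tuple is shared: it materializes a `message` from the type-erased tuple, forces a private copy, and then refreshes the pointers in the intermediate tuple. The sketch below shows the underlying detach-before-mutate (copy-on-write) idea in isolation; `cow_vector` and its members are made up for this example, with only the member names borrowed from the diff.

```cpp
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

// Minimal copy-on-write container illustrating detach-before-mutate:
// writers clone shared storage once, then work on their private copy.
// Conceptually mirrors the force_unshare() call in the diff; the class
// itself is invented for this sketch.
class cow_vector {
public:
  explicit cow_vector(std::vector<int> xs)
      : data_(std::make_shared<std::vector<int>>(std::move(xs))) {
    // nop
  }

  bool shared() const {
    return data_.use_count() > 1;
  }

  // make sure we hold the only reference before handing out mutable pointers
  void force_unshare() {
    if (shared())
      data_ = std::make_shared<std::vector<int>>(*data_);
  }

  int* mutable_at(std::size_t i) {
    force_unshare();
    return &(*data_)[i];
  }

  const int* at(std::size_t i) const {
    return &(*data_)[i];
  }

private:
  std::shared_ptr<std::vector<int>> data_;
};

int main() {
  cow_vector a{std::vector<int>{1, 2, 3}};
  cow_vector b = a;                 // shares storage with a
  *b.mutable_at(0) = 42;            // b detaches before writing
  std::cout << *a.at(0) << ' ' << *b.at(0) << '\n'; // prints "1 42"
}
```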
@@ -108,7 +108,6 @@ public:
   }
 };
-/// @relates type_erased_tuple
 template <class Processor>
 typename std::enable_if<Processor::is_saving::value>::type
......
@@ -94,7 +94,7 @@ match_case::result behavior_impl::invoke(detail::invoke_result_visitor& f,
   auto msg_token = msg.type_token();
   for (auto i = begin_; i != end_; ++i)
     if (i->type_token == msg_token)
-      switch (i->ptr->invoke(f, msg)) {
+      switch (i->ptr->invoke(f, msg.cvals().get())) {
         case match_case::no_match:
           break;
         case match_case::match:
......
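The dispatch loop above is unchanged except for the argument it forwards: cases are still pre-filtered by a cheap 32-bit type token before the more expensive `invoke` runs, but `invoke` now receives the raw type-erased tuple (`msg.cvals().get()`) rather than the `message` wrapper. A self-contained sketch of that token-prefilter pattern; the types and token values below are invented for illustration and are not CAF's.

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// A dispatch table entry: a cheap integer token plus the actual handler.
// Only entries whose token matches the message token get invoked.
struct case_entry {
  std::uint32_t type_token;
  std::function<bool(const void*)> invoke; // stand-in for match_case::invoke
};

bool dispatch(std::uint32_t msg_token, const void* payload,
              const std::vector<case_entry>& cases) {
  for (const auto& c : cases)
    if (c.type_token == msg_token && c.invoke(payload))
      return true; // first matching case wins
  return false;
}

int main() {
  std::vector<case_entry> cases{
    {1u, [](const void* x) {
       std::cout << "int case: " << *static_cast<const int*>(x) << '\n';
       return true;
     }},
  };
  int value = 42;
  // token 1u identifies "a single int" in this toy example
  std::cout << std::boolalpha << dispatch(1u, &value, cases) << '\n';
}
```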
@@ -24,30 +24,23 @@ namespace detail {
 using pattern_iterator = const meta_element*;
 bool is_wildcard(const meta_element& me) {
   return me.typenr == 0 && me.type == nullptr;
 }
-bool match_element(const meta_element& me, const message& msg,
+bool match_element(const meta_element& me, const type_erased_tuple* xs,
                    size_t pos, void** storage) {
   CAF_ASSERT(me.typenr != 0 || me.type != nullptr);
-  if (! msg.match_element(pos, me.typenr, me.type)) {
+  if (! xs->matches(pos, me.typenr, me.type))
     return false;
-  }
-  *storage = const_cast<void*>(msg.at(pos));
+  *storage = const_cast<void*>(xs->get(pos));
   return true;
 }
-bool match_atom_constant(const meta_element& me, const message& msg,
+bool match_atom_constant(const meta_element& me, const type_erased_tuple* xs,
                          size_t pos, void** storage) {
   CAF_ASSERT(me.typenr == detail::type_nr<atom_value>::value);
-  if (! msg.match_element(pos, detail::type_nr<atom_value>::value, nullptr)) {
+  if (! xs->matches(pos, detail::type_nr<atom_value>::value, nullptr))
     return false;
-  }
-  auto ptr = msg.at(pos);
-  if (me.v != *reinterpret_cast<const atom_value*>(ptr)) {
+  auto ptr = xs->get(pos);
+  if (me.v != *reinterpret_cast<const atom_value*>(ptr))
     return false;
-  }
   // This assignment casts `atom_value` to `atom_constant<V>*`.
   // This type violation could theoretically cause undefined behavior.
   // However, `uti` does have an address that is guaranteed to be valid
@@ -58,81 +51,19 @@ bool match_atom_constant(const meta_element& me, const message& msg,
   return true;
 }
-class set_commit_rollback {
-public:
-  using pointer = void**;
-  set_commit_rollback(const set_commit_rollback&) = delete;
-  set_commit_rollback& operator=(const set_commit_rollback&) = delete;
-  explicit set_commit_rollback(pointer ptr)
-      : data_(ptr),
-        pos_(0),
-        fallback_pos_(0) {
-    // nop
-  }
-  inline void inc() {
-    ++pos_;
-  }
-  inline pointer current() const {
-    return &data_[pos_];
-  }
-  inline void commit() {
-    fallback_pos_ = pos_;
-  }
-  inline void rollback() {
-    pos_ = fallback_pos_;
-  }
-private:
-  pointer data_;
-  size_t pos_;
-  size_t fallback_pos_;
-};
-bool try_match(const message& msg, size_t msg_pos, size_t msg_size,
-               pattern_iterator pbegin, pattern_iterator pend,
-               set_commit_rollback& storage) {
-  while (msg_pos < msg_size) {
-    if (pbegin == pend) {
+bool try_match(const type_erased_tuple* xs,
+               pattern_iterator iter, size_t ps, void** out) {
+  CAF_ASSERT(out != nullptr);
+  if (! xs)
+    return ps == 0;
+  if (xs->size() != ps)
     return false;
-    }
-    if (is_wildcard(*pbegin)) {
-      // perform submatching
-      ++pbegin;
-      // always true at the end of the pattern
-      if (pbegin == pend) {
-        return true;
-      }
-      // safe current mapping as fallback
-      storage.commit();
-      // iterate over remaining values until we found a match
-      for (; msg_pos < msg_size; ++msg_pos) {
-        if (try_match(msg, msg_pos, msg_size, pbegin, pend, storage)) {
-          return true;
-        }
-        // restore mapping to fallback (delete invalid mappings)
-        storage.rollback();
-      }
-      return false; // no submatch found
-    }
+  for (size_t i = 0; i < ps; ++i, ++iter, ++out)
     // inspect current element
-    if (! pbegin->fun(*pbegin, msg, msg_pos, storage.current())) {
+    if (! iter->fun(*iter, xs, i, out))
      // type mismatch
      return false;
-    }
-    // next iteration
-    storage.inc();
-    ++msg_pos;
-    ++pbegin;
-  }
-  // we found a match if we've inspected each element and consumed
-  // the whole pattern (or the remainder consists of wildcards only)
-  return std::all_of(pbegin, pend, is_wildcard);
-}
-bool try_match(const message& msg, pattern_iterator pb, size_t ps, void** out) {
-  CAF_ASSERT(out != nullptr);
-  CAF_ASSERT(msg.empty() || msg.vals()->get_reference_count() > 0);
-  set_commit_rollback scr{out};
-  return try_match(msg, 0, msg.size(), pb, pb + ps, scr);
+  return true;
 }
 } // namespace detail
......
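Note the semantic shift in `try_match`: the deleted recursive overload implemented wildcard submatching with commit/rollback bookkeeping (via `set_commit_rollback`), whereas the remaining overload only accepts exact-arity matches and treats a null tuple as matching the empty pattern. For reference, here is a compact, self-contained sketch of the backtracking idea the removed code used, operating on plain integers instead of type-erased tuples; `-1` plays the role of the wildcard, and everything below is illustrative rather than CAF code.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// -1 acts as the wildcard; any other value must match exactly.
// The structure mirrors the deleted recursive try_match overload:
// on a wildcard, try every remaining suffix of the message (backtracking).
bool wildcard_match(const std::vector<int>& msg, std::size_t mpos,
                    const std::vector<int>& pat, std::size_t ppos) {
  while (mpos < msg.size()) {
    if (ppos == pat.size())
      return false;
    if (pat[ppos] == -1) {
      ++ppos;
      if (ppos == pat.size())
        return true; // trailing wildcard matches the rest
      for (; mpos < msg.size(); ++mpos)
        if (wildcard_match(msg, mpos, pat, ppos))
          return true;
      return false; // no submatch found
    }
    if (msg[mpos] != pat[ppos])
      return false;
    ++mpos;
    ++ppos;
  }
  // message consumed: the remaining pattern must be wildcards only
  for (; ppos < pat.size(); ++ppos)
    if (pat[ppos] != -1)
      return false;
  return true;
}

int main() {
  std::cout << std::boolalpha
            << wildcard_match({1, 2, 3}, 0, {1, -1, 3}, 0) << ' '   // true
            << wildcard_match({1, 2, 3}, 0, {-1}, 0) << ' '         // true
            << wildcard_match({1, 2, 3}, 0, {1, -1, 4}, 0) << '\n'; // false
}
```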
@@ -21,6 +21,8 @@
+#include "caf/config.hpp"
+#include "caf/detail/dynamic_message_data.hpp"
 namespace caf {
 type_erased_tuple::~type_erased_tuple() {
......
@@ -115,7 +115,8 @@ CAF_TEST(message_lifetime_in_scoped_actor) {
   self->send(self, msg);
   self->receive(
     [&](int& value) {
-      CAF_CHECK_EQUAL(msg.cvals()->get_reference_count(), 1u);
+      CAF_CHECK_EQUAL(msg.cvals()->get_reference_count(), 2u);
+      CAF_CHECK_NOT_EQUAL(&value, msg.at(0));
       value = 10;
     }
   );
......