Commit c5c80453 authored by Joseph Noir

changed test for opencl actor

Test spawn_cl with and without map functions.
parent ef0d3e1f
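
The test below exercises both spawn_cl call shapes; as a minimal sketch (w1, w2, source, name, map_args, and map_result are placeholder names, not identifiers from this commit):

    // typed variant: buffer types are fixed by the signature
    auto w1 = spawn_cl<ivec(ivec&)>(source, name, {4, 4});

    // map-function variant: user-supplied lambdas translate between
    // incoming/outgoing messages and the kernel's raw buffers
    auto w2 = spawn_cl(source, name, map_args, map_result, {4, 4});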
#include <vector>
#include <iomanip>
#include <cassert>
#include <numeric>
#include <utility>
#include <iostream>
#include <algorithm>

#include "test.hpp"

#include "cppa/cppa.hpp"
#include "cppa/opencl.hpp"

using namespace std;
using namespace cppa;
using namespace cppa::opencl;
namespace {

using ivec = vector<int>;

constexpr size_t matrix_size = 4;
constexpr const char* kernel_name = "matrix_square";

constexpr const char* kernel_source = R"__(
    __kernel void matrix_square(__global int* matrix,
                                __global int* output) {
        // we only use square matrices, hence: width == height
        size_t size = get_global_size(0); // == get_global_size(1)
        size_t x = get_global_id(0);
        size_t y = get_global_id(1);
        int result = 0;
        for (size_t idx = 0; idx < size; ++idx) {
            result += matrix[idx + y * size] * matrix[x + idx * size];
        }
        output[x + y * size] = result;
    }
)__";

}
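
// square_matrix wraps a flat Size*Size buffer; it is announced to cppa's
// type system below so instances can be sent to and received from the
// OpenCL actor as user-defined message types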
template<size_t Size>
class square_matrix {

 public:

    static constexpr size_t num_elements = Size * Size;

    square_matrix(square_matrix&&) = default;
    square_matrix(const square_matrix&) = default;
    square_matrix& operator=(square_matrix&&) = default;
    square_matrix& operator=(const square_matrix&) = default;

    square_matrix() : m_data(num_elements) { }

    explicit square_matrix(ivec d) : m_data(move(d)) {
        assert(m_data.size() == num_elements);
    }
    // element access; data is stored in a flat vector<int>
    inline int& operator()(size_t row, size_t column) {
        return m_data[row + column * Size];
    }
    inline const int& operator()(size_t row, size_t column) const {
        return m_data[row + column * Size];
    }

    inline void iota_fill() {
        iota(m_data.begin(), m_data.end(), 0);
    }

    typedef typename ivec::const_iterator const_iterator;

    const_iterator begin() const { return m_data.begin(); }

    const_iterator end() const { return m_data.end(); }

    ivec& data() { return m_data; }

    const ivec& data() const { return m_data; }

 private:

    ivec m_data;

};
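
// element-wise equality, so the test can compare a received matrix
// against the expected result via CPPA_CHECK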
template<size_t Size>
inline bool operator==(const square_matrix<Size>& lhs,
                       const square_matrix<Size>& rhs) {
    return equal(lhs.begin(), lhs.end(), rhs.begin());
}

template<size_t Size>
inline bool operator!=(const square_matrix<Size>& lhs,
                       const square_matrix<Size>& rhs) {
    return !(lhs == rhs);
}

using matrix_type = square_matrix<matrix_size>;

int main() {
    CPPA_TEST(test_opencl);

    announce<ivec>();
    announce<matrix_type>();
    const ivec expected1{ 56,  62,  68,  74,
                         152, 174, 196, 218,
                         248, 286, 324, 362,
                         344, 398, 452, 506};
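    // expected1 is m1 * m1 for the 4x4 matrix m1 that iota fills with
    // 0..15 below, e.g. entry (x = 0, y = 0) = 0*0 + 1*4 + 2*8 + 3*12 = 56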
    auto worker1 = spawn_cl<ivec(ivec&)>(kernel_source, kernel_name,
                                         {matrix_size, matrix_size});
    ivec m1(matrix_size * matrix_size);
    iota(m1.begin(), m1.end(), 0);
    send(worker1, move(m1));
    receive (
        on_arg_match >> [&] (const ivec& result) {
            CPPA_CHECK(equal(begin(expected1), end(expected1), begin(result)));
        }
    );

    CPPA_CHECKPOINT();

    const matrix_type expected2(move(expected1));

    matrix_type m2;
    m2.iota_fill();
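    // the map-function variant of spawn_cl: the first lambda unwraps an
    // incoming matrix_type message into the ivec buffer the kernel reads,
    // the second wraps the kernel's output buffer back into a matrix_type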
    auto worker2 = spawn_cl(kernel_source, kernel_name,
                            [] (any_tuple msg) -> option<cow_tuple<ivec>> {
                                auto opt = tuple_cast<matrix_type>(msg);
                                if (opt) {
                                    return {move(get_ref<0>(*opt).data())};
                                }
                                return {};
                            },
                            [] (ivec& result) -> any_tuple {
                                return make_any_tuple(matrix_type{move(result)});
                            },
                            {matrix_size, matrix_size}
    );
    send(worker2, move(m2));
    receive (
        on_arg_match >> [&] (const matrix_type& result) {
            CPPA_CHECK(expected2 == result);
        }
    );

    cppa::await_all_others_done();
    cppa::shutdown();
...