Commit 21dd89ba authored by Dominik Charousset

maintenance

removed unnecessary moves and enforced coding conventions
parent a828a3c7
@@ -112,9 +112,9 @@ class actor_facade<Ret(Args...)> : public actor {
return new actor_facade<Ret (Args...)>{dispatcher,
kernel,
prog,
- std::move(global_dims),
- std::move(offsets),
- std::move(local_dims),
+ global_dims,
+ offsets,
+ local_dims,
std::move(map_args),
std::move(map_result)};
}
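A side note on the hunk above: the removed std::move calls were effectively no-ops, because the factory receives the dimension vectors by const reference, and std::move applied to a const lvalue still selects the copy constructor. A minimal standalone sketch of that behavior (generic types, not the cppa ones):

```cpp
#include <cstddef>
#include <utility>
#include <vector>

using dim_vec = std::vector<std::size_t>;

// Taking the parameter by const reference means std::move(v) yields a
// const rvalue, which still binds to the copy constructor -- no move happens.
dim_vec store(const dim_vec& v) {
    return dim_vec{std::move(v)}; // copies despite the std::move
}

int main() {
    dim_vec dims{512, 512};
    auto copy = store(dims);
    // dims is left intact; the "move" above was silently a copy.
    return dims == copy ? 0 : 1;
}
```

Dropping the std::move therefore changes nothing at runtime and simply makes the copy explicit.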
@@ -160,18 +160,15 @@ class actor_facade<Ret(Args...)> : public actor {
auto opt = m_map_args(msg);
if (opt) {
response_handle handle{this, sender, id.response_id()};
- size_t number_of_values{1};
- std::for_each(m_global_dimensions.begin(),
- m_global_dimensions.end(),
- [&](const size_t& s) { number_of_values *= s; });
- Ret result_buf(number_of_values);
+ Ret result_buf(std::accumulate(m_global_dimensions.begin(),
+ m_global_dimensions.end(),
+ 1, std::multiplies<size_t>{}));
std::vector<mem_ptr> arguments;
add_arguments_to_kernel(arguments,
m_context.get(),
m_kernel.get(),
result_buf,
get_ref<Is>(*opt)...);
CPPA_LOG_TRACE("enqueue to dispatcher");
enqueue_to_dispatcher(m_dispatcher,
make_counted<command_impl<Ret>>(handle,
m_kernel,
@@ -181,9 +178,7 @@ class actor_facade<Ret(Args...)> : public actor {
m_local_dimensions,
m_map_result));
}
- else {
- CPPA_LOG_ERROR("actor_facade::enqueue() tuple_cast failed.");
- }
+ else { CPPA_LOG_ERROR("actor_facade::enqueue() tuple_cast failed."); }
}
typedef std::vector<mem_ptr> args_vec;
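The core change in the enqueue hunk above replaces a std::for_each that mutates a local counter through a lambda with a single std::accumulate over the global dimensions. A standalone sketch of the pattern (the 1024x768 range is just an example value):

```cpp
#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    // Hypothetical global work dimensions, e.g. a 1024x768 2D range.
    std::vector<std::size_t> global_dims{1024, 768};

    // Before: build the product by mutating a local through a lambda.
    std::size_t number_of_values = 1;
    std::for_each(global_dims.begin(), global_dims.end(),
                  [&](std::size_t s) { number_of_values *= s; });

    // After: express the same product directly as a fold.
    auto product = std::accumulate(global_dims.begin(), global_dims.end(),
                                   std::size_t{1}, std::multiplies<std::size_t>{});

    std::cout << number_of_values << " == " << product << '\n'; // 786432 == 786432
}
```

The accumulate form also lets the result be computed directly in an initializer, which the command_impl constructor further down takes advantage of.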
@@ -202,32 +197,20 @@ class actor_facade<Ret(Args...)> : public actor {
cl_context,
cl_kernel kernel) {
cl_int err{0};
- for(unsigned long i{1}; i < arguments.size(); ++i) {
+ for(size_t i = 1; i < arguments.size(); ++i) {
err = clSetKernelArg(kernel,
(i-1),
sizeof(cl_mem),
static_cast<void*>(&arguments[i]));
- if (err != CL_SUCCESS) {
- std::ostringstream oss;
- oss << "clSetKernelArg: '"
- << get_opencl_error(err)
- << "'.";
- CPPA_LOG_ERROR(oss.str());
- return;
- }
+ CPPA_LOG_ERROR_IF(err != CL_SUCCESS,
+ "clSetKernelArg: " << get_opencl_error(err));
}
err = clSetKernelArg(kernel,
arguments.size()-1,
sizeof(cl_mem),
static_cast<void*>(&arguments[0]));
- if (err != CL_SUCCESS) {
- std::ostringstream oss;
- oss << "clSetKernelArg: '"
- << get_opencl_error(err)
- << "'.";
- CPPA_LOG_ERROR(oss.str());
- return;
- }
+ CPPA_LOG_ERROR_IF(err != CL_SUCCESS,
+ "clSetKernelArg: " << get_opencl_error(err));
}
template<typename T0, typename... Ts>
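The error-handling cleanup in the hunk above leans on CPPA_LOG_ERROR_IF accepting a condition plus a streamable message, which replaces the hand-rolled ostringstream blocks. The real macro routes through cppa's logging backend; purely as an illustration of the shape such a macro can take, here is a hypothetical stand-in that logs to std::cerr:

```cpp
#include <iostream>
#include <sstream>

// Hypothetical LOG_ERROR_IF: build and emit the message only when the
// condition holds, and accept ostream-style chaining in the message argument.
#define LOG_ERROR_IF(cond, msg)                                        \
    do {                                                               \
        if (cond) {                                                    \
            std::ostringstream oss_;                                   \
            oss_ << msg;                                               \
            std::cerr << "[ERROR] " << oss_.str() << std::endl;        \
        }                                                              \
    } while (false)

int main() {
    int err = -30; // e.g., an OpenCL error code such as CL_INVALID_VALUE
    LOG_ERROR_IF(err != 0, "clSetKernelArg: error code " << err);
}
```

Because the message is pasted into an ostringstream insertion, call sites can write `"clSetKernelArg: " << get_opencl_error(err)` just as the diff does.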
@@ -239,25 +222,17 @@ class actor_facade<Ret(Args...)> : public actor {
cl_int err{0};
auto buf = clCreateBuffer(context,
CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
- sizeof(typename T0::value_type)*arg0.size(),
+ sizeof(typename T0::value_type) * arg0.size(),
arg0.data(),
&err);
if (err != CL_SUCCESS) {
- std::ostringstream oss;
- oss << "clCreateBuffer: '"
- << get_opencl_error(err)
- << "'.";
- CPPA_LOG_ERROR(oss.str());
- return;
+ CPPA_LOG_ERROR("clCreateBuffer: " << get_opencl_error(err));
}
else {
mem_ptr tmp;
tmp.adopt(std::move(buf));
arguments.push_back(tmp);
- return add_arguments_to_kernel_rec(arguments,
- context,
- kernel,
- args...);
+ add_arguments_to_kernel_rec(arguments, context, kernel, args...);
}
}
@@ -271,22 +246,17 @@ class actor_facade<Ret(Args...)> : public actor {
cl_int err{0};
auto buf = clCreateBuffer(context,
CL_MEM_WRITE_ONLY,
- sizeof(typename R::value_type)*ret.size(),
+ sizeof(typename R::value_type) * ret.size(),
nullptr,
&err);
if (err != CL_SUCCESS) {
- std::ostringstream oss;
- oss << "clCreateBuffer: '"
- << get_opencl_error(err)
- << "'.";
- CPPA_LOG_ERROR(oss.str());
- return;
+ CPPA_LOG_ERROR("clCreateBuffer: " << get_opencl_error(err));
}
else {
mem_ptr tmp;
tmp.adopt(std::move(buf));
arguments.push_back(tmp);
- return add_arguments_to_kernel_rec(arguments,
+ add_arguments_to_kernel_rec(arguments,
context,
kernel,
std::forward<Ts>(args)...);
@@ -33,6 +33,7 @@
#define CPPA_OPENCL_COMMAND_HPP
#include <vector>
+ #include <numeric>
#include <algorithm>
#include <functional>
@@ -70,21 +71,21 @@ class command_impl : public command {
command_impl(response_handle handle,
kernel_ptr kernel,
std::vector<mem_ptr> arguments,
- const dim_vec& global_dimensions,
- const dim_vec& global_offsets,
- const dim_vec& local_dimensions,
+ const dim_vec& global_dims,
+ const dim_vec& offsets,
+ const dim_vec& local_dims,
const std::function<any_tuple(T&)>& map_result)
- : m_number_of_values(1)
+ : m_number_of_values(std::accumulate(global_dims.begin(),
+ global_dims.end(),
+ 1, std::multiplies<size_t>{}))
, m_handle(handle)
, m_kernel(kernel)
, m_arguments(arguments)
- , m_global_dimensions(global_dimensions)
- , m_global_offsets(global_offsets)
- , m_local_dimensions(local_dimensions)
+ , m_global_dims(global_dims)
+ , m_offsets(offsets)
+ , m_local_dims(local_dims)
, m_map_result(map_result)
{
m_kernel_event.adopt(cl_event());
- for (auto s : m_global_dimensions) m_number_of_values *= s;
}
void enqueue (command_queue_ptr queue) {
@@ -92,9 +93,7 @@ class command_impl : public command {
this->ref();
cl_int err{0};
m_queue = queue;
auto ptr = m_kernel_event.get();
auto data_or_nullptr = [](const dim_vec& vec) {
return vec.empty() ? nullptr : vec.data();
};
@@ -102,10 +101,10 @@ class command_impl : public command {
/* enqueue kernel */
err = clEnqueueNDRangeKernel(m_queue.get(),
m_kernel.get(),
- m_global_dimensions.size(),
- data_or_nullptr(m_global_offsets),
- data_or_nullptr(m_global_dimensions),
- data_or_nullptr(m_local_dimensions),
+ m_global_dims.size(),
+ data_or_nullptr(m_offsets),
+ data_or_nullptr(m_global_dims),
+ data_or_nullptr(m_local_dims),
0,
nullptr,
&ptr);
@@ -116,9 +115,6 @@ class command_impl : public command {
err = clSetEventCallback(ptr,
CL_COMPLETE,
[](cl_event, cl_int, void* data) {
CPPA_LOGC_TRACE("command_impl",
"enqueue",
"command::enqueue()::callback()");
auto cmd = reinterpret_cast<command_impl*>(data);
cmd->handle_results();
cmd->deref();
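The callback hunk above shows the lifetime pattern used here: enqueue() calls this->ref() before handing a captureless lambda (which decays to a plain function pointer) and this as user data to clSetEventCallback, and the callback calls handle_results() followed by deref(). A simplified, self-contained sketch of that pattern, with a hypothetical register_callback/fire_event pair standing in for the OpenCL event API:

```cpp
#include <atomic>
#include <iostream>

// Hypothetical C-style callback registration standing in for clSetEventCallback:
// it stores a plain function pointer plus an opaque user-data pointer.
using callback_t = void (*)(int status, void* user_data);
static callback_t g_cb = nullptr;
static void* g_cb_data = nullptr;
void register_callback(callback_t cb, void* user_data) { g_cb = cb; g_cb_data = user_data; }
void fire_event(int status) { if (g_cb) g_cb(status, g_cb_data); }

struct command {
    std::atomic<int> refcount{1};
    void ref() { ++refcount; }
    void deref() { if (--refcount == 0) delete this; }
    void handle_results() { std::cout << "results handled\n"; }

    void enqueue() {
        ref(); // keep *this alive until the callback has run
        // A captureless lambda converts to a function pointer, so it can be
        // passed to a C API; the object itself travels via the user-data pointer.
        register_callback([](int, void* data) {
            auto self = static_cast<command*>(data);
            self->handle_results();
            self->deref();
        }, this);
    }
};

int main() {
    auto cmd = new command;
    cmd->enqueue();   // refcount: 1 -> 2
    fire_event(0);    // simulate asynchronous completion; callback drops one ref
    cmd->deref();     // drop the creator's reference; object is destroyed here
}
```

The extra ref() before registration is what keeps the command alive while the runtime still holds a pointer to it.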
@@ -138,14 +134,12 @@ class command_impl : public command {
event_ptr m_kernel_event;
command_queue_ptr m_queue;
std::vector<mem_ptr> m_arguments;
- dim_vec m_global_dimensions;
- dim_vec m_global_offsets;
- dim_vec m_local_dimensions;
+ dim_vec m_global_dims;
+ dim_vec m_offsets;
+ dim_vec m_local_dims;
std::function<any_tuple (T&)> m_map_result;
void handle_results () {
CPPA_LOG_TRACE("command::handle_results()");
/* get results from gpu */
cl_int err{0};
cl_event read_event;
T result(m_number_of_values);
@@ -160,9 +154,8 @@ class command_impl : public command {
&read_event);
clReleaseEvent(read_event);
if (err != CL_SUCCESS) {
throw std::runtime_error("[!!!] clEnqueueReadBuffer: '"
+ get_opencl_error(err)
+ "'.");
throw std::runtime_error("clEnqueueReadBuffer: "
+ get_opencl_error(err));
}
auto mapped_result = m_map_result(result);
reply_tuple_to(m_handle, mapped_result);
@@ -87,7 +87,7 @@ class command_dispatcher {
actor_ptr spawn(const program& prog,
const char* kernel_name,
const dim_vec& global_dims,
- const dim_vec& global_offs,
+ const dim_vec& offsets,
const dim_vec& local_dims,
std::function<option<cow_tuple<typename util::rm_ref<Args>::type...>>(any_tuple)> map_args,
std::function<any_tuple(Ret&)> map_result)
@@ -96,7 +96,7 @@ class command_dispatcher {
prog,
kernel_name,
global_dims,
- global_offs,
+ offsets,
local_dims,
std::move(map_args),
std::move(map_result));
@@ -106,7 +106,7 @@ class command_dispatcher {
actor_ptr spawn(const program& prog,
const char* kernel_name,
const dim_vec& global_dims,
- const dim_vec& global_offs = {},
+ const dim_vec& offsets = {},
const dim_vec& local_dims = {})
{
std::function<option<cow_tuple<typename util::rm_ref<Args>::type...>>(any_tuple)>
@@ -118,9 +118,9 @@ class command_dispatcher {
};
return this->spawn<Ret,Args...>(prog,
kernel_name,
- std::move(global_dims),
- std::move(global_offs),
- std::move(local_dims),
+ global_dims,
+ offsets,
+ local_dims,
std::move(map_args),
std::move(map_result));
}