Commit 919fa7f0 authored by Joseph Noir

changed opencl spawn

allows the user to define type mappings
parent d9bffea7
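
For context, a minimal usage sketch of the reworked spawn interface (all names here are illustrative placeholders, not part of this commit: dispatcher stands for a command_dispatcher*, prog for a compiled program, matrix_mult and matrix_size for a kernel and its problem size). The public spawn installs default mappings; the internal spawn__ added below is where user-defined mappings are passed through:

using fvec = std::vector<float>;

// default mappings: tuple_cast<Args...> on incoming messages,
// make_any_tuple(result) on the way out
auto worker = dispatcher->spawn<fvec, fvec, fvec>(prog, "matrix_mult",
                                                  matrix_size * matrix_size);

// user-defined mappings, matching the signatures introduced below
std::function<option<cow_tuple<fvec, fvec>>(any_tuple)> map_args =
    [](any_tuple msg) { return tuple_cast<fvec, fvec>(msg); };
std::function<any_tuple(fvec&)> map_result =
    [](fvec& result) { return make_any_tuple(std::move(result)); };
auto worker2 = dispatcher->spawn__<fvec, fvec, fvec>(prog, "matrix_mult",
                                                     matrix_size, matrix_size, 1,
                                                     0, 0, 0,
                                                     map_args, map_result);
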
@@ -32,7 +32,7 @@
#ifndef CPPA_OPENCL_ACTOR_FACADE_HPP
#define CPPA_OPENCL_ACTOR_FACADE_HPP
#include <iostream>
#include <ostream>
#include <stdexcept>
#include "cppa/cppa.hpp"
@@ -67,6 +67,7 @@ class actor_facade<Ret(Args...)> : public actor {
public:
/*
actor_facade(command_dispatcher* dispatcher,
const program& prog,
const char* kernel_name)
@@ -92,16 +93,41 @@ class actor_facade<Ret(Args...)> : public actor {
CPPA_LOG_TRACE("new actor facde with ID " << this->id());
if(m_local_dimensions .size() > 3 ||
m_global_dimensions.size() > 3) {
throw std::runtime_error("[!!!] Creating actor facade:"
" a maximum of 3 dimensions allowed");
std::ostringstream oss;
oss << "Creating actor facade: a maximum of 3 dimensions allowed.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
init_kernel(m_program.get(), kernel_name);
}
*/
void enqueue(const actor_ptr& sender, any_tuple msg) {
CPPA_LOG_TRACE("actor_facade::enqueue()");
typename util::il_indices<util::type_list<Args...>>::type indices;
enqueue_impl(sender, msg, message_id{}, indices);
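// new constructor: besides the kernel dimensions it now takes a
// user-defined argument mapping (any_tuple to option<cow_tuple<Args...>>)
// and a result mapping (Ret& to any_tuple)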
actor_facade(command_dispatcher* dispatcher,
const program& prog,
const char* kernel_name,
std::vector<size_t> global_dimensions,
std::vector<size_t> local_dimensions,
std::function<option<cow_tuple<typename util::rm_ref<Args>::type...>>(any_tuple)> map_args,
std::function<any_tuple(Ret&)> map_result)
//std::function<option<cow_tuple<Args...>>(any_tuple)> map_args,
//std::function<any_tuple(Ret& result)> map_result)
: m_program(prog.m_program)
, m_context(prog.m_context)
, m_dispatcher(dispatcher)
, m_global_dimensions(std::move(global_dimensions))
, m_local_dimensions(std::move(local_dimensions))
, m_map_args(std::move(map_args))
, m_map_result(std::move(map_result))
{
CPPA_LOG_TRACE("new actor facde with ID " << this->id());
if(m_local_dimensions .size() > 3 ||
m_global_dimensions.size() > 3) {
std::ostringstream oss;
oss << "Creating actor facade: a maximum of 3 dimensions allowed.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
init_kernel(m_program.get(), kernel_name);
}
void sync_enqueue(const actor_ptr& sender, message_id id, any_tuple msg) {
@@ -110,6 +136,13 @@ class actor_facade<Ret(Args...)> : public actor {
enqueue_impl(sender, msg, id, indices);
}
void enqueue(const actor_ptr& sender, any_tuple msg) {
CPPA_LOG_TRACE("actor_facade::enqueue()");
typename util::il_indices<util::type_list<Args...>>::type indices;
enqueue_impl(sender, msg, message_id{}, indices);
}
private:
void init_kernel(cl_program program, const char* kernel_name) {
@@ -118,15 +151,19 @@ class actor_facade<Ret(Args...)> : public actor {
kernel_name,
&err));
if (err != CL_SUCCESS) {
throw std::runtime_error("[!!!] clCreateKernel: '"
+ get_opencl_error(err)
+ "'.");
std::ostringstream oss;
oss << "clCreateKernel: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
}
template<long... Is>
void enqueue_impl(const actor_ptr& sender, any_tuple msg, message_id id, util::int_list<Is...>) {
auto opt = tuple_cast<Args...>(msg);
//auto opt = tuple_cast<Args...>(msg);
auto opt = m_map_args(msg);
if (opt) {
response_handle handle{this, sender, id};
size_t number_of_values = 1;
@@ -134,11 +171,6 @@ class actor_facade<Ret(Args...)> : public actor {
for (auto s : m_global_dimensions) {
number_of_values *= s;
}
// for (auto s : m_local_dimensions) {
// if (s > 0) {
// number_of_values *= s;
// }
// }
}
else {
number_of_values = get<0>(*opt).size();
@@ -147,11 +179,11 @@ class actor_facade<Ret(Args...)> : public actor {
m_global_dimensions.push_back(1);
}
if (m_global_dimensions.empty() || number_of_values <= 0) {
throw std::runtime_error("[!!!] enqueue: can't handle dimension sizes!");
std::ostringstream oss;
oss << "actor_facade::enqueue() can't handle dimension sizes!";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
// for (auto s : m_global_dimensions) std::cout << "[global] " << s << std::endl;
// for (auto s : m_local_dimensions ) std::cout << "[local ] " << s << std::endl;
// std::cout << "number stuff " << number_of_values << std::endl;
Ret result_buf(number_of_values);
std::vector<mem_ptr> arguments;
add_arguments_to_kernel(arguments,
@@ -165,11 +197,11 @@ class actor_facade<Ret(Args...)> : public actor {
m_kernel,
arguments,
m_global_dimensions,
m_local_dimensions));
m_local_dimensions,
m_map_result));
}
else {
aout << "*** warning: tuple_cast failed!\n";
// slap caller around with a large fish
CPPA_LOG_ERROR("actor_facade::enqueue() tuple_cast failed.");
}
}
@@ -181,6 +213,8 @@ class actor_facade<Ret(Args...)> : public actor {
command_dispatcher* m_dispatcher;
std::vector<size_t> m_global_dimensions;
std::vector<size_t> m_local_dimensions;
std::function<option<cow_tuple<Args...>>(any_tuple)> m_map_args;
std::function<any_tuple(Ret& result)> m_map_result;
void add_arguments_to_kernel_rec(args_vec& arguments,
cl_context,
@@ -192,9 +226,12 @@ class actor_facade<Ret(Args...)> : public actor {
sizeof(cl_mem),
static_cast<void*>(&arguments[i]));
if (err != CL_SUCCESS) {
throw std::runtime_error("[!!!] clSetKernelArg: '"
+ get_opencl_error(err)
+ "'.");
std::ostringstream oss;
oss << "clSetKernelArg: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
}
err = clSetKernelArg(kernel,
@@ -202,9 +239,12 @@ class actor_facade<Ret(Args...)> : public actor {
sizeof(cl_mem),
static_cast<void*>(&arguments[0]));
if (err != CL_SUCCESS) {
throw std::runtime_error("[!!!] clSetKernelArg: '"
+ get_opencl_error(err)
+ "'.");
std::ostringstream oss;
oss << "clSetKernelArg: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
}
@@ -221,9 +261,12 @@ class actor_facade<Ret(Args...)> : public actor {
arg0.data(),
&err);
if (err != CL_SUCCESS) {
throw std::runtime_error("[!!!] clCreateBuffer: '"
+ get_opencl_error(err)
+ "'.");
std::ostringstream oss;
oss << "clCreateBuffer: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
else {
mem_ptr tmp;
@@ -250,9 +293,12 @@ class actor_facade<Ret(Args...)> : public actor {
nullptr,
&err);
if (err != CL_SUCCESS) {
throw std::runtime_error("[!!!] clCreateBuffer: '"
+ get_opencl_error(err)
+ "'.");
std::ostringstream oss;
oss << "clCreateBuffer: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
else {
mem_ptr tmp;
......
@@ -70,13 +70,15 @@ class command_impl : public command {
kernel_ptr kernel,
std::vector<mem_ptr> arguments,
std::vector<size_t> global_dimensions,
std::vector<size_t> local_dimensions)
std::vector<size_t> local_dimensions,
std::function<any_tuple(T&)> map_result)
: m_number_of_values(1)
, m_handle(handle)
, m_kernel(kernel)
, m_arguments(arguments)
, m_global_dimensions(global_dimensions)
, m_local_dimensions(local_dimensions)
, m_map_result(map_result)
{
m_kernel_event.adopt(cl_event());
for (size_t s : m_global_dimensions) {
@@ -135,6 +137,7 @@ class command_impl : public command {
std::vector<mem_ptr> m_arguments;
std::vector<size_t> m_global_dimensions;
std::vector<size_t> m_local_dimensions;
std::function<any_tuple (T&)> m_map_result;
void handle_results () {
CPPA_LOG_TRACE("command::handle_results()");
@@ -157,7 +160,9 @@ class command_impl : public command {
+ get_opencl_error(err)
+ "'.");
}
reply_to(m_handle, results);
auto mapped_result = m_map_result(results);
reply_tuple_to(m_handle, mapped_result);
//reply_to(m_handle, results);
}
};
......
@@ -37,7 +37,10 @@
#include <algorithm>
#include <functional>
#include "cppa/option.hpp"
#include "cppa/channel.hpp"
#include "cppa/cow_tuple.hpp"
#include "cppa/logging.hpp"
#include "cppa/opencl/global.hpp"
#include "cppa/opencl/command.hpp"
@@ -56,6 +59,11 @@ struct dereferencer {
inline void operator()(ref_counted* ptr) { ptr->deref(); }
};
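// default argument mapping: simply tuple_cast the incoming message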
template<typename... Ts>
option<cow_tuple<Ts...>> default_map_args(any_tuple msg) {
return tuple_cast<Ts...>(msg);
}
#ifdef CPPA_OPENCL
class command_dispatcher {
@@ -74,6 +82,58 @@ class command_dispatcher {
void enqueue();
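// internal helper: constructs the actor_facade with explicit
// argument/result mappings; clears the local dimensions if any of them is zero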
template<typename Ret, typename... Args>
actor_ptr spawn__(const program& prog,
const char* kernel_name,
size_t global_dim_1,
size_t global_dim_2,
size_t global_dim_3,
size_t local_dim_1,
size_t local_dim_2,
size_t local_dim_3,
std::function<option<cow_tuple<typename util::rm_ref<Args>::type...>>(any_tuple)> map_args,
std::function<any_tuple(Ret&)> map_result)
{
std::vector<size_t> local_dims{local_dim_1, local_dim_2, local_dim_3};
auto i = std::find(local_dims.begin(), local_dims.end(), 0);
if (i != local_dims.end()) local_dims.clear();
return new actor_facade<Ret (Args...)>(this,
prog,
kernel_name,
{global_dim_1, global_dim_2, global_dim_3},
local_dims,
map_args,
map_result);
}
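// public spawn: installs the default mappings (tuple_cast on input,
// make_any_tuple on output) and forwards to spawn__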
template<typename Ret, typename... Args>
actor_ptr spawn(const program& prog,
const char* kernel_name,
size_t global_dim_1,
size_t global_dim_2 = 1,
size_t global_dim_3 = 1,
size_t local_dim_1 = 0,
size_t local_dim_2 = 0,
size_t local_dim_3 = 0)
{
std::function<option<cow_tuple<typename util::rm_ref<Args>::type...>>(any_tuple)> f0 = [] (any_tuple msg) {
return tuple_cast<typename util::rm_ref<Args>::type...>(msg);
};
std::function<any_tuple(Ret&)> f1 = [] (Ret& result) {
return make_any_tuple(std::move(result));
};
return this->spawn__<Ret,Args...>(prog,
kernel_name,
global_dim_1,
global_dim_2,
global_dim_3,
local_dim_1,
local_dim_2,
local_dim_3,
std::move(f0),
std::move(f1));
}
/*
template<typename Ret, typename... Args>
actor_ptr spawn(const program& prog,
const char* kernel_name,
@@ -135,6 +195,7 @@ class command_dispatcher {
const char* kernel_name) {
return spawn<Ret, Args...>(program::create(kernel_source), kernel_name);
}
*/
private:
......
@@ -31,6 +31,7 @@
#include <sstream>
#include <iostream>
#include <stdexcept>
#include <algorithm>
#include "cppa/cppa.hpp"
#include "cppa/opencl/command_dispatcher.hpp"
@@ -77,17 +78,20 @@ struct command_dispatcher::worker {
job->enqueue(cmd_q);
cl_int err{clFlush(cmd_q)};
if (err != CL_SUCCESS) {
throw runtime_error("[!!!] clFlush: '"
+ get_opencl_error(err)
+ "'.");
ostringstream oss;
oss << "clFlush: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
}
catch (exception& e) {
cerr << e.what() << endl;
CPPA_LOG_ERROR("worker loop, what(): " << e.what());
}
}
else {
cout << "worker done" << endl;
CPPA_LOG_TRACE("worker done");
return;
}
}
@@ -107,7 +111,7 @@ void command_dispatcher::supervisor_loop(command_dispatcher* scheduler,
worker->start();
worker->m_thread.join();
worker.reset();
cout << "supervisor done" << endl;
CPPA_LOG_TRACE("supervisor done");
}
void command_dispatcher::initialize() {
@@ -121,12 +125,18 @@ void command_dispatcher::initialize() {
cl_uint number_of_platforms;
err = clGetPlatformIDs(ids.size(), ids.data(), &number_of_platforms);
if (err != CL_SUCCESS) {
throw logic_error("[!!!] clGetPlatformIDs: '"
+ get_opencl_error(err)
+ "'.");
ostringstream oss;
oss << "clGetPlatformIDs: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw logic_error(oss.str());
}
else if (number_of_platforms < 1) {
throw logic_error("[!!!] clGetPlatformIDs: 'no platforms found'.");
ostringstream oss;
oss << "clGetPlatformIDs: 'no platforms found'.";
CPPA_LOG_ERROR(oss.str());
throw logic_error(oss.str());
}
/* find gpu devices on our platform */
@@ -135,57 +145,67 @@ void command_dispatcher::initialize() {
cl_device_type dev_type{CL_DEVICE_TYPE_GPU};
err = clGetDeviceIDs(ids[pid], dev_type, 0, NULL, &num_devices);
if (err == CL_DEVICE_NOT_FOUND) {
cout << "NO GPU DEVICES FOUND! LOOKING FOR CPU DEVICES NOW ..." << endl;
CPPA_LOG_TRACE("No gpu devices found. Looking for cpu devices.");
cout << "No gpu devices found. Looking for cpu devices." << endl;
dev_type = CL_DEVICE_TYPE_CPU;
err = clGetDeviceIDs(ids[pid], dev_type, 0, NULL, &num_devices);
}
if (err != CL_SUCCESS) {
throw runtime_error("[!!!] clGetDeviceIDs: '"
+ get_opencl_error(err)
+ "'.");
ostringstream oss;
oss << "clGetDeviceIDs: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
vector<cl_device_id> devices(num_devices);
err = clGetDeviceIDs(ids[pid], dev_type, num_devices, devices.data(), NULL);
if (err != CL_SUCCESS) {
throw runtime_error("[!!!] clGetDeviceIDs: '"
+ get_opencl_error(err)
+ "'.");
ostringstream oss;
oss << "clGetDeviceIDs: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
/* create a context */
m_context.adopt(clCreateContext(0, 1, devices.data(), NULL, NULL, &err));
if (err != CL_SUCCESS) {
throw runtime_error("[!!!] clCreateContext: '"
+ get_opencl_error(err)
+ "'.");
ostringstream oss;
oss << "clCreateContext: '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
for (auto& d : devices) {
CPPA_LOG_TRACE("Creating command queue for device(s).");
device_ptr device;
device.adopt(d);
unsigned id{++dev_id_gen};
size_t return_size{0};
static constexpr size_t buf_size = 128;
char buf[buf_size];
err = clGetDeviceInfo(device.get(), CL_DEVICE_NAME, buf_size, buf, &return_size);
if (err != CL_SUCCESS) {
ostringstream oss;
oss << "clGetDeviceInfo (CL_DEVICE_NAME): '"
<< get_opencl_error(err)
<< "'.";
CPPA_LOG_ERROR(oss.str());
fill(buf, buf+buf_size, 0);
}
command_queue_ptr cmd_queue;
cmd_queue.adopt(clCreateCommandQueue(m_context.get(),
device.get(),
CL_QUEUE_PROFILING_ENABLE,
&err));
size_t return_size{0};
if (err != CL_SUCCESS) {
static constexpr size_t buf_size = 128;
char buf[buf_size];
err = clGetDeviceInfo(device.get(), CL_DEVICE_NAME, buf_size, buf, &return_size);
if (err == CL_SUCCESS) {
cout << "**** warning: Could not create command queue for device: "
<< buf << "." << endl;
}
else {
cout << "Could not create command queue unknown for device."
<< endl;
}
// ostringstream oss;
// oss << "[!!!] clCreateCommandQueue (" << id << "): '"
// << get_opencl_error(err) << "'.";
// throw runtime_error(oss.str());
CPPA_LOG_DEBUG("Could not create command queue for device "
<< buf << ": '" << get_opencl_error(err)
<< "'.");
}
else {
size_t max_work_group_size{0};
@@ -200,6 +220,7 @@ void command_dispatcher::initialize() {
<< id
<< ":CL_DEVICE_MAX_WORK_GROUP_SIZE): '"
<< get_opencl_error(err) << "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
cl_uint max_work_item_dimensions = 0;
@@ -214,6 +235,7 @@ void command_dispatcher::initialize() {
<< id
<< ":CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS): '"
<< get_opencl_error(err) << "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
vector<size_t> max_work_items_per_dim(max_work_item_dimensions);
@@ -228,6 +250,7 @@ void command_dispatcher::initialize() {
<< id
<< ":CL_DEVICE_MAX_WORK_ITEM_SIZES): '"
<< get_opencl_error(err) << "'.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
device_info dev_info{id,
@@ -243,6 +266,7 @@ void command_dispatcher::initialize() {
ostringstream oss;
oss << "[!!!] Could not create a command queue for "
<< "any of the present devices.";
CPPA_LOG_ERROR(oss.str());
throw runtime_error(oss.str());
}
else {
......
@@ -89,10 +89,13 @@ program program::create(const char* kernel_source) {
build_log.data(),
NULL);
build_log[ret_val_size] = '\0';
throw std::runtime_error("[!!!] clBuildProgram: '"
+ get_opencl_error(err)
+ "'. Build log: "
+ build_log.data());
std::ostringstream oss;
oss << "clBuildProgram: '"
<< get_opencl_error(err)
<< "'. Build log: "
<< build_log.data();
CPPA_LOG_ERROR(oss.str());
throw std::runtime_error(oss.str());
}
else {
#ifdef CPPA_DEBUG
@@ -121,9 +124,7 @@ program program::create(const char* kernel_source) {
build_log.data(),
NULL);
build_log[ret_val_size] = '\0';
std::cout << "clBuildProgram log: '"
<< build_log.data()
<< std::endl;
CPPA_LOG_DEBUG("clBuildProgram log: '" << build_log.data());
#endif
}
return {cptr, pptr};
......