Commit 676fc15c authored by Joseph Noir's avatar Joseph Noir

removed command dispatcher to reduce overhead

Added opencl_metainfo to handle opencl context and devices, moved
spawn implementation to opencl.hpp header. Adjusted other classes.
parent 861be5e0
......@@ -56,9 +56,9 @@
namespace cppa { namespace opencl {
class command_dispatcher;
class opencl_metainfo;
void enqueue_to_dispatcher(command_dispatcher*, command_ptr);
command_queue_ptr get_command_queue(uint32_t);
template<typename Signature>
class actor_facade;
......@@ -66,20 +66,21 @@ class actor_facade;
template<typename Ret, typename... Args>
class actor_facade<Ret(Args...)> : public actor {
friend class command_impl<actor_facade, Ret>;
public:
typedef cow_tuple<typename util::rm_const_and_ref<Args>::type...> args_tuple;
typedef std::function<option<args_tuple>(any_tuple)> arg_mapping;
typedef std::function<any_tuple(Ret&)> result_mapping;
static actor_facade* create(command_dispatcher* dispatcher,
const program& prog,
static intrusive_ptr<actor_facade> create(const program& prog,
const char* kernel_name,
arg_mapping map_args,
result_mapping map_result,
const dim_vec& global_dims,
const dim_vec& offsets,
const dim_vec& local_dims,
arg_mapping map_args,
result_mapping map_result) {
const dim_vec& local_dims) {
if (global_dims.empty()) {
auto str = "OpenCL kernel needs at least 1 global dimension.";
CPPA_LOGM_ERROR(detail::demangle(typeid(actor_facade)).c_str(), str);
......@@ -107,8 +108,7 @@ class actor_facade<Ret(Args...)> : public actor {
CPPA_LOGM_ERROR(detail::demangle<actor_facade>().c_str(), oss.str());
throw std::runtime_error(oss.str());
}
return new actor_facade<Ret (Args...)>{dispatcher,
kernel,
return new actor_facade<Ret (Args...)>{kernel,
prog,
global_dims,
offsets,
......@@ -125,8 +125,7 @@ class actor_facade<Ret(Args...)> : public actor {
private:
actor_facade(command_dispatcher* dispatcher,
kernel_ptr kernel,
actor_facade(kernel_ptr kernel,
const program& prog,
const dim_vec& global_dimensions,
const dim_vec& global_offsets,
......@@ -136,7 +135,6 @@ class actor_facade<Ret(Args...)> : public actor {
: m_kernel(kernel)
, m_program(prog.m_program)
, m_context(prog.m_context)
, m_dispatcher(dispatcher)
, m_global_dimensions(global_dimensions)
, m_global_offsets(global_offsets)
, m_local_dimensions(local_dimensions)
......@@ -160,14 +158,20 @@ class actor_facade<Ret(Args...)> : public actor {
m_kernel.get(),
ret_size,
get_ref<Is>(*opt)...);
enqueue_to_dispatcher(m_dispatcher,
make_counted<command_impl<Ret>>(handle,
m_kernel,
std::move(arguments),
m_global_dimensions,
m_global_offsets,
m_local_dimensions,
m_map_result));
auto cmd = make_counted<command_impl<actor_facade, Ret>>(handle,
this,
std::move(arguments));
cmd->ref();
cl_command_queue cmd_q = get_command_queue(0).get(); // todo: get the id from program
cmd->enqueue(cmd_q);
clFlush(cmd_q);
// cl_int err{clFlush(cmd_q)};
// if (err != CL_SUCCESS) {
// ostringstream oss;
// oss << "clFlush: " << get_opencl_error(err);
// CPPA_LOGMF(CPPA_ERROR, self, oss.str());
// throw runtime_error(oss.str());
// }
}
else { CPPA_LOGMF(CPPA_ERROR, this, "actor_facade::enqueue() tuple_cast failed."); }
}
......@@ -177,7 +181,6 @@ class actor_facade<Ret(Args...)> : public actor {
kernel_ptr m_kernel;
program_ptr m_program;
context_ptr m_context;
command_dispatcher* m_dispatcher;
dim_vec m_global_dimensions;
dim_vec m_global_offsets;
dim_vec m_local_dimensions;
......
......@@ -63,28 +63,20 @@ class command_dummy : public command {
void enqueue(command_queue_ptr) override { }
};
template<typename T>
template<typename T, typename R>
class command_impl : public command {
public:
command_impl(response_handle handle,
kernel_ptr kernel,
std::vector<mem_ptr> arguments,
const dim_vec& global_dims,
const dim_vec& offsets,
const dim_vec& local_dims,
const std::function<any_tuple(T&)>& map_result)
: m_number_of_values(std::accumulate(global_dims.begin(),
global_dims.end(),
intrusive_ptr<T> af_ptr,
std::vector<mem_ptr> arguments)
: m_number_of_values(std::accumulate(af_ptr->m_global_dimensions.begin(),
af_ptr->m_global_dimensions.end(),
1, std::multiplies<size_t>{}))
, m_handle(handle)
, m_kernel(kernel)
, m_af_ptr(af_ptr)
, m_arguments(move(arguments))
, m_global_dims(global_dims)
, m_offsets(offsets)
, m_local_dims(local_dims)
, m_map_result(map_result)
{
}
......@@ -93,26 +85,26 @@ class command_impl : public command {
this->ref();
cl_int err{0};
m_queue = queue;
auto ptr = m_kernel_event.get();
auto evnt = m_kernel_event.get();
auto data_or_nullptr = [](const dim_vec& vec) {
return vec.empty() ? nullptr : vec.data();
};
/* enqueue kernel */
err = clEnqueueNDRangeKernel(m_queue.get(),
m_kernel.get(),
m_global_dims.size(),
data_or_nullptr(m_offsets),
data_or_nullptr(m_global_dims),
data_or_nullptr(m_local_dims),
m_af_ptr->m_kernel.get(),
m_af_ptr->m_global_dimensions.size(),
data_or_nullptr(m_af_ptr->m_global_offsets),
data_or_nullptr(m_af_ptr->m_global_dimensions),
data_or_nullptr(m_af_ptr->m_local_dimensions),
0,
nullptr,
&ptr);
&evnt);
if (err != CL_SUCCESS) {
throw std::runtime_error("clEnqueueNDRangeKernel: "
+ get_opencl_error(err));
}
err = clSetEventCallback(ptr,
err = clSetEventCallback(evnt,
CL_COMPLETE,
[](cl_event, cl_int, void* data) {
auto cmd = reinterpret_cast<command_impl*>(data);
......@@ -130,23 +122,19 @@ class command_impl : public command {
int m_number_of_values;
response_handle m_handle;
kernel_ptr m_kernel;
intrusive_ptr<T> m_af_ptr;
event_ptr m_kernel_event;
command_queue_ptr m_queue;
std::vector<mem_ptr> m_arguments;
dim_vec m_global_dims;
dim_vec m_offsets;
dim_vec m_local_dims;
std::function<any_tuple (T&)> m_map_result;
void handle_results () {
cl_int err{0};
T result(m_number_of_values);
R result(m_number_of_values);
err = clEnqueueReadBuffer(m_queue.get(),
m_arguments[0].get(),
CL_TRUE,
0,
sizeof(typename T::value_type) * m_number_of_values,
sizeof(typename R::value_type) * m_number_of_values,
result.data(),
0,
nullptr,
......@@ -155,7 +143,7 @@ class command_impl : public command {
throw std::runtime_error("clEnqueueReadBuffer: "
+ get_opencl_error(err));
}
reply_tuple_to(m_handle, m_map_result(result));
reply_tuple_to(m_handle, m_af_ptr->m_map_result(result));
}
};
......
......@@ -16,7 +16,7 @@
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation; either version 2.1 of the License, *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
......@@ -29,106 +29,80 @@
\******************************************************************************/
#ifndef CPPA_OPENCL_COMMAND_DISPATCHER_HPP
#define CPPA_OPENCL_COMMAND_DISPATCHER_HPP
#ifndef OPENCL_METAINFO_HPP
#define OPENCL_METAINFO_HPP
#include <atomic>
#include <vector>
#include <algorithm>
#include <functional>
#include "cppa/option.hpp"
#include "cppa/logging.hpp"
#include "cppa/channel.hpp"
#include "cppa/cow_tuple.hpp"
#include "cppa/cppa.hpp"
#include "cppa/opencl/global.hpp"
#include "cppa/opencl/command.hpp"
#include "cppa/opencl/program.hpp"
#include "cppa/opencl/smart_ptr.hpp"
#include "cppa/opencl/actor_facade.hpp"
#include "cppa/util/limited_vector.hpp"
#include "cppa/detail/singleton_mixin.hpp"
#include "cppa/detail/singleton_manager.hpp"
#include "cppa/intrusive/blocking_single_reader_queue.hpp"
namespace cppa { namespace opencl {
struct dereferencer {
inline void operator()(ref_counted* ptr) { ptr->deref(); }
};
/// Default argument mapping for OpenCL actors: attempts to cast the
/// dynamically typed tuple @p tup to the statically typed tuple of `Ts...`.
/// @returns An empty option if @p tup does not match `Ts...`.
template<typename... Ts>
option<cow_tuple<Ts...>> default_map_args(any_tuple tup) {
    return tuple_cast<Ts...>(tup);
}
class command_dispatcher {
struct worker;
friend struct worker;
friend class detail::singleton_manager;
//template<typename Ret, typename... Args>
//actor_ptr spawn(const program& prog,
// const char* kernel_name,
// const dim_vec& global_dims,
// const dim_vec& offsets,
// const dim_vec& local_dims,
// std::function<option<cow_tuple<typename util::rm_const_and_ref<Args>::type...>>(any_tuple)> map_args,
// std::function<any_tuple(Ret&)> map_result)
//{
// return actor_facade<Ret (Args...)>::create(prog,
// kernel_name,
// global_dims,
// offsets,
// local_dims,
// std::move(map_args),
// std::move(map_result));
//}
//template<typename Ret, typename... Args>
//actor_ptr spawn(const program& prog,
// const char* kernel_name,
// const dim_vec& global_dims,
// const dim_vec& offsets = {},
// const dim_vec& local_dims = {})
//{
// std::function<option<cow_tuple<typename util::rm_const_and_ref<Args>::type...>>(any_tuple)>
// map_args = [] (any_tuple msg) {
// return tuple_cast<typename util::rm_const_and_ref<Args>::type...>(msg);
// };
// std::function<any_tuple(Ret&)> map_result = [] (Ret& result) {
// return make_any_tuple(std::move(result));
// };
// return spawn<Ret, Args...>(prog,
// kernel_name,
// global_dims,
// offsets,
// local_dims,
// std::move(map_args),
// std::move(map_result));
//}
class opencl_metainfo {
friend class program;
friend void enqueue_to_dispatcher(command_dispatcher* dispatcher,
command_ptr cmd);
friend class detail::singleton_manager;
friend command_queue_ptr get_command_queue(uint32_t id);
public:
void enqueue();
template<typename Ret, typename... Args>
actor_ptr spawn(const program& prog,
const char* kernel_name,
const dim_vec& global_dims,
const dim_vec& offsets,
const dim_vec& local_dims,
std::function<option<cow_tuple<typename util::rm_const_and_ref<Args>::type...>>(any_tuple)> map_args,
std::function<any_tuple(Ret&)> map_result)
{
return actor_facade<Ret (Args...)>::create(this,
prog,
kernel_name,
global_dims,
offsets,
local_dims,
std::move(map_args),
std::move(map_result));
}
template<typename Ret, typename... Args>
actor_ptr spawn(const program& prog,
const char* kernel_name,
const dim_vec& global_dims,
const dim_vec& offsets = {},
const dim_vec& local_dims = {})
{
std::function<option<cow_tuple<typename util::rm_const_and_ref<Args>::type...>>(any_tuple)>
map_args = [] (any_tuple msg) {
return tuple_cast<typename util::rm_const_and_ref<Args>::type...>(msg);
};
std::function<any_tuple(Ret&)> map_result = [] (Ret& result) {
return make_any_tuple(std::move(result));
};
return this->spawn<Ret, Args...>(prog,
kernel_name,
global_dims,
offsets,
local_dims,
std::move(map_args),
std::move(map_result));
}
private:
struct device_info {
unsigned id;
uint32_t id;
command_queue_ptr cmd_queue;
device_ptr dev_id;
size_t max_itms_per_grp;
......@@ -149,34 +123,23 @@ class command_dispatcher {
, max_itms_per_dim(max_itms_per_dim) { }
};
typedef intrusive::blocking_single_reader_queue<command, dereferencer>
job_queue;
static inline command_dispatcher* create_singleton() {
return new command_dispatcher;
static inline opencl_metainfo* create_singleton() {
return new opencl_metainfo;
}
void initialize();
void dispose();
void destroy();
std::atomic<unsigned> dev_id_gen;
job_queue m_job_queue;
command_ptr m_dummy;
std::thread m_supervisor;
std::atomic<uint32_t> dev_id_gen;
std::vector<device_info> m_devices;
context_ptr m_context;
std::vector<device_info> m_devices;
static void worker_loop(worker*);
static void supervisor_loop(command_dispatcher *scheduler,
job_queue*,
command_ptr);
};
command_dispatcher* get_command_dispatcher();
opencl_metainfo* get_opencl_metainfo();
} } // namespace cppa::opencl
#endif // CPPA_OPENCL_COMMAND_DISPATCHER_HPP
#endif // OPENCL_METAINFO_HPP
......@@ -57,12 +57,13 @@ class program {
* from a given @p kernel_source.
* @returns A program object.
*/
static program create(const char* kernel_source);
static program create(const char* kernel_source, uint32_t device_id = 0);
private:
program(context_ptr context, program_ptr program);
program(context_ptr context, program_ptr program, uint32_t device_id);
uint32_t m_device_id;
context_ptr m_context;
program_ptr m_program;
......
......@@ -30,15 +30,13 @@
#include "cppa/opencl/actor_facade.hpp"
#include "cppa/opencl/command_dispatcher.hpp"
#include "cppa/opencl/opencl_metainfo.hpp"
namespace cppa { namespace opencl {
void enqueue_to_dispatcher(command_dispatcher* dispatcher,
command_ptr cmd) {
cmd->ref(); // implicit ref count of m_job_queue
dispatcher->m_job_queue.push_back(cmd.get());
// todo: find device by id
command_queue_ptr get_command_queue(uint32_t) {
return get_opencl_metainfo()->m_devices.front().cmd_queue;
}
} } // namespace cppa::opencl
......@@ -16,7 +16,7 @@
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation; either version 2.1 of the License, *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
......@@ -28,100 +28,18 @@
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#include <sstream>
#include <iostream>
#include <stdexcept>
#include <algorithm>
#include "cppa/cppa.hpp"
#include "cppa/opencl/command_dispatcher.hpp"
#include "cppa/opencl/opencl_metainfo.hpp"
using namespace std;
namespace cppa { namespace opencl {
// Worker thread state of the command dispatcher: pops commands from the
// shared job queue and forwards them to an OpenCL command queue until the
// dummy sentinel command is received.
struct command_dispatcher::worker {
    // dispatcher owning this worker; provides the device list
    command_dispatcher* m_parent;
    typedef command_ptr job_ptr;
    // queue this worker consumes jobs from
    job_queue* m_job_queue;
    thread m_thread;
    // sentinel job; receiving it terminates the worker loop
    job_ptr m_dummy;
    worker(command_dispatcher* parent, job_queue* jq, job_ptr dummy)
    : m_parent(parent), m_job_queue(jq), m_dummy(dummy) { }
    // spawns the worker thread, which runs worker_loop(this)
    void start() {
        m_thread = thread(&command_dispatcher::worker_loop, this);
    }
    // non-copyable: owns a thread handle
    worker(const worker&) = delete;
    worker& operator=(const worker&) = delete;
    // main loop: blocks on the job queue, enqueues each command into the
    // front device's OpenCL command queue, then flushes that queue
    void operator()() {
        job_ptr job;
        for (;;) {
            /*
             * todo:
             *  manage device usage
             *  wait for device
             */
            // adopt reference count of job queue
            job.adopt(m_job_queue->pop());
            if(job != m_dummy) {
                try {
                    // NOTE(review): always uses the first device; per-job
                    // device selection appears planned but unimplemented
                    cl_command_queue cmd_q =
                            m_parent->m_devices.front().cmd_queue.get();
                    job->enqueue(cmd_q);
                    // flush so the command is submitted to the device
                    // without waiting for further work
                    cl_int err{clFlush(cmd_q)};
                    if (err != CL_SUCCESS) {
                        ostringstream oss;
                        oss << "clFlush: " << get_opencl_error(err);
                        CPPA_LOGMF(CPPA_ERROR, self, oss.str());
                        throw runtime_error(oss.str());
                    }
                }
                catch (exception& e) {
                    // re-throw with context after logging
                    ostringstream oss;
                    oss << "worker loop, e.what(): " << e.what();
                    CPPA_LOGMF(CPPA_ERROR, self, oss.str());
                    throw runtime_error(oss.str());
                }
            }
            else {
                // dummy sentinel received: shut this worker down
                CPPA_LOG_TRACE("worker done");
                return;
            }
        }
    }
};
// Thread entry point: runs the worker's function-call operator.
void command_dispatcher::worker_loop(command_dispatcher::worker* w) {
    w->operator()();
}
// Supervises a single worker thread: creates it, starts it, and blocks
// until the worker terminates (i.e., until it pops the dummy sentinel).
// @param scheduler dispatcher providing the devices the worker uses
// @param jq        job queue the worker consumes commands from
// @param dummy     sentinel command that signals worker shutdown
//                  (renamed from the misleading member-style name 'm_dummy')
void command_dispatcher::supervisor_loop(command_dispatcher* scheduler,
                                         job_queue* jq, command_ptr dummy) {
    CPPA_LOGF_TRACE("");
    // construct directly instead of default-construct + reset()
    unique_ptr<command_dispatcher::worker> worker{
        new command_dispatcher::worker(scheduler, jq, dummy)};
    worker->start();
    worker->m_thread.join();
    worker.reset();
}
void command_dispatcher::initialize() {
m_dummy = make_counted<command_dummy>();
void opencl_metainfo::initialize()
{
cl_int err{0};
/* find up to two available platforms */
// get number of available platforms
cl_uint number_of_platforms;
err = clGetPlatformIDs(0, nullptr, &number_of_platforms);
if (err != CL_SUCCESS) {
......@@ -131,13 +49,9 @@ void command_dispatcher::initialize() {
CPPA_LOGMF(CPPA_ERROR, self, oss.str());
throw logic_error(oss.str());
}
else if (number_of_platforms < 1) {
ostringstream oss;
oss << "clGetPlatformIDs: no platforms found.";
CPPA_LOGMF(CPPA_ERROR, self, oss.str());
throw logic_error(oss.str());
}
// get platform ids
vector<cl_platform_id> ids(number_of_platforms);
err = clGetPlatformIDs(ids.size(), ids.data(), nullptr);
if (err != CL_SUCCESS) {
......@@ -148,7 +62,8 @@ void command_dispatcher::initialize() {
throw logic_error(oss.str());
}
/* find gpu devices on our platform */
// find gpu devices on our platform
int pid{0};
cl_uint num_devices{0};
cl_device_type dev_type{CL_DEVICE_TYPE_GPU};
......@@ -174,7 +89,8 @@ void command_dispatcher::initialize() {
throw runtime_error(oss.str());
}
/* create a context */
// create a context
m_context.adopt(clCreateContext(0, 1, devices.data(), nullptr, nullptr, &err));
if (err != CL_SUCCESS) {
ostringstream oss;
......@@ -183,11 +99,12 @@ void command_dispatcher::initialize() {
throw runtime_error(oss.str());
}
for (auto& d : devices) {
CPPA_LOG_TRACE("Creating command queue for device(s).");
device_ptr device;
device.adopt(d);
unsigned id{++dev_id_gen};
uint32_t id{++dev_id_gen};
size_t return_size{0};
static constexpr size_t buf_size = 128;
char buf[buf_size];
......@@ -257,34 +174,28 @@ void command_dispatcher::initialize() {
m_devices.push_back(move(dev_info));
}
}
if (m_devices.empty()) {
ostringstream oss;
oss << "Could not create a command queue for "
<< "any of the present devices.";
<< "any present device.";
CPPA_LOGMF(CPPA_ERROR, self, oss.str());
throw runtime_error(oss.str());
}
else {
m_supervisor = thread(&command_dispatcher::supervisor_loop,
this,
&m_job_queue,
m_dummy);
}
}
void command_dispatcher::destroy() {
m_dummy->ref(); // reference of m_job_queue
m_job_queue.push_back(m_dummy.get());
m_supervisor.join();
void opencl_metainfo::destroy() {
delete this;
}
void command_dispatcher::dispose() {
void opencl_metainfo::dispose() {
delete this;
}
command_dispatcher* get_command_dispatcher() {
return detail::singleton_manager::get_command_dispatcher();
opencl_metainfo* get_opencl_metainfo() {
return detail::singleton_manager::get_opencl_metainfo();
return nullptr;
}
} } // namespace cppa::opencl
......@@ -35,18 +35,18 @@
#include "cppa/singletons.hpp"
#include "cppa/opencl/program.hpp"
#include "cppa/opencl/command_dispatcher.hpp"
#include "cppa/opencl/opencl_metainfo.hpp"
using namespace std;
namespace cppa { namespace opencl {
program::program(context_ptr context, program_ptr program)
: m_context(move(context)), m_program(move(program)) { }
program::program(context_ptr context, program_ptr program, uint32_t device_id)
: m_device_id(device_id), m_context(move(context)), m_program(move(program)) { }
program program::create(const char* kernel_source) {
context_ptr cptr = get_command_dispatcher()->m_context;
program program::create(const char* kernel_source, uint32_t device_id) {
context_ptr cptr = get_opencl_metainfo()->m_context;
cl_int err{0};
......@@ -76,7 +76,8 @@ program program::create(const char* kernel_source) {
// build program from program object
err = clBuildProgram(pptr.get(), 0, nullptr, nullptr, nullptr, nullptr);
if (err != CL_SUCCESS) {
device_ptr device{get_command_dispatcher()->m_devices.front().dev_id};
// todo: choose device, not just front
device_ptr device{get_opencl_metainfo()->m_devices.front().dev_id};
const char* where = "CL_PROGRAM_BUILD_LOG:get size";
size_t ret_size;
auto bi_err = program_build_info(device, 0, nullptr, &ret_size);
......@@ -107,7 +108,7 @@ program program::create(const char* kernel_source) {
}
else {
# ifdef CPPA_DEBUG_MODE
device_ptr device{get_command_dispatcher()->m_devices.front().dev_id};
device_ptr device{get_opencl_metainfo()->m_devices.front().dev_id};
const char* where = "CL_PROGRAM_BUILD_LOG:get size";
size_t ret_size;
err = program_build_info(device, 0, nullptr, &ret_size);
......@@ -132,7 +133,7 @@ program program::create(const char* kernel_source) {
}
# endif
}
return {cptr, pptr};
return {cptr, pptr, device_id};
}
} } // namespace cppa::opencl
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment