Commit 46d6207a authored by neverlord

boost threadpool testing

parent 9ae866bc
@@ -47,43 +47,42 @@ namespace cppa { namespace detail {
 struct pool_job
 {
-    abstract_scheduled_actor* ptr;
-    pool_job(abstract_scheduled_actor* mptr) : ptr(mptr) { }
+    abstract_event_based_actor* ptr;
+    pool_job(abstract_event_based_actor* mptr) : ptr(mptr) { }
     void operator()()
     {
-        util::fiber fself;
         struct handler : abstract_scheduled_actor::resume_callback
        {
-            abstract_scheduled_actor* job;
-            handler() : job(nullptr) { }
+            abstract_event_based_actor* job;
+            handler(abstract_event_based_actor* mjob) : job(mjob) { }
             bool still_ready() { return true; }
             void exec_done()
             {
                 if (!job->deref()) delete job;
                 dec_actor_count();
-                job = nullptr;
             }
         };
-        handler h;
-        ptr->resume(&fself, &h);
+        handler h{ptr};
+        ptr->resume(nullptr, &h);
     }
 };
 class boost_threadpool_scheduler;
 void enqueue_to_bts(boost_threadpool_scheduler* where,
-                    abstract_scheduled_actor* what);
+                    abstract_scheduled_actor *what);
 class boost_threadpool_scheduler : public scheduler
 {
-    boost::threadpool::pool m_pool;
+    boost::threadpool::thread_pool<pool_job> m_pool;
+    //boost::threadpool::pool m_pool;
 public:
     void start() /*override*/
     {
-        m_pool.size_controller().resize(boost::thread::hardware_concurrency());
+        m_pool.size_controller().resize(std::max(num_cores(), 4));
     }
     void stop() /*override*/
@@ -93,32 +92,25 @@ class boost_threadpool_scheduler : public scheduler
     void schedule(abstract_scheduled_actor* what) /*override*/
     {
-        boost::threadpool::schedule(m_pool, pool_job{what});
+        auto job = static_cast<abstract_event_based_actor*>(what);
+        boost::threadpool::schedule(m_pool, pool_job{job});
     }
     actor_ptr spawn(abstract_event_based_actor* what)
     {
-        return spawn_impl(what->attach_to_scheduler(enqueue_to_bts, this), false);
-    }
-    actor_ptr spawn(scheduled_actor* bhvr, scheduling_hint hint)
-    {
-        if (hint == detached) return mock_scheduler::spawn(bhvr);
-        return spawn_impl(new yielding_actor(bhvr, enqueue_to_bts, this), true);
-    }
- private:
-    actor_ptr spawn_impl(abstract_scheduled_actor* what, bool push_to_queue)
-    {
+        what->attach_to_scheduler(enqueue_to_bts, this);
         inc_actor_count();
         CPPA_MEMORY_BARRIER();
-        intrusive_ptr<abstract_scheduled_actor> ctx(what);
+        intrusive_ptr<abstract_event_based_actor> ctx(what);
         ctx->ref();
-        if (push_to_queue) boost::threadpool::schedule(m_pool, pool_job{what});
         return std::move(ctx);
     }
+    actor_ptr spawn(scheduled_actor* bhvr, scheduling_hint)
+    {
+        return mock_scheduler::spawn(bhvr);
+    }
 };
 void enqueue_to_bts(boost_threadpool_scheduler* where,
...
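For context: the new `m_pool` member and the `schedule()` override above follow the plain Boost.Threadpool pattern of enqueuing a copyable callable onto a pool of worker threads. Below is a minimal standalone sketch of that pattern, not part of the commit, assuming the third-party Boost.Threadpool headers (`boost/threadpool.hpp`) are on the include path; `print_job` is a hypothetical stand-in for `pool_job`.

#include <cstdio>
#include "boost/threadpool.hpp"

// Hypothetical stand-in for pool_job: any copyable type with operator()()
// can be scheduled on the pool.
struct print_job
{
    int id;
    void operator()() { std::printf("running job %d\n", id); }
};

int main()
{
    boost::threadpool::pool tp;                    // FIFO pool of worker threads
    tp.size_controller().resize(4);                // same resize API as start() above
    boost::threadpool::schedule(tp, print_job{1}); // enqueue; runs on a worker thread
    tp.wait();                                     // block until the queue drains
    return 0;
}

Note the design choice in the commit: `thread_pool<pool_job>` stores `pool_job` as the task type directly instead of the type-erased default (`boost::threadpool::pool` uses `boost::function0<void>`), presumably to avoid the type-erasure overhead per scheduled job.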
@@ -35,6 +35,8 @@
 #include <stdexcept>
 #include <algorithm>
 
+#include "boost/thread.hpp"
+
 template<typename T>
 T rd(char const* cstr)
 {
@@ -51,6 +53,12 @@ T rd(char const* cstr)
     return result;
 }
 
+#ifdef __APPLE__
+int num_cores()
+{
+    return static_cast<int>(boost::thread::hardware_concurrency());
+}
+#else
 int num_cores()
 {
     char cbuf[100];
@@ -65,6 +73,7 @@ int num_cores()
     *i = '\0';
     return rd<int>(cbuf);
 }
+#endif
 
 std::vector<uint64_t> factorize(uint64_t n)
 {
...
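The `#ifdef __APPLE__` branch exists because the generic `num_cores()` appears to parse the core count out of a text buffer (`cbuf`), a source that is not available on Mac OS X; there, Boost.Thread's portable query is used instead. A minimal sketch, not part of the commit, showing what that call reports on a given machine, assuming only `boost/thread.hpp`:

#include <iostream>
#include "boost/thread.hpp"

int main()
{
    // hardware_concurrency() reports the number of hardware threads and
    // returns 0 when the value cannot be determined, likely one reason
    // start() above clamps the pool size with std::max(num_cores(), 4).
    unsigned n = boost::thread::hardware_concurrency();
    std::cout << "hardware_concurrency() = " << n << std::endl;
    return 0;
}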