Commit e93b3575 authored by neverlord

thread.hpp on macosx

parent d9773eb7
@@ -4,15 +4,53 @@
 #ifdef __APPLE__
 #include <boost/thread.hpp>
+#include "cppa/util/duration.hpp"
 namespace cppa { namespace detail {
 using boost::mutex;
 using boost::thread;
+using boost::unique_lock;
 using boost::condition_variable;
+namespace this_thread { using namespace boost::this_thread; }
+template<class Lock, class Condition>
+inline bool wait_until(Lock& lock, Condition& cond,
+                       const boost::system_time& timeout)
+{
+    return cond.timed_wait(lock, timeout);
+}
+inline boost::system_time now()
+{
+    return boost::get_system_time();
+}
 } } // namespace cppa::detail
+inline boost::system_time& operator+=(boost::system_time& lhs,
+                                      const cppa::util::duration& rhs)
+{
+    switch (rhs.unit)
+    {
+        case cppa::util::time_unit::seconds:
+            lhs += boost::posix_time::seconds(rhs.count);
+            break;
+        case cppa::util::time_unit::milliseconds:
+            lhs += boost::posix_time::milliseconds(rhs.count);
+            break;
+        case cppa::util::time_unit::microseconds:
+            lhs += boost::posix_time::microseconds(rhs.count);
+            break;
+        default: break;
+    }
+    return lhs;
+}
 #else
 #include <mutex>
...
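Note (not part of the commit): the aliases and helpers added above mirror, on top of Boost, the interface the non-Apple branch gets from <thread>/<mutex>/<condition_variable>, so scheduler code can compute an absolute timeout and wait on it with a single spelling. A minimal caller-side sketch, assuming the __APPLE__ branch; the include path and the wait_for_event name are illustrative, not taken from the repository:

// Sketch only: caller-side use of the shims above, assuming the __APPLE__
// branch, where these names map to the Boost types.
#include "cppa/util/duration.hpp"
#include "cppa/detail/thread.hpp"   // assumed path of the header in this hunk

cppa::detail::mutex s_mtx;
cppa::detail::condition_variable s_cv;

// Blocks until notified or until rel_time has elapsed; returns false on timeout.
bool wait_for_event(const cppa::util::duration& rel_time)
{
    cppa::detail::unique_lock<cppa::detail::mutex> lock(s_mtx);
    boost::system_time timeout = cppa::detail::now();
    timeout += rel_time;   // operator+= from the hunk above
    return cppa::detail::wait_until(lock, s_cv, timeout);
}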
@@ -48,7 +48,7 @@ task_scheduler::task_scheduler()
     : m_queue()
     , m_dummy()
 {
-    m_worker = std::thread(worker_loop, &m_queue, &m_dummy);
+    m_worker = thread(worker_loop, &m_queue, &m_dummy);
 }
 task_scheduler::~task_scheduler()
@@ -61,7 +61,7 @@ void task_scheduler::schedule(scheduled_actor* what)
 {
     if (what)
     {
-        if (std::this_thread::get_id() == m_worker.get_id())
+        if (this_thread::get_id() == m_worker.get_id())
         {
             m_queue._push_back(what);
         }
...
@@ -15,7 +15,7 @@ void enqueue_fun(cppa::detail::thread_pool_scheduler* where,
     where->schedule(what);
 }
-typedef std::unique_lock<std::mutex> guard_type;
+typedef unique_lock<mutex> guard_type;
 typedef std::unique_ptr<thread_pool_scheduler::worker> worker_ptr;
 typedef util::single_reader_queue<thread_pool_scheduler::worker> worker_queue;
@@ -29,9 +29,9 @@ struct thread_pool_scheduler::worker
     job_queue* m_job_queue;
     scheduled_actor* m_job;
     worker_queue* m_supervisor_queue;
-    std::thread m_thread;
-    std::mutex m_mtx;
-    std::condition_variable m_cv;
+    thread m_thread;
+    mutex m_mtx;
+    condition_variable m_cv;
     worker(worker_queue* supervisor_queue, job_queue* jq)
         : next(nullptr), m_done(false), m_job_queue(jq), m_job(nullptr)
@@ -104,7 +104,7 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
     worker_queue wqueue;
     std::vector<worker_ptr> workers;
     // init
-    size_t num_workers = std::max(std::thread::hardware_concurrency(),
+    size_t num_workers = std::max(thread::hardware_concurrency(),
                                   static_cast<unsigned>(1));
     for (size_t i = 0; i < num_workers; ++i)
     {
...
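The three source hunks above are the same adjustment: with the using-declarations from thread.hpp, the scheduler code spells thread, mutex, condition_variable, unique_lock, and this_thread without the std:: prefix, so the names resolve to Boost on macOS and to the standard library elsewhere. A small sketch of that dispatch; the include path and the example_worker name are illustrative, not taken from the repository:

// Sketch only: the unqualified names used in the scheduler hunks resolve
// through cppa::detail to either backend.
#include "cppa/detail/thread.hpp"   // assumed path

namespace cppa { namespace detail {

void example_worker()
{
    mutex mtx;
    condition_variable cv;
    thread t([&]() {
        unique_lock<mutex> lock(mtx);
        cv.notify_one();            // same spelling on either backend
    });
    // same comparison style as task_scheduler::schedule above
    if (this_thread::get_id() != t.get_id())
    {
        t.join();
    }
}

} } // namespace cppa::detail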