Commit 98c3a5c6 authored by Dominik Charousset

removed legacy support for boost::thread

parent 4a7dc6c2
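
This commit drops the cppa/detail/thread.hpp compatibility layer and uses the C++11 standard threading primitives directly: detail::mutex, detail::thread, detail::lock_guard, detail::unique_lock, detail::condition_variable, and detail::this_thread all become their std:: counterparts. A minimal sketch of the mapping, using only the standard headers the hunks below add:

    // The std primitives that replace the detail:: aliases throughout.
    #include <mutex>
    #include <thread>
    #include <condition_variable>

    std::mutex mtx;                 // was detail::mutex
    std::condition_variable cv;     // was detail::condition_variable

    void critical_section() {
        std::lock_guard<std::mutex> guard(mtx); // was detail::lock_guard<detail::mutex>
    }

    int main() {
        std::thread t{critical_section};        // was detail::thread
        std::this_thread::yield();              // was detail::this_thread::yield()
        t.join();
    }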
......@@ -34,9 +34,11 @@
#include "cppa/config.hpp"
#include <list>
#include <mutex>
#include <atomic>
#include <vector>
#include <memory>
#include <thread>
#include <algorithm>
#include "cppa/atom.hpp"
......@@ -46,7 +48,6 @@
#include "cppa/exit_reason.hpp"
#include "cppa/util/shared_spinlock.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
#include "cppa/intrusive/single_reader_queue.hpp"
......@@ -61,7 +62,7 @@ template<class Base>
class abstract_actor : public Base {
typedef std::unique_ptr<attachable> attachable_ptr;
typedef detail::lock_guard<detail::mutex> guard_type;
typedef std::lock_guard<std::mutex> guard_type;
public:
......@@ -151,7 +152,7 @@ class abstract_actor : public Base {
util::fixed_vector<mailbox_element*, 10> m_nodes;
util::shared_spinlock m_nodes_lock;
typedef detail::lock_guard<util::shared_spinlock> lock_type;
typedef std::lock_guard<util::shared_spinlock> lock_type;
inline mailbox_element* fetch_node(actor* sender, any_tuple msg) {
mailbox_element* result = nullptr;
......@@ -260,7 +261,7 @@ class abstract_actor : public Base {
// nonzero if the actor has finished execution (holds the exit reason)
std::atomic<std::uint32_t> m_exit_reason;
// guards access to m_exited, m_subscriptions and m_links
detail::mutex m_mtx;
std::mutex m_mtx;
// links to other actors
std::vector<actor_ptr> m_links;
// code that is executed on cleanup
......
......@@ -31,6 +31,8 @@
#ifndef ACTOR_PROXY_CACHE_HPP
#define ACTOR_PROXY_CACHE_HPP
#include <mutex>
#include <thread>
#include <string>
#include <limits>
#include <vector>
......@@ -41,8 +43,6 @@
#include "cppa/util/shared_spinlock.hpp"
#include "cppa/detail/thread.hpp"
namespace cppa { namespace detail {
class actor_proxy_cache {
......@@ -62,7 +62,7 @@ class actor_proxy_cache {
key_tuple lb{nid, process_id, std::numeric_limits<actor_id>::min()};
key_tuple ub{nid, process_id, std::numeric_limits<actor_id>::max()};
{ // lifetime scope of guard
lock_guard<util::shared_spinlock> guard(m_lock);
std::lock_guard<util::shared_spinlock> guard(m_lock);
auto e = m_entries.end();
auto first = m_entries.lower_bound(lb);
if (first != e) {
......
......@@ -32,12 +32,14 @@
#define ACTOR_REGISTRY_HPP
#include <map>
#include <mutex>
#include <thread>
#include <atomic>
#include <cstdint>
#include <condition_variable>
#include "cppa/actor.hpp"
#include "cppa/attachable.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/util/shared_spinlock.hpp"
namespace cppa { namespace detail {
......@@ -74,8 +76,8 @@ class actor_registry {
std::atomic<size_t> m_running;
std::atomic<std::uint32_t> m_ids;
mutex m_running_mtx;
condition_variable m_running_cv;
std::mutex m_running_mtx;
std::condition_variable m_running_cv;
mutable util::shared_spinlock m_instances_mtx;
std::map<std::uint32_t, actor_ptr> m_instances;
......
......@@ -66,8 +66,6 @@ class converted_thread_context
public:
converted_thread_context();
// called if the converted thread finished execution
void cleanup(std::uint32_t reason = exit_reason::normal);
......@@ -87,10 +85,6 @@ class converted_thread_context
inline decltype(m_mailbox)& mailbox() { return m_mailbox; }
private:
pattern<atom_value, std::uint32_t> m_exit_msg_pattern;
};
} } // namespace cppa::detail
......
......@@ -32,9 +32,10 @@
#define GROUP_MANAGER_HPP
#include <map>
#include <mutex>
#include <thread>
#include "cppa/group.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/util/shared_spinlock.hpp"
namespace cppa { namespace detail {
......@@ -57,7 +58,7 @@ class group_manager {
typedef std::map< std::string, std::unique_ptr<group::module> > modules_map;
modules_map m_mmap;
detail::mutex m_mmap_mtx;
std::mutex m_mmap_mtx;
};
......
......@@ -31,11 +31,11 @@
#ifndef MOCK_SCHEDULER_HPP
#define MOCK_SCHEDULER_HPP
#include <thread>
#include <utility>
#include "cppa/scheduler.hpp"
#include "cppa/detail/tdata.hpp"
#include "cppa/detail/thread.hpp"
namespace cppa { namespace detail {
......@@ -49,7 +49,7 @@ class mock_scheduler : public scheduler {
static actor_ptr spawn_impl(std::function<void()> what);
static thread spawn_hidden_impl(std::function<void()> what, local_actor_ptr ctx);
static std::thread spawn_hidden_impl(std::function<void()> what, local_actor_ptr ctx);
void enqueue(scheduled_actor* what);
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef THREAD_HPP
#define THREAD_HPP
#if defined(__APPLE__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 7) && !defined(__clang__)
#define CPPA_USE_BOOST_THREADS
#include <boost/thread.hpp>
#include "cppa/util/duration.hpp"
namespace cppa { namespace detail {
using boost::mutex;
using boost::thread;
using boost::lock_guard;
using boost::unique_lock;
using boost::condition_variable;
namespace this_thread { using namespace boost::this_thread; }
template<class Lock, class Condition>
inline bool wait_until(Lock& lock, Condition& cond,
const boost::system_time& timeout) {
return cond.timed_wait(lock, timeout);
}
inline boost::system_time now() {
return boost::get_system_time();
}
} } // namespace cppa::detail
inline boost::system_time& operator+=(boost::system_time& lhs,
const cppa::util::duration& rhs) {
switch (rhs.unit) {
case cppa::util::time_unit::seconds:
lhs += boost::posix_time::seconds(rhs.count);
break;
case cppa::util::time_unit::milliseconds:
lhs += boost::posix_time::milliseconds(rhs.count);
break;
case cppa::util::time_unit::microseconds:
lhs += boost::posix_time::microseconds(rhs.count);
break;
default: break;
}
return lhs;
}
#else
#define CPPA_USE_STD_THREADS
#include <mutex>
#include <thread>
#include <condition_variable>
namespace cppa { namespace detail {
using std::mutex;
using std::thread;
using std::lock_guard;
using std::unique_lock;
using std::condition_variable;
namespace this_thread { using namespace std::this_thread; }
// returns false if a timeout occurred
template<class Lock, class Condition, typename TimePoint>
inline bool wait_until(Lock& lock, Condition& cond, const TimePoint& timeout) {
return cond.wait_until(lock, timeout) != std::cv_status::timeout;
}
inline auto now() -> decltype(std::chrono::high_resolution_clock::now()) {
return std::chrono::high_resolution_clock::now();
}
} } // namespace cppa::detail
#endif // __APPLE__
#endif // THREAD_HPP
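
The deleted header papered over two different timed-wait APIs: boost's cond.timed_wait(lock, timeout) returns true when notified, whereas std's cond.wait_until(lock, timeout) returns a std::cv_status. With the wrapper gone, call sites such as single_reader_queue (further below) test cv_status themselves. A self-contained sketch of the std idiom, matching the semantics of the removed detail::wait_until:

    // wait_until returns cv_status; "no timeout" means a notification
    // arrived (or a spurious wakeup), so the predicate is re-checked.
    #include <mutex>
    #include <chrono>
    #include <condition_variable>

    std::mutex mtx;
    std::condition_variable cv;
    bool data_ready = false;

    template<typename TimePoint>
    bool wait_for_data_until(const TimePoint& timeout) {
        std::unique_lock<std::mutex> guard(mtx);
        while (!data_ready) {
            if (cv.wait_until(guard, timeout) == std::cv_status::timeout) {
                return false; // deadline passed without data
            }
        }
        return true;
    }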
......@@ -31,8 +31,9 @@
#ifndef THREAD_POOL_SCHEDULER_HPP
#define THREAD_POOL_SCHEDULER_HPP
#include <thread>
#include "cppa/scheduler.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/util/producer_consumer_list.hpp"
#include "cppa/detail/scheduled_actor_dummy.hpp"
#include "cppa/detail/abstract_scheduled_actor.hpp"
......@@ -64,7 +65,7 @@ class thread_pool_scheduler : public scheduler {
job_queue m_queue;
scheduled_actor_dummy m_dummy;
thread m_supervisor;
std::thread m_supervisor;
actor_ptr spawn_impl(scheduled_actor* what,
bool push_to_queue = true);
......
......@@ -55,9 +55,7 @@ struct types_array_impl {
static constexpr bool builtin_only = true;
// all types are builtin, perform lookup on construction
uniform_type_info const* data[sizeof...(T)];
types_array_impl()
: data{ta_util<cppa_tinf,util::is_builtin<T>::value,T>::get()...} {
}
types_array_impl() : data{ta_util<cppa_tinf, true, T>::get()...} { }
inline uniform_type_info const* operator[](size_t p) const {
return data[p];
}
......
......@@ -32,11 +32,13 @@
#define SINGLE_READER_QUEUE_HPP
#include <list>
#include <mutex>
#include <atomic>
#include <memory>
#include <thread>
#include <condition_variable>
#include "cppa/config.hpp"
#include "cppa/detail/thread.hpp"
namespace cppa { namespace intrusive {
......@@ -48,7 +50,7 @@ namespace cppa { namespace intrusive {
template<typename T>
class single_reader_queue {
typedef detail::unique_lock<detail::mutex> lock_type;
typedef std::unique_lock<std::mutex> lock_type;
public:
......@@ -142,15 +144,15 @@ class single_reader_queue {
pointer m_head;
// locked on enqueue/dequeue operations to/from an empty list
detail::mutex m_mtx;
detail::condition_variable m_cv;
std::mutex m_mtx;
std::condition_variable m_cv;
template<typename TimePoint>
bool timed_wait_for_data(const TimePoint& timeout) {
if (empty()) {
lock_type guard(m_mtx);
while (m_stack.load() == nullptr) {
if (detail::wait_until(guard, m_cv, timeout) == false) {
if (m_cv.wait_until(guard, timeout) == std::cv_status::timeout) {
return false;
}
}
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef PRODUCER_CONSUMER_LIST_HPP
#define PRODUCER_CONSUMER_LIST_HPP
#define CPPA_CACHE_LINE_SIZE 64
#include <chrono>
#include <thread>
#include <atomic>
#include <cassert>
#include "cppa/detail/thread.hpp"
// GCC hack
#if !defined(_GLIBCXX_USE_SCHED_YIELD) && !defined(__clang__)
#include <time.h>
namespace std { namespace this_thread { namespace {
inline void yield() noexcept {
timespec req;
req.tv_sec = 0;
req.tv_nsec = 1;
nanosleep(&req, nullptr);
}
} } } // namespace std::this_thread::<anonymous>
#endif
// another GCC hack
#if !defined(_GLIBCXX_USE_NANOSLEEP) && !defined(__clang__)
#include <time.h>
namespace std { namespace this_thread { namespace {
template<typename Rep, typename Period>
inline void sleep_for(const chrono::duration<Rep, Period>& rt) {
auto sec = chrono::duration_cast<chrono::seconds>(rt);
auto nsec = chrono::duration_cast<chrono::nanoseconds>(rt - sec);
timespec req;
req.tv_sec = sec.count();
req.tv_nsec = nsec.count();
nanosleep(&req, nullptr);
}
} } } // namespace std::this_thread::<anonymous>
#endif
namespace cppa { namespace util {
......@@ -59,7 +119,7 @@ class producer_consumer_list {
void push_impl(node* tmp) {
// acquire exclusivity
while (m_producer_lock.exchange(true)) {
detail::this_thread::yield();
std::this_thread::yield();
}
// publish & swing last forward
m_last->next = tmp;
......@@ -93,7 +153,7 @@ class producer_consumer_list {
pointer try_pop() {
pointer result = nullptr;
while (m_consumer_lock.exchange(true)) {
detail::this_thread::yield();
std::this_thread::yield();
}
// only one consumer allowed
node* first = m_first;
......
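
producer_consumer_list guards each end of the list with an atomic flag instead of a mutex: exchange(true) returns the previous value, so the loop spins (yielding its time slice) until it observes false, i.e. until it is the thread that flipped the flag. A self-contained sketch of this test-and-set spinlock pattern; the class name is illustrative, not from the diff:

    #include <atomic>
    #include <thread>

    class spinlock {
        std::atomic<bool> m_locked{false};
    public:
        void lock() {
            // exchange returns the old value: 'true' means another
            // thread still holds the lock, so spin politely
            while (m_locked.exchange(true)) {
                std::this_thread::yield();
            }
        }
        void unlock() { m_locked.store(false); }
    };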
......@@ -28,6 +28,7 @@
\******************************************************************************/
#include <thread>
#include <cstring>
#include "cppa/atom.hpp"
......@@ -36,7 +37,6 @@
#include "cppa/util/shared_lock_guard.hpp"
#include "cppa/util/upgrade_lock_guard.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/detail/singleton_manager.hpp"
......@@ -81,7 +81,7 @@ actor_proxy_ptr actor_proxy_cache::get_impl(const key_tuple& key) {
}
actor_proxy_ptr result{new actor_proxy(std::get<2>(key), new process_information(std::get<1>(key), std::get<0>(key)))};
{ // lifetime scope of exclusive guard
lock_guard<util::shared_spinlock> guard{m_lock};
std::lock_guard<util::shared_spinlock> guard{m_lock};
auto i = m_entries.find(key);
if (i != m_entries.end()) {
return i->second;
......@@ -98,7 +98,7 @@ actor_proxy_ptr actor_proxy_cache::get_impl(const key_tuple& key) {
bool actor_proxy_cache::erase(const actor_proxy_ptr& pptr) {
auto pinfo = pptr->parent_process_ptr();
key_tuple key(pinfo->node_id(), pinfo->process_id(), pptr->id()); {
lock_guard<util::shared_spinlock> guard{m_lock};
std::lock_guard<util::shared_spinlock> guard{m_lock};
return m_entries.erase(key) > 0;
}
return false;
......
......@@ -39,7 +39,7 @@
namespace {
typedef cppa::detail::lock_guard<cppa::util::shared_spinlock> exclusive_guard;
typedef std::lock_guard<cppa::util::shared_spinlock> exclusive_guard;
typedef cppa::util::shared_lock_guard<cppa::util::shared_spinlock> shared_guard;
typedef cppa::util::upgrade_lock_guard<cppa::util::shared_spinlock> upgrade_guard;
......@@ -116,13 +116,13 @@ void actor_registry::dec_running() {
throw std::underflow_error("actor_count::dec()");
}
else*/ if (new_val <= 1) {
unique_lock<mutex> guard(m_running_mtx);
std::unique_lock<std::mutex> guard(m_running_mtx);
m_running_cv.notify_all();
}
}
void actor_registry::await_running_count_equal(size_t expected) {
unique_lock<mutex> guard(m_running_mtx);
std::unique_lock<std::mutex> guard(m_running_mtx);
while (m_running.load() != expected) {
m_running_cv.wait(guard);
}
......
......@@ -28,6 +28,7 @@
\******************************************************************************/
#include <chrono>
#include <memory>
#include <iostream>
#include <algorithm>
......@@ -41,10 +42,6 @@
namespace cppa { namespace detail {
converted_thread_context::converted_thread_context()
: m_exit_msg_pattern(atom("EXIT")) {
}
void converted_thread_context::quit(std::uint32_t reason) {
super::cleanup(reason);
// actor_exited should not be caught, but if anyone does,
......@@ -89,7 +86,7 @@ void converted_thread_context::dequeue(behavior& bhvr) { // override
bhvr.handle_timeout();
}
else {
auto timeout = now();
auto timeout = std::chrono::high_resolution_clock::now();
timeout += bhvr.timeout();
recursive_queue_node* e = m_mailbox.try_pop(timeout);
while (e != nullptr) {
......@@ -103,12 +100,21 @@ void converted_thread_context::dequeue(behavior& bhvr) { // override
}
filter_result converted_thread_context::filter_msg(const any_tuple& msg) {
if (m_trap_exit == false && matches(msg, m_exit_msg_pattern)) {
auto reason = msg.get_as<std::uint32_t>(1);
if (reason != exit_reason::normal) {
quit(reason);
auto& arr = detail::static_types_array<atom_value, std::uint32_t>::arr;
if ( msg.size() == 2
&& msg.type_at(0) == arr[0]
&& msg.type_at(1) == arr[1]) {
auto v0 = *reinterpret_cast<const atom_value*>(msg.at(0));
auto v1 = *reinterpret_cast<const std::uint32_t*>(msg.at(1));
if (v0 == atom("EXIT")) {
if (this->m_trap_exit == false) {
if (v1 != exit_reason::normal) {
quit(v1);
}
return normal_exit_signal;
}
}
return normal_exit_signal;
}
return ordinary_message;
}
......
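
The rewritten filter_msg drops the stored pattern object and checks the exit message by hand: it compares the tuple's two element types against the cached entries of static_types_array<atom_value, std::uint32_t>, then reinterpret_casts the raw element pointers once the tags match. A self-contained analogue of that tag-check-before-cast idiom, using std::type_info where libcppa uses uniform_type_info:

    #include <cstdint>
    #include <typeinfo>

    struct erased_value {
        const std::type_info* type; // runtime tag for what 'data' points to
        const void* data;
    };

    // Only cast after the tag confirms the dynamic type, mirroring
    // msg.type_at(i) == arr[i] followed by reinterpret_cast in the hunk.
    inline bool get_uint32(const erased_value& v, std::uint32_t& out) {
        if (*v.type != typeid(std::uint32_t)) return false;
        out = *reinterpret_cast<const std::uint32_t*>(v.data);
        return true;
    }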
......@@ -35,6 +35,7 @@
#include "cppa/config.hpp"
#ifndef CPPA_DISABLE_CONTEXT_SWITCHING
#include <thread>
#include <atomic>
#include <cstddef>
#include <cstring>
......@@ -42,7 +43,6 @@
#include <type_traits>
#include "cppa/util/fiber.hpp"
#include "cppa/detail/thread.hpp"
#ifdef CPPA_USE_UCONTEXT_IMPL
......
......@@ -41,7 +41,7 @@ namespace {
using namespace cppa;
typedef detail::lock_guard<util::shared_spinlock> exclusive_guard;
typedef std::lock_guard<util::shared_spinlock> exclusive_guard;
typedef util::shared_lock_guard<util::shared_spinlock> shared_guard;
typedef util::upgrade_lock_guard<util::shared_spinlock> upgrade_guard;
......@@ -141,7 +141,7 @@ intrusive_ptr<group> group_manager::anonymous() {
intrusive_ptr<group> group_manager::get(const std::string& module_name,
const std::string& group_identifier) {
{ // lifetime scope of guard
detail::lock_guard<detail::mutex> guard(m_mmap_mtx);
std::lock_guard<std::mutex> guard(m_mmap_mtx);
auto i = m_mmap.find(module_name);
if (i != m_mmap.end()) {
return (i->second)->get(group_identifier);
......@@ -157,7 +157,7 @@ void group_manager::add_module(group::module* mod) {
const std::string& mname = mod->name();
std::unique_ptr<group::module> mptr(mod);
{ // lifetime scope of guard
detail::lock_guard<detail::mutex> guard(m_mmap_mtx);
std::lock_guard<std::mutex> guard(m_mmap_mtx);
if (m_mmap.insert(std::make_pair(mname, std::move(mptr))).second) {
return; // success; don't throw
}
......
......@@ -30,6 +30,7 @@
#include "cppa/config.hpp"
#include <thread>
#include <atomic>
#include <iostream>
......@@ -41,7 +42,6 @@
#include "cppa/scheduled_actor.hpp"
#include "cppa/abstract_event_based_actor.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/actor_count.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/to_uniform_name.hpp"
......@@ -74,15 +74,15 @@ void run_hidden_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
namespace cppa { namespace detail {
thread mock_scheduler::spawn_hidden_impl(std::function<void()> what, local_actor_ptr ctx) {
return thread{run_hidden_actor, ctx, std::move(what)};
std::thread mock_scheduler::spawn_hidden_impl(std::function<void()> what, local_actor_ptr ctx) {
return std::thread{run_hidden_actor, ctx, std::move(what)};
}
actor_ptr mock_scheduler::spawn_impl(std::function<void()> what) {
inc_actor_count();
CPPA_MEMORY_BARRIER();
intrusive_ptr<local_actor> ctx{new detail::converted_thread_context};
thread{run_actor, ctx, std::move(what)}.detach();
std::thread{run_actor, ctx, std::move(what)}.detach();
return std::move(ctx);
}
......
......@@ -29,6 +29,7 @@
#include <cstdio>
#include <thread>
#include <fcntl.h>
#include <cstdint>
#include <cstring> // strerror
......@@ -37,7 +38,6 @@
#include <sys/time.h>
#include <sys/types.h>
#include "cppa/detail/thread.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/mock_scheduler.hpp"
......@@ -52,10 +52,10 @@ using namespace cppa::detail;
struct network_manager_impl : network_manager {
local_actor_ptr m_mailman;
thread m_mailman_thread;
std::thread m_mailman_thread;
local_actor_ptr m_post_office;
thread m_post_office_thread;
std::thread m_post_office_thread;
int pipe_fd[2];
......
......@@ -31,14 +31,15 @@
#include <new> // placement new
#include <ios> // ios_base::failure
#include <list> // std::list
#include <thread>
#include <vector> // std::vector
#include <sstream>
#include <cstring> // strerror
#include <cstdint> // std::uint32_t, std::uint64_t
#include <iostream> // std::cout, std::cerr, std::endl
#include <exception> // std::logic_error
#include <algorithm> // std::find_if
#include <stdexcept> // std::underflow_error
#include <sstream>
#include <cstdio>
#include <fcntl.h>
......@@ -55,7 +56,6 @@
#include "cppa/deserializer.hpp"
#include "cppa/binary_deserializer.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/buffer.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/types_array.hpp"
......
......@@ -28,7 +28,9 @@
\******************************************************************************/
#include <thread>
#include <atomic>
#include <chrono>
#include <iostream>
#include "cppa/on.hpp"
......@@ -38,7 +40,6 @@
#include "cppa/scheduler.hpp"
#include "cppa/local_actor.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/actor_count.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/singleton_manager.hpp"
......@@ -60,6 +61,10 @@ struct exit_observer : cppa::attachable {
}
};
inline decltype(std::chrono::high_resolution_clock::now()) now() {
return std::chrono::high_resolution_clock::now();
}
} // namespace <anonymous>
namespace cppa {
......@@ -74,7 +79,7 @@ class scheduler_helper {
}
void start() {
m_thread = detail::thread(&scheduler_helper::time_emitter, m_worker);
m_thread = std::thread(&scheduler_helper::time_emitter, m_worker);
}
void stop() {
......@@ -83,7 +88,7 @@ class scheduler_helper {
}
ptr_type m_worker;
detail::thread m_thread;
std::thread m_thread;
private:
......@@ -97,17 +102,17 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) {
// setup & local variables
self.set(m_self.get());
auto& queue = m_self->mailbox();
std::multimap<decltype(detail::now()), queue_node_ptr> messages;
std::multimap<decltype(now()), queue_node_ptr> messages;
queue_node_ptr msg_ptr;
//decltype(queue.pop()) msg_ptr = nullptr;
decltype(detail::now()) now;
decltype(now()) tout;
bool done = false;
// message handling rules
auto mfun = (
on<util::duration,actor_ptr,anything>() >> [&](const util::duration& d,
const actor_ptr&) {
// calculate timeout
auto timeout = detail::now();
auto timeout = now();
timeout += d;
messages.insert(std::make_pair(std::move(timeout),
std::move(msg_ptr)));
......@@ -131,10 +136,10 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) {
msg_ptr.reset(queue.pop());
}
else {
now = detail::now();
tout = now();
// handle timeouts (send messages)
auto it = messages.begin();
while (it != messages.end() && (it->first) <= now) {
while (it != messages.end() && (it->first) <= tout) {
queue_node_ptr ptr{std::move(it->second)};
CPPA_REQUIRE(ptr->marked == false);
auto whom = const_cast<actor_ptr*>(
......
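
time_emitter implements delayed sends with an ordered multimap keyed by time point: each request is filed under now() + duration, and every pass delivers all entries whose key is at or before the current time (the local variable was renamed from now to tout so it no longer shadows the new now() helper). A self-contained sketch of the pattern, with std::string standing in for the queued message:

    #include <map>
    #include <chrono>
    #include <string>
    #include <iostream>

    using hrc = std::chrono::high_resolution_clock;

    int main() {
        std::multimap<hrc::time_point, std::string> messages;
        messages.insert({hrc::now() + std::chrono::milliseconds(1), "timeout"});
        while (!messages.empty()) {
            auto tout = hrc::now();
            auto it = messages.begin();
            // due entries sort to the front; deliver and erase them
            while (it != messages.end() && it->first <= tout) {
                std::cout << "deliver: " << it->second << '\n';
                it = messages.erase(it);
            }
        }
    }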
......@@ -98,10 +98,14 @@ template<typename T>
T* lazy_get(std::atomic<T*>& ptr, bool register_atexit_fun = false) {
T* result = ptr.load();
if (result == nullptr) {
auto tmp = new T();
if (ptr.compare_exchange_weak(result, tmp) == false) {
auto tmp = new T;
if (ptr.compare_exchange_strong(result, tmp) == false) {
delete tmp;
}
else {
result = tmp;
}
/*
else {
// ok, successfully created singleton, register exit fun?
if (register_atexit_fun) {
......@@ -111,6 +115,8 @@ T* lazy_get(std::atomic<T*>& ptr, bool register_atexit_fun = false) {
}
return tmp;
}
*/
static_cast<void>(register_atexit_fun); // keep compiler happy
}
return result;
}
......
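
lazy_get is a lock-free lazy singleton: construct a candidate, publish it with one compare-and-swap, and let the loser delete its candidate. The switch from compare_exchange_weak to compare_exchange_strong matters because the weak form may fail spuriously even while ptr is still nullptr; the old code would then delete tmp and return the unchanged nullptr. The fixed logic of the hunk, reduced to its core:

    #include <atomic>

    template<typename T>
    T* lazy_get(std::atomic<T*>& ptr) {
        T* result = ptr.load();
        if (result == nullptr) {
            auto tmp = new T;
            // strong CAS fails only if another thread really installed a
            // value; on failure 'result' is updated to the winner's pointer
            if (!ptr.compare_exchange_strong(result, tmp)) {
                delete tmp;
            }
            else {
                result = tmp; // we won the race; publish our instance
            }
        }
        return result;
    }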
......@@ -28,6 +28,8 @@
\******************************************************************************/
#include <mutex>
#include <thread>
#include <cstdint>
#include <cstddef>
#include <iostream>
......@@ -47,7 +49,7 @@ namespace cppa { namespace detail {
namespace {
typedef unique_lock<mutex> guard_type;
typedef std::unique_lock<std::mutex> guard_type;
typedef intrusive::single_reader_queue<thread_pool_scheduler::worker> worker_queue;
} // namespace <anonymous>
......@@ -58,13 +60,13 @@ struct thread_pool_scheduler::worker {
job_queue* m_job_queue;
job_ptr m_dummy;
thread m_thread;
std::thread m_thread;
worker(job_queue* jq, job_ptr dummy) : m_job_queue(jq), m_dummy(dummy) {
}
void start() {
m_thread = thread(&thread_pool_scheduler::worker_loop, this);
m_thread = std::thread(&thread_pool_scheduler::worker_loop, this);
}
worker(const worker&) = delete;
......@@ -78,7 +80,7 @@ struct thread_pool_scheduler::worker {
if (result) {
return result;
}
detail::this_thread::yield();
std::this_thread::yield();
}
return result;
}
......@@ -90,13 +92,7 @@ struct thread_pool_scheduler::worker {
if (result) {
return result;
}
# ifdef CPPA_USE_BOOST_THREADS
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(1);
boost::this_thread::sleep(timeout);
# else
std::this_thread::sleep_for(std::chrono::milliseconds(1));
# endif
}
return result;
}
......@@ -181,7 +177,7 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
scheduled_actor* dummy) {
typedef std::unique_ptr<thread_pool_scheduler::worker> worker_ptr;
std::vector<worker_ptr> workers;
size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 8);
size_t num_workers = std::max<size_t>(std::thread::hardware_concurrency() * 2, 8);
for (size_t i = 0; i < num_workers; ++i) {
workers.emplace_back(new worker(jqueue, dummy));
workers.back()->start();
......@@ -193,8 +189,8 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
}
void thread_pool_scheduler::start() {
m_supervisor = thread(&thread_pool_scheduler::supervisor_loop,
&m_queue, &m_dummy);
m_supervisor = std::thread(&thread_pool_scheduler::supervisor_loop,
&m_queue, &m_dummy);
super::start();
}
......
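
The worker drains its queue in two phases: a hot loop that calls std::this_thread::yield() between attempts, then a patient loop that sleeps one millisecond per miss; removing the CPPA_USE_BOOST_THREADS branch leaves the std sleep as the only code path. A sketch of the two-phase backoff; the poll callable and the spin count are illustrative, not from the diff:

    #include <chrono>
    #include <thread>

    template<typename Poll>
    auto poll_with_backoff(Poll poll, int spin_attempts = 100) -> decltype(poll()) {
        // phase 1: stay on the CPU, but let other threads run between polls
        for (int i = 0; i < spin_attempts; ++i) {
            if (auto result = poll()) return result;
            std::this_thread::yield();
        }
        // phase 2: give up the CPU for 1 ms between polls
        for (;;) {
            if (auto result = poll()) return result;
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }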
......@@ -657,6 +657,7 @@ class uniform_type_info_map_helper {
for (const std::string& tname : tnames) {
d->m_by_rname.insert(std::make_pair(tname, uti));
}
CPPA_REQUIRE(d->m_by_uname.find(uti->name()) == d->m_by_uname.end());
d->m_by_uname.insert(std::make_pair(uti->name(), uti));
}
......