Commit ffd78ffa authored by Dominik Charousset's avatar Dominik Charousset

replaced CPPA_MEMORY_BARRIER with std::atomic_thread_fence

parent 8aef33d3
......@@ -57,15 +57,6 @@
# define CPPA_64BIT
#endif
#ifdef CPPA_MACOS
# include <libkern/OSAtomic.h>
# define CPPA_MEMORY_BARRIER() OSMemoryBarrier()
#elif defined(CPPA_GCC)
# define CPPA_MEMORY_BARRIER() __sync_synchronize()
#else
# error Plattform and/or compiler not supported
#endif
#include <cstdio>
#include <cstdlib>
......
......@@ -101,7 +101,7 @@ void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb) {
e = m_mailbox.try_pop();
if (!e) {
m_state.store(abstract_scheduled_actor::about_to_block);
CPPA_MEMORY_BARRIER();
std::atomic_thread_fence(std::memory_order_seq_cst);
if (m_mailbox.can_fetch_more() == false) {
switch (compare_exchange_state(abstract_scheduled_actor::about_to_block,
abstract_scheduled_actor::blocked)) {
......
......@@ -80,7 +80,7 @@ std::thread mock_scheduler::spawn_hidden_impl(std::function<void()> what, local_
actor_ptr mock_scheduler::spawn_impl(std::function<void()> what) {
inc_actor_count();
CPPA_MEMORY_BARRIER();
std::atomic_thread_fence(std::memory_order_seq_cst);
intrusive_ptr<local_actor> ctx{new detail::converted_thread_context};
std::thread{run_actor, ctx, std::move(what)}.detach();
return std::move(ctx);
......
......@@ -75,7 +75,7 @@ struct network_manager_impl : network_manager {
m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE")));
m_mailman_thread.join();
// wait until mailman is done; post_office closes all sockets
CPPA_MEMORY_BARRIER();
std::atomic_thread_fence(std::memory_order_seq_cst);
send_to_post_office(po_message{atom("DONE"), -1, 0});
m_post_office_thread.join();
close(pipe_fd[0]);
......
......@@ -84,7 +84,7 @@ void delete_singletons() {
}
stop_and_kill(s_scheduler);
stop_and_kill(s_network_manager);
CPPA_MEMORY_BARRIER();
std::atomic_thread_fence(std::memory_order_seq_cst);
// it's now safe to delete all other singletons
delete s_actor_registry.load();
delete s_group_manager.load();
......
......@@ -126,7 +126,7 @@ struct thread_pool_scheduler::worker {
job->chained_actor().reset();
}
if (!job->deref()) delete job;
CPPA_MEMORY_BARRIER();
std::atomic_thread_fence(std::memory_order_seq_cst);
dec_actor_count();
job = nullptr;
}
......@@ -207,7 +207,7 @@ void thread_pool_scheduler::enqueue(scheduled_actor* what) {
actor_ptr thread_pool_scheduler::spawn_impl(scheduled_actor* what,
bool push_to_queue) {
inc_actor_count();
CPPA_MEMORY_BARRIER();
std::atomic_thread_fence(std::memory_order_seq_cst);
intrusive_ptr<scheduled_actor> ctx(what);
ctx->ref();
if (push_to_queue) m_queue.push_back(ctx.get());
......
......@@ -72,7 +72,6 @@ recursive_queue_node* yielding_actor::receive_node() {
if (m_mailbox.can_fetch_more() == false) {
m_state.store(abstract_scheduled_actor::about_to_block);
std::atomic_thread_fence(std::memory_order_seq_cst);
//CPPA_MEMORY_BARRIER();
// make sure mailbox is empty
if (m_mailbox.can_fetch_more()) {
// someone preempt us => continue
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment