Commit e433d5fa authored by Dominik Charousset's avatar Dominik Charousset

two versions for single_reader_queue

This patch removes the mutex from `single_reader_queue` and instead
provides an explicit `blocking_single_reader_queue` class.
Furthermore, both classes provide a `close` member function, enabling
actors to explicitly close their mailbox on shutdown.
parent cc09639b
...@@ -81,6 +81,7 @@ cppa/from_string.hpp ...@@ -81,6 +81,7 @@ cppa/from_string.hpp
cppa/get.hpp cppa/get.hpp
cppa/group.hpp cppa/group.hpp
cppa/guard_expr.hpp cppa/guard_expr.hpp
cppa/intrusive/blocking_single_reader_queue.hpp
cppa/intrusive/single_reader_queue.hpp cppa/intrusive/single_reader_queue.hpp
cppa/intrusive_fwd_ptr.hpp cppa/intrusive_fwd_ptr.hpp
cppa/intrusive_ptr.hpp cppa/intrusive_ptr.hpp
......
...@@ -84,12 +84,15 @@ class abstract_actor_base<Base, true> : public Base { ...@@ -84,12 +84,15 @@ class abstract_actor_base<Base, true> : public Base {
}; };
typedef intrusive::single_reader_queue<recursive_queue_node,disposer>
default_mailbox_impl;
/* /*
* @brief Implements linking and monitoring for actors. * @brief Implements linking and monitoring for actors.
* @tparam Base Either {@link cppa::actor actor} * @tparam Base Either {@link cppa::actor actor}
* or {@link cppa::local_actor local_actor}. * or {@link cppa::local_actor local_actor}.
*/ */
template<class Base> template<class Base, class Mailbox = default_mailbox_impl>
class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_actor, Base>::value> { class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_actor, Base>::value> {
friend class ::cppa::self_type; friend class ::cppa::self_type;
...@@ -100,8 +103,8 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac ...@@ -100,8 +103,8 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac
public: public:
typedef detail::recursive_queue_node mailbox_element; typedef Mailbox mailbox_type;
typedef intrusive::single_reader_queue<mailbox_element> mailbox_type; typedef typename mailbox_type::value_type mailbox_element;
bool attach(attachable* ptr) { // override bool attach(attachable* ptr) { // override
if (ptr == nullptr) { if (ptr == nullptr) {
...@@ -180,6 +183,10 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac ...@@ -180,6 +183,10 @@ class abstract_actor : public abstract_actor_base<Base, std::is_base_of<local_ac
return false; return false;
} }
~abstract_actor() {
m_mailbox.clear();
}
protected: protected:
mailbox_type m_mailbox; mailbox_type m_mailbox;
......
...@@ -67,7 +67,7 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> { ...@@ -67,7 +67,7 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
auto node = super::fetch_node(this, auto node = super::fetch_node(this,
make_any_tuple(atom("TIMEOUT"), make_any_tuple(atom("TIMEOUT"),
++m_active_timeout_id)); ++m_active_timeout_id));
this->m_mailbox._push_back(node); this->m_mailbox.enqueue(node);
} }
else { else {
get_scheduler()->delayed_send( get_scheduler()->delayed_send(
...@@ -161,7 +161,7 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> { ...@@ -161,7 +161,7 @@ class abstract_scheduled_actor : public abstract_actor<scheduled_actor> {
int next_state = ready) { int next_state = ready) {
CPPA_REQUIRE(next_state == ready || next_state == pending); CPPA_REQUIRE(next_state == ready || next_state == pending);
CPPA_REQUIRE(node->marked == false); CPPA_REQUIRE(node->marked == false);
if (this->m_mailbox._push_back(node)) { if (this->m_mailbox.enqueue(node) == intrusive::first_enqueued) {
int state = m_state.load(); int state = m_state.load();
for (;;) { for (;;) {
switch (state) { switch (state) {
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef CPPA_BLOCKING_SINGLE_READER_QUEUE_HPP
#define CPPA_BLOCKING_SINGLE_READER_QUEUE_HPP
#include <mutex>
#include <thread>
#include <condition_variable>
#include "cppa/intrusive/single_reader_queue.hpp"
namespace cppa { namespace intrusive {
/**
 * @brief Blocking wrapper around {@link single_reader_queue}.
 *
 * Combines the lock-free @p single_reader_queue with a mutex and a
 * condition variable so that the (single) reader can sleep until data
 * arrives. Writers enqueue lock-free and only touch the mutex when the
 * queue transitions from empty to non-empty.
 */
template<typename T, class Delete = std::default_delete<T> >
class blocking_single_reader_queue {

    typedef std::unique_lock<std::mutex> lock_type;

 public:

    typedef single_reader_queue<T,Delete> impl_type;
    typedef typename impl_type::value_type value_type;
    typedef typename impl_type::pointer pointer;

    /**
     * @brief Dequeues the next element, blocking until one is available.
     * @warning call only from the reader (owner)
     */
    pointer pop() {
        wait_for_data();
        return m_impl.try_pop();
    }

    /**
     * @brief Tries to dequeue the next element without blocking.
     * @returns The next element or @p nullptr if the queue was empty.
     * @warning call only from the reader (owner)
     */
    inline pointer try_pop() {
        return m_impl.try_pop();
    }

    /**
     * @brief Tries to dequeue the next element, blocking at most
     *        until @p abs_time.
     * @returns The next element or @p nullptr on timeout.
     * @warning call only from the reader (owner)
     */
    template<typename TimePoint>
    pointer try_pop(const TimePoint& abs_time) {
        return (timed_wait_for_data(abs_time)) ? try_pop() : nullptr;
    }

    /**
     * @brief Enqueues @p new_element; may be called from any thread.
     *
     * Takes ownership of @p new_element. The mutex is acquired only when
     * this enqueue made the queue non-empty; locking before notify_one()
     * ensures a reader inside its locked empty()-check cannot miss the
     * wakeup. NOTE(review): if the queue is already closed, the impl's
     * enqueue disposes the element and no notification is sent.
     */
    void push_back(pointer new_element) {
        if (m_impl.enqueue(new_element) == first_enqueued) {
            lock_type guard(m_mtx);
            m_cv.notify_one();
        }
    }

    /**
     * @brief Deletes all remaining elements.
     * @warning call only from the reader (owner)
     */
    inline void clear() {
        m_impl.clear();
    }

    /**
     * @brief Closes the queue and deletes all remaining elements.
     * @warning call only from the reader (owner)
     */
    inline void close() {
        m_impl.close();
    }

    /**
     * @brief Closes the queue, applying @p f to each remaining element
     *        before it is deleted.
     * @warning call only from the reader (owner)
     */
    template<typename F>
    inline void close(const F& f) {
        m_impl.close(f);
    }

 private:

    // locked on enqueue/dequeue operations to/from an empty list
    std::mutex m_mtx;
    std::condition_variable m_cv;
    impl_type m_impl;

    // Waits until data is available or abs. time point @p timeout passed;
    // returns false on timeout. Precondition: queue is not closed.
    template<typename TimePoint>
    bool timed_wait_for_data(const TimePoint& timeout) {
        CPPA_REQUIRE(!m_impl.closed());
        if (m_impl.empty()) {
            lock_type guard(m_mtx);
            // re-check under the lock; loop guards against spurious wakeups
            while (m_impl.empty()) {
                if (m_cv.wait_until(guard, timeout) == std::cv_status::timeout) {
                    return false;
                }
            }
        }
        return true;
    }

    // Blocks until the queue is non-empty.
    // NOTE(review): empty() also holds for a closed queue, so a pop() on a
    // closed queue would block indefinitely — verify callers close only
    // after the reader stopped popping.
    void wait_for_data() {
        if (m_impl.empty()) {
            lock_type guard(m_mtx);
            while (m_impl.empty()) m_cv.wait(guard);
        }
    }

};
} } // namespace cppa::intrusive
#endif // CPPA_BLOCKING_SINGLE_READER_QUEUE_HPP
...@@ -32,39 +32,28 @@ ...@@ -32,39 +32,28 @@
#define CPPA_SINGLE_READER_QUEUE_HPP #define CPPA_SINGLE_READER_QUEUE_HPP
#include <list> #include <list>
#include <mutex>
#include <atomic> #include <atomic>
#include <memory> #include <memory>
#include <thread>
#include <condition_variable>
#include "cppa/config.hpp" #include "cppa/config.hpp"
namespace cppa { namespace intrusive { namespace cppa { namespace intrusive {
enum enqueue_result { enqueued, first_enqueued, queue_closed };
/** /**
* @brief An intrusive, thread safe queue implementation. * @brief An intrusive, thread safe queue implementation.
* @note For implementation details see * @note For implementation details see
* http://libcppa.blogspot.com/2011/04/mailbox-part-1.html * http://libcppa.blogspot.com/2011/04/mailbox-part-1.html
*/ */
template<typename T> template<typename T, class Delete = std::default_delete<T> >
class single_reader_queue { class single_reader_queue {
typedef std::unique_lock<std::mutex> lock_type;
public: public:
typedef T value_type; typedef T value_type;
typedef value_type* pointer; typedef value_type* pointer;
/**
* @warning call only from the reader (owner)
*/
pointer pop() {
wait_for_data();
return take_head();
}
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
...@@ -72,59 +61,59 @@ class single_reader_queue { ...@@ -72,59 +61,59 @@ class single_reader_queue {
return take_head(); return take_head();
} }
/**
* @warning call only from the reader (owner)
*/
template<typename TimePoint>
pointer try_pop(const TimePoint& abs_time) {
return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
}
// returns true if the queue was empty // returns true if the queue was empty
bool _push_back(pointer new_element) { enqueue_result enqueue(pointer new_element) {
pointer e = m_stack.load(); pointer e = m_stack.load();
for (;;) { for (;;) {
new_element->next = e; if (e == nullptr) {
if (m_stack.compare_exchange_weak(e, new_element)) { m_delete(new_element);
return (e == nullptr); return queue_closed; // queue is closed
} }
}
}
void push_back(pointer new_element) {
pointer e = m_stack.load();
for (;;) {
new_element->next = e; new_element->next = e;
if (!e) { if (m_stack.compare_exchange_weak(e, new_element)) {
lock_type guard(m_mtx); return (e == stack_end()) ? first_enqueued : enqueued;
if (m_stack.compare_exchange_weak(e, new_element)) {
m_cv.notify_one();
return;
}
}
else {
if (m_stack.compare_exchange_weak(e, new_element)) {
return;
}
} }
} }
} }
inline bool can_fetch_more() const { inline bool can_fetch_more() const {
return m_stack.load() != nullptr; return m_stack.load() != stack_end();
} }
/** /**
* @warning call only from the reader (owner) * @warning call only from the reader (owner)
*/ */
inline bool empty() const { inline bool empty() const {
return m_head == nullptr && m_stack.load() == nullptr; return closed() || (m_head == nullptr && m_stack.load() == stack_end());
} }
single_reader_queue() : m_stack(nullptr), m_head(nullptr) { } inline bool closed() const {
return m_stack.load() == nullptr;
}
~single_reader_queue() { /**
// empty the stack (void) fetch_new_data(); * @warning call only from the reader (owner)
*/
// closes this queue deletes all remaining elements
inline void close() {
fetch_new_data(nullptr);
clear_cached_elements();
}
// closes this queue and applies f to all remaining elements before deleting
template<typename F>
inline void close(const F& f) {
fetch_new_data(nullptr);
clear_cached_elements(f);
}
inline single_reader_queue() : m_head(nullptr) {
m_stack = stack_end();
}
inline void clear() {
fetch_new_data();
clear_cached_elements();
} }
private: private:
...@@ -134,38 +123,15 @@ class single_reader_queue { ...@@ -134,38 +123,15 @@ class single_reader_queue {
// accessed only by the owner // accessed only by the owner
pointer m_head; pointer m_head;
Delete m_delete;
// locked on enqueue/dequeue operations to/from an empty list // atomically sets m_stack back and enqueues all elements to the cache
std::mutex m_mtx; bool fetch_new_data(pointer end_ptr) {
std::condition_variable m_cv; CPPA_REQUIRE(end_ptr == nullptr || end_ptr == stack_end());
template<typename TimePoint>
bool timed_wait_for_data(const TimePoint& timeout) {
if (empty()) {
lock_type guard(m_mtx);
while (m_stack.load() == nullptr) {
if (m_cv.wait_until(guard, timeout) == std::cv_status::timeout) {
return false;
}
}
}
return true;
}
void wait_for_data() {
if (empty()) {
lock_type guard(m_mtx);
while (!(m_stack.load())) m_cv.wait(guard);
}
}
// atomically sets m_stack to nullptr and enqueues all elements to the cache
bool fetch_new_data() {
CPPA_REQUIRE(m_head == nullptr);
pointer e = m_stack.load(); pointer e = m_stack.load();
while (e) { while (e != end_ptr) {
if (m_stack.compare_exchange_weak(e, 0)) { if (m_stack.compare_exchange_weak(e, end_ptr)) {
while (e) { while (e != stack_end()) {
auto next = e->next; auto next = e->next;
e->next = m_head; e->next = m_head;
m_head = e; m_head = e;
...@@ -175,10 +141,11 @@ class single_reader_queue { ...@@ -175,10 +141,11 @@ class single_reader_queue {
} }
// next iteration // next iteration
} }
// !public_tail
return false; return false;
} }
inline bool fetch_new_data() { return fetch_new_data(stack_end()); }
pointer take_head() { pointer take_head() {
if (m_head != nullptr || fetch_new_data()) { if (m_head != nullptr || fetch_new_data()) {
auto result = m_head; auto result = m_head;
...@@ -188,6 +155,30 @@ class single_reader_queue { ...@@ -188,6 +155,30 @@ class single_reader_queue {
return nullptr; return nullptr;
} }
void clear_cached_elements() {
while (m_head != nullptr) {
auto next = m_head->next;
m_delete(m_head);
m_head = next;
}
}
template<typename F>
void clear_cached_elements(const F& f) {
while (m_head != nullptr) {
auto next = m_head->next;
f(*m_head);
m_delete(m_head);
m_head = next;
}
}
pointer stack_end() const {
// we are *never* going to dereference the returned pointer;
// it is only used as indicator wheter this queue is closed or not
return reinterpret_cast<pointer>(const_cast<single_reader_queue*>(this));
}
}; };
} } // namespace cppa::util } } // namespace cppa::util
......
...@@ -54,6 +54,8 @@ ...@@ -54,6 +54,8 @@
#include "cppa/detail/stacked_actor_mixin.hpp" #include "cppa/detail/stacked_actor_mixin.hpp"
#include "cppa/detail/recursive_queue_node.hpp" #include "cppa/detail/recursive_queue_node.hpp"
#include "cppa/intrusive/blocking_single_reader_queue.hpp"
namespace cppa { namespace cppa {
#ifdef CPPA_DOCUMENTATION #ifdef CPPA_DOCUMENTATION
...@@ -79,18 +81,27 @@ class thread_mapped_actor : public local_actor { ...@@ -79,18 +81,27 @@ class thread_mapped_actor : public local_actor {
class self_type; class self_type;
class thread_mapped_actor : public detail::stacked_actor_mixin< class thread_mapped_actor;
thread_mapped_actor,
detail::abstract_actor<local_actor> > { namespace detail {
typedef intrusive::blocking_single_reader_queue<recursive_queue_node,disposer>
blocking_mailbox;
typedef stacked_actor_mixin<thread_mapped_actor,
abstract_actor<local_actor,blocking_mailbox> >
thread_mapped_actor_base;
} // namespace detail
class thread_mapped_actor : public detail::thread_mapped_actor_base {
friend class self_type; // needs access to cleanup() friend class self_type; // needs access to cleanup()
friend class detail::behavior_stack; friend class detail::behavior_stack;
friend class detail::receive_policy; friend class detail::receive_policy;
typedef detail::stacked_actor_mixin< typedef detail::thread_mapped_actor_base super;
thread_mapped_actor,
detail::abstract_actor<local_actor> > super;
public: public:
...@@ -128,7 +139,8 @@ class thread_mapped_actor : public detail::stacked_actor_mixin< ...@@ -128,7 +139,8 @@ class thread_mapped_actor : public detail::stacked_actor_mixin<
inline detail::recursive_queue_node* receive_node() { inline detail::recursive_queue_node* receive_node() {
return m_mailbox.pop(); return m_mailbox.pop();
} }
inline auto init_timeout(const util::duration& tout) -> decltype(std::chrono::high_resolution_clock::now()) { inline auto init_timeout(const util::duration& tout)
-> decltype(std::chrono::high_resolution_clock::now()) {
auto result = std::chrono::high_resolution_clock::now(); auto result = std::chrono::high_resolution_clock::now();
result += tout; result += tout;
return result; return result;
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
#include "cppa/detail/actor_registry.hpp" #include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/singleton_manager.hpp" #include "cppa/detail/singleton_manager.hpp"
#include "cppa/intrusive/single_reader_queue.hpp" #include "cppa/intrusive/blocking_single_reader_queue.hpp"
using namespace std; using namespace std;
using namespace cppa::detail; using namespace cppa::detail;
...@@ -195,7 +195,7 @@ actor_ptr default_protocol::remote_actor(io_stream_ptr_pair io, ...@@ -195,7 +195,7 @@ actor_ptr default_protocol::remote_actor(io_stream_ptr_pair io,
return singleton_manager::get_actor_registry()->get(remote_aid); return singleton_manager::get_actor_registry()->get(remote_aid);
} }
default_protocol_ptr proto = this; default_protocol_ptr proto = this;
intrusive::single_reader_queue<remote_actor_result> q; intrusive::blocking_single_reader_queue<remote_actor_result> q;
run_later([proto, io, pinfptr, remote_aid, &q] { run_later([proto, io, pinfptr, remote_aid, &q] {
CPPA_LOGF_TRACE("lambda from default_protocol::remote_actor"); CPPA_LOGF_TRACE("lambda from default_protocol::remote_actor");
auto pp = proto->get_peer(*pinfptr); auto pp = proto->get_peer(*pinfptr);
......
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
#include "cppa/cppa.hpp" #include "cppa/cppa.hpp"
#include "cppa/logging.hpp" #include "cppa/logging.hpp"
#include "cppa/detail/singleton_manager.hpp" #include "cppa/detail/singleton_manager.hpp"
#include "cppa/intrusive/single_reader_queue.hpp" #include "cppa/intrusive/blocking_single_reader_queue.hpp"
using namespace std; using namespace std;
...@@ -128,7 +128,7 @@ class logging_impl : public logging { ...@@ -128,7 +128,7 @@ class logging_impl : public logging {
private: private:
thread m_thread; thread m_thread;
intrusive::single_reader_queue<log_event> m_queue; intrusive::blocking_single_reader_queue<log_event> m_queue;
}; };
......
...@@ -428,7 +428,7 @@ class middleman_impl : public abstract_middleman { ...@@ -428,7 +428,7 @@ class middleman_impl : public abstract_middleman {
void run_later(function<void()> fun) { void run_later(function<void()> fun) {
CPPA_LOG_TRACE(""); CPPA_LOG_TRACE("");
m_queue._push_back(new middleman_event(move(fun))); m_queue.enqueue(new middleman_event(move(fun)));
atomic_thread_fence(memory_order_seq_cst); atomic_thread_fence(memory_order_seq_cst);
uint8_t dummy = 0; uint8_t dummy = 0;
if (write(m_pipe_write, &dummy, sizeof(dummy)) != sizeof(dummy)) { if (write(m_pipe_write, &dummy, sizeof(dummy)) != sizeof(dummy)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment