Commit b61316b9 authored by neverlord's avatar neverlord

bye, bye select()

parent ad937fb8
......@@ -32,10 +32,12 @@
#define ACTOR_PROXY_CACHE_HPP
#include <string>
#include <vector>
#include <functional>
#include "cppa/actor_proxy.hpp"
#include "cppa/process_information.hpp"
#include "cppa/util/shared_spinlock.hpp"
namespace cppa { namespace detail {
......@@ -49,42 +51,17 @@ class actor_proxy_cache
process_information::node_id_type> // node id
key_tuple;
typedef std::function<void (actor_proxy_ptr&)> new_proxy_callback;
private:
std::map<key_tuple, process_information_ptr> m_pinfos;
std::map<key_tuple, actor_proxy_ptr> m_proxies;
new_proxy_callback m_new_cb;
process_information_ptr get_pinfo(key_tuple const& key);
util::shared_spinlock m_lock;
std::map<key_tuple, actor_proxy_ptr> m_entries;
public:
// this callback is called if a new proxy instance is created
template<typename F>
void set_new_proxy_callback(F&& cb)
{
m_new_cb = std::forward<F>(cb);
}
actor_proxy_ptr get(key_tuple const& key);
void add(actor_proxy_ptr& pptr);
size_t size() const;
void erase(actor_proxy_ptr const& pptr);
template<typename F>
void for_each(F&& fun)
{
for (auto i = m_proxies.begin(); i != m_proxies.end(); ++i)
{
fun(i->second);
}
}
// @returns true if pptr was successfully removed, false otherwise
bool erase(actor_proxy_ptr const& pptr);
};
......
......@@ -188,7 +188,8 @@ class buffer
return append_impl(fun, throw_on_error);
}
bool append_from(native_socket_type sfd, int rdflags,
bool append_from(native_socket_type sfd,
int rdflags = 0,
bool throw_on_error = false)
{
auto fun = [=]() -> int
......
......@@ -40,106 +40,8 @@
namespace cppa { namespace detail {
// Describes an outgoing network message: serialize @p msg and send it
// to the process identified by @p target_peer.
struct mailman_send_job
{
// remote process that should receive the message
process_information_ptr target_peer;
// the (sender, receiver, content) triple to serialize and transmit
addressed_message msg;
// @param piptr   Target peer the message is addressed to.
// @param from    Sending actor (may be an actor_proxy).
// @param to      Receiving channel on the remote side.
// @param content Payload tuple of the message.
inline mailman_send_job(process_information_ptr piptr,
actor_ptr const& from,
channel_ptr const& to,
any_tuple const& content)
: target_peer(piptr), msg(from, to, content)
{
}
};
// Tells the mailman to start using an already-connected socket
// for outgoing traffic to the given peer process.
struct mailman_add_peer
{
// connected TCP socket to the peer; ownership semantics not visible
// here — presumably the mailman loop closes it, TODO confirm
native_socket_type sockfd;
// identity of the peer process reachable through @p sockfd
process_information_ptr pinfo;
// @param fd    Connected socket for the new peer.
// @param piptr Process information of the new peer.
inline mailman_add_peer(native_socket_type fd,
process_information_ptr const& piptr)
: sockfd(fd), pinfo(piptr)
{
}
};
// A queue node for the mailman thread. Implements a tagged union over the
// possible job payloads (send a message, add a peer, or shut down); the
// active member is selected by m_type and managed manually (see the
// out-of-line constructors and destructor).
class mailman_job
{
public:
// discriminator for the unrestricted union below
enum job_type
{
invalid_type,
send_job_type,
add_peer_type,
kill_type
};
// creates a job with no active union member
inline mailman_job() : next(nullptr), m_type(invalid_type) { }
// creates a send job (constructs m_send_job via placement new)
mailman_job(process_information_ptr piptr,
actor_ptr const& from,
channel_ptr const& to,
any_tuple const& omsg);
// creates an add-peer job (constructs m_add_socket via placement new)
mailman_job(native_socket_type sockfd, process_information_ptr const& pinfo);
// factory for the shutdown sentinel; caller takes ownership of the result
static mailman_job* kill_job();
// destroys the active union member, if any
~mailman_job();
// @pre is_send_job()
inline mailman_send_job& send_job()
{
return m_send_job;
}
// @pre is_add_peer_job()
inline mailman_add_peer& add_peer_job()
{
return m_add_socket;
}
inline job_type type() const
{
return m_type;
}
inline bool is_send_job() const
{
return m_type == send_job_type;
}
inline bool is_add_peer_job() const
{
return m_type == add_peer_type;
}
inline bool is_kill_job() const
{
return m_type == kill_type;
}
// intrusive queue hook (used by single_reader_queue)
mailman_job* next;
private:
job_type m_type;
// unrestricted union
union
{
mailman_send_job m_send_job;
mailman_add_peer m_add_socket;
};
// tag-only constructor; leaves the union uninitialized (used by kill_job)
inline mailman_job(job_type jt) : next(nullptr), m_type(jt) { }
};
void mailman_loop();
intrusive::single_reader_queue<mailman_job>& mailman_queue();
}} // namespace cppa::detail
#endif // MAILMAN_HPP
......@@ -31,7 +31,11 @@
#ifndef MOCK_SCHEDULER_HPP
#define MOCK_SCHEDULER_HPP
#include <utility>
#include "cppa/scheduler.hpp"
#include "cppa/detail/tdata.hpp"
#include "cppa/detail/thread.hpp"
namespace cppa { namespace detail {
......@@ -44,7 +48,9 @@ class mock_scheduler : public scheduler
actor_ptr spawn(std::function<void()> what, scheduling_hint);
static actor_ptr spawn(std::function<void()> what);
static actor_ptr spawn_impl(std::function<void()> what);
static thread spawn_hidden_impl(std::function<void()> what, local_actor_ptr ctx);
void enqueue(scheduled_actor* what);
......
......@@ -43,15 +43,13 @@ class network_manager
virtual ~network_manager();
virtual void write_to_pipe(pipe_msg const& what) = 0;
virtual void start() = 0;
virtual void stop() = 0;
virtual intrusive::single_reader_queue<mailman_job>& mailman_queue() = 0;
virtual void send_to_post_office(any_tuple msg) = 0;
virtual intrusive::single_reader_queue<post_office_msg>& post_office_queue() = 0;
virtual void send_to_mailman(any_tuple msg) = 0;
static network_manager* create_singleton();
......
......@@ -38,12 +38,10 @@
namespace cppa { namespace detail {
void post_office_loop(int pipe_read_handle, int pipe_write_handle);
void post_office_loop();
void post_office_add_peer(native_socket_type peer_socket,
process_information_ptr const& peer_ptr,
actor_proxy_ptr const& peer_actor_ptr,
std::unique_ptr<attachable>&& peer_observer);
process_information_ptr const& peer_ptr);
void post_office_publish(native_socket_type server_socket,
actor_ptr const& published_actor);
......
......@@ -41,18 +41,21 @@ struct match_helper
match_helper(match_helper const&) = delete;
match_helper& operator=(match_helper const&) = delete;
any_tuple tup;
match_helper(any_tuple&& t) : tup(std::move(t)) { }
match_helper(any_tuple t) : tup(std::move(t)) { }
match_helper(match_helper&&) = default;
/*
void operator()(partial_function&& arg)
{
partial_function tmp{std::move(arg)};
tmp(tup);
}
*/
template<class Arg0, class... Args>
void operator()(Arg0&& arg0, Args&&... args)
{
(*this)(mexpr_concat_convert(std::forward<Arg0>(arg0),
std::forward<Args>(args)...));
auto tmp = mexpr_concat(std::forward<Arg0>(arg0),
std::forward<Args>(args)...);
tmp(tup);
}
};
......
......@@ -34,6 +34,8 @@
#include "cppa/actor_proxy.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/singleton_manager.hpp"
namespace cppa {
......@@ -47,8 +49,8 @@ void actor_proxy::forward_message(process_information_ptr const& piptr,
actor* sender,
any_tuple&& msg)
{
auto mailman_msg = new detail::mailman_job(piptr, sender, this, std::move(msg));
detail::mailman_queue().push_back(mailman_msg);
detail::singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(piptr, actor_ptr{sender}, std::move(msg)));
}
void actor_proxy::enqueue(actor* sender, any_tuple msg)
......
......@@ -30,15 +30,23 @@
#include "cppa/atom.hpp"
#include "cppa/any_tuple.hpp"
#include "cppa/util/shared_lock_guard.hpp"
#include "cppa/util/upgrade_lock_guard.hpp"
#include "cppa/detail/thread.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/detail/singleton_manager.hpp"
// thread_specific_ptr
#include <boost/thread/tss.hpp>
namespace {
boost::thread_specific_ptr<cppa::detail::actor_proxy_cache> s_proxy_cache;
//boost::thread_specific_ptr<cppa::detail::actor_proxy_cache> s_proxy_cache;
cppa::detail::actor_proxy_cache s_proxy_cache;
} // namespace <anonymous>
......@@ -46,62 +54,58 @@ namespace cppa { namespace detail {
actor_proxy_cache& get_actor_proxy_cache()
{
/*
if (s_proxy_cache.get() == nullptr)
{
s_proxy_cache.reset(new actor_proxy_cache);
}
return *s_proxy_cache;
*/
return s_proxy_cache;
}
process_information_ptr
actor_proxy_cache::get_pinfo(const actor_proxy_cache::key_tuple& key)
actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
{
auto i = m_pinfos.find(key);
if (i != m_pinfos.end())
// lifetime scope of shared guard
{
return i->second;
util::shared_lock_guard<util::shared_spinlock> guard{m_lock};
auto i = m_entries.find(key);
if (i != m_entries.end())
{
return i->second;
}
}
process_information_ptr tmp(new process_information(std::get<1>(key),
std::get<2>(key)));
m_pinfos.insert(std::make_pair(key, tmp));
return tmp;
}
actor_proxy_ptr actor_proxy_cache::get(const key_tuple& key)
{
auto i = m_proxies.find(key);
if (i != m_proxies.end())
actor_proxy_ptr result{new actor_proxy(std::get<0>(key), new process_information(std::get<1>(key), std::get<2>(key)))};
// lifetime scope of exclusive guard
{
return i->second;
lock_guard<util::shared_spinlock> guard{m_lock};
auto i = m_entries.find(key);
if (i != m_entries.end())
{
return i->second;
}
m_entries.insert(std::make_pair(key, result));
}
// get_pinfo(key) also inserts to m_pinfos
actor_proxy_ptr result(new actor_proxy(std::get<0>(key), get_pinfo(key)));
m_proxies.insert(std::make_pair(key, result));
if (m_new_cb) m_new_cb(result);
// insert to m_proxies
//result->enqueue(message(result, nullptr, atom("MONITOR")));
auto msg = make_any_tuple(atom("ADD_PROXY"), result);
singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
result->enqueue(nullptr, make_any_tuple(atom("MONITOR")));
result->attach_functor([result](std::uint32_t)
{
auto msg = make_any_tuple(atom("RM_PROXY"), result);
singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
});
return result;
}
void actor_proxy_cache::add(actor_proxy_ptr& pptr)
bool actor_proxy_cache::erase(actor_proxy_ptr const& pptr)
{
auto pinfo = pptr->parent_process_ptr();
key_tuple key(pptr->id(), pinfo->process_id(), pinfo->node_id());
m_pinfos.insert(std::make_pair(key, pptr->parent_process_ptr()));
m_proxies.insert(std::make_pair(key, pptr));
if (m_new_cb) m_new_cb(pptr);
}
void actor_proxy_cache::erase(const actor_proxy_ptr& pptr)
{
auto pinfo = pptr->parent_process_ptr();
key_tuple key(pptr->id(), pinfo->process_id(), pinfo->node_id());
m_proxies.erase(key);
}
size_t actor_proxy_cache::size() const
{
return m_proxies.size();
{
lock_guard<util::shared_spinlock> guard{m_lock};
return m_entries.erase(key) > 0;
}
return false;
}
} } // namespace cppa::detail
......@@ -31,6 +31,7 @@
#include <atomic>
#include <iostream>
#include "cppa/cppa.hpp"
#include "cppa/to_string.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/binary_serializer.hpp"
......@@ -51,80 +52,31 @@ using std::endl;
// implementation of mailman.hpp
namespace cppa { namespace detail {
// Creates a send job: tags the union as send_job_type and constructs the
// mailman_send_job member in place (required for union members with
// non-trivial constructors).
mailman_job::mailman_job(process_information_ptr piptr,
const actor_ptr& from,
const channel_ptr& to,
const any_tuple& content)
: next(nullptr), m_type(send_job_type)
{
new (&m_send_job) mailman_send_job (piptr, from, to, content);
}
// Creates an add-peer job: tags the union as add_peer_type and constructs
// the mailman_add_peer member in place.
mailman_job::mailman_job(native_socket_type sockfd, const process_information_ptr& pinfo)
: next(0), m_type(add_peer_type)
{
new (&m_add_socket) mailman_add_peer (sockfd, pinfo);
}
// Allocates the shutdown sentinel (kill_type, no active union member).
// The caller (the queue consumer) is responsible for deleting it.
mailman_job* mailman_job::kill_job()
{
return new mailman_job(kill_type);
}
// Manually invokes the destructor of whichever union member is active,
// as required for unions with non-trivially-destructible members.
mailman_job::~mailman_job()
{
switch (m_type)
{
case send_job_type:
{
m_send_job.~mailman_send_job();
break;
}
case add_peer_type:
{
m_add_socket.~mailman_add_peer();
break;
}
default:
{
// union doesn't contain a valid object
break;
}
}
}
// known issues: send() should be asynchronous and select() should be used
void mailman_loop()
{
bool done = false;
// serializes outgoing messages
binary_serializer bs;
// current active job
std::unique_ptr<mailman_job> job;
// caches mailman_queue()
auto& mqueue = mailman_queue();
// connected tcp peers
std::map<process_information, native_socket_type> peers;
for (;;)
{
job.reset(mqueue.pop());
if (job->is_send_job())
do_receive
(
on_arg_match >> [&](process_information_ptr target_peer, addressed_message msg)
{
mailman_send_job& sjob = job->send_job();
// forward message to receiver peer
auto peer_element = peers.find(*(sjob.target_peer));
if (peer_element != peers.end())
auto i = peers.find(*target_peer);
if (i != peers.end())
{
bool disconnect_peer = false;
auto peer = peer_element->second;
auto peer_fd = i->second;
try
{
bs << sjob.msg;
bs << msg;
auto size32 = static_cast<std::uint32_t>(bs.size());
DEBUG("--> " << to_string(sjob.msg));
DEBUG("--> " << to_string(msg));
// write size of serialized message
auto sent = ::send(peer, &size32, sizeof(std::uint32_t), 0);
auto sent = ::send(peer_fd, &size32, sizeof(std::uint32_t), 0);
if ( sent != static_cast<int>(sizeof(std::uint32_t))
|| static_cast<int>(bs.size()) != ::send(peer, bs.data(), bs.size(), 0))
|| static_cast<int>(bs.size()) != ::send(peer_fd, bs.data(), bs.size(), 0))
{
disconnect_peer = true;
DEBUG("too few bytes written");
......@@ -140,8 +92,8 @@ void mailman_loop()
{
DEBUG("peer disconnected (error during send)");
//closesocket(peer);
post_office_close_socket(peer);
peers.erase(peer_element);
post_office_close_socket(peer_fd);
peers.erase(i);
}
bs.reset();
}
......@@ -149,28 +101,33 @@ void mailman_loop()
{
DEBUG("message to an unknown peer");
}
// else: unknown peer
}
else if (job->is_add_peer_job())
},
on_arg_match >> [&](native_socket_type sockfd, process_information_ptr pinfo)
{
mailman_add_peer& pjob = job->add_peer_job();
auto i = peers.find(*(pjob.pinfo));
auto i = peers.find(*pinfo);
if (i == peers.end())
{
//cout << "mailman added " << pjob.pinfo->process_id() << "@"
// << to_string(pjob.pinfo->node_id()) << endl;
peers.insert(std::make_pair(*(pjob.pinfo), pjob.sockfd));
peers.insert(std::make_pair(*pinfo, sockfd));
}
else
{
DEBUG("add_peer_job failed: peer already known");
}
}
else if (job->is_kill_job())
},
on(atom("DONE")) >> [&]()
{
done = true;
},
others() >> [&]()
{
return;
std::string str = "unexpected message in post_office: ";
str += to_string(self->last_dequeued());
CPPA_CRITICAL(str.c_str());
}
}
)
.until(gref(done));
}
} } // namespace cppa::detail
......@@ -63,17 +63,31 @@ void run_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
cppa::detail::dec_actor_count();
}
// Thread entry point for "hidden" actors (infrastructure threads such as
// the mailman / post office that do not count toward the actor total —
// note the absence of dec_actor_count() compared to run_actor above).
// Publishes m_self as the thread-local `self`, runs @p what, swallows any
// exception, and clears `self` before the thread exits.
void run_hidden_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
std::function<void()> what)
{
cppa::self.set(m_self.get());
try { what(); }
catch (...) { }
cppa::self.set(nullptr);
}
} // namespace <anonymous>
namespace cppa { namespace detail {
actor_ptr mock_scheduler::spawn(std::function<void()> what)
thread mock_scheduler::spawn_hidden_impl(std::function<void()> what, local_actor_ptr ctx)
{
return thread{run_hidden_actor, ctx, std::move(what)};
}
actor_ptr mock_scheduler::spawn_impl(std::function<void()> what)
{
inc_actor_count();
CPPA_MEMORY_BARRIER();
intrusive_ptr<local_actor> ctx(new detail::converted_thread_context);
thread(run_actor, ctx, std::move(what)).detach();
return ctx;
intrusive_ptr<local_actor> ctx{new detail::converted_thread_context};
thread{run_actor, ctx, std::move(what)}.detach();
return std::move(ctx);
}
actor_ptr mock_scheduler::spawn(scheduled_actor*)
......@@ -85,7 +99,7 @@ actor_ptr mock_scheduler::spawn(scheduled_actor*)
actor_ptr mock_scheduler::spawn(std::function<void()> what, scheduling_hint)
{
return spawn(std::move(what));
return spawn_impl(what);
}
void mock_scheduler::enqueue(scheduled_actor*)
......
......@@ -40,8 +40,10 @@
#include "cppa/detail/thread.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/post_office_msg.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/converted_thread_context.hpp"
namespace {
......@@ -51,61 +53,37 @@ using namespace cppa::detail;
struct network_manager_impl : network_manager
{
typedef intrusive::single_reader_queue<post_office_msg> post_office_queue_t;
typedef intrusive::single_reader_queue<mailman_job> mailman_queue_t;
local_actor_ptr m_mailman;
thread m_mailman_thread;
int m_pipe[2]; // m_pipe[0]: read; m_pipe[1]: write
local_actor_ptr m_post_office;
thread m_post_office_thread;
mailman_queue_t m_mailman_queue;
post_office_queue_t m_post_office_queue;
thread m_loop; // post office thread
void start() /*override*/
void start() // override
{
if (pipe(m_pipe) != 0)
{
char* error_cstr = strerror(errno);
std::string error_str = "pipe(): ";
error_str += error_cstr;
free(error_cstr);
throw std::logic_error(error_str);
}
m_loop = thread(post_office_loop, m_pipe[0], m_pipe[1]);
}
m_post_office.reset(new converted_thread_context);
m_post_office_thread = mock_scheduler::spawn_hidden_impl(post_office_loop, m_post_office);
void write_to_pipe(pipe_msg const& what)
{
if (write(m_pipe[1], what, pipe_msg_size) != (int) pipe_msg_size)
{
std::cerr << "FATAL: cannot write to pipe" << std::endl;
abort();
}
}
inline int write_handle() const
{
return m_pipe[1];
m_mailman.reset(new converted_thread_context);
m_mailman_thread = mock_scheduler::spawn_hidden_impl(mailman_loop, m_mailman);
}
mailman_queue_t& mailman_queue()
void stop() // override
{
return m_mailman_queue;
m_post_office->enqueue(nullptr, make_any_tuple(atom("DONE")));
m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE")));
m_post_office_thread.join();
m_mailman_thread.join();
}
post_office_queue_t& post_office_queue()
void send_to_post_office(any_tuple msg)
{
return m_post_office_queue;
m_post_office->enqueue(nullptr, std::move(msg));
}
void stop() /*override*/
void send_to_mailman(any_tuple msg)
{
pipe_msg msg = { shutdown_event, 0 };
write_to_pipe(msg);
// m_loop calls close(m_pipe[0])
m_loop.join();
close(m_pipe[0]);
close(m_pipe[1]);
m_mailman->enqueue(nullptr, std::move(msg));
}
};
......
This diff is collapsed.
......@@ -181,10 +181,7 @@ network_manager* singleton_manager::get_network_manager()
{
scheduler* s = new thread_pool_scheduler;
// set_scheduler sets s_network_manager
if (set_scheduler(s) == false)
{
//delete s;
}
set_scheduler(s);
return get_network_manager();
}
return result;
......
......@@ -263,21 +263,21 @@ actor_ptr thread_pool_scheduler::spawn(scheduled_actor* what)
actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
scheduling_hint hint)
{
if (hint == detached)
if (hint == scheduled)
{
return mock_scheduler::spawn(std::move(what));
auto new_actor = new yielding_actor(std::move(what));
return spawn_impl(new_actor->attach_to_scheduler(this));
}
else
{
auto new_actor = new yielding_actor(std::move(what));
return spawn_impl(new_actor->attach_to_scheduler(this));
return mock_scheduler::spawn_impl(std::move(what));
}
}
#else
actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
scheduling_hint)
scheduling_hint hint)
{
return mock_scheduler::spawn(what);
return mock_scheduler::spawn(what, hint);
}
#endif
......
......@@ -53,6 +53,7 @@
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/native_socket.hpp"
#include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/network_manager.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
#include "cppa/detail/singleton_manager.hpp"
......@@ -132,10 +133,6 @@ void publish(actor_ptr& whom, std::uint16_t port)
{
throw network_error("unable to get socket flags");
}
if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1)
{
throw network_error("unable to set socket to nonblocking");
}
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
{
throw bind_failure(errno);
......@@ -191,10 +188,11 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
read_from_socket(sockfd, peer_node_id.data(), peer_node_id.size());
auto peer_pinf = new process_information(peer_pid, peer_node_id);
process_information_ptr pinfptr(peer_pinf);
actor_proxy_ptr result(new actor_proxy(remote_actor_id, pinfptr));
detail::mailman_queue().push_back(new detail::mailman_job(sockfd, pinfptr));
detail::post_office_add_peer(sockfd, pinfptr, result,
std::unique_ptr<attachable>());
auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
auto result = detail::get_actor_proxy_cache().get(key);
detail::singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(sockfd, pinfptr));
detail::post_office_add_peer(sockfd, pinfptr);
//auto ptr = get_scheduler()->register_hidden_context();
return result;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment