Commit 7023a997 authored by neverlord's avatar neverlord

post_office

parent 49511085
...@@ -133,7 +133,7 @@ ...@@ -133,7 +133,7 @@
</data> </data>
<data> <data>
<variable>ProjectExplorer.Project.Updater.EnvironmentId</variable> <variable>ProjectExplorer.Project.Updater.EnvironmentId</variable>
<value type="QString">{00861904-8afe-4186-b958-756209cdf248}</value> <value type="QString">{07fcd197-092d-45a0-8500-3be614e6ae31}</value>
</data> </data>
<data> <data>
<variable>ProjectExplorer.Project.Updater.FileVersion</variable> <variable>ProjectExplorer.Project.Updater.FileVersion</variable>
......
...@@ -181,3 +181,6 @@ cppa/detail/mailman.hpp ...@@ -181,3 +181,6 @@ cppa/detail/mailman.hpp
src/mailman.cpp src/mailman.cpp
cppa/detail/native_socket.hpp cppa/detail/native_socket.hpp
src/native_socket.cpp src/native_socket.cpp
cppa/detail/post_office.hpp
src/post_office.cpp
cppa/detail/buffer.hpp
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "cppa/attachable.hpp" #include "cppa/attachable.hpp"
#include "cppa/process_information.hpp" #include "cppa/process_information.hpp"
#include "cppa/util/rm_ref.hpp"
#include "cppa/util/enable_if.hpp" #include "cppa/util/enable_if.hpp"
namespace cppa { namespace cppa {
...@@ -36,17 +37,20 @@ class actor : public channel ...@@ -36,17 +37,20 @@ class actor : public channel
~actor(); ~actor();
/** /**
* @brief Attaches @p ptr to this actor. * @brief Attaches @p ptr to this actor
* (the actor takes ownership of @p ptr).
* *
* @p ptr will be deleted if actor finished execution of immediately * The actor will call <tt>ptr->detach(...)</tt> on exit or immediately
* if the actor already exited. * if he already exited.
* *
* @return @c true if @p ptr was successfully attached to the actor; * @return @c true if @p ptr was successfully attached to the actor;
* otherwise (actor already exited) @p false. * otherwise (actor already exited) @p false.
*
*/ */
virtual bool attach(attachable* ptr) = 0; virtual bool attach(attachable* ptr) = 0;
template<typename F>
bool attach_functor(F&& ftor);
/** /**
* @brief Detaches the first attached object that matches @p what. * @brief Detaches the first attached object that matches @p what.
*/ */
...@@ -127,6 +131,30 @@ bool actor::attach(std::unique_ptr<T>&& ptr, ...@@ -127,6 +131,30 @@ bool actor::attach(std::unique_ptr<T>&& ptr,
return attach(static_cast<attachable*>(ptr.release())); return attach(static_cast<attachable*>(ptr.release()));
} }
template<class F>
class functor_attachable : public attachable
{
F m_functor;
public:
template<class FArg>
functor_attachable(FArg&& arg) : m_functor(std::forward<FArg>(arg)) { }
virtual void detach(std::uint32_t reason)
{
m_functor(reason);
}
};
template<typename F>
bool actor::attach_functor(F&& ftor)
{
typedef typename util::rm_ref<F>::type f_type;
return attach(new functor_attachable<f_type>(std::forward<F>(ftor)));
}
} // namespace cppa } // namespace cppa
......
...@@ -244,6 +244,8 @@ inline void await_all_others_done() ...@@ -244,6 +244,8 @@ inline void await_all_others_done()
/** /**
* @brief Publishes @p whom at given @p port. * @brief Publishes @p whom at given @p port.
*
* The connection is automatically closed if the lifetime of @p whom ends.
*/ */
void publish(actor_ptr& whom, std::uint16_t port); void publish(actor_ptr& whom, std::uint16_t port);
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#define ACTOR_PROXY_CACHE_HPP #define ACTOR_PROXY_CACHE_HPP
#include <string> #include <string>
#include <functional>
#include "cppa/actor_proxy.hpp" #include "cppa/actor_proxy.hpp"
#include "cppa/process_information.hpp" #include "cppa/process_information.hpp"
...@@ -13,24 +14,38 @@ class actor_proxy_cache ...@@ -13,24 +14,38 @@ class actor_proxy_cache
public: public:
typedef std::tuple<std::uint32_t, std::uint32_t, typedef std::tuple<std::uint32_t, // actor id
process_information::node_id_type> key_tuple; std::uint32_t, // process id
process_information::node_id_type> // node id
key_tuple;
typedef std::function<void (actor_proxy_ptr&)> new_proxy_callback;
private: private:
std::map<key_tuple, process_information_ptr> m_pinfos; std::map<key_tuple, process_information_ptr> m_pinfos;
std::map<key_tuple, actor_proxy_ptr> m_proxies; std::map<key_tuple, actor_proxy_ptr> m_proxies;
new_proxy_callback m_new_cb;
process_information_ptr get_pinfo(const key_tuple& key); process_information_ptr get_pinfo(const key_tuple& key);
void add(const actor_proxy_ptr& pptr, const key_tuple& key);
public: public:
template<typename F>
void set_callback(F&& cb)
{
m_new_cb = std::forward<F>(cb);
}
actor_proxy_ptr get(const key_tuple& key); actor_proxy_ptr get(const key_tuple& key);
void add(const actor_proxy_ptr& pptr);
void add(actor_proxy_ptr& pptr);
size_t size() const; size_t size() const;
void erase(const actor_proxy_ptr& pptr);
template<typename F> template<typename F>
void for_each(F&& fun) void for_each(F&& fun)
{ {
......
#ifndef BUFFER_HPP
#define BUFFER_HPP
#include <ios> // std::ios_base::failure
#include <iostream>
#include <string.h>
#include "cppa/detail/native_socket.hpp"
namespace cppa { namespace detail {
template<size_t ChunkSize, size_t MaxBufferSize, typename DataType = char>
class buffer
{

    DataType* m_data;      // owned storage; nullptr if never allocated
    size_t m_written;      // number of elements written so far
    size_t m_allocated;    // current capacity of m_data
    size_t m_final_size;   // expected total size of the current message

    // Invokes @p fun (a read(2)/recv(2) wrapper) once and adds the number
    // of received bytes to m_written.
    // @returns @c true on success or if the call would block;
    //          @c false if the connection was closed or a "real" error
    //          occurred and @p throw_on_error is @c false.
    // @throws std::ios_base::failure on closed connection or read error
    //         if @p throw_on_error is @c true.
    template<typename F>
    bool append_impl(F&& fun, bool throw_on_error)
    {
        auto recv_result = fun();
        if (recv_result == 0)
        {
            // connection closed
            if (throw_on_error)
            {
                // bugfix: the exception object was constructed
                // but never thrown
                throw std::ios_base::failure("cannot read from a closed "
                                             "pipe/socket");
            }
            return false;
        }
        else if (recv_result < 0)
        {
            switch (errno)
            {
                case EAGAIN:
#               if EAGAIN != EWOULDBLOCK
                case EWOULDBLOCK:
#               endif
                {
                    // rdflags or sfd is set to non-blocking,
                    // this is not treated as error
                    return true;
                }
                default:
                {
                    // a "real" error occured
                    if (throw_on_error)
                    {
                        // bugfix: strerror() returns a pointer to a
                        // statically allocated buffer that must NOT be
                        // passed to free(); copy it into the exception
                        throw std::ios_base::failure(strerror(errno));
                    }
                    return false;
                }
            }
        }
        inc_written(static_cast<size_t>(recv_result));
        return true;
    }

 public:

    buffer() : m_data(nullptr), m_written(0), m_allocated(0), m_final_size(0)
    {
    }

    buffer(buffer&& other)
        : m_data(other.m_data), m_written(other.m_written)
        , m_allocated(other.m_allocated), m_final_size(other.m_final_size)
    {
        other.m_data = nullptr;
        other.m_written = other.m_allocated = other.m_final_size = 0;
    }

    // owns heap memory; copying is not allowed (the user-declared move
    // constructor already suppressed these, spelling it out documents it)
    buffer(const buffer&) = delete;
    buffer& operator=(const buffer&) = delete;

    ~buffer()
    {
        delete[] m_data;
    }

    // discards all written data but keeps the allocated storage
    void clear()
    {
        m_written = 0;
    }

    // discards all written data and sets the expected message size to
    // @p new_final_size, growing the storage (in ChunkSize steps) if needed
    // @throws std::ios_base::failure if @p new_final_size > MaxBufferSize
    void reset(size_t new_final_size = 0)
    {
        m_written = 0;
        m_final_size = new_final_size;
        if (new_final_size > m_allocated)
        {
            if (new_final_size > MaxBufferSize)
            {
                throw std::ios_base::failure("maximum buffer size exceeded");
            }
            // round the allocation up to a multiple of ChunkSize
            auto remainder = (new_final_size % ChunkSize);
            if (remainder == 0)
            {
                m_allocated = new_final_size;
            }
            else
            {
                m_allocated = (new_final_size - remainder) + ChunkSize;
            }
            delete[] m_data;
            m_data = new DataType[m_allocated];
        }
    }

    // @returns @c true if the expected amount of data has been received
    bool ready() const
    {
        return m_written == m_final_size;
    }

    // pointer to the current write position
    DataType* wr_ptr()
    {
        return m_data + m_written;
    }

    // number of elements written so far
    size_t size() const
    {
        return m_written;
    }

    // expected total size of the current message
    size_t final_size() const
    {
        return m_final_size;
    }

    // number of elements still missing until the buffer is ready()
    size_t remaining() const
    {
        return m_final_size - m_written;
    }

    void inc_written(size_t value)
    {
        m_written += value;
    }

    DataType* data()
    {
        return m_data;
    }

    // reads up to remaining() bytes from @p fd via read(2);
    // see append_impl for the error-handling contract
    bool append_from_file_descriptor(int fd, bool throw_on_error = false)
    {
        auto _this = this;
        auto fun = [_this, fd]() -> int
        {
            return ::read(fd, _this->wr_ptr(), _this->remaining());
        };
        return append_impl(fun, throw_on_error);
    }

    // reads up to remaining() bytes from @p sfd via recv(2);
    // see append_impl for the error-handling contract
    bool append_from(native_socket_t sfd, int rdflags,
                     bool throw_on_error = false)
    {
        auto _this = this;
        auto fun = [_this, sfd, rdflags]() -> int
        {
            return ::recv(sfd, _this->wr_ptr(), _this->remaining(), rdflags);
        };
        return append_impl(fun, throw_on_error);
    }

};
} } // namespace cppa::detail
#endif // BUFFER_HPP
...@@ -14,7 +14,7 @@ class mock_scheduler : public scheduler ...@@ -14,7 +14,7 @@ class mock_scheduler : public scheduler
void register_converted_context(context*); void register_converted_context(context*);
//void unregister_converted_context(context*); //void unregister_converted_context(context*);
actor_ptr spawn(actor_behavior*, scheduling_hint); actor_ptr spawn(actor_behavior*, scheduling_hint);
std::unique_ptr<attachable> register_hidden_context(); attachable* register_hidden_context();
}; };
......
#ifndef POST_OFFICE_HPP
#define POST_OFFICE_HPP
#include <memory>
#include "cppa/actor_proxy.hpp"
#include "cppa/detail/native_socket.hpp"
namespace cppa { namespace detail {
// Hands peer_socket over to the post office thread, which watches it for
// incoming messages; peer_observer is kept alive for the connection's
// lifetime.
void post_office_add_peer(native_socket_t peer_socket,
                          const process_information_ptr& peer_ptr,
                          const actor_proxy_ptr& peer_actor_ptr,
                          std::unique_ptr<attachable>&& peer_observer);

// Asks the post office to accept connections for published_actor
// on server_socket.
void post_office_publish(native_socket_t server_socket,
                         const actor_ptr& published_actor);

// Removes the doormen (server sockets) registered for actor_id.
void post_office_unpublish(std::uint32_t actor_id);

// Asks the post office to close sfd (presumably after the last proxy
// using the connection exited — confirm in post_office.cpp).
void post_office_close_socket(native_socket_t sfd);

//void post_office_unpublish(const actor_ptr& published_actor);
//void post_office_proxy_exited(const actor_proxy_ptr& proxy_ptr);
} } // namespace cppa::detail
#endif // POST_OFFICE_HPP
...@@ -64,6 +64,8 @@ class process_information : public ref_counted, ...@@ -64,6 +64,8 @@ class process_information : public ref_counted,
}; };
std::string to_string(const process_information& what);
typedef intrusive_ptr<process_information> process_information_ptr; typedef intrusive_ptr<process_information> process_information_ptr;
} // namespace cppa } // namespace cppa
......
...@@ -48,13 +48,7 @@ class scheduler ...@@ -48,13 +48,7 @@ class scheduler
* @return An {@link attachable} that the hidden context has to destroy * @return An {@link attachable} that the hidden context has to destroy
* if his lifetime ends. * if his lifetime ends.
*/ */
virtual std::unique_ptr<attachable> register_hidden_context() = 0; virtual attachable* register_hidden_context() = 0;
/**
* @brief Informs the scheduler that the convertex context @p what
* finished execution.
*/
//virtual void unregister_converted_context(context* what) = 0;
/** /**
* @brief Wait until all other actors finished execution. * @brief Wait until all other actors finished execution.
......
#include "cppa/atom.hpp" #include "cppa/atom.hpp"
#include "cppa/message.hpp" #include "cppa/message.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/actor_proxy.hpp" #include "cppa/actor_proxy.hpp"
#include "cppa/exit_reason.hpp" #include "cppa/exit_reason.hpp"
#include "cppa/detail/mailman.hpp"
namespace cppa { namespace cppa {
...@@ -9,12 +11,20 @@ actor_proxy::actor_proxy(std::uint32_t mid, const process_information_ptr& pptr) ...@@ -9,12 +11,20 @@ actor_proxy::actor_proxy(std::uint32_t mid, const process_information_ptr& pptr)
: super(mid), m_parent(pptr) : super(mid), m_parent(pptr)
{ {
if (!m_parent) throw std::runtime_error("parent == nullptr"); if (!m_parent) throw std::runtime_error("parent == nullptr");
attach(get_scheduler()->register_hidden_context());
} }
actor_proxy::actor_proxy(std::uint32_t mid, process_information_ptr&& pptr) actor_proxy::actor_proxy(std::uint32_t mid, process_information_ptr&& pptr)
: super(mid), m_parent(std::move(pptr)) : super(mid), m_parent(std::move(pptr))
{ {
if (!m_parent) throw std::runtime_error("parent == nullptr"); if (!m_parent) throw std::runtime_error("parent == nullptr");
attach(get_scheduler()->register_hidden_context());
}
void actor_proxy::forward_message(const process_information_ptr& piptr,
const message& msg)
{
detail::mailman_queue().push_back(new detail::mailman_job(piptr, msg));
} }
void actor_proxy::enqueue(const message& msg) void actor_proxy::enqueue(const message& msg)
......
...@@ -35,23 +35,27 @@ actor_proxy_ptr actor_proxy_cache::get(const key_tuple& key) ...@@ -35,23 +35,27 @@ actor_proxy_ptr actor_proxy_cache::get(const key_tuple& key)
} }
// get_pinfo(key) also inserts to m_pinfos // get_pinfo(key) also inserts to m_pinfos
actor_proxy_ptr result(new actor_proxy(std::get<0>(key), get_pinfo(key))); actor_proxy_ptr result(new actor_proxy(std::get<0>(key), get_pinfo(key)));
// insert to m_proxies
m_proxies.insert(std::make_pair(key, result)); m_proxies.insert(std::make_pair(key, result));
result->enqueue(message(result, nullptr, atom(":Monitor"))); if (m_new_cb) m_new_cb(result);
// insert to m_proxies
//result->enqueue(message(result, nullptr, atom(":Monitor")));
return result; return result;
} }
void actor_proxy_cache::add(const actor_proxy_ptr& pptr, const key_tuple& key) void actor_proxy_cache::add(actor_proxy_ptr& pptr)
{ {
auto pinfo = pptr->parent_process_ptr();
key_tuple key(pptr->id(), pinfo->process_id, pinfo->node_id);
m_pinfos.insert(std::make_pair(key, pptr->parent_process_ptr())); m_pinfos.insert(std::make_pair(key, pptr->parent_process_ptr()));
m_proxies.insert(std::make_pair(key, pptr)); m_proxies.insert(std::make_pair(key, pptr));
if (m_new_cb) m_new_cb(pptr);
} }
void actor_proxy_cache::add(const actor_proxy_ptr& pptr) void actor_proxy_cache::erase(const actor_proxy_ptr& pptr)
{ {
auto pinfo = pptr->parent_process_ptr(); auto pinfo = pptr->parent_process_ptr();
key_tuple key(pptr->id(), pinfo->process_id, pinfo->node_id); key_tuple key(pptr->id(), pinfo->process_id, pinfo->node_id);
add(pptr, key); m_proxies.erase(key);
} }
size_t actor_proxy_cache::size() const size_t actor_proxy_cache::size() const
......
#include <iostream>
#include "cppa/to_string.hpp"
#include "cppa/detail/mailman.hpp" #include "cppa/detail/mailman.hpp"
#include "cppa/binary_serializer.hpp" #include "cppa/binary_serializer.hpp"
#include "cppa/detail/post_office.hpp"
#define DEBUG(arg) std::cout << arg << std::endl
// forward declaration // forward declaration
namespace cppa { namespace detail { namespace { void mailman_loop(); } } } namespace cppa { namespace detail { namespace { void mailman_loop(); } } }
...@@ -82,13 +88,21 @@ mailman_job::~mailman_job() ...@@ -82,13 +88,21 @@ mailman_job::~mailman_job()
{ {
switch (m_type) switch (m_type)
{ {
case send_job_type: case send_job_type:
m_send_job.~mailman_send_job(); {
break; m_send_job.~mailman_send_job();
case add_peer_type: break;
m_add_socket.~mailman_add_peer(); }
break; case add_peer_type:
default: break; {
m_add_socket.~mailman_add_peer();
break;
}
case kill_type:
{
// union doesn't contain a valid object
break;
}
} }
} }
...@@ -102,8 +116,6 @@ util::single_reader_queue<mailman_job>& mailman_queue() ...@@ -102,8 +116,6 @@ util::single_reader_queue<mailman_job>& mailman_queue()
namespace cppa { namespace detail { namespace { namespace cppa { namespace detail { namespace {
void mailman_loop() void mailman_loop()
{ {
// send() flags
int flags = 0;
// serializes outgoing messages // serializes outgoing messages
binary_serializer bs; binary_serializer bs;
// current active job // current active job
...@@ -129,29 +141,55 @@ void mailman_loop() ...@@ -129,29 +141,55 @@ void mailman_loop()
{ {
bs << out_msg; bs << out_msg;
auto size32 = static_cast<std::uint32_t>(bs.size()); auto size32 = static_cast<std::uint32_t>(bs.size());
//cout << pself.process_id << " --> " << (to_string(out_msg) + "\n"); DEBUG("--> " << to_string(out_msg));
// write size of serialized message // write size of serialized message
auto sent = ::send(peer, &size32, sizeof(size32), flags); auto sent = ::send(peer, &size32, sizeof(std::uint32_t), 0);
if (sent <= 0) if (sent > 0)
{ {
// write message // write message
sent = ::send(peer, bs.data(), bs.size(), flags); sent = ::send(peer, bs.data(), bs.size(), 0);
} }
// disconnect peer if send() failed // disconnect peer if send() failed
disconnect_peer = (sent > 0); disconnect_peer = (sent <= 0);
if (sent <= 0)
{
if (sent == 0)
{
DEBUG("remote socket closed");
}
else
{
DEBUG("send() returned -1");
perror("send()");
}
}
else
{
if (sent != size32)
{
throw std::logic_error("WTF?!?");
}
}
} }
// something went wrong; close connection to this peer // something went wrong; close connection to this peer
catch (...) catch (std::exception& e)
{ {
DEBUG(to_uniform_name(typeid(e)) << ": " << e.what());
disconnect_peer = true; disconnect_peer = true;
} }
if (disconnect_peer) if (disconnect_peer)
{ {
closesocket(peer); DEBUG("peer disconnected (error during send)");
//closesocket(peer);
post_office_close_socket(peer);
peers.erase(peer_element); peers.erase(peer_element);
} }
bs.reset(); bs.reset();
} }
else
{
DEBUG("message to an unknown peer");
}
// else: unknown peer // else: unknown peer
} }
else if (job->is_add_peer_job()) else if (job->is_add_peer_job())
......
...@@ -85,10 +85,10 @@ void mock_scheduler::register_converted_context(context* ctx) ...@@ -85,10 +85,10 @@ void mock_scheduler::register_converted_context(context* ctx)
} }
} }
std::unique_ptr<attachable> mock_scheduler::register_hidden_context() attachable* mock_scheduler::register_hidden_context()
{ {
inc_actor_count(); inc_actor_count();
return std::unique_ptr<attachable>(new exit_observer); return new exit_observer;
} }
void mock_scheduler::await_others_done() void mock_scheduler::await_others_done()
......
#include "cppa/config.hpp"
#include <ios> // ios_base::failure #include <ios> // ios_base::failure
#include <errno.h> #include <errno.h>
#include <sstream> #include <sstream>
......
#include <new> // placement new
#include <ios> // ios_base::failure
#include <list> // std::list
#include <cstdint> // std::uint32_t
#include <iostream> // std::cout, std::endl
#include <exception> // std::logic_error
#include <algorithm> // std::find_if
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
// used cppa classes
#include "cppa/to_string.hpp"
#include "cppa/deserializer.hpp"
#include "cppa/binary_deserializer.hpp"
// used cppa utility
#include "cppa/util/single_reader_queue.hpp"
// used cppa details
#include "cppa/detail/buffer.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/native_socket.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
namespace cppa { namespace detail { namespace {
// allocate in 1KB chunks (minimize reallocations)
constexpr size_t s_chunk_size = 1024;
// allow up to 1MB per buffer
constexpr size_t s_max_buffer_size = (1024 * 1024);
static_assert((s_max_buffer_size % s_chunk_size) == 0,
"max_buffer_size is not a multiple of chunk_size");
static_assert(sizeof(native_socket_t) == sizeof(std::uint32_t),
"sizeof(native_socket_t) != sizeof(std::uint32_t)");
constexpr int s_rdflag = MSG_DONTWAIT;
constexpr std::uint32_t rd_queue_event = 0x00;
constexpr std::uint32_t unpublish_actor_event = 0x01;
constexpr std::uint32_t dec_socket_ref_event = 0x02;
constexpr std::uint32_t close_socket_event = 0x03;
constexpr std::uint32_t shutdown_event = 0x04;
typedef std::uint32_t pipe_msg[2];
constexpr size_t pipe_msg_size = 2 * sizeof(std::uint32_t);
#define DEBUG(arg) std::cout << arg << std::endl
struct add_peer_msg
{
native_socket_t sockfd;
process_information_ptr peer;
actor_proxy_ptr first_peer_actor;
std::unique_ptr<attachable> attachable_ptr;
add_peer_msg(native_socket_t peer_socket,
const process_information_ptr& peer_ptr,
const actor_proxy_ptr& peer_actor_ptr,
std::unique_ptr<attachable>&& peer_observer)
: sockfd(peer_socket)
, peer(peer_ptr)
, first_peer_actor(peer_actor_ptr)
, attachable_ptr(std::move(peer_observer))
{
}
};
struct add_server_socket_msg
{
native_socket_t server_sockfd;
actor_ptr published_actor;
add_server_socket_msg(native_socket_t ssockfd,
const actor_ptr& pub_actor)
: server_sockfd(ssockfd)
, published_actor(pub_actor)
{
}
};
// A message for the post office thread, implemented as a manual tagged
// union of add_peer_msg and add_server_socket_msg.
class post_office_msg
{

    friend class util::single_reader_queue<post_office_msg>;

    // intrusive "next" pointer used by single_reader_queue
    post_office_msg* next;

    // discriminates the union below: true = m_add_peer_msg is active
    bool m_is_add_peer_msg;

    // union members have non-trivial constructors, so construction and
    // destruction must be done manually (placement new / explicit dtor)
    union
    {
        add_peer_msg m_add_peer_msg;
        add_server_socket_msg m_add_server_socket;
    };

 public:

    // constructs an add_peer_msg variant
    post_office_msg(native_socket_t arg0,
                    const process_information_ptr& arg1,
                    const actor_proxy_ptr& arg2,
                    std::unique_ptr<attachable>&& arg3)
        : next(nullptr), m_is_add_peer_msg(true)
    {
        new (&m_add_peer_msg) add_peer_msg (arg0, arg1, arg2, std::move(arg3));
    }

    // constructs an add_server_socket_msg variant
    post_office_msg(native_socket_t arg0, const actor_ptr& arg1)
        : next(nullptr), m_is_add_peer_msg(false)
    {
        new (&m_add_server_socket) add_server_socket_msg(arg0, arg1);
    }

    inline bool is_add_peer_msg() const
    {
        return m_is_add_peer_msg;
    }

    inline bool is_add_server_socket_msg() const
    {
        return !m_is_add_peer_msg;
    }

    // precondition: is_add_peer_msg()
    inline add_peer_msg& as_add_peer_msg()
    {
        return m_add_peer_msg;
    }

    // precondition: is_add_server_socket_msg()
    inline add_server_socket_msg& as_add_server_socket_msg()
    {
        return m_add_server_socket;
    }

    // destroys whichever union member is active
    ~post_office_msg()
    {
        if (m_is_add_peer_msg)
        {
            m_add_peer_msg.~add_peer_msg();
        }
        else
        {
            m_add_server_socket.~add_server_socket_msg();
        }
    }

};
void post_office_loop(int pipe_read_handle);
// static initialization and destruction
// Creates the event pipe and the post office thread during static
// initialization and shuts both down during static destruction.
struct post_office_manager
{

    typedef util::single_reader_queue<post_office_msg> queue_t;

    // m_pipe[0] is for reading, m_pipe[1] is for writing
    int m_pipe[2];
    // queue of add_peer / add_server_socket messages for the thread
    queue_t* m_queue;
    // the post office thread itself
    boost::thread* m_loop;

    post_office_manager()
    {
        if (pipe(m_pipe) != 0)
        {
            switch (errno)
            {
                case EFAULT:
                {
                    throw std::logic_error("EFAULT: invalid pipe() argument");
                }
                case EMFILE:
                {
                    throw std::logic_error("EMFILE: Too many file "
                                           "descriptors in use");
                }
                case ENFILE:
                {
                    throw std::logic_error("The system limit on the total "
                                           "number of open files "
                                           "has been reached");
                }
                default:
                {
                    throw std::logic_error("unknown error");
                }
            }
        }
        m_queue = new queue_t;
        m_loop = new boost::thread(post_office_loop, m_pipe[0]);
    }

    // @returns the write end of the event pipe
    int write_handle()
    {
        return m_pipe[1];
    }

    ~post_office_manager()
    {
        std::cout << "~post_office_manager() ..." << std::endl;
        // ask the loop to terminate
        pipe_msg msg = { shutdown_event, 0 };
        write(write_handle(), msg, pipe_msg_size);
        // post_office_loop closes m_pipe[0] when handling shutdown_event
        m_loop->join();
        delete m_loop;
        delete m_queue;
        // bugfix: do NOT close m_pipe[0] here; the loop thread already
        // closed it, and a second close() could hit an unrelated
        // descriptor that reused the number in the meantime
        close(m_pipe[1]);
        std::cout << "~post_office_manager() done" << std::endl;
    }

}
s_po_manager;
// Attached to a local actor that is monitored by a remote process;
// reports the actor's exit reason to that process.
class remote_observer : public attachable
{

    process_information_ptr peer;

 public:

    remote_observer(const process_information_ptr& piptr) : peer(piptr)
    {
    }

    // called on actor exit: ship a ":KillProxy" message to the peer
    void detach(std::uint32_t reason)
    {
        actor_ptr observed = self();
        message kp_msg(observed, observed, atom(":KillProxy"), reason);
        detail::mailman_queue().push_back(new detail::mailman_job(peer,
                                                                  kp_msg));
    }

};
void handle_message(const message& msg,
const std::type_info& atom_tinfo,
const process_information& pself,
const process_information_ptr& peer)
{
if ( msg.content().size() == 1
&& msg.content().utype_info_at(0) == atom_tinfo
&& *reinterpret_cast<const atom_value*>(msg.content().at(0))
== atom(":Monitor"))
{
DEBUG("<-- :Monitor");
actor_ptr sender = msg.sender();
if (sender->parent_process() == pself)
{
//cout << pinfo << " ':Monitor'; actor id = "
// << sender->id() << endl;
// local actor?
// this message was send from a proxy
sender->attach(new remote_observer(peer));
}
else
{
DEBUG(":Monitor received for an remote actor");
}
}
else
{
DEBUG("<-- " << to_string(msg));
auto r = msg.receiver();
if (r) r->enqueue(msg);
}
}
// State of one connection to a remote process, as seen by the post
// office thread. Implements an incremental (non-blocking) read state
// machine driven by read_and_continue().
struct po_peer
{

    enum state
    {
        // connection just established; waiting for process information
        wait_for_process_info,
        // wait for the size of the next message
        wait_for_msg_size,
        // currently reading a message
        read_message,
        // this po_peer is no longer a valid instance
        moved
    };

    state m_state;
    // socket connected to the remote process
    native_socket_t m_sockfd;
    // identity of the remote process (set once process info was read)
    process_information_ptr m_peer;
    // observer handed over by post_office_add_peer (may be empty)
    std::unique_ptr<attachable> m_observer;
    // incremental read buffer for the current protocol element
    buffer<s_chunk_size, s_max_buffer_size> m_rdbuf;
    // true if this peer was accepted through a doorman (server socket)
    bool m_has_parent;
    // the doorman's server socket (only valid if m_has_parent)
    native_socket_t m_parent;
    // counts how many actors currently have a
    // "reference" to this peer
    size_t m_ref_count;

    // constructs a peer from an explicit post_office_add_peer request;
    // the process information is already known
    explicit po_peer(add_peer_msg& from)
        : m_state(wait_for_msg_size)
        , m_sockfd(from.sockfd)
        , m_peer(std::move(from.peer))
        , m_observer(std::move(from.attachable_ptr))
        , m_has_parent(false)
        , m_parent(-1)
        , m_ref_count(0)
    {
    }

    // constructs a peer for a connection accepted by a doorman;
    // the peer's process information must be read from the socket first
    explicit po_peer(native_socket_t sockfd, native_socket_t parent_socket)
        : m_state(wait_for_process_info)
        , m_sockfd(sockfd)
        , m_has_parent(true)
        , m_parent(parent_socket)
        // implicitly referenced by parent
        , m_ref_count(1)
    {
        // expect process id + node id as the first bytes on the wire
        m_rdbuf.reset( sizeof(std::uint32_t)
                      + process_information::node_id_size);
    }

    po_peer(po_peer&& other)
        : m_state(other.m_state)
        , m_sockfd(other.m_sockfd)
        , m_peer(std::move(other.m_peer))
        , m_observer(std::move(other.m_observer))
        , m_rdbuf(std::move(other.m_rdbuf))
        , m_has_parent(other.m_has_parent)
        , m_parent(other.m_parent)
        , m_ref_count(other.m_ref_count)
    {
        // mark the source so its destructor won't close m_sockfd
        other.m_state = moved;
        other.m_has_parent = false;
    }

    ~po_peer()
    {
        if (m_state != moved)
        {
            closesocket(m_sockfd);
            if (m_observer)
            {
                //m_observer->detach(exit_reason::remote_link_unreachable);
            }
        }
    }

    inline bool has_parent() const
    {
        return m_has_parent;
    }

    inline native_socket_t parent()
    {
        return m_parent;
    }

    // drops the implicit reference held by @p parent_socket (if any)
    // @return new reference count
    size_t parent_exited(native_socket_t parent_socket)
    {
        if (has_parent() && parent() == parent_socket)
        {
            m_has_parent = false;
            return --m_ref_count;
        }
        return m_ref_count;
    }

    size_t dec_ref_count()
    {
        return --m_ref_count;
    }

    void inc_ref_count()
    {
        ++m_ref_count;
    }

    // reads as much data as currently available; the switch cases
    // deliberately fall through so one call can consume process info,
    // message size, and message body in a single pass
    // @return false if an error occured; otherwise true
    bool read_and_continue(const uniform_type_info* meta_msg,
                           const process_information& pself)
    {
        switch (m_state)
        {
            case wait_for_process_info:
            {
                if (!m_rdbuf.append_from(m_sockfd, s_rdflag)) return false;
                if (m_rdbuf.ready() == false)
                {
                    break;
                }
                else
                {
                    m_peer.reset(new process_information);
                    // inform mailman about new peer
                    mailman_queue().push_back(new mailman_job(m_sockfd,
                                                              m_peer));
                    memcpy(&(m_peer->process_id),
                           m_rdbuf.data(),
                           sizeof(std::uint32_t));
                    memcpy(m_peer->node_id.data(),
                           m_rdbuf.data() + sizeof(std::uint32_t),
                           process_information::node_id_size);
                    m_rdbuf.reset();
                    m_state = wait_for_msg_size;
                    DEBUG("pinfo read: "
                          << m_peer->process_id
                          << "@"
                          << m_peer->node_id_as_string());
                    // fall through and try to read more from socket
                }
            }
            case wait_for_msg_size:
            {
                if (m_rdbuf.final_size() != sizeof(std::uint32_t))
                {
                    m_rdbuf.reset(sizeof(std::uint32_t));
                }
                if (!m_rdbuf.append_from(m_sockfd, s_rdflag)) return false;
                if (m_rdbuf.ready() == false)
                {
                    break;
                }
                else
                {
                    // read and set message size
                    std::uint32_t msg_size;
                    memcpy(&msg_size, m_rdbuf.data(), sizeof(std::uint32_t));
                    m_rdbuf.reset(msg_size);
                    m_state = read_message;
                    // fall through and try to read more from socket
                }
            }
            case read_message:
            {
                if (!m_rdbuf.append_from(m_sockfd, s_rdflag)) return false;
                if (m_rdbuf.ready())
                {
                    message msg;
                    binary_deserializer bd(m_rdbuf.data(), m_rdbuf.size());
                    try
                    {
                        meta_msg->deserialize(&msg, &bd);
                    }
                    catch (std::exception& e)
                    {
                        // deserialization failed; drop the connection
                        DEBUG(to_uniform_name(typeid(e)) << ": " << e.what());
                        return false;
                    }
                    handle_message(msg, typeid(atom_value), pself, m_peer);
                    m_rdbuf.reset();
                    m_state = wait_for_msg_size;
                }
                break;
            }
            default:
            {
                // m_state == moved: must never be used again
                throw std::logic_error("illegal state");
            }
        }
        return true;
    }

};
// A server socket that accepts incoming connections for one
// published actor.
struct po_doorman
{

    // server socket
    bool m_valid;  // false after ownership was moved away
    native_socket_t ssockfd;
    actor_ptr published_actor;

    explicit po_doorman(add_server_socket_msg& assm)
        : m_valid(true)
        , ssockfd(assm.server_sockfd)
        , published_actor(assm.published_actor)
    {
    }

    po_doorman(po_doorman&& other)
        : m_valid(true)
        , ssockfd(other.ssockfd)
        , published_actor(std::move(other.published_actor))
    {
        other.m_valid = false;
    }

    ~po_doorman()
    {
        if (m_valid) closesocket(ssockfd);
    }

    // accepts one pending connection, sends the handshake (actor id +
    // process id + node id) and adds the new po_peer to @p peers
    // @return false if an error occured; otherwise true
    bool read_and_continue(const process_information& pself,
                           std::list<po_peer>& peers)
    {
        sockaddr addr;
        // bugfix: accept() requires addrlen to be initialized with the
        // size of the addr buffer (it is a value-result argument);
        // passing it uninitialized is undefined behavior
        socklen_t addrlen = sizeof(addr);
        auto sfd = ::accept(ssockfd, &addr, &addrlen);
        if (sfd < 0)
        {
            switch (errno)
            {
                case EAGAIN:
#               if EAGAIN != EWOULDBLOCK
                case EWOULDBLOCK:
#               endif
                {
                    // just try again
                    return true;
                }
                default: return false;
            }
        }
        auto id = published_actor->id();
        ::send(sfd, &id, sizeof(std::uint32_t), 0);
        ::send(sfd, &(pself.process_id), sizeof(std::uint32_t), 0);
        ::send(sfd, pself.node_id.data(), pself.node_id.size(), 0);
        peers.push_back(po_peer(sfd, ssockfd));
        DEBUG("socket accepted; published actor: " << id);
        return true;
    }

};
// Event loop of the post office thread: multiplexes (via select) the
// event pipe, all peer connections, and all server sockets.
// Runs until a shutdown_event arrives on the pipe.
void post_office_loop(int pipe_read_handle)
{
    // map of all published actors (actor id -> list of doormen)
    std::map<std::uint32_t, std::list<po_doorman> > doormen;
    // list of all connected peers
    std::list<po_peer> peers;
    // readset for select()
    fd_set readset;
    // maximum number of all socket descriptors
    int maxfd = 0;
    // cache some used global data
    auto meta_msg = uniform_typeid<message>();
    auto& pself = process_information::get();
    // initialize variables
    FD_ZERO(&readset);
    maxfd = pipe_read_handle;
    FD_SET(pipe_read_handle, &readset);
    // keeps track about what peer we are iterating at this time
    po_peer* selected_peer = nullptr;
    buffer<pipe_msg_size, pipe_msg_size> pipe_msg_buf;
    pipe_msg_buf.reset(pipe_msg_size);
    // thread id of post_office
    auto thread_id = boost::this_thread::get_id();
    // if an actor calls its quit() handler in this thread,
    // we 'catch' the released socket here
    std::vector<native_socket_t> released_socks;
    // functor that releases a socket descriptor
    // returns true if an element was removed from peers
    auto release_socket = [&](native_socket_t sockfd) -> bool
    {
        auto i = peers.begin();
        auto end = peers.end();
        while (i != end)
        {
            if (i->m_sockfd == sockfd)
            {
                if (i->dec_ref_count() == 0)
                {
                    DEBUG("socket closed; last proxy exited");
                    peers.erase(i);
                    return true;
                }
                // exit loop
                return false;
            }
            else
            {
                ++i;
            }
        }
        return false;
    };
    // initialize proxy cache: every new proxy sends a ":Monitor" request
    // and pins a reference to the peer it was created for
    get_actor_proxy_cache().set_callback([&](actor_proxy_ptr& pptr)
    {
        pptr->enqueue(message(pptr, nullptr, atom(":Monitor")));
        if (selected_peer == nullptr)
        {
            throw std::logic_error("selected_peer == nullptr");
        }
        selected_peer->inc_ref_count();
        auto msock = selected_peer->m_sockfd;
        pptr->attach_functor([msock, thread_id, &released_socks](std::uint32_t)
        {
            // proxies may exit in this thread (handled locally) or in any
            // other thread (handed over via the pipe)
            if (boost::this_thread::get_id() == thread_id)
            {
                released_socks.push_back(msock);
            }
            else
            {
                pipe_msg msg = { dec_socket_ref_event,
                                 static_cast<std::uint32_t>(msock) };
                write(s_po_manager.write_handle(), msg, pipe_msg_size);
            }
        });
    });
    for (;;)
    {
        //std::cout << __LINE__ << std::endl;
        if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) < 0)
        {
            // must not happen
            perror("select()");
            exit(3);
        }
        //std::cout << __LINE__ << std::endl;
        bool recalculate_readset = false;
        // iterate over all peers; lifetime scope of i, end
        {
            auto i = peers.begin();
            auto end = peers.end();
            while (i != end)
            {
                if (FD_ISSET(i->m_sockfd, &readset))
                {
                    selected_peer = &(*i);
                    //DEBUG("read message from peer");
                    if (i->read_and_continue(meta_msg, pself))
                    {
                        // no errors detected; next iteration
                        ++i;
                    }
                    else
                    {
                        // peer detected an error; erase from list
                        DEBUG("connection to peer lost");
                        i = peers.erase(i);
                        recalculate_readset = true;
                    }
                }
                else
                {
                    // next iteration
                    ++i;
                }
            }
        }
        selected_peer = nullptr;
        // new connections to accept?
        for (auto& kvp : doormen)
        {
            auto& list = kvp.second;
            auto i = list.begin();
            auto end = list.end();
            while (i != end)
            {
                if (FD_ISSET(i->ssockfd, &readset))
                {
                    DEBUG("accept new socket...");
                    if (i->read_and_continue(pself, peers))
                    {
                        DEBUG("ok");
                        ++i;
                    }
                    else
                    {
                        DEBUG("failed; erased doorman");
                        i = list.erase(i);
                    }
                    recalculate_readset = true;
                }
                else
                {
                    ++i;
                }
            }
        }
        // read events from pipe
        if (FD_ISSET(pipe_read_handle, &readset))
        {
            pipe_msg pmsg;
            // NOTE(review): the result of read() is not checked; a short
            // read would leave pmsg partially filled — confirm that
            // pipe writes of pipe_msg_size are always atomic here
            ::read(pipe_read_handle, &pmsg, pipe_msg_size);
            switch (pmsg[0])
            {
                case rd_queue_event:
                {
                    DEBUG("rd_queue_event");
                    post_office_msg* pom = s_po_manager.m_queue->pop();
                    if (pom->is_add_peer_msg())
                    {
                        auto& apm = pom->as_add_peer_msg();
                        actor_proxy_ptr pptr = apm.first_peer_actor;
                        po_peer pd(apm);
                        // the proxy cache callback dereferences
                        // selected_peer, so it must be set while adding
                        selected_peer = &pd;
                        if (pptr)
                        {
                            DEBUG("proxy added via post_office_msg");
                            get_actor_proxy_cache().add(pptr);
                        }
                        selected_peer = nullptr;
                        peers.push_back(std::move(pd));
                        recalculate_readset = true;
                        DEBUG("new peer (remote_actor)");
                    }
                    else
                    {
                        auto& assm = pom->as_add_server_socket_msg();
                        auto& pactor = assm.published_actor;
                        if (!pactor)
                        {
                            throw std::logic_error("nullptr published");
                        }
                        auto actor_id = pactor->id();
                        // unpublish automatically when the actor exits
                        auto callback = [actor_id](std::uint32_t)
                        {
                            DEBUG("call post_office_unpublish() ...");
                            post_office_unpublish(actor_id);
                        };
                        if (pactor->attach_functor(std::move(callback)))
                        {
                            auto& dm = doormen[actor_id];
                            dm.push_back(po_doorman(assm));
                            recalculate_readset = true;
                            DEBUG("new doorman");
                        }
                        // else: actor already exited!
                    }
                    delete pom;
                    break;
                }
                case unpublish_actor_event:
                {
                    DEBUG("unpublish_actor_event");
                    auto kvp = doormen.find(pmsg[1]);
                    if (kvp != doormen.end())
                    {
                        // drop the implicit parent reference of every
                        // peer accepted through one of these doormen
                        for (po_doorman& dm : kvp->second)
                        {
                            auto i = peers.begin();
                            auto end = peers.end();
                            while (i != end)
                            {
                                if (i->parent_exited(dm.ssockfd) == 0)
                                {
                                    DEBUG("socket closed; parent exited");
                                    i = peers.erase(i);
                                }
                                else
                                {
                                    ++i;
                                }
                            }
                        }
                        doormen.erase(kvp);
                        recalculate_readset = true;
                    }
                    break;
                }
                case dec_socket_ref_event:
                {
                    auto sockfd = static_cast<native_socket_t>(pmsg[1]);
                    if (release_socket(sockfd))
                    {
                        recalculate_readset = true;
                    }
                    break;
                }
                case shutdown_event:
                {
                    // goodbye
                    DEBUG("case shutdown_event");
                    close(pipe_read_handle);
                    return;
                }
                default: throw std::logic_error("unexpected event type");
            }
        }
        // handle sockets released by proxies that exited in this thread
        if (released_socks.empty() == false)
        {
            for (native_socket_t sockfd : released_socks)
            {
                if (release_socket(sockfd))
                {
                    recalculate_readset = true;
                }
            }
            // bugfix: the vector was never cleared, so every following
            // loop iteration decremented the same peers' reference
            // counts again, releasing sockets prematurely
            released_socks.clear();
        }
        // recalculate readset if needed
        if (recalculate_readset)
        {
            //DEBUG("recalculate readset");
            FD_ZERO(&readset);
            FD_SET(pipe_read_handle, &readset);
            maxfd = pipe_read_handle;
            for (po_peer& pd : peers)
            {
                auto fd = pd.m_sockfd;
                if (fd > maxfd) maxfd = fd;
                FD_SET(fd, &readset);
            }
            // iterate over key value pairs
            for (auto& kvp : doormen)
            {
                // iterate over value (= list of doormen)
                for (auto& dm : kvp.second)
                {
                    auto fd = dm.ssockfd;
                    if (fd > maxfd) maxfd = fd;
                    FD_SET(fd, &readset);
                }
            }
        }
    }
}
} } } // namespace cppa::detail::<anonmyous>
namespace cppa { namespace detail {
void post_office_add_peer(native_socket_t a0,
const process_information_ptr& a1,
const actor_proxy_ptr& a2,
std::unique_ptr<attachable>&& a3)
{
DEBUG(__FUNCTION__);
s_po_manager.m_queue->push_back(new post_office_msg(a0, a1, a2,
std::move(a3)));
pipe_msg msg = { rd_queue_event, 0 };
write(s_po_manager.write_handle(), msg, pipe_msg_size);
}
// Asks the post office to accept incoming connections for
// @p published_actor on the (already listening) @p server_socket.
// Follows the same enqueue-then-notify protocol as post_office_add_peer.
void post_office_publish(native_socket_t server_socket,
                         const actor_ptr& published_actor)
{
    DEBUG(__FUNCTION__ << "(..., " << published_actor->id() << ")");
    // enqueue an "add server socket" request for the event loop thread ...
    auto* request = new post_office_msg(server_socket, published_actor);
    s_po_manager.m_queue->push_back(request);
    // ... then wake the loop up via its control pipe
    pipe_msg wakeup = { rd_queue_event, 0 };
    write(s_po_manager.write_handle(), wakeup, pipe_msg_size);
}
// Revokes a previous post_office_publish() for the actor with the given id.
// No queue element is needed here: the actor id fits directly into the
// second word of the pipe message.
void post_office_unpublish(std::uint32_t actor_id)
{
    DEBUG(__FUNCTION__ << "(" << actor_id << ")");
    pipe_msg wakeup = { unpublish_actor_event, actor_id };
    write(s_po_manager.write_handle(), wakeup, pipe_msg_size);
}
// Asks the post office to close the given socket.
// NOTE(review): the event loop's pipe-message switch (see the select loop
// above) handles rd_queue_event, unpublish_actor_event, dec_socket_ref_event
// and shutdown_event only; its default branch throws
// std::logic_error("unexpected event type"). A close_socket_event written
// here therefore appears to have no handler — confirm whether the loop is
// missing a case or whether this should send dec_socket_ref_event instead.
void post_office_close_socket(native_socket_t sfd)
{
    DEBUG(__FUNCTION__ << "(...)");
    // sfd is narrowed into the 32-bit payload slot of the pipe message
    pipe_msg msg = { close_socket_event, static_cast<std::uint32_t>(sfd) };
    write(s_po_manager.write_handle(), msg, pipe_msg_size);
}
//void post_office_unpublish(const actor_ptr& published_actor)
//{
// pipe_msg msg = { unpublish_actor_event, published_actor->id() };
// write(s_po_manager.write_handle(), msg, pipe_msg_size);
//}
//void post_office_proxy_exited(const actor_proxy_ptr& proxy_ptr)
//{
// std::uint32_t
//}
} } // namespace cppa::detail
...@@ -183,4 +183,11 @@ void process_information::node_id_from_string(const std::string& str, ...@@ -183,4 +183,11 @@ void process_information::node_id_from_string(const std::string& str,
} }
} }
std::string to_string(const process_information& what)
{
std::ostringstream oss;
oss << what.process_id << "@" << what.node_id_as_string();
return oss.str();
}
} // namespace cppa } // namespace cppa
...@@ -6,7 +6,8 @@ ...@@ -6,7 +6,8 @@
#include <iostream> #include <iostream>
#include <stdexcept> #include <stdexcept>
#include <boost/thread.hpp> #include <fcntl.h>
//#include <boost/thread.hpp>
#include "cppa/cppa.hpp" #include "cppa/cppa.hpp"
#include "cppa/atom.hpp" #include "cppa/atom.hpp"
...@@ -20,153 +21,35 @@ ...@@ -20,153 +21,35 @@
#include "cppa/util/single_reader_queue.hpp" #include "cppa/util/single_reader_queue.hpp"
#include "cppa/detail/mailman.hpp" #include "cppa/detail/mailman.hpp"
#include "cppa/detail/post_office.hpp"
#include "cppa/detail/native_socket.hpp" #include "cppa/detail/native_socket.hpp"
#include "cppa/detail/actor_proxy_cache.hpp" #include "cppa/detail/actor_proxy_cache.hpp"
using std::cout; using std::cout;
using std::endl; using std::endl;
using cppa::detail::mailman_job; //using cppa::detail::mailman_job;
using cppa::detail::mailman_queue; //using cppa::detail::mailman_queue;
using cppa::detail::native_socket_t; using cppa::detail::native_socket_t;
using cppa::detail::get_actor_proxy_cache; //using cppa::detail::get_actor_proxy_cache;
namespace cppa { namespace cppa {
namespace { namespace {
/*
// a map that manages links between local actors and remote actors (proxies) // a map that manages links between local actors and remote actors (proxies)
typedef std::map<actor_ptr, std::list<actor_proxy_ptr> > link_map; typedef std::map<actor_ptr, std::list<actor_proxy_ptr> > link_map;
class remote_observer : public attachable std::string pid_as_string(const process_information& pinf)
{ {
return to_string(pinf);
process_information_ptr peer;
public:
remote_observer(const process_information_ptr& piptr) : peer(piptr)
{
}
void detach(std::uint32_t reason)
{
actor_ptr self_ptr = self();
message msg(self_ptr, self_ptr, atom(":KillProxy"), reason);
detail::mailman_queue().push_back(new detail::mailman_job(peer, msg));
}
};
template<typename T>
T& operator<<(T& o, const process_information& pinfo)
{
return (o << pinfo.process_id << "@" << pinfo.node_id_as_string());
}
void read_from_socket(native_socket_t sfd, void* buf, size_t buf_size)
{
char* cbuf = reinterpret_cast<char*>(buf);
size_t read_bytes = 0;
size_t left = buf_size;
int rres = 0;
size_t urres = 0;
do
{
rres = ::recv(sfd, cbuf + read_bytes, left, 0);
if (rres <= 0)
{
throw std::ios_base::failure("cannot read from closed socket");
}
urres = static_cast<size_t>(rres);
read_bytes += urres;
left -= urres;
}
while (urres < left);
} }
// handles *one* socket / peer std::string pid_as_string()
void post_office_loop(native_socket_t socket_fd,
process_information_ptr peer,
actor_proxy_ptr aptr,
attachable* attachable_ptr)
{ {
//cout << "--> post_office_loop; self() = " return pid_as_string(process_information::get());
// << process_information::get()
// << ", peer = "
// << *peer
// << endl;
// destroys attachable_ptr if the function scope is leaved
std::unique_ptr<attachable> exit_guard(attachable_ptr);
if (aptr) detail::get_actor_proxy_cache().add(aptr);
message msg;
std::uint32_t rsize;
char* buf = nullptr;
size_t buf_size = 0;
size_t buf_allocated = 0;
auto meta_msg = uniform_typeid<message>();
const std::type_info& atom_tinfo = typeid(atom_value);
auto& pself = process_information::get();
try
{
for (;;)
{
read_from_socket(socket_fd, &rsize, sizeof(rsize));
if (buf_allocated < rsize)
{
// release old memory
delete[] buf;
// always allocate 1KB chunks
buf_allocated = 1024;
while (buf_allocated <= rsize)
{
buf_allocated += 1024;
}
buf = new char[buf_allocated];
}
buf_size = rsize;
//cout << "[" << pinfo << "] read " << rsize << " bytes" << endl;
read_from_socket(socket_fd, buf, buf_size);
binary_deserializer bd(buf, buf_size);
meta_msg->deserialize(&msg, &bd);
cout << pself.process_id << " <-- " << (to_string(msg) + "\n");
if ( msg.content().size() == 1
&& msg.content().utype_info_at(0) == atom_tinfo
&& *reinterpret_cast<const atom_value*>(msg.content().at(0))
== atom(":Monitor"))
{
actor_ptr sender = msg.sender();
if (sender->parent_process() == pself)
{
//cout << pinfo << " ':Monitor'; actor id = "
// << sender->id() << endl;
// local actor?
// this message was send from a proxy
sender->attach(new remote_observer(peer));
}
}
else
{
auto r = msg.receiver();
if (r) r->enqueue(msg);
}
}
}
catch (std::exception& e)
{
cout << "[" << process_information::get() << "] "
<< detail::to_uniform_name(typeid(e)) << ": "
<< e.what() << endl;
}
cout << "kill " << detail::actor_proxy_cache().size() << " proxies" << endl;
detail::actor_proxy_cache().for_each([](actor_proxy_ptr& pptr)
{
cout << "send :KillProxy message" << endl;
if (pptr) pptr->enqueue(message(nullptr, pptr, atom(":KillProxy"),
exit_reason::remote_link_unreachable));
});
cout << "[" << process_information::get() << "] ~post_office_loop"
<< endl;
} }
struct mm_worker struct mm_worker
...@@ -186,10 +69,10 @@ struct mm_worker ...@@ -186,10 +69,10 @@ struct mm_worker
~mm_worker() ~mm_worker()
{ {
cout << "=> [" << process_information::get() << "]::~mm_worker()" << endl; cout << "=> [" << pid_as_string() << "]::~mm_worker()" << endl;
detail::closesocket(m_sockfd); detail::closesocket(m_sockfd);
m_thread.join(); m_thread.join();
cout << "<= [" << process_information::get() << "]::~mm_worker()" << endl; cout << "<= [" << pid_as_string() << "]::~mm_worker()" << endl;
} }
}; };
...@@ -270,14 +153,54 @@ void middle_man_loop(native_socket_t server_socket_fd, ...@@ -270,14 +153,54 @@ void middle_man_loop(native_socket_t server_socket_fd,
barrier->wait(); barrier->wait();
//cout << "middle_man_loop finished\n"; //cout << "middle_man_loop finished\n";
} }
*/
void read_from_socket(native_socket_t sfd, void* buf, size_t buf_size)
{
char* cbuf = reinterpret_cast<char*>(buf);
size_t read_bytes = 0;
size_t left = buf_size;
int rres = 0;
size_t urres = 0;
do
{
rres = ::recv(sfd, cbuf + read_bytes, left, 0);
if (rres <= 0)
{
throw std::ios_base::failure("cannot read from closed socket");
}
urres = static_cast<size_t>(rres);
read_bytes += urres;
left -= urres;
}
while (urres < left);
}
} // namespace <anonmyous> } // namespace <anonmyous>
void actor_proxy::forward_message(const process_information_ptr& piptr, struct socket_guard
const message& msg)
{ {
mailman_queue().push_back(new mailman_job(piptr, msg));
} bool m_released;
detail::native_socket_t m_socket;
public:
socket_guard(detail::native_socket_t sfd) : m_released(false), m_socket(sfd)
{
}
~socket_guard()
{
if (!m_released) detail::closesocket(m_socket);
}
void release()
{
m_released = true;
}
};
void publish(actor_ptr& whom, std::uint16_t port) void publish(actor_ptr& whom, std::uint16_t port)
{ {
...@@ -289,10 +212,21 @@ void publish(actor_ptr& whom, std::uint16_t port) ...@@ -289,10 +212,21 @@ void publish(actor_ptr& whom, std::uint16_t port)
{ {
throw network_exception("could not create server socket"); throw network_exception("could not create server socket");
} }
// closes the socket if an exception occurs
socket_guard sguard(sockfd);
memset((char*) &serv_addr, 0, sizeof(serv_addr)); memset((char*) &serv_addr, 0, sizeof(serv_addr));
serv_addr.sin_family = AF_INET; serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY; serv_addr.sin_addr.s_addr = INADDR_ANY;
serv_addr.sin_port = htons(port); serv_addr.sin_port = htons(port);
int flags = fcntl(sockfd, F_GETFL, 0);
if (flags == -1)
{
throw network_exception("unable to get socket flags");
}
if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1)
{
throw network_exception("unable to set socket to nonblocking");
}
if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0) if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
{ {
throw bind_failure(errno); throw bind_failure(errno);
...@@ -301,9 +235,12 @@ void publish(actor_ptr& whom, std::uint16_t port) ...@@ -301,9 +235,12 @@ void publish(actor_ptr& whom, std::uint16_t port)
{ {
throw network_exception("listen() failed"); throw network_exception("listen() failed");
} }
intrusive_ptr<shared_barrier> barrier_ptr(new shared_barrier); // ok, no exceptions
boost::thread(middle_man_loop, sockfd, whom, barrier_ptr).detach(); sguard.release();
whom->attach(new mm_handle(sockfd, barrier_ptr)); detail::post_office_publish(sockfd, whom);
//intrusive_ptr<shared_barrier> barrier_ptr(new shared_barrier);
//boost::thread(middle_man_loop, sockfd, whom, barrier_ptr).detach();
//whom->attach(new mm_handle(sockfd, barrier_ptr));
} }
void publish(actor_ptr&& whom, std::uint16_t port) void publish(actor_ptr&& whom, std::uint16_t port)
...@@ -350,10 +287,12 @@ actor_ptr remote_actor(const char* host, std::uint16_t port) ...@@ -350,10 +287,12 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
peer_pinf->node_id.size()); peer_pinf->node_id.size());
process_information_ptr pinfptr(peer_pinf); process_information_ptr pinfptr(peer_pinf);
actor_proxy_ptr result(new actor_proxy(remote_actor_id, pinfptr)); actor_proxy_ptr result(new actor_proxy(remote_actor_id, pinfptr));
mailman_queue().push_back(new mailman_job(sockfd, pinfptr)); detail::mailman_queue().push_back(new detail::mailman_job(sockfd, pinfptr));
auto ptr = get_scheduler()->register_hidden_context(); detail::post_office_add_peer(sockfd, pinfptr, result,
boost::thread(post_office_loop, sockfd, std::unique_ptr<attachable>());
peer_pinf, result, ptr.release()).detach(); //auto ptr = get_scheduler()->register_hidden_context();
//boost::thread(post_office_loop, sockfd,
// peer_pinf, result, ptr.release()).detach();
return result; return result;
} }
......
...@@ -101,7 +101,7 @@ int main(int argc, char** c_argv) ...@@ -101,7 +101,7 @@ int main(int argc, char** c_argv)
RUN_TEST(test__spawn); RUN_TEST(test__spawn);
RUN_TEST(test__local_group); RUN_TEST(test__local_group);
RUN_TEST(test__atom); RUN_TEST(test__atom);
RUN_TEST_A3(test__remote_actor, c_argv[0], false, argv); //RUN_TEST_A3(test__remote_actor, c_argv[0], false, argv);
cout << endl cout << endl
<< "error(s) in all tests: " << errors << "error(s) in all tests: " << errors
<< endl; << endl;
......
...@@ -46,7 +46,7 @@ size_t test__remote_actor(const char* app_path, bool is_client, ...@@ -46,7 +46,7 @@ size_t test__remote_actor(const char* app_path, bool is_client,
auto ping_actor = spawn(ping); auto ping_actor = spawn(ping);
std::uint16_t port = 4242; std::uint16_t port = 4242;
bool success = false; bool success = false;
while (!success) do
{ {
try try
{ {
...@@ -59,22 +59,24 @@ size_t test__remote_actor(const char* app_path, bool is_client, ...@@ -59,22 +59,24 @@ size_t test__remote_actor(const char* app_path, bool is_client,
++port; ++port;
} }
} }
while (!success);
cout << "port = " << port << endl;
std::string cmd; std::string cmd;
{ {
std::ostringstream oss; std::ostringstream oss;
oss << app_path << " test__remote_actor " << port;// << " &>/dev/null"; oss << app_path << " test__remote_actor " << port << " &>/dev/null";
cmd = oss.str(); cmd = oss.str();
} }
// execute client_part() in a separate process, // execute client_part() in a separate process,
// connected via localhost socket // connected via localhost socket
boost::thread child([&cmd]() { system(cmd.c_str()); }); //boost::thread child([&cmd]() { system(cmd.c_str()); });
cout << __LINE__ << endl; cout << __LINE__ << endl;
await_all_others_done(); await_all_others_done();
cout << __LINE__ << endl; cout << __LINE__ << endl;
CPPA_CHECK_EQUAL(pongs(), 5); CPPA_CHECK_EQUAL(pongs(), 5);
// wait until separate process (in sep. thread) finished execution // wait until separate process (in sep. thread) finished execution
cout << __LINE__ << endl; cout << __LINE__ << endl;
child.join(); //child.join();
cout << __LINE__ << endl; cout << __LINE__ << endl;
return CPPA_TEST_RESULT; return CPPA_TEST_RESULT;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment