Commit 49511085 authored by neverlord

moved mailman to own .cpp file

parent 63b14c49
......@@ -42,14 +42,12 @@
<value key="ProjectExplorer.Target.ActiveDeployConfiguration" type="int">0</value>
<value key="ProjectExplorer.Target.ActiveRunConfiguration" type="int">0</value>
<valuemap key="ProjectExplorer.Target.BuildConfiguration.0" type="QVariantMap">
<value key="GenericProjectManager.GenericBuildConfiguration.BuildDirectory" type="QString">/Users/neverlord/libcppa</value>
<value key="ProjectExplorer.BuildCOnfiguration.ToolChain" type="QString">ProjectExplorer.ToolChain.Gcc:/usr/bin/g++.x86-macos-generic-mach_o-64bit.</value>
<value key="GenericProjectManager.GenericBuildConfiguration.BuildDirectory" type="QString">/home/neverlord/libcppa</value>
<value key="ProjectExplorer.BuildCOnfiguration.ToolChain" type="QString">INVALID</value>
<valuemap key="ProjectExplorer.BuildConfiguration.BuildStepList.0" type="QVariantMap">
<valuemap key="ProjectExplorer.BuildStepList.Step.0" type="QVariantMap">
<valuelist key="GenericProjectManager.GenericMakeStep.BuildTargets" type="QVariantList">
<value type="QString">all</value>
</valuelist>
<value key="GenericProjectManager.GenericMakeStep.MakeArguments" type="QString">-j 2</value>
<valuelist key="GenericProjectManager.GenericMakeStep.BuildTargets" type="QVariantList"/>
<value key="GenericProjectManager.GenericMakeStep.MakeArguments" type="QString"></value>
<value key="GenericProjectManager.GenericMakeStep.MakeCommand" type="QString"></value>
<value key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName" type="QString">Make</value>
<value key="ProjectExplorer.ProjectConfiguration.DisplayName" type="QString"></value>
......@@ -135,7 +133,7 @@
</data>
<data>
<variable>ProjectExplorer.Project.Updater.EnvironmentId</variable>
<value type="QString">{07fcd197-092d-45a0-8500-3be614e6ae31}</value>
<value type="QString">{00861904-8afe-4186-b958-756209cdf248}</value>
</data>
<data>
<variable>ProjectExplorer.Project.Updater.FileVersion</variable>
......
......@@ -177,3 +177,7 @@ src/atom.cpp
src/cppa.cpp
cppa/exit_reason.hpp
cppa/detail/abstract_actor.hpp
cppa/detail/mailman.hpp
src/mailman.cpp
cppa/detail/native_socket.hpp
src/native_socket.cpp
/Users/neverlord/libcppa
/home/neverlord/libcppa
/opt/local/include/gcc46/c++
/opt/local/include/
/Users/neverlord/libcppa/unit_testing
/home/neverlord/libcppa/unit_testing
......@@ -77,7 +77,10 @@ class abstract_actor /*[[base_check]]*/ : public Base
//abstract_actor() : Base() { }
template<typename... Args>
abstract_actor(const Args&... args) : Base(args...), m_exit_reason(0) { }
abstract_actor(const Args&... args) : Base(args...)
, m_exit_reason(exit_reason::not_exited)
{
}
void cleanup(std::uint32_t reason)
{
......
......@@ -29,6 +29,17 @@ class actor_proxy_cache
actor_proxy_ptr get(const key_tuple& key);
void add(const actor_proxy_ptr& pptr);
size_t size() const;
template<typename F>
void for_each(F&& fun)
{
for (auto i = m_proxies.begin(); i != m_proxies.end(); ++i)
{
fun(i->second);
}
}
};
// get the thread-local cache object
......
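A minimal usage sketch of the new for_each member, assuming the thread-local accessor declared in this header; it mirrors the proxy shutdown path in unicast_network.cpp further below:
// broadcast :KillProxy to every known proxy, e.g. when a connection dies
get_actor_proxy_cache().for_each([](actor_proxy_ptr& pptr)
{
    if (pptr) pptr->enqueue(message(nullptr, pptr, atom(":KillProxy"),
                                    exit_reason::remote_link_unreachable));
});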
#ifndef MAILMAN_HPP
#define MAILMAN_HPP
#include "cppa/message.hpp"
#include "cppa/actor_proxy.hpp"
#include "cppa/process_information.hpp"
#include "cppa/detail/native_socket.hpp"
#include "cppa/util/single_reader_queue.hpp"
namespace cppa { namespace detail {
struct mailman_send_job
{
process_information_ptr target_peer;
message original_message;
mailman_send_job(actor_proxy_ptr apptr, message msg);
mailman_send_job(process_information_ptr peer, message msg);
};
struct mailman_add_peer
{
native_socket_t sockfd;
process_information_ptr pinfo;
mailman_add_peer(native_socket_t sfd, const process_information_ptr& pinf);
};
class mailman_job
{
friend class util::single_reader_queue<mailman_job>;
public:
enum job_type
{
send_job_type,
add_peer_type,
kill_type
};
mailman_job(process_information_ptr piptr, const message& omsg);
mailman_job(actor_proxy_ptr apptr, const message& omsg);
mailman_job(native_socket_t sockfd, const process_information_ptr& pinfo);
static mailman_job* kill_job();
~mailman_job();
inline mailman_send_job& send_job()
{
return m_send_job;
}
inline mailman_add_peer& add_peer_job()
{
return m_add_socket;
}
inline job_type type() const
{
return m_type;
}
inline bool is_send_job() const
{
return m_type == send_job_type;
}
inline bool is_add_peer_job() const
{
return m_type == add_peer_type;
}
inline bool is_kill_job() const
{
return m_type == kill_type;
}
private:
mailman_job* next;
job_type m_type;
// unrestricted union
union
{
mailman_send_job m_send_job;
mailman_add_peer m_add_socket;
};
mailman_job(job_type jt);
};
util::single_reader_queue<mailman_job>& mailman_queue();
}} // namespace cppa::detail
#endif // MAILMAN_HPP
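A sketch of how producers are expected to use this queue; forward_to_peer is a hypothetical helper, but actor_proxy::forward_message in unicast_network.cpp below does exactly this. Ownership of the heap-allocated job passes to the queue, and mailman_loop() deletes it after processing:
void forward_to_peer(const process_information_ptr& piptr, const message& msg)
{
    // the queue takes ownership; mailman_loop() deletes the job
    mailman_queue().push_back(new mailman_job(piptr, msg));
}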
......@@ -14,6 +14,7 @@ class mock_scheduler : public scheduler
void register_converted_context(context*);
//void unregister_converted_context(context*);
actor_ptr spawn(actor_behavior*, scheduling_hint);
std::unique_ptr<attachable> register_hidden_context();
};
......
#ifndef NATIVE_SOCKET_HPP
#define NATIVE_SOCKET_HPP
#include "cppa/config.hpp"
#ifdef CPPA_WINDOWS
#else
# include <netdb.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/socket.h>
# include <netinet/in.h>
#endif
namespace cppa { namespace detail {
#ifdef CPPA_WINDOWS
typedef SOCKET native_socket_t;
typedef const char* socket_send_ptr;
typedef char* socket_recv_ptr;
#else
typedef int native_socket_t;
typedef const void* socket_send_ptr;
typedef void* socket_recv_ptr;
void closesocket(native_socket_t s);
#endif
} } // namespace cppa::detail
#endif // NATIVE_SOCKET_HPP
#ifndef SCHEDULER_HPP
#define SCHEDULER_HPP
#include <memory>
#include "cppa/actor.hpp"
#include "cppa/attachable.hpp"
#include "cppa/scheduling_hint.hpp"
namespace cppa {
......@@ -39,6 +42,14 @@ class scheduler
*/
virtual void register_converted_context(context* what) = 0;
/**
* @brief Informs the scheduler about a hidden (non-actor)
* context that should be counted by await_others_done().
* @return An {@link attachable} that the hidden context has to destroy
*         when its lifetime ends.
*/
virtual std::unique_ptr<attachable> register_hidden_context() = 0;
/**
* @brief Informs the scheduler that the converted context @p what
* finished execution.
......
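The intended call pattern, as used by remote_actor() in unicast_network.cpp below (sockfd, peer and proxy are placeholders here): the returned attachable keeps await_others_done() counting the hidden thread, and destroying it signals completion.
// the hidden thread re-wraps the raw pointer in a std::unique_ptr,
// so the scheduler is notified when the thread's main function returns
auto guard = get_scheduler()->register_hidden_context();
boost::thread(post_office_loop, sockfd, peer, proxy, guard.release()).detach();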
......@@ -33,9 +33,11 @@ actor_proxy_ptr actor_proxy_cache::get(const key_tuple& key)
{
return i->second;
}
// get_pinfo(key) also inserts into m_pinfos
actor_proxy_ptr result(new actor_proxy(std::get<0>(key), get_pinfo(key)));
// insert into m_proxies
m_proxies.insert(std::make_pair(key, result));
result->enqueue(message(result, nullptr, atom(":Monitor")));
add(result);
return result;
}
......@@ -52,6 +54,11 @@ void actor_proxy_cache::add(const actor_proxy_ptr& pptr)
add(pptr, key);
}
size_t actor_proxy_cache::size() const
{
return m_proxies.size();
}
actor_proxy_cache& get_actor_proxy_cache()
{
if (t_proxy_cache.get() == nullptr)
......
......@@ -9,7 +9,7 @@ namespace cppa { namespace detail {
void converted_thread_context::quit(std::uint32_t reason)
{
super::cleanup(reason);
try { super::cleanup(reason); } catch(...) { }
// actor_exited should not be caught, but if anything catches it,
// the next call to self() must return a newly created instance
set_self(nullptr);
......
#include "cppa/detail/mailman.hpp"
#include "cppa/binary_serializer.hpp"
// forward declaration
namespace cppa { namespace detail { namespace { void mailman_loop(); } } }
// static helper
namespace {
struct mailman_manager
{
typedef cppa::util::single_reader_queue<cppa::detail::mailman_job> queue_t;
boost::thread* m_loop;
queue_t* m_queue;
mailman_manager()
{
m_queue = new queue_t;
m_loop = new boost::thread(cppa::detail::mailman_loop);
}
~mailman_manager()
{
m_queue->push_back(cppa::detail::mailman_job::kill_job());
m_loop->join();
delete m_loop;
delete m_queue;
}
}
s_mailman_manager;
} // namespace <anonymous>
// implementation of mailman.hpp
namespace cppa { namespace detail {
mailman_send_job::mailman_send_job(actor_proxy_ptr apptr, message msg)
: target_peer(apptr->parent_process_ptr()), original_message(msg)
{
}
mailman_send_job::mailman_send_job(process_information_ptr peer, message msg)
: target_peer(peer), original_message(msg)
{
}
mailman_add_peer::mailman_add_peer(native_socket_t sfd,
const process_information_ptr& pinf)
: sockfd(sfd), pinfo(pinf)
{
}
mailman_job::mailman_job(job_type jt) : next(0), m_type(jt)
{
}
mailman_job::mailman_job(process_information_ptr piptr, const message& omsg)
: next(0), m_type(send_job_type)
{
new (&m_send_job) mailman_send_job(piptr, omsg);
}
mailman_job::mailman_job(actor_proxy_ptr apptr, const message& omsg)
: next(0), m_type(send_job_type)
{
new (&m_send_job) mailman_send_job(apptr, omsg);
}
mailman_job::mailman_job(native_socket_t sockfd, const process_information_ptr& pinfo)
: next(0), m_type(add_peer_type)
{
new (&m_add_socket) mailman_add_peer(sockfd, pinfo);
}
mailman_job* mailman_job::kill_job()
{
return new mailman_job(kill_type);
}
mailman_job::~mailman_job()
{
switch (m_type)
{
case send_job_type:
m_send_job.~mailman_send_job();
break;
case add_peer_type:
m_add_socket.~mailman_add_peer();
break;
default: break;
}
}
util::single_reader_queue<mailman_job>& mailman_queue()
{
return *(s_mailman_manager.m_queue);
}
} } // namespace cppa::detail
namespace cppa { namespace detail { namespace {
void mailman_loop()
{
// send() flags
int flags = 0;
// serializes outgoing messages
binary_serializer bs;
// current active job
mailman_job* job = nullptr;
// caches mailman_queue()
auto& mqueue = mailman_queue();
// connected tcp peers
std::map<process_information, native_socket_t> peers;
for (;;)
{
job = mqueue.pop();
if (job->is_send_job())
{
mailman_send_job& sjob = job->send_job();
const message& out_msg = sjob.original_message;
// forward message to receiver peer
auto peer_element = peers.find(*(sjob.target_peer));
if (peer_element != peers.end())
{
bool disconnect_peer = false;
auto peer = peer_element->second;
try
{
bs << out_msg;
auto size32 = static_cast<std::uint32_t>(bs.size());
//cout << pself.process_id << " --> " << (to_string(out_msg) + "\n");
// write size of serialized message
auto sent = ::send(peer, &size32, sizeof(size32), flags);
if (sent > 0)
{
// write message
sent = ::send(peer, bs.data(), bs.size(), flags);
}
// disconnect peer if send() failed
disconnect_peer = (sent <= 0);
}
// something went wrong; close connection to this peer
catch (...)
{
disconnect_peer = true;
}
if (disconnect_peer)
{
closesocket(peer);
peers.erase(peer_element);
}
bs.reset();
}
// else: unknown peer
}
else if (job->is_add_peer_job())
{
mailman_add_peer& pjob = job->add_peer_job();
auto i = peers.find(*(pjob.pinfo));
if (i == peers.end())
{
//cout << "mailman added " << pjob.pinfo->process_id << "@"
// << pjob.pinfo->node_id_as_string() << endl;
peers.insert(std::make_pair(*(pjob.pinfo), pjob.sockfd));
}
else
{
// TODO: some kind of error handling?
}
}
else if (job->is_kill_job())
{
delete job;
return;
}
delete job;
}
}
} } } // namespace cppa::detail::<anonymous>
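For reference, the matching receive side of the framing mailman_loop() writes (a 4-byte length prefix followed by the serialized message); this sketch follows what post_office_loop in unicast_network.cpp does with read_from_socket:
// read one framed message from a connected peer (sketch)
std::uint32_t size32;
read_from_socket(sockfd, &size32, sizeof(size32));
std::vector<char> buf(size32);
read_from_socket(sockfd, buf.data(), buf.size());
binary_deserializer bd(buf.data(), buf.size());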
......@@ -85,6 +85,12 @@ void mock_scheduler::register_converted_context(context* ctx)
}
}
std::unique_ptr<attachable> mock_scheduler::register_hidden_context()
{
inc_actor_count();
return std::unique_ptr<attachable>(new exit_observer);
}
void mock_scheduler::await_others_done()
{
auto expected = (unchecked_self() == nullptr) ? 0 : 1;
......
#include <ios> // ios_base::failure
#include <errno.h>
#include <sstream>
#include "cppa/detail/native_socket.hpp"
namespace cppa { namespace detail {
#ifndef CPPA_WINDOWS
void closesocket(native_socket_t s)
{
if(::close(s) != 0)
{
switch(errno)
{
case EBADF:
{
throw std::ios_base::failure("EBADF: invalid socket");
}
case EINTR:
{
throw std::ios_base::failure("EINTR: interrupted");
}
case EIO:
{
throw std::ios_base::failure("EIO: an I/O error occured");
}
default:
{
std::ostringstream oss;
oss << "close() failed; errno = " << errno;
throw std::ios_base::failure(oss.str());
}
}
}
}
#endif
} } // namespace cppa::detail
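Since closesocket() reports failures as std::ios_base::failure, non-throwing call sites such as destructors (see ~mm_worker below) may want to guard the call; a sketch:
try { detail::closesocket(sockfd); }
catch (std::ios_base::failure&) { /* socket already closed or I/O error */ }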
......@@ -6,11 +6,6 @@
#include <iostream>
#include <stdexcept>
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <boost/thread.hpp>
#include "cppa/cppa.hpp"
......@@ -21,227 +16,28 @@
#include "cppa/exit_reason.hpp"
#include "cppa/binary_serializer.hpp"
#include "cppa/binary_deserializer.hpp"
#include "cppa/util/single_reader_queue.hpp"
#include "cppa/detail/mailman.hpp"
#include "cppa/detail/native_socket.hpp"
#include "cppa/detail/actor_proxy_cache.hpp"
using std::cout;
using std::endl;
using cppa::detail::mailman_job;
using cppa::detail::mailman_queue;
using cppa::detail::native_socket_t;
using cppa::detail::get_actor_proxy_cache;
namespace cppa {
namespace {
#ifdef ACEDIA_WINDOWS
typedef SOCKET native_socket_t;
typedef const char* socket_send_ptr;
typedef char* socket_recv_ptr;
#else
typedef int native_socket_t;
typedef const void* socket_send_ptr;
typedef void* socket_recv_ptr;
inline void closesocket(native_socket_t s) { close(s); }
#endif
struct mailman_send_job
{
process_information_ptr target_peer;
message original_message;
mailman_send_job(actor_proxy_ptr apptr, message msg)
: target_peer(apptr->parent_process_ptr()), original_message(msg)
{
}
mailman_send_job(process_information_ptr peer, message msg)
: target_peer(peer), original_message(msg)
{
}
};
struct mailman_add_peer
{
native_socket_t sockfd;
process_information_ptr pinfo;
mailman_add_peer(native_socket_t sfd, const process_information_ptr& pinf)
: sockfd(sfd), pinfo(pinf)
{
}
};
class mailman_job
{
friend class
util::single_reader_queue<mailman_job>;
public:
enum job_type
{
send_job_type,
add_peer_type,
kill_type
};
private:
mailman_job* next;
job_type m_type;
union
{
mailman_send_job m_send_job;
mailman_add_peer m_add_socket;
};
mailman_job(job_type jt) : next(0), m_type(jt)
{
}
public:
mailman_job(process_information_ptr piptr, const message& omsg)
: next(0), m_type(send_job_type)
{
new (&m_send_job) mailman_send_job(piptr, omsg);
}
mailman_job(actor_proxy_ptr apptr, const message& omsg)
: next(0), m_type(send_job_type)
{
new (&m_send_job) mailman_send_job(apptr, omsg);
}
mailman_job(native_socket_t sockfd, const process_information_ptr& pinfo)
: next(0), m_type(add_peer_type)
{
new (&m_add_socket) mailman_add_peer(sockfd, pinfo);
}
static mailman_job* kill_job()
{
return new mailman_job(kill_type);
}
~mailman_job()
{
switch (m_type)
{
case send_job_type:
m_send_job.~mailman_send_job();
break;
case add_peer_type:
m_add_socket.~mailman_add_peer();
break;
default: break;
}
}
mailman_send_job& send_job()
{
return m_send_job;
}
mailman_add_peer& add_peer_job()
{
return m_add_socket;
}
job_type type() const
{
return m_type;
}
bool is_send_job() const
{
return m_type == send_job_type;
}
bool is_add_peer_job() const
{
return m_type == add_peer_type;
}
bool is_kill_job() const
{
return m_type == kill_type;
}
};
void mailman_loop();
//util::single_reader_queue<mailman_job> s_mailman_queue;
//boost::thread s_mailman_thread(mailman_loop);
struct mailman_manager
{
boost::thread* m_loop;
util::single_reader_queue<mailman_job>* m_queue;
mailman_manager()
{
m_queue = new util::single_reader_queue<mailman_job>;
m_loop = new boost::thread(mailman_loop);
}
~mailman_manager()
{
m_queue->push_back(mailman_job::kill_job());
m_loop->join();
delete m_loop;
delete m_queue;
}
}
s_mailman_manager;
util::single_reader_queue<mailman_job>& s_mailman_queue()
{
return *(s_mailman_manager.m_queue);
}
// a map that manages links between local actors and remote actors (proxies)
typedef std::map<actor_ptr, std::list<actor_proxy_ptr> > link_map;
void fake_exits_from_disconnected_links(link_map& links)
{
for (auto& element : links)
{
auto local_actor = element.first;
auto& remote_actors = element.second;
for (auto& rem_actor : remote_actors)
{
message msg(rem_actor, local_actor,
atom(":KillProxy"),
exit_reason::remote_link_unreachable);
local_actor->enqueue(msg);
}
}
}
void add_link(link_map& links,
const actor_ptr& local_actor,
const actor_proxy_ptr& remote_actor)
{
if (local_actor && remote_actor)
{
links[local_actor].push_back(remote_actor);
}
}
void remove_link(link_map& links,
const actor_ptr& local_actor,
const actor_proxy_ptr& remote_actor)
{
if (local_actor && remote_actor)
{
auto& link_list = links[local_actor];
link_list.remove(remote_actor);
if (link_list.empty()) links.erase(local_actor);
}
}
class remote_observer : public attachable
{
......@@ -257,7 +53,7 @@ class remote_observer : public attachable
{
actor_ptr self_ptr = self();
message msg(self_ptr, self_ptr, atom(":KillProxy"), reason);
s_mailman_queue().push_back(new mailman_job(peer, msg));
detail::mailman_queue().push_back(new detail::mailman_job(peer, msg));
}
};
......@@ -268,81 +64,6 @@ T& operator<<(T& o, const process_information& pinfo)
return (o << pinfo.process_id << "@" << pinfo.node_id_as_string());
}
// handles *all* outgoing messages
void mailman_loop()
{
//cout << "mailman_loop()" << endl;
//link_map links;
int flags = 0;
binary_serializer bs;
mailman_job* job = nullptr;
//const auto& pself = process_information::get();
std::map<process_information, native_socket_t> peers;
for (;;)
{
job = s_mailman_queue().pop();
if (job->is_send_job())
{
mailman_send_job& sjob = job->send_job();
const message& out_msg = sjob.original_message;
// forward message to receiver peer
auto peer_element = peers.find(*(sjob.target_peer));
if (peer_element != peers.end())
{
auto peer = peer_element->second;
try
{
bs << out_msg;
auto size32 = static_cast<std::uint32_t>(bs.size());
//cout << "--> " << (to_string(out_msg) + "\n");
auto sent = ::send(peer, &size32, sizeof(size32), flags);
if (sent != -1)
{
sent = ::send(peer, bs.data(), bs.size(), flags);
}
if (sent == -1)
{
// peer unreachable
//cout << "peer " << *(sjob.target_peer)
// << " unreachable" << endl;
peers.erase(*(sjob.target_peer));
}
}
catch (...)
{
// TODO: some kind of error handling?
}
bs.reset();
}
else
{
// TODO: some kind of error handling?
}
}
else if (job->is_add_peer_job())
{
mailman_add_peer& pjob = job->add_peer_job();
auto i = peers.find(*(pjob.pinfo));
if (i == peers.end())
{
//cout << "mailman added " << pjob.pinfo->process_id << "@"
// << pjob.pinfo->node_id_as_string() << endl;
peers.insert(std::make_pair(*(pjob.pinfo), pjob.sockfd));
}
else
{
// TODO: some kind of error handling?
}
}
else if (job->is_kill_job())
{
delete job;
return;
}
delete job;
}
}
void read_from_socket(native_socket_t sfd, void* buf, size_t buf_size)
{
char* cbuf = reinterpret_cast<char*>(buf);
......@@ -367,13 +88,16 @@ void read_from_socket(native_socket_t sfd, void* buf, size_t buf_size)
// handles *one* socket / peer
void post_office_loop(native_socket_t socket_fd,
process_information_ptr peer,
actor_proxy_ptr aptr)
actor_proxy_ptr aptr,
attachable* attachable_ptr)
{
//cout << "--> post_office_loop; self() = "
// << process_information::get()
// << ", peer = "
// << *peer
// << endl;
// destroys attachable_ptr when the function scope is left
std::unique_ptr<attachable> exit_guard(attachable_ptr);
if (aptr) detail::get_actor_proxy_cache().add(aptr);
message msg;
std::uint32_t rsize;
......@@ -382,7 +106,7 @@ void post_office_loop(native_socket_t socket_fd,
size_t buf_allocated = 0;
auto meta_msg = uniform_typeid<message>();
const std::type_info& atom_tinfo = typeid(atom_value);
auto& pinfo = process_information::get();
auto& pself = process_information::get();
try
{
for (;;)
......@@ -405,14 +129,14 @@ void post_office_loop(native_socket_t socket_fd,
read_from_socket(socket_fd, buf, buf_size);
binary_deserializer bd(buf, buf_size);
meta_msg->deserialize(&msg, &bd);
//cout << "<-- " << (to_string(msg) + "\n");
cout << pself.process_id << " <-- " << (to_string(msg) + "\n");
if ( msg.content().size() == 1
&& msg.content().utype_info_at(0) == atom_tinfo
&& *reinterpret_cast<const atom_value*>(msg.content().at(0))
== atom(":Monitor"))
{
actor_ptr sender = msg.sender();
if (sender->parent_process() == pinfo)
if (sender->parent_process() == pself)
{
//cout << pinfo << " ':Monitor'; actor id = "
// << sender->id() << endl;
......@@ -428,18 +152,21 @@ void post_office_loop(native_socket_t socket_fd,
}
}
}
catch (std::ios_base::failure& e)
{
//cout << "std::ios_base::failure: " << e.what() << endl;
}
catch (std::exception& e)
{
//cout << "[" << process_information::get() << "] "
// << detail::to_uniform_name(typeid(e)) << ": "
// << e.what() << endl;
cout << "[" << process_information::get() << "] "
<< detail::to_uniform_name(typeid(e)) << ": "
<< e.what() << endl;
}
//cout << "[" << process_information::get() << "] ~post_office_loop"
// << endl;
cout << "kill " << detail::actor_proxy_cache().size() << " proxies" << endl;
detail::actor_proxy_cache().for_each([](actor_proxy_ptr& pptr)
{
cout << "send :KillProxy message" << endl;
if (pptr) pptr->enqueue(message(nullptr, pptr, atom(":KillProxy"),
exit_reason::remote_link_unreachable));
});
cout << "[" << process_information::get() << "] ~post_office_loop"
<< endl;
}
struct mm_worker
......@@ -452,14 +179,17 @@ struct mm_worker
: m_sockfd(sockfd), m_thread(post_office_loop,
sockfd,
peer,
actor_proxy_ptr())
actor_proxy_ptr(),
static_cast<attachable*>(nullptr))
{
}
~mm_worker()
{
closesocket(m_sockfd);
cout << "=> [" << process_information::get() << "]::~mm_worker()" << endl;
detail::closesocket(m_sockfd);
m_thread.join();
cout << "<= [" << process_information::get() << "]::~mm_worker()" << endl;
}
};
......@@ -486,8 +216,10 @@ struct mm_handle : attachable
virtual ~mm_handle()
{
//cout << "--> ~mm_handle()" << endl;
closesocket(m_sockfd);
cout << "=> [" << process_information::get() << "]::~mm_worker()" << endl;
detail::closesocket(m_sockfd);
if (m_barrier) m_barrier->wait();
cout << "<= [" << process_information::get() << "]::~mm_worker()" << endl;
//cout << "<-- ~mm_handle()" << endl;
}
};
......@@ -523,7 +255,7 @@ void middle_man_loop(native_socket_t server_socket_fd,
read_from_socket(sockfd,
peer->node_id.data(),
process_information::node_id_size);
s_mailman_queue().push_back(new mailman_job(sockfd, peer));
mailman_queue().push_back(new mailman_job(sockfd, peer));
// todo: check if connected peer is compatible
children.push_back(child_ptr(new mm_worker(sockfd, peer)));
//cout << "client connection done" << endl;
......@@ -544,7 +276,7 @@ void middle_man_loop(native_socket_t server_socket_fd,
void actor_proxy::forward_message(const process_information_ptr& piptr,
const message& msg)
{
s_mailman_queue().push_back(new mailman_job(piptr, msg));
mailman_queue().push_back(new mailman_job(piptr, msg));
}
void publish(actor_ptr& whom, std::uint16_t port)
......@@ -618,8 +350,10 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
peer_pinf->node_id.size());
process_information_ptr pinfptr(peer_pinf);
actor_proxy_ptr result(new actor_proxy(remote_actor_id, pinfptr));
s_mailman_queue().push_back(new mailman_job(sockfd, pinfptr));
boost::thread(post_office_loop, sockfd, peer_pinf, result).detach();
mailman_queue().push_back(new mailman_job(sockfd, pinfptr));
auto ptr = get_scheduler()->register_hidden_context();
boost::thread(post_office_loop, sockfd,
peer_pinf, result, ptr.release()).detach();
return result;
}
......
#include <string>
#include <iostream>
#include <boost/thread.hpp>
#include "test.hpp"
......@@ -6,6 +7,9 @@
#include "cppa/cppa.hpp"
#include "cppa/exception.hpp"
using std::cout;
using std::endl;
using namespace cppa;
namespace {
......@@ -33,12 +37,12 @@ void client_part(const std::vector<std::string>& argv)
size_t test__remote_actor(const char* app_path, bool is_client,
const std::vector<std::string>& argv)
{
CPPA_TEST(test__remote_actor);
if (is_client)
{
client_part(argv);
return 0;
}
CPPA_TEST(test__remote_actor);
auto ping_actor = spawn(ping);
std::uint16_t port = 4242;
bool success = false;
......@@ -58,15 +62,19 @@ size_t test__remote_actor(const char* app_path, bool is_client,
std::string cmd;
{
std::ostringstream oss;
oss << app_path << " test__remote_actor " << port << " &>/dev/null";
oss << app_path << " test__remote_actor " << port;// << " &>/dev/null";
cmd = oss.str();
}
// execute client_part() in a separate process,
// connected via localhost socket
boost::thread child([&cmd] () { system(cmd.c_str()); });
boost::thread child([&cmd]() { system(cmd.c_str()); });
cout << __LINE__ << endl;
await_all_others_done();
cout << __LINE__ << endl;
CPPA_CHECK_EQUAL(pongs(), 5);
// wait until separate process (in sep. thread) finished execution
cout << __LINE__ << endl;
child.join();
cout << __LINE__ << endl;
return CPPA_TEST_RESULT;
}