cpp-libs / Actor Framework / Commits / 7023a997

Commit 7023a997 authored Jun 13, 2011 by neverlord

post_office

parent 49511085

Showing 20 changed files with 1327 additions and 187 deletions

  cppa.creator.user                      +1    -1
  cppa.files                             +3    -0
  cppa/actor.hpp                         +32   -4
  cppa/cppa.hpp                          +2    -0
  cppa/detail/actor_proxy_cache.hpp      +19   -4
  cppa/detail/buffer.hpp                 +172  -0
  cppa/detail/mock_scheduler.hpp         +1    -1
  cppa/detail/post_office.hpp            +29   -0
  cppa/process_information.hpp           +2    -0
  cppa/scheduler.hpp                     +1    -7
  src/actor_proxy.cpp                    +10   -0
  src/actor_proxy_cache.cpp              +9    -5
  src/mailman.cpp                        +54   -16
  src/mock_scheduler.cpp                 +2    -2
  src/native_socket.cpp                  +2    -0
  src/post_office.cpp                    +893  -0
  src/process_information.cpp            +7    -0
  src/unicast_network.cpp                +81   -142
  unit_testing/main.cpp                  +1    -1
  unit_testing/test__remote_actor.cpp    +6    -4

cppa.creator.user

@@ -133,7 +133,7 @@
     </data>
     <data>
      <variable>ProjectExplorer.Project.Updater.EnvironmentId</variable>
-     <value type="QString">{00861904-8afe-4186-b958-756209cdf248}</value>
+     <value type="QString">{07fcd197-092d-45a0-8500-3be614e6ae31}</value>
     </data>
     <data>
      <variable>ProjectExplorer.Project.Updater.FileVersion</variable>

cppa.files

@@ -181,3 +181,6 @@ cppa/detail/mailman.hpp
 src/mailman.cpp
 cppa/detail/native_socket.hpp
 src/native_socket.cpp
+cppa/detail/post_office.hpp
+src/post_office.cpp
+cppa/detail/buffer.hpp

cppa/actor.hpp

@@ -10,6 +10,7 @@
 #include "cppa/attachable.hpp"
 #include "cppa/process_information.hpp"
 #include "cppa/util/rm_ref.hpp"
 #include "cppa/util/enable_if.hpp"

 namespace cppa {

@@ -36,17 +37,20 @@ class actor : public channel
     ~actor();

     /**
-     * @brief Attaches @p ptr to this actor.
+     * @brief Attaches @p ptr to this actor
+     *        (the actor takes ownership of @p ptr).
      *
-     * @p ptr will be deleted if actor finished execution of immediately
-     * if the actor already exited.
+     * The actor will call <tt>ptr->detach(...)</tt> on exit or immediately
+     * if he already exited.
      *
      * @return @c true if @p ptr was successfully attached to the actor;
      *         otherwise (actor already exited) @p false.
      *
      */
     virtual bool attach(attachable* ptr) = 0;

+    template<typename F>
+    bool attach_functor(F&& ftor);
+
     /**
      * @brief Detaches the first attached object that matches @p what.
      */

@@ -127,6 +131,30 @@ bool actor::attach(std::unique_ptr<T>&& ptr,
 {
     return attach(static_cast<attachable*>(ptr.release()));
 }

+template<class F>
+class functor_attachable : public attachable
+{
+
+    F m_functor;
+
+ public:
+
+    template<class FArg>
+    functor_attachable(FArg&& arg) : m_functor(std::forward<FArg>(arg))
+    {
+    }
+
+    virtual void detach(std::uint32_t reason)
+    {
+        m_functor(reason);
+    }
+
+};
+
+template<typename F>
+bool actor::attach_functor(F&& ftor)
+{
+    typedef typename util::rm_ref<F>::type f_type;
+    return attach(new functor_attachable<f_type>(std::forward<F>(ftor)));
+}

 } // namespace cppa

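The new attach_functor helper wraps a callable in a functor_attachable so that arbitrary cleanup code runs when the actor exits. A minimal usage sketch, assuming the actor_ptr typedef comes with this header; the observe_exit wrapper and the lambda body are illustrative, not part of the commit:

    // Hypothetical usage: print the exit reason of an actor.
    #include <cstdint>
    #include <iostream>
    #include "cppa/actor.hpp"

    void observe_exit(cppa::actor_ptr some_actor)
    {
        some_actor->attach_functor([](std::uint32_t reason)
        {
            // invoked on exit, or immediately if the actor already exited
            std::cout << "actor exited with reason " << reason << std::endl;
        });
    }
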
cppa/cppa.hpp

@@ -244,6 +244,8 @@ inline void await_all_others_done()

 /**
  * @brief Publishes @p whom at given @p port.
+ *
+ * The connection is automatically closed if the lifetime of @p whom ends.
  */
 void publish(actor_ptr& whom, std::uint16_t port);

cppa/detail/actor_proxy_cache.hpp

@@ -2,6 +2,7 @@
 #define ACTOR_PROXY_CACHE_HPP

 #include <string>
+#include <functional>

 #include "cppa/actor_proxy.hpp"
 #include "cppa/process_information.hpp"

@@ -13,24 +14,38 @@ class actor_proxy_cache

  public:

-    typedef std::tuple<std::uint32_t, std::uint32_t,
-                       process_information::node_id_type> key_tuple;
+    typedef std::tuple<std::uint32_t,                       // actor id
+                       std::uint32_t,                       // process id
+                       process_information::node_id_type>   // node id
+            key_tuple;
+
+    typedef std::function<void (actor_proxy_ptr&)> new_proxy_callback;

  private:

     std::map<key_tuple, process_information_ptr> m_pinfos;
     std::map<key_tuple, actor_proxy_ptr> m_proxies;

+    new_proxy_callback m_new_cb;
+
     process_information_ptr get_pinfo(const key_tuple& key);
     void add(const actor_proxy_ptr& pptr, const key_tuple& key);

  public:

+    template<typename F>
+    void set_callback(F&& cb)
+    {
+        m_new_cb = std::forward<F>(cb);
+    }
+
     actor_proxy_ptr get(const key_tuple& key);
-    void add(const actor_proxy_ptr& pptr);
+    void add(actor_proxy_ptr& pptr);
     size_t size() const;
     void erase(const actor_proxy_ptr& pptr);

     template<typename F>
     void for_each(F&& fun)
     {

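With set_callback, networking code can hook the creation of every remote-actor proxy; the post office introduced in this commit is the intended consumer. A hedged sketch of registering such a hook, assuming the get_actor_proxy_cache() accessor that other files in this commit already use; the hook body is illustrative:

    // Illustrative: run bookkeeping for every freshly created actor_proxy.
    #include "cppa/detail/actor_proxy_cache.hpp"

    void install_proxy_hook()
    {
        cppa::detail::get_actor_proxy_cache().set_callback(
            [](cppa::actor_proxy_ptr& pptr)
            {
                // e.g. hand the proxy to the post office's bookkeeping
                (void) pptr;
            });
    }
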
cppa/detail/buffer.hpp (new file)

#ifndef BUFFER_HPP
#define BUFFER_HPP

#include <ios> // std::ios_base::failure
#include <iostream>
#include <string.h>

#include "cppa/detail/native_socket.hpp"

namespace cppa { namespace detail {

template<size_t ChunkSize, size_t MaxBufferSize, typename DataType = char>
class buffer
{

    DataType* m_data;
    size_t m_written;
    size_t m_allocated;
    size_t m_final_size;

    template<typename F>
    bool append_impl(F&& fun, bool throw_on_error)
    {
        auto recv_result = fun();
        if (recv_result == 0)
        {
            // connection closed
            if (throw_on_error)
            {
                std::ios_base::failure("cannot read from a closed pipe/socket");
            }
            return false;
        }
        else if (recv_result < 0)
        {
            switch (errno)
            {
                case EAGAIN:
                {
                    // rdflags or sfd is set to non-blocking,
                    // this is not treated as error
                    return true;
                }
                default:
                {
                    // a "real" error occured;
                    if (throw_on_error)
                    {
                        char* cstr = strerror(errno);
                        std::string errmsg = cstr;
                        free(cstr);
                        throw std::ios_base::failure(std::move(errmsg));
                    }
                    return false;
                }
            }
        }
        inc_written(static_cast<size_t>(recv_result));
        return true;
    }

 public:

    buffer() : m_data(nullptr), m_written(0), m_allocated(0), m_final_size(0)
    {
    }

    buffer(buffer&& other)
        : m_data(other.m_data), m_written(other.m_written)
        , m_allocated(other.m_allocated), m_final_size(other.m_final_size)
    {
        other.m_data = nullptr;
        other.m_written = other.m_allocated = other.m_final_size = 0;
    }

    ~buffer()
    {
        delete[] m_data;
    }

    void clear()
    {
        m_written = 0;
    }

    void reset(size_t new_final_size = 0)
    {
        m_written = 0;
        m_final_size = new_final_size;
        if (new_final_size > m_allocated)
        {
            if (new_final_size > MaxBufferSize)
            {
                throw std::ios_base::failure("maximum buffer size exceeded");
            }
            auto remainder = (new_final_size % ChunkSize);
            if (remainder == 0)
            {
                m_allocated = new_final_size;
            }
            else
            {
                m_allocated = (new_final_size - remainder) + ChunkSize;
            }
            delete[] m_data;
            m_data = new DataType[m_allocated];
        }
    }

    bool ready()
    {
        return m_written == m_final_size;
    }

    // pointer to the current write position
    DataType* wr_ptr()
    {
        return m_data + m_written;
    }

    size_t size()
    {
        return m_written;
    }

    size_t final_size()
    {
        return m_final_size;
    }

    size_t remaining()
    {
        return m_final_size - m_written;
    }

    void inc_written(size_t value)
    {
        m_written += value;
    }

    DataType* data()
    {
        return m_data;
    }

    bool append_from_file_descriptor(int fd, bool throw_on_error = false)
    {
        auto _this = this;
        auto fun = [_this, fd]() -> int
        {
            return ::read(fd, _this->wr_ptr(), _this->remaining());
        };
        return append_impl(fun, throw_on_error);
    }

    bool append_from(native_socket_t sfd, int rdflags,
                     bool throw_on_error = false)
    {
        auto _this = this;
        auto fun = [_this, sfd, rdflags]() -> int
        {
            return ::recv(sfd, _this->wr_ptr(), _this->remaining(), rdflags);
        };
        return append_impl(fun, throw_on_error);
    }

};

} } // namespace cppa::detail

#endif // BUFFER_HPP

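buffer drives the post office's size-prefixed reads: reset() sizes it for the next expected chunk and append_from() keeps filling it until ready() returns true. A hedged sketch of one framed read, assuming a connected socket and the 4-byte length prefix that mailman_loop writes; chunk and limit sizes are made up for the example:

    // Illustrative read of one length-prefixed message via cppa::detail::buffer.
    #include <cstdint>
    #include <cstring>
    #include "cppa/detail/buffer.hpp"

    void read_one_message(cppa::detail::native_socket_t sfd)
    {
        cppa::detail::buffer<1024, 1024 * 1024> buf;
        buf.reset(sizeof(std::uint32_t));             // expect the size prefix first
        while (!buf.ready()) buf.append_from(sfd, 0, true);
        std::uint32_t payload_size;
        memcpy(&payload_size, buf.data(), sizeof(payload_size));
        buf.reset(payload_size);                      // then the serialized message
        while (!buf.ready()) buf.append_from(sfd, 0, true);
        // buf.data() now holds payload_size bytes ready for deserialization
    }
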
cppa/detail/mock_scheduler.hpp

@@ -14,7 +14,7 @@ class mock_scheduler : public scheduler

     void register_converted_context(context*);
     //void unregister_converted_context(context*);
     actor_ptr spawn(actor_behavior*, scheduling_hint);
-    std::unique_ptr<attachable> register_hidden_context();
+    attachable* register_hidden_context();
 };

cppa/detail/post_office.hpp (new file)

#ifndef POST_OFFICE_HPP
#define POST_OFFICE_HPP

#include <memory>

#include "cppa/actor_proxy.hpp"
#include "cppa/detail/native_socket.hpp"

namespace cppa { namespace detail {

void post_office_add_peer(native_socket_t peer_socket,
                          const process_information_ptr& peer_ptr,
                          const actor_proxy_ptr& peer_actor_ptr,
                          std::unique_ptr<attachable>&& peer_observer);

void post_office_publish(native_socket_t server_socket,
                         const actor_ptr& published_actor);

void post_office_unpublish(std::uint32_t actor_id);

void post_office_close_socket(native_socket_t sfd);

//void post_office_unpublish(const actor_ptr& published_actor);
//void post_office_proxy_exited(const actor_proxy_ptr& proxy_ptr);

} } // namespace cppa::detail

#endif // POST_OFFICE_HPP

cppa/process_information.hpp

@@ -64,6 +64,8 @@ class process_information : public ref_counted,

 };

+std::string to_string(const process_information& what);
+
 typedef intrusive_ptr<process_information> process_information_ptr;

 } // namespace cppa

cppa/scheduler.hpp

@@ -48,13 +48,7 @@ class scheduler
      * @return An {@link attachable} that the hidden context has to destroy
      *         if his lifetime ends.
      */
-    virtual std::unique_ptr<attachable> register_hidden_context() = 0;
-
-    /**
-     * @brief Informs the scheduler that the convertex context @p what
-     *        finished execution.
-     */
-    //virtual void unregister_converted_context(context* what) = 0;
+    virtual attachable* register_hidden_context() = 0;

     /**
      * @brief Wait until all other actors finished execution.

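register_hidden_context() now returns a raw attachable* instead of a std::unique_ptr, so the result can be handed straight to actor::attach(), which takes ownership. A hedged sketch of the calling pattern; the function name is illustrative, and the same pattern appears in src/actor_proxy.cpp below:

    // Illustrative: keep the scheduler's actor count alive until 'whom' exits.
    #include "cppa/scheduler.hpp"

    void track_hidden_context(cppa::actor_ptr whom)
    {
        // attach() takes ownership and calls detach(reason) when 'whom' exits
        whom->attach(cppa::get_scheduler()->register_hidden_context());
    }
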
src/actor_proxy.cpp

 #include "cppa/atom.hpp"
 #include "cppa/message.hpp"
 #include "cppa/scheduler.hpp"
 #include "cppa/actor_proxy.hpp"
 #include "cppa/exit_reason.hpp"
 #include "cppa/detail/mailman.hpp"

 namespace cppa {

@@ -9,12 +11,20 @@ actor_proxy::actor_proxy(std::uint32_t mid, const process_information_ptr& pptr)
     : super(mid), m_parent(pptr)
 {
     if (!m_parent) throw std::runtime_error("parent == nullptr");
+    attach(get_scheduler()->register_hidden_context());
 }

 actor_proxy::actor_proxy(std::uint32_t mid, process_information_ptr&& pptr)
     : super(mid), m_parent(std::move(pptr))
 {
     if (!m_parent) throw std::runtime_error("parent == nullptr");
+    attach(get_scheduler()->register_hidden_context());
 }

+void actor_proxy::forward_message(const process_information_ptr& piptr,
+                                  const message& msg)
+{
+    detail::mailman_queue().push_back(new detail::mailman_job(piptr, msg));
+}
+
 void actor_proxy::enqueue(const message& msg)

src/actor_proxy_cache.cpp

@@ -35,23 +35,27 @@ actor_proxy_ptr actor_proxy_cache::get(const key_tuple& key)
     }
     // get_pinfo(key) also inserts to m_pinfos
     actor_proxy_ptr result(new actor_proxy(std::get<0>(key), get_pinfo(key)));
     // insert to m_proxies
     m_proxies.insert(std::make_pair(key, result));
     result->enqueue(message(result, nullptr, atom(":Monitor")));
+    if (m_new_cb) m_new_cb(result);
+    // insert to m_proxies
+    //result->enqueue(message(result, nullptr, atom(":Monitor")));
     return result;
 }

-void actor_proxy_cache::add(const actor_proxy_ptr& pptr, const key_tuple& key)
+void actor_proxy_cache::add(actor_proxy_ptr& pptr)
 {
+    auto pinfo = pptr->parent_process_ptr();
+    key_tuple key(pptr->id(), pinfo->process_id, pinfo->node_id);
     m_pinfos.insert(std::make_pair(key, pptr->parent_process_ptr()));
     m_proxies.insert(std::make_pair(key, pptr));
+    if (m_new_cb) m_new_cb(pptr);
 }

-void actor_proxy_cache::add(const actor_proxy_ptr& pptr)
+void actor_proxy_cache::erase(const actor_proxy_ptr& pptr)
 {
     auto pinfo = pptr->parent_process_ptr();
     key_tuple key(pptr->id(), pinfo->process_id, pinfo->node_id);
-    add(pptr, key);
+    m_proxies.erase(key);
 }

 size_t actor_proxy_cache::size() const

src/mailman.cpp

 #include <iostream>

 #include "cppa/to_string.hpp"
 #include "cppa/detail/mailman.hpp"
 #include "cppa/binary_serializer.hpp"
 #include "cppa/detail/post_office.hpp"

 #define DEBUG(arg) std::cout << arg << std::endl

 // forward declaration
 namespace cppa { namespace detail { namespace { void mailman_loop(); } } }

@@ -83,12 +89,20 @@ mailman_job::~mailman_job()
     switch (m_type)
     {
         case send_job_type:
         {
             m_send_job.~mailman_send_job();
             break;
         }
         case add_peer_type:
         {
             m_add_socket.~mailman_add_peer();
             break;
         }
-        default: break;
+        case kill_type:
+        {
+            // union doesn't contain a valid object
+            break;
+        }
     }
 }

@@ -102,8 +116,6 @@ util::single_reader_queue<mailman_job>& mailman_queue()
 namespace cppa { namespace detail { namespace {

 void mailman_loop()
 {
-    // send() flags
-    int flags = 0;
     // serializes outgoing messages
     binary_serializer bs;
     // current active job

@@ -129,29 +141,55 @@ void mailman_loop()
             {
                 bs << out_msg;
                 auto size32 = static_cast<std::uint32_t>(bs.size());
-                //cout << pself.process_id << " --> " << (to_string(out_msg) + "\n");
+                DEBUG("--> " << to_string(out_msg));
                 // write size of serialized message
-                auto sent = ::send(peer, &size32, sizeof(size32), flags);
-                if (sent <= 0)
+                auto sent = ::send(peer, &size32, sizeof(std::uint32_t), 0);
+                if (sent > 0)
                 {
                     // write message
-                    sent = ::send(peer, bs.data(), bs.size(), flags);
+                    sent = ::send(peer, bs.data(), bs.size(), 0);
                 }
                 // disconnect peer if send() failed
-                disconnect_peer = (sent > 0);
+                disconnect_peer = (sent <= 0);
+                if (sent <= 0)
+                {
+                    if (sent == 0)
+                    {
+                        DEBUG("remote socket closed");
+                    }
+                    else
+                    {
+                        DEBUG("send() returned -1");
+                        perror("send()");
+                    }
+                }
+                else
+                {
+                    if (sent != size32)
+                    {
+                        throw std::logic_error("WTF?!?");
+                    }
+                }
             }
             // something went wrong; close connection to this peer
-            catch (...)
+            catch (std::exception& e)
             {
+                DEBUG(to_uniform_name(typeid(e)) << ": " << e.what());
                 disconnect_peer = true;
             }
             if (disconnect_peer)
             {
-                closesocket(peer);
+                DEBUG("peer disconnected (error during send)");
+                //closesocket(peer);
+                post_office_close_socket(peer);
                 peers.erase(peer_element);
             }
             bs.reset();
         }
-        // else: unknown peer
+        else
+        {
+            DEBUG("message to an unknown peer");
+        }
     }
     else if (job->is_add_peer_job())

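mailman_loop frames every outgoing message as a 4-byte size prefix followed by the serialized payload; the post office reads the same framing back. A hedged sketch of that wire format in isolation, using plain POSIX send(); the helper name and parameters are illustrative:

    // Illustrative sender-side framing, mirroring what mailman_loop does:
    // first the length as std::uint32_t, then the serialized bytes.
    #include <cstdint>
    #include <sys/types.h>
    #include <sys/socket.h>

    bool send_framed(int peer, const char* bytes, std::uint32_t num_bytes)
    {
        ssize_t sent = ::send(peer, &num_bytes, sizeof(std::uint32_t), 0);
        if (sent <= 0) return false;                 // peer closed or error
        sent = ::send(peer, bytes, num_bytes, 0);
        return sent == static_cast<ssize_t>(num_bytes);
    }
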
src/mock_scheduler.cpp

@@ -85,10 +85,10 @@ void mock_scheduler::register_converted_context(context* ctx)
     }
 }

-std::unique_ptr<attachable> mock_scheduler::register_hidden_context()
+attachable* mock_scheduler::register_hidden_context()
 {
     inc_actor_count();
-    return std::unique_ptr<attachable>(new exit_observer);
+    return new exit_observer;
 }

 void mock_scheduler::await_others_done()

src/native_socket.cpp

 #include "cppa/config.hpp"

 #include <ios> // ios_base::failure
 #include <errno.h>
 #include <sstream>

src/post_office.cpp (new file, +893 lines; diff collapsed in the original view and not reproduced here)

src/process_information.cpp

@@ -183,4 +183,11 @@ void process_information::node_id_from_string(const std::string& str,
     }
 }

+std::string to_string(const process_information& what)
+{
+    std::ostringstream oss;
+    oss << what.process_id << "@" << what.node_id_as_string();
+    return oss.str();
+}
+
 } // namespace cppa

src/unicast_network.cpp

@@ -6,7 +6,8 @@
 #include <iostream>
 #include <stdexcept>

-#include <boost/thread.hpp>
+#include <fcntl.h>
+//#include <boost/thread.hpp>

 #include "cppa/cppa.hpp"
 #include "cppa/atom.hpp"

@@ -20,153 +21,35 @@
 #include "cppa/util/single_reader_queue.hpp"

 #include "cppa/detail/mailman.hpp"
+#include "cppa/detail/post_office.hpp"
 #include "cppa/detail/native_socket.hpp"
 #include "cppa/detail/actor_proxy_cache.hpp"

 using std::cout;
 using std::endl;

-using cppa::detail::mailman_job;
-using cppa::detail::mailman_queue;
+//using cppa::detail::mailman_job;
+//using cppa::detail::mailman_queue;
 using cppa::detail::native_socket_t;
-using cppa::detail::get_actor_proxy_cache;
+//using cppa::detail::get_actor_proxy_cache;

 namespace cppa {

 namespace {

+/*
 // a map that manages links between local actors and remote actors (proxies)
 typedef std::map<actor_ptr, std::list<actor_proxy_ptr> > link_map;

 class remote_observer : public attachable
 {

     process_information_ptr peer;

  public:

     remote_observer(const process_information_ptr& piptr) : peer(piptr)
     {
     }

     void detach(std::uint32_t reason)
     {
         actor_ptr self_ptr = self();
         message msg(self_ptr, self_ptr, atom(":KillProxy"), reason);
         detail::mailman_queue().push_back(new detail::mailman_job(peer, msg));
     }

 };

 template<typename T>
 T& operator<<(T& o, const process_information& pinfo)
 {
     return (o << pinfo.process_id << "@" << pinfo.node_id_as_string());
 }

-void read_from_socket(native_socket_t sfd, void* buf, size_t buf_size)
+std::string pid_as_string(const process_information& pinf)
 {
-    char* cbuf = reinterpret_cast<char*>(buf);
-    size_t read_bytes = 0;
-    size_t left = buf_size;
-    int rres = 0;
-    size_t urres = 0;
-    do
-    {
-        rres = ::recv(sfd, cbuf + read_bytes, left, 0);
-        if (rres <= 0)
-        {
-            throw std::ios_base::failure("cannot read from closed socket");
-        }
-        urres = static_cast<size_t>(rres);
-        read_bytes += urres;
-        left -= urres;
-    }
-    while (urres < left);
+    return to_string(pinf);
 }

 // handles *one* socket / peer
-void post_office_loop(native_socket_t socket_fd,
-                      process_information_ptr peer,
-                      actor_proxy_ptr aptr,
-                      attachable* attachable_ptr)
+std::string pid_as_string()
 {
-    //cout << "--> post_office_loop; self() = "
-    //     << process_information::get()
-    //     << ", peer = "
-    //     << *peer
-    //     << endl;
-    // destroys attachable_ptr if the function scope is leaved
-    std::unique_ptr<attachable> exit_guard(attachable_ptr);
-    if (aptr) detail::get_actor_proxy_cache().add(aptr);
-    message msg;
-    std::uint32_t rsize;
-    char* buf = nullptr;
-    size_t buf_size = 0;
-    size_t buf_allocated = 0;
-    auto meta_msg = uniform_typeid<message>();
-    const std::type_info& atom_tinfo = typeid(atom_value);
-    auto& pself = process_information::get();
-    try
-    {
-        for (;;)
-        {
-            read_from_socket(socket_fd, &rsize, sizeof(rsize));
-            if (buf_allocated < rsize)
-            {
-                // release old memory
-                delete[] buf;
-                // always allocate 1KB chunks
-                buf_allocated = 1024;
-                while (buf_allocated <= rsize)
-                {
-                    buf_allocated += 1024;
-                }
-                buf = new char[buf_allocated];
-            }
-            buf_size = rsize;
-            //cout << "[" << pinfo << "] read " << rsize << " bytes" << endl;
-            read_from_socket(socket_fd, buf, buf_size);
-            binary_deserializer bd(buf, buf_size);
-            meta_msg->deserialize(&msg, &bd);
-            cout << pself.process_id << " <-- " << (to_string(msg) + "\n");
-            if (   msg.content().size() == 1
-                && msg.content().utype_info_at(0) == atom_tinfo
-                && *reinterpret_cast<const atom_value*>(msg.content().at(0))
-                   == atom(":Monitor"))
-            {
-                actor_ptr sender = msg.sender();
-                if (sender->parent_process() == pself)
-                {
-                    //cout << pinfo << " ':Monitor'; actor id = "
-                    //     << sender->id() << endl;
-                    // local actor?
-                    // this message was send from a proxy
-                    sender->attach(new remote_observer(peer));
-                }
-            }
-            else
-            {
-                auto r = msg.receiver();
-                if (r) r->enqueue(msg);
-            }
-        }
-    }
-    catch (std::exception& e)
-    {
-        cout << "[" << process_information::get() << "] "
-             << detail::to_uniform_name(typeid(e)) << ": " << e.what() << endl;
-    }
-    cout << "kill " << detail::actor_proxy_cache().size() << " proxies" << endl;
-    detail::actor_proxy_cache().for_each([](actor_proxy_ptr& pptr)
-    {
-        cout << "send :KillProxy message" << endl;
-        if (pptr) pptr->enqueue(message(nullptr, pptr, atom(":KillProxy"),
-                                        exit_reason::remote_link_unreachable));
-    });
-    cout << "[" << process_information::get() << "] ~post_office_loop" << endl;
+    return pid_as_string(process_information::get());
 }

 struct mm_worker

@@ -186,10 +69,10 @@ struct mm_worker
     ~mm_worker()
     {
-        cout << "=> [" << process_information::get() << "]::~mm_worker()" << endl;
+        cout << "=> [" << pid_as_string() << "]::~mm_worker()" << endl;
         detail::closesocket(m_sockfd);
         m_thread.join();
-        cout << "<= [" << process_information::get() << "]::~mm_worker()" << endl;
+        cout << "<= [" << pid_as_string() << "]::~mm_worker()" << endl;
     }
 };

@@ -270,14 +153,54 @@ void middle_man_loop(native_socket_t server_socket_fd,
     barrier->wait();
     //cout << "middle_man_loop finished\n";
 }
+*/

+void read_from_socket(native_socket_t sfd, void* buf, size_t buf_size)
+{
+    char* cbuf = reinterpret_cast<char*>(buf);
+    size_t read_bytes = 0;
+    size_t left = buf_size;
+    int rres = 0;
+    size_t urres = 0;
+    do
+    {
+        rres = ::recv(sfd, cbuf + read_bytes, left, 0);
+        if (rres <= 0)
+        {
+            throw std::ios_base::failure("cannot read from closed socket");
+        }
+        urres = static_cast<size_t>(rres);
+        read_bytes += urres;
+        left -= urres;
+    }
+    while (urres < left);
+}

 } // namespace <anonmyous>

-void actor_proxy::forward_message(const process_information_ptr& piptr,
-                                  const message& msg)
-{
-    mailman_queue().push_back(new mailman_job(piptr, msg));
-}
-
+struct socket_guard
+{
+
+    bool m_released;
+    detail::native_socket_t m_socket;
+
+ public:
+
+    socket_guard(detail::native_socket_t sfd) : m_released(false), m_socket(sfd)
+    {
+    }
+
+    ~socket_guard()
+    {
+        if (!m_released) detail::closesocket(m_socket);
+    }
+
+    void release()
+    {
+        m_released = true;
+    }
+
+};

 void publish(actor_ptr& whom, std::uint16_t port)
 {

@@ -289,10 +212,21 @@ void publish(actor_ptr& whom, std::uint16_t port)
     {
         throw network_exception("could not create server socket");
     }
+    // closes the socket if an exception occurs
+    socket_guard sguard(sockfd);
     memset((char*) &serv_addr, 0, sizeof(serv_addr));
     serv_addr.sin_family = AF_INET;
     serv_addr.sin_addr.s_addr = INADDR_ANY;
     serv_addr.sin_port = htons(port);
+    int flags = fcntl(sockfd, F_GETFL, 0);
+    if (flags == -1)
+    {
+        throw network_exception("unable to get socket flags");
+    }
+    if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1)
+    {
+        throw network_exception("unable to set socket to nonblocking");
+    }
     if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
     {
         throw bind_failure(errno);

@@ -301,9 +235,12 @@ void publish(actor_ptr& whom, std::uint16_t port)
     {
         throw network_exception("listen() failed");
     }
-    intrusive_ptr<shared_barrier> barrier_ptr(new shared_barrier);
-    boost::thread(middle_man_loop, sockfd, whom, barrier_ptr).detach();
-    whom->attach(new mm_handle(sockfd, barrier_ptr));
+    // ok, no exceptions
+    sguard.release();
+    detail::post_office_publish(sockfd, whom);
+    //intrusive_ptr<shared_barrier> barrier_ptr(new shared_barrier);
+    //boost::thread(middle_man_loop, sockfd, whom, barrier_ptr).detach();
+    //whom->attach(new mm_handle(sockfd, barrier_ptr));
 }

 void publish(actor_ptr&& whom, std::uint16_t port)

@@ -350,10 +287,12 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
                      peer_pinf->node_id.size());
     process_information_ptr pinfptr(peer_pinf);
     actor_proxy_ptr result(new actor_proxy(remote_actor_id, pinfptr));
-    mailman_queue().push_back(new mailman_job(sockfd, pinfptr));
-    auto ptr = get_scheduler()->register_hidden_context();
-    boost::thread(post_office_loop, sockfd,
-                  peer_pinf, result, ptr.release()).detach();
+    detail::mailman_queue().push_back(new detail::mailman_job(sockfd, pinfptr));
+    detail::post_office_add_peer(sockfd, pinfptr, result,
+                                 std::unique_ptr<attachable>());
+    //auto ptr = get_scheduler()->register_hidden_context();
+    //boost::thread(post_office_loop, sockfd,
+    //              peer_pinf, result, ptr.release()).detach();
     return result;
 }

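publish() now hands its listening socket to the post office via post_office_publish() instead of spawning a middle_man_loop thread, and remote_actor() registers the new peer through post_office_add_peer(); the user-facing API is unchanged. A hedged usage sketch of that API; the actor argument, atom name, and port are illustrative, and the message construction only mirrors how this commit builds messages elsewhere:

    // Illustrative: publish a local actor, then connect to it from another process.
    #include "cppa/cppa.hpp"

    void server_side(cppa::actor_ptr some_actor)
    {
        // the post office now owns the listening socket and accepts peers
        cppa::publish(some_actor, 4242);
    }

    void client_side()
    {
        // yields an actor_proxy; messages to it are routed through the mailman
        cppa::actor_ptr whom = cppa::remote_actor("localhost", 4242);
        whom->enqueue(cppa::message(nullptr, whom, cppa::atom("hello")));
    }
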
unit_testing/main.cpp

@@ -101,7 +101,7 @@ int main(int argc, char** c_argv)
     RUN_TEST(test__spawn);
     RUN_TEST(test__local_group);
     RUN_TEST(test__atom);
-    RUN_TEST_A3(test__remote_actor, c_argv[0], false, argv);
+    //RUN_TEST_A3(test__remote_actor, c_argv[0], false, argv);
     cout << endl << "error(s) in all tests: " << errors << endl;

unit_testing/test__remote_actor.cpp

@@ -46,7 +46,7 @@ size_t test__remote_actor(const char* app_path, bool is_client,
     auto ping_actor = spawn(ping);
     std::uint16_t port = 4242;
     bool success = false;
-    while (!success)
+    do
     {
         try
         {

@@ -59,22 +59,24 @@ size_t test__remote_actor(const char* app_path, bool is_client,
             ++port;
         }
     }
+    while (!success);
     cout << "port = " << port << endl;
     std::string cmd;
     {
         std::ostringstream oss;
-        oss << app_path << " test__remote_actor " << port;
-        //    << " &>/dev/null";
+        oss << app_path << " test__remote_actor " << port << " &>/dev/null";
         cmd = oss.str();
     }
     // execute client_part() in a separate process,
     // connected via localhost socket
-    boost::thread child([&cmd]() { system(cmd.c_str()); });
+    //boost::thread child([&cmd]() { system(cmd.c_str()); });
     cout << __LINE__ << endl;
     await_all_others_done();
     cout << __LINE__ << endl;
     CPPA_CHECK_EQUAL(pongs(), 5);
     // wait until separate process (in sep. thread) finished execution
     cout << __LINE__ << endl;
-    child.join();
+    //child.join();
     cout << __LINE__ << endl;
     return CPPA_TEST_RESULT;
 }