cpp-libs / Actor Framework / Commits / b61316b9

Commit b61316b9, authored Apr 25, 2012 by neverlord
Parent: ad937fb8

    bye, bye select()

Showing 16 changed files with 370 additions and 864 deletions (+370 -864)
cppa/detail/actor_proxy_cache.hpp   +6    -29
cppa/detail/buffer.hpp              +2    -1
cppa/detail/mailman.hpp             +0    -98
cppa/detail/mock_scheduler.hpp      +7    -1
cppa/detail/network_manager.hpp     +2    -4
cppa/detail/post_office.hpp         +2    -4
cppa/match.hpp                      +6    -3
src/actor_proxy.cpp                 +4    -2
src/actor_proxy_cache.cpp           +43   -39
src/mailman.cpp                     +29   -72
src/mock_scheduler.cpp              +19   -5
src/network_manager.cpp             +20   -42
src/post_office.cpp                 +217  -546
src/singleton_manager.cpp           +1    -4
src/thread_pool_scheduler.cpp       +6    -6
src/unicast_network.cpp             +6    -8
cppa/detail/actor_proxy_cache.hpp  (view file @ b61316b9)

@@ -32,10 +32,12 @@
 #define ACTOR_PROXY_CACHE_HPP

 #include <string>
 #include <vector>
 #include <functional>

 #include "cppa/actor_proxy.hpp"
 #include "cppa/process_information.hpp"
+#include "cppa/util/shared_spinlock.hpp"

 namespace cppa { namespace detail {

...

@@ -49,42 +51,17 @@ class actor_proxy_cache
                        process_information::node_id_type>  // node id
             key_tuple;

-    typedef std::function<void (actor_proxy_ptr&)> new_proxy_callback;
-
  private:

-    std::map<key_tuple, process_information_ptr> m_pinfos;
-    std::map<key_tuple, actor_proxy_ptr> m_proxies;
-    new_proxy_callback m_new_cb;
-
-    process_information_ptr get_pinfo(key_tuple const& key);
+    util::shared_spinlock m_lock;
+    std::map<key_tuple, actor_proxy_ptr> m_entries;

  public:

-    // this callback is called if a new proxy instance is created
-    template<typename F>
-    void set_new_proxy_callback(F&& cb)
-    {
-        m_new_cb = std::forward<F>(cb);
-    }
-
     actor_proxy_ptr get(key_tuple const& key);

-    void add(actor_proxy_ptr& pptr);
-    size_t size() const;
-    void erase(actor_proxy_ptr const& pptr);
-
-    template<typename F>
-    void for_each(F&& fun)
-    {
-        for (auto i = m_proxies.begin(); i != m_proxies.end(); ++i)
-        {
-            fun(i->second);
-        }
-    }
+    // @returns true if pptr was successfully removed, false otherwise
+    bool erase(actor_proxy_ptr const& pptr);

 };

...
cppa/detail/buffer.hpp  (view file @ b61316b9)

@@ -188,7 +188,8 @@ class buffer
         return append_impl(fun, throw_on_error);
     }

-    bool append_from(native_socket_type sfd, int rdflags,
+    bool append_from(native_socket_type sfd,
+                     int rdflags = 0,
                      bool throw_on_error = false)
     {
         auto fun = [=]() -> int

...
cppa/detail/mailman.hpp  (view file @ b61316b9)

@@ -40,106 +40,8 @@
 namespace cppa { namespace detail {

-struct mailman_send_job
-{
-    process_information_ptr target_peer;
-    addressed_message msg;
-    inline mailman_send_job(process_information_ptr piptr,
-                            actor_ptr const& from,
-                            channel_ptr const& to,
-                            any_tuple const& content)
-        : target_peer(piptr), msg(from, to, content)
-    {
-    }
-};
-
-struct mailman_add_peer
-{
-    native_socket_type sockfd;
-    process_information_ptr pinfo;
-    inline mailman_add_peer(native_socket_type fd,
-                            process_information_ptr const& piptr)
-        : sockfd(fd), pinfo(piptr)
-    {
-    }
-};
-
-class mailman_job
-{
-
- public:
-
-    enum job_type
-    {
-        invalid_type,
-        send_job_type,
-        add_peer_type,
-        kill_type
-    };
-
-    inline mailman_job() : next(nullptr), m_type(invalid_type) { }
-
-    mailman_job(process_information_ptr piptr,
-                actor_ptr const& from,
-                channel_ptr const& to,
-                any_tuple const& omsg);
-
-    mailman_job(native_socket_type sockfd,
-                process_information_ptr const& pinfo);
-
-    static mailman_job* kill_job();
-
-    ~mailman_job();
-
-    inline mailman_send_job& send_job() { return m_send_job; }
-
-    inline mailman_add_peer& add_peer_job() { return m_add_socket; }
-
-    inline job_type type() const { return m_type; }
-
-    inline bool is_send_job() const { return m_type == send_job_type; }
-
-    inline bool is_add_peer_job() const { return m_type == add_peer_type; }
-
-    inline bool is_kill_job() const { return m_type == kill_type; }
-
-    mailman_job* next;
-
- private:
-
-    job_type m_type;
-    // unrestricted union
-    union
-    {
-        mailman_send_job m_send_job;
-        mailman_add_peer m_add_socket;
-    };
-
-    inline mailman_job(job_type jt) : next(nullptr), m_type(jt) { }
-
-};
-
 void mailman_loop();
-
-intrusive::single_reader_queue<mailman_job>& mailman_queue();

 } } // namespace cppa::detail

 #endif // MAILMAN_HPP
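Note: the removed mailman_job relied on the C++11 unrestricted-union idiom, a type tag plus placement new on construction and an explicit destructor call on the active member. A minimal stand-alone sketch of that idiom (the job/text/num names are illustrative, not from this repository):

#include <new>      // placement new
#include <string>
#include <utility>

class job
{
 public:
    enum kind { text_kind, num_kind };

    explicit job(std::string s) : m_kind(text_kind)
    {
        // placement new, because std::string has a non-trivial constructor
        new (&m_text) std::string(std::move(s));
    }

    explicit job(int i) : m_kind(num_kind), m_num(i) { }

    job(const job&) = delete;
    job& operator=(const job&) = delete;

    ~job()
    {
        // non-trivial union members need an explicit destructor call
        if (m_kind == text_kind) m_text.~basic_string();
    }

    kind type() const { return m_kind; }

 private:
    kind m_kind;
    // unrestricted union (C++11): members may have ctors/dtors
    union
    {
        std::string m_text;
        int m_num;
    };
};

This commit drops the hand-rolled tagged union entirely; mailman now receives plain any_tuple messages instead.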
cppa/detail/mock_scheduler.hpp  (view file @ b61316b9)

@@ -31,7 +31,11 @@
 #ifndef MOCK_SCHEDULER_HPP
 #define MOCK_SCHEDULER_HPP

+#include <utility>
+
 #include "cppa/scheduler.hpp"
+#include "cppa/detail/tdata.hpp"
+#include "cppa/detail/thread.hpp"

 namespace cppa { namespace detail {

...

@@ -44,7 +48,9 @@ class mock_scheduler : public scheduler
     actor_ptr spawn(std::function<void()> what, scheduling_hint);

-    static actor_ptr spawn(std::function<void()> what);
+    static actor_ptr spawn_impl(std::function<void()> what);
+
+    static thread spawn_hidden_impl(std::function<void()> what,
+                                    local_actor_ptr ctx);

     void enqueue(scheduled_actor* what);

...
cppa/detail/network_manager.hpp  (view file @ b61316b9)

@@ -43,15 +43,13 @@ class network_manager
     virtual ~network_manager();

-    virtual void write_to_pipe(pipe_msg const& what) = 0;
-
     virtual void start() = 0;

     virtual void stop() = 0;

-    virtual intrusive::single_reader_queue<mailman_job>& mailman_queue() = 0;
+    virtual void send_to_post_office(any_tuple msg) = 0;

-    virtual intrusive::single_reader_queue<post_office_msg>& post_office_queue() = 0;
+    virtual void send_to_mailman(any_tuple msg) = 0;

     static network_manager* create_singleton();

...
cppa/detail/post_office.hpp  (view file @ b61316b9)

@@ -38,12 +38,10 @@
 namespace cppa { namespace detail {

-void post_office_loop(int pipe_read_handle, int pipe_write_handle);
+void post_office_loop();

 void post_office_add_peer(native_socket_type peer_socket,
-                          process_information_ptr const& peer_ptr,
-                          actor_proxy_ptr const& peer_actor_ptr,
-                          std::unique_ptr<attachable>&& peer_observer);
+                          process_information_ptr const& peer_ptr);

 void post_office_publish(native_socket_type server_socket,
                          actor_ptr const& published_actor);

...
cppa/match.hpp  (view file @ b61316b9)

@@ -41,18 +41,21 @@ struct match_helper
     match_helper(match_helper const&) = delete;
     match_helper& operator=(match_helper const&) = delete;
     any_tuple tup;
-    match_helper(any_tuple&& t) : tup(std::move(t)) { }
+    match_helper(any_tuple t) : tup(std::move(t)) { }
     match_helper(match_helper&&) = default;
     /*
     void operator()(partial_function&& arg)
     {
         partial_function tmp{std::move(arg)};
         tmp(tup);
     }
     */
     template<class Arg0, class... Args>
     void operator()(Arg0&& arg0, Args&&... args)
     {
-        (*this)(mexpr_concat_convert(std::forward<Arg0>(arg0),
-                                     std::forward<Args>(args)...));
+        auto tmp = mexpr_concat(std::forward<Arg0>(arg0),
+                                std::forward<Args>(args)...);
+        tmp(tup);
     }
 };

...
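Note: match_helper backs the free-standing match(...) expression that the rewritten src/post_office.cpp below uses heavily. A usage sketch consistent with the API shown in this diff (the atom, values, and handlers are illustrative):

#include <iostream>
#include "cppa/cppa.hpp"

void demo()
{
    // build a tuple, then dispatch it against a set of pattern handlers
    match(make_any_tuple(atom("ADD"), 1, 2))
    (
        on(atom("ADD"), arg_match) >> [](int a, int b)
        {
            std::cout << (a + b) << std::endl;
        },
        others() >> []()
        {
            std::cout << "unexpected message" << std::endl;
        }
    );
}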
src/actor_proxy.cpp  (view file @ b61316b9)

@@ -34,6 +34,8 @@
 #include "cppa/actor_proxy.hpp"
 #include "cppa/exit_reason.hpp"
 #include "cppa/detail/mailman.hpp"
+#include "cppa/detail/network_manager.hpp"
+#include "cppa/detail/singleton_manager.hpp"

 namespace cppa {

...

@@ -47,8 +49,8 @@ void actor_proxy::forward_message(process_information_ptr const& piptr,
                                   actor* sender,
                                   any_tuple&& msg)
 {
-    auto mailman_msg = new detail::mailman_job(piptr, sender, this,
-                                               std::move(msg));
-    detail::mailman_queue().push_back(mailman_msg);
+    detail::singleton_manager::get_network_manager()
+    ->send_to_mailman(make_any_tuple(piptr, actor_ptr{sender}, std::move(msg)));
 }

 void actor_proxy::enqueue(actor* sender, any_tuple msg)

...
src/actor_proxy_cache.cpp  (view file @ b61316b9)

@@ -30,15 +30,23 @@
 #include "cppa/atom.hpp"
 #include "cppa/any_tuple.hpp"
+#include "cppa/util/shared_lock_guard.hpp"
+#include "cppa/util/upgrade_lock_guard.hpp"
+
+#include "cppa/detail/thread.hpp"
+#include "cppa/detail/network_manager.hpp"
 #include "cppa/detail/actor_proxy_cache.hpp"
+#include "cppa/detail/singleton_manager.hpp"

 // thread_specific_ptr
 #include <boost/thread/tss.hpp>

 namespace {

-boost::thread_specific_ptr<cppa::detail::actor_proxy_cache> s_proxy_cache;
+//boost::thread_specific_ptr<cppa::detail::actor_proxy_cache> s_proxy_cache;
+cppa::detail::actor_proxy_cache s_proxy_cache;

 } // namespace <anonmyous>

...

@@ -46,62 +54,58 @@ namespace cppa { namespace detail {

 actor_proxy_cache& get_actor_proxy_cache()
 {
+    /*
     if (s_proxy_cache.get() == nullptr)
     {
         s_proxy_cache.reset(new actor_proxy_cache);
     }
     return *s_proxy_cache;
+    */
+    return s_proxy_cache;
 }

-process_information_ptr
-actor_proxy_cache::get_pinfo(const actor_proxy_cache::key_tuple& key)
-{
-    auto i = m_pinfos.find(key);
-    if (i != m_pinfos.end())
-    {
-        return i->second;
-    }
-    process_information_ptr tmp(new process_information(std::get<1>(key),
-                                                        std::get<2>(key)));
-    m_pinfos.insert(std::make_pair(key, tmp));
-    return tmp;
-}
-
-actor_proxy_ptr actor_proxy_cache::get(const key_tuple& key)
+actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
 {
-    auto i = m_proxies.find(key);
-    if (i != m_proxies.end())
+    // lifetime scope of shared guard
     {
-        return i->second;
+        util::shared_lock_guard<util::shared_spinlock> guard{m_lock};
+        auto i = m_entries.find(key);
+        if (i != m_entries.end())
+        {
+            return i->second;
+        }
     }
-    // get_pinfo(key) also inserts to m_pinfos
-    actor_proxy_ptr result(new actor_proxy(std::get<0>(key), get_pinfo(key)));
-    m_proxies.insert(std::make_pair(key, result));
-    if (m_new_cb) m_new_cb(result);
-    // insert to m_proxies
-    //result->enqueue(message(result, nullptr, atom("MONITOR")));
+    actor_proxy_ptr result{new actor_proxy(std::get<0>(key),
+                           new process_information(std::get<1>(key),
+                                                   std::get<2>(key)))};
+    // lifetime scope of exclusive guard
+    {
+        lock_guard<util::shared_spinlock> guard{m_lock};
+        auto i = m_entries.find(key);
+        if (i != m_entries.end())
+        {
+            return i->second;
+        }
+        m_entries.insert(std::make_pair(key, result));
+    }
+    auto msg = make_any_tuple(atom("ADD_PROXY"), result);
+    singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
+    result->enqueue(nullptr, make_any_tuple(atom("MONITOR")));
+    result->attach_functor([result](std::uint32_t)
+    {
+        auto msg = make_any_tuple(atom("RM_PROXY"), result);
+        singleton_manager::get_network_manager()->send_to_post_office(std::move(msg));
+    });
     return result;
 }

-void actor_proxy_cache::add(actor_proxy_ptr& pptr)
-{
-    auto pinfo = pptr->parent_process_ptr();
-    key_tuple key(pptr->id(), pinfo->process_id(), pinfo->node_id());
-    m_pinfos.insert(std::make_pair(key, pptr->parent_process_ptr()));
-    m_proxies.insert(std::make_pair(key, pptr));
-    if (m_new_cb) m_new_cb(pptr);
-}
-
-void actor_proxy_cache::erase(const actor_proxy_ptr& pptr)
+bool actor_proxy_cache::erase(actor_proxy_ptr const& pptr)
 {
     auto pinfo = pptr->parent_process_ptr();
     key_tuple key(pptr->id(), pinfo->process_id(), pinfo->node_id());
-    m_proxies.erase(key);
-}
-
-size_t actor_proxy_cache::size() const
-{
-    return m_proxies.size();
+    {
+        lock_guard<util::shared_spinlock> guard{m_lock};
+        return m_entries.erase(key) > 0;
+    }
+    return false;
 }

 } } // namespace cppa::detail
src/mailman.cpp  (view file @ b61316b9)

@@ -31,6 +31,7 @@
 #include <atomic>
 #include <iostream>

 #include "cppa/cppa.hpp"
+#include "cppa/to_string.hpp"
 #include "cppa/detail/mailman.hpp"
 #include "cppa/binary_serializer.hpp"

...

@@ -51,80 +52,31 @@ using std::endl;
 // implementation of mailman.hpp
 namespace cppa { namespace detail {

-mailman_job::mailman_job(process_information_ptr piptr,
-                         const actor_ptr& from,
-                         const channel_ptr& to,
-                         const any_tuple& content)
-    : next(nullptr), m_type(send_job_type)
-{
-    new (&m_send_job) mailman_send_job(piptr, from, to, content);
-}
-
-mailman_job::mailman_job(native_socket_type sockfd,
-                         const process_information_ptr& pinfo)
-    : next(0), m_type(add_peer_type)
-{
-    new (&m_add_socket) mailman_add_peer(sockfd, pinfo);
-}
-
-mailman_job* mailman_job::kill_job()
-{
-    return new mailman_job(kill_type);
-}
-
-mailman_job::~mailman_job()
-{
-    switch (m_type)
-    {
-        case send_job_type:
-        {
-            m_send_job.~mailman_send_job();
-            break;
-        }
-        case add_peer_type:
-        {
-            m_add_socket.~mailman_add_peer();
-            break;
-        }
-        default:
-        {
-            // union doesn't contain a valid object
-            break;
-        }
-    }
-}
-
 // known issues: send() should be asynchronous and select() should be used
 void mailman_loop()
 {
+    bool done = false;
     // serializes outgoing messages
     binary_serializer bs;
-    // current active job
-    std::unique_ptr<mailman_job> job;
-    // caches mailman_queue()
-    auto& mqueue = mailman_queue();
     // connected tcp peers
     std::map<process_information, native_socket_type> peers;
-    for (;;)
-    {
-        job.reset(mqueue.pop());
-        if (job->is_send_job())
+    do_receive
+    (
+        on_arg_match >> [&](process_information_ptr target_peer,
+                            addressed_message msg)
         {
-            mailman_send_job& sjob = job->send_job();
             // forward message to receiver peer
-            auto peer_element = peers.find(*(sjob.target_peer));
-            if (peer_element != peers.end())
+            auto i = peers.find(*target_peer);
+            if (i != peers.end())
             {
                 bool disconnect_peer = false;
-                auto peer = peer_element->second;
+                auto peer_fd = i->second;
                 try
                 {
-                    bs << sjob.msg;
+                    bs << msg;
                     auto size32 = static_cast<std::uint32_t>(bs.size());
-                    DEBUG("--> " << to_string(sjob.msg));
+                    DEBUG("--> " << to_string(msg));
                     // write size of serialized message
-                    auto sent = ::send(peer, &size32, sizeof(std::uint32_t), 0);
+                    auto sent = ::send(peer_fd, &size32, sizeof(std::uint32_t), 0);
                     if (   sent != static_cast<int>(sizeof(std::uint32_t))
-                        || static_cast<int>(bs.size()) != ::send(peer,
+                        || static_cast<int>(bs.size()) != ::send(peer_fd,
                                                                  bs.data(),
                                                                  bs.size(),
                                                                  0))
                     {
                         disconnect_peer = true;
                         DEBUG("too few bytes written");

...

@@ -140,8 +92,8 @@ void mailman_loop()
                 {
                     DEBUG("peer disconnected (error during send)");
                     //closesocket(peer);
-                    post_office_close_socket(peer);
-                    peers.erase(peer_element);
+                    post_office_close_socket(peer_fd);
+                    peers.erase(i);
                 }
                 bs.reset();
             }

...

@@ -149,28 +101,33 @@ void mailman_loop()
-            else
-            {
-                DEBUG("message to an unknown peer");
-            }
-        }
-        else if (job->is_add_peer_job())
+            // else: unknown peer
+        },
+        on_arg_match >> [&](native_socket_type sockfd,
+                            process_information_ptr pinfo)
         {
-            mailman_add_peer& pjob = job->add_peer_job();
-            auto i = peers.find(*(pjob.pinfo));
+            auto i = peers.find(*pinfo);
             if (i == peers.end())
             {
-                //cout << "mailman added " << pjob.pinfo->process_id() << "@"
-                //     << to_string(pjob.pinfo->node_id()) << endl;
-                peers.insert(std::make_pair(*(pjob.pinfo), pjob.sockfd));
+                peers.insert(std::make_pair(*pinfo, sockfd));
             }
             else
             {
                 DEBUG("add_peer_job failed: peer already known");
             }
-        }
-        else if (job->is_kill_job())
+        },
+        on(atom("DONE")) >> [&]()
+        {
+            done = true;
+        },
+        others() >> [&]()
         {
-            return;
+            std::string str = "unexpected message in post_office: ";
+            str += to_string(self->last_dequeued());
+            CPPA_CRITICAL(str.c_str());
         }
-    }
+    )
+    .until(gref(done));
 }

 } } // namespace cppa::detail
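Note: the rewritten mailman_loop is an instance of libcppa's do_receive(...).until(...) receive-loop DSL, which this commit uses to replace the hand-written queue loop: each on(...) clause builds a partial function, and the loop repeats until the guard expression becomes true. A minimal stand-alone sketch of the same pattern (worker_loop, the ADD atom, and counter are illustrative, not from this commit):

#include "cppa/cppa.hpp"

// runs inside an actor context, e.g. a converted thread context
void worker_loop()
{
    bool done = false;
    int counter = 0;
    do_receive
    (
        on(atom("ADD"), arg_match) >> [&](int value)
        {
            counter += value;
        },
        on(atom("DONE")) >> [&]()
        {
            done = true;
        }
    )
    .until(gref(done)); // gref() captures `done` by reference for the guard
}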
src/mock_scheduler.cpp  (view file @ b61316b9)

@@ -63,17 +63,31 @@ void run_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
     cppa::detail::dec_actor_count();
 }

+void run_hidden_actor(cppa::intrusive_ptr<cppa::local_actor> m_self,
+                      std::function<void()> what)
+{
+    cppa::self.set(m_self.get());
+    try { what(); }
+    catch (...) { }
+    cppa::self.set(nullptr);
+}
+
 } // namespace <anonymous>

 namespace cppa { namespace detail {

-actor_ptr mock_scheduler::spawn(std::function<void()> what)
+thread mock_scheduler::spawn_hidden_impl(std::function<void()> what,
+                                         local_actor_ptr ctx)
+{
+    return thread{run_hidden_actor, ctx, std::move(what)};
+}
+
+actor_ptr mock_scheduler::spawn_impl(std::function<void()> what)
 {
     inc_actor_count();
     CPPA_MEMORY_BARRIER();
-    intrusive_ptr<local_actor> ctx(new detail::converted_thread_context);
-    thread(run_actor, ctx, std::move(what)).detach();
-    return ctx;
+    intrusive_ptr<local_actor> ctx{new detail::converted_thread_context};
+    thread{run_actor, ctx, std::move(what)}.detach();
+    return std::move(ctx);
 }

 actor_ptr mock_scheduler::spawn(scheduled_actor*)

...

@@ -85,7 +99,7 @@ actor_ptr mock_scheduler::spawn(scheduled_actor*)
 actor_ptr mock_scheduler::spawn(std::function<void()> what, scheduling_hint)
 {
-    return spawn(std::move(what));
+    return spawn_impl(what);
 }

 void mock_scheduler::enqueue(scheduled_actor*)

...
src/network_manager.cpp  (view file @ b61316b9)

@@ -40,8 +40,10 @@
 #include "cppa/detail/thread.hpp"
 #include "cppa/detail/mailman.hpp"
 #include "cppa/detail/post_office.hpp"
+#include "cppa/detail/mock_scheduler.hpp"
 #include "cppa/detail/post_office_msg.hpp"
 #include "cppa/detail/network_manager.hpp"
+#include "cppa/detail/converted_thread_context.hpp"

 namespace {

...

@@ -51,61 +53,37 @@ using namespace cppa::detail;
 struct network_manager_impl : network_manager
 {
-    typedef intrusive::single_reader_queue<post_office_msg> post_office_queue_t;
-    typedef intrusive::single_reader_queue<mailman_job> mailman_queue_t;
+    local_actor_ptr m_mailman;
+    thread m_mailman_thread;

-    int m_pipe[2]; // m_pipe[0]: read; m_pipe[1]: write
+    local_actor_ptr m_post_office;
+    thread m_post_office_thread;

-    mailman_queue_t m_mailman_queue;
-    post_office_queue_t m_post_office_queue;
-
-    thread m_loop; // post office thread
-
-    void start() /*override*/
+    void start() // override
     {
-        if (pipe(m_pipe) != 0)
-        {
-            char* error_cstr = strerror(errno);
-            std::string error_str = "pipe(): ";
-            error_str += error_cstr;
-            free(error_cstr);
-            throw std::logic_error(error_str);
-        }
-        m_loop = thread(post_office_loop, m_pipe[0], m_pipe[1]);
-    }
+        m_post_office.reset(new converted_thread_context);
+        m_post_office_thread = mock_scheduler::spawn_hidden_impl(post_office_loop,
+                                                                 m_post_office);

-    void write_to_pipe(pipe_msg const& what)
-    {
-        if (write(m_pipe[1], what, pipe_msg_size) != (int) pipe_msg_size)
-        {
-            std::cerr << "FATAL: cannot write to pipe" << std::endl;
-            abort();
-        }
-    }
-
-    inline int write_handle() const
-    {
-        return m_pipe[1];
+        m_mailman.reset(new converted_thread_context);
+        m_mailman_thread = mock_scheduler::spawn_hidden_impl(mailman_loop,
+                                                             m_mailman);
     }

-    mailman_queue_t& mailman_queue()
+    void stop() // override
     {
-        return m_mailman_queue;
+        m_post_office->enqueue(nullptr, make_any_tuple(atom("DONE")));
+        m_mailman->enqueue(nullptr, make_any_tuple(atom("DONE")));
+        m_post_office_thread.join();
+        m_mailman_thread.join();
     }

-    post_office_queue_t& post_office_queue()
+    void send_to_post_office(any_tuple msg)
     {
-        return m_post_office_queue;
+        m_post_office->enqueue(nullptr, std::move(msg));
     }

-    void stop() /*override*/
+    void send_to_mailman(any_tuple msg)
     {
-        pipe_msg msg = {shutdown_event, 0};
-        write_to_pipe(msg);
-        // m_loop calls close(m_pipe[0])
-        m_loop.join();
-        close(m_pipe[0]);
-        close(m_pipe[1]);
+        m_mailman->enqueue(nullptr, std::move(msg));
     }
 };

...
src/post_office.cpp  (view file @ b61316b9)

@@ -46,17 +46,14 @@
 #include <sys/time.h>
 #include <sys/types.h>

 // used cppa classes
+#include "cppa/cppa.hpp"
 #include "cppa/atom.hpp"
 #include "cppa/match.hpp"
 #include "cppa/config.hpp"
 #include "cppa/to_string.hpp"
 #include "cppa/deserializer.hpp"
 #include "cppa/binary_deserializer.hpp"

-// used cppa intrusive containers
-#include "cppa/intrusive/single_reader_queue.hpp"
-
 // used cppa details
 #include "cppa/detail/thread.hpp"
 #include "cppa/detail/buffer.hpp"
 #include "cppa/detail/mailman.hpp"

...

@@ -98,18 +95,27 @@ static_assert((s_max_buffer_size % s_chunk_size) == 0,
 static_assert(sizeof(cppa::detail::native_socket_type) == sizeof(std::uint32_t),
               "sizeof(native_socket_t) != sizeof(std::uint32_t)");

-constexpr int s_rdflag = MSG_DONTWAIT;
-
 } // namespace <anonmyous>

 namespace cppa { namespace detail {

-intrusive::single_reader_queue<mailman_job>& mailman_queue()
+template<typename... Args>
+inline void send2po(Args&&... args)
 {
-    return singleton_manager::get_network_manager()->mailman_queue();
+    singleton_manager::get_network_manager()
+    ->send_to_post_office(make_any_tuple(std::forward<Args>(args)...));
 }

+class po_doorman;
+
+template<class Fun>
+struct scope_guard
+{
+    Fun m_fun;
+    scope_guard(Fun&& fun) : m_fun(fun) { }
+    ~scope_guard() { m_fun(); }
+};
+
+template<class Fun>
+scope_guard<Fun> make_scope_guard(Fun fun)
+{
+    return {std::move(fun)};
+}
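Note: scope_guard above is the classic RAII cleanup helper; the stored callable runs when the guard goes out of scope, on every exit path including early returns and exceptions. A short usage sketch (handle_connection is illustrative, not from this commit):

// guarantee cleanup on every exit path of the function
void handle_connection(native_socket_type fd)
{
    auto guard = make_scope_guard([&]()
    {
        closesocket(fd); // runs on normal return, early return, or exception
    });
    // ... read from fd; any return or throw still closes the socket ...
}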
// represents a TCP connection to another peer
class po_peer

...

@@ -128,287 +134,189 @@ class po_peer
     state m_state;
     // TCP socket to remote peer
     native_socket_type m_socket;
-    // TCP socket identifying our parent (that accepted m_socket)
-    native_socket_type m_parent_socket;
     // caches process_information::get()
     process_information_ptr m_pself;
     // the process information or our remote peer
     process_information_ptr m_peer;
-    std::unique_ptr<attachable> m_observer;
-    buffer<s_chunk_size, s_max_buffer_size> m_rdbuf;
-    std::list<actor_proxy_ptr> m_children;
-    const uniform_type_info* m_meta_msg;
+    thread m_thread;

  public:

-    explicit po_peer(post_office_msg::add_peer& from)
-        : m_state(wait_for_msg_size)
-        , m_socket(from.sockfd)
-        , m_parent_socket(-1)
-        , m_pself(process_information::get())
-        , m_peer(std::move(from.peer))
-        , m_observer(std::move(from.attachable_ptr))
-        , m_meta_msg(uniform_typeid<detail::addressed_message>())
-    {
-    }
-
-    explicit po_peer(native_socket_type sockfd, native_socket_type parent_socket)
-        : m_state(wait_for_process_info)
-        , m_socket(sockfd)
-        , m_parent_socket(parent_socket)
+    po_peer(native_socket_type fd, process_information_ptr peer)
+        : m_state((peer) ? wait_for_msg_size : wait_for_process_info)
+        , m_socket(fd)
         , m_pself(process_information::get())
-        , m_meta_msg(uniform_typeid<detail::addressed_message>())
-    {
-        m_rdbuf.reset(sizeof(std::uint32_t) + process_information::node_id_size);
-    }
-
-    po_peer(po_peer&& other)
-        : m_state(other.m_state)
-        , m_socket(other.m_socket)
-        , m_parent_socket(other.m_parent_socket)
-        , m_pself(process_information::get())
-        , m_peer(std::move(other.m_peer))
-        , m_observer(std::move(other.m_observer))
-        , m_rdbuf(std::move(other.m_rdbuf))
-        , m_children(std::move(other.m_children))
-        , m_meta_msg(other.m_meta_msg)
+        , m_peer(std::move(peer))
     {
-        other.m_socket = -1;
-        other.m_parent_socket = -1;
-        // other.m_children.clear();
     }

-    native_socket_type get_socket() const { return m_socket; }
-
-    // returns true if @p pod is the parent of this
-    inline bool parent_exited(const po_doorman& pod);
-
-    void add_child(const actor_proxy_ptr& pptr)
-    {
-        CPPA_REQUIRE(pptr.get() != nullptr);
-        if (pptr) m_children.push_back(pptr);
-    }
-
-    inline size_t children_count() const { return m_children.size(); }
-
-    inline bool has_parent() const { return m_parent_socket != -1; }
-
-    // removes pptr from the list of children and returns
-    // a <bool, size_t> pair, whereas: first = true if pptr is a child of this
-    // second = number of remaining children
-    bool remove_child(const actor_proxy_ptr& pptr)
-    {
-        auto end = m_children.end();
-        auto i = std::find(m_children.begin(), end, pptr);
-        if (i != end)
-        {
-            m_children.erase(i);
-            return true;
-        }
-        return false;
-    }
-
     ~po_peer()
     {
-        if (!m_children.empty())
-        {
-            auto msg = make_cow_tuple(atom("KILL_PROXY"),
-                                      exit_reason::remote_link_unreachable);
-            for (actor_proxy_ptr& pptr : m_children)
-            {
-                pptr->enqueue(nullptr, msg);
-            }
-        }
-        if (m_socket != -1) closesocket(m_socket);
+        closesocket(m_socket);
+        m_thread.join();
     }

-    // @returns false if an error occured; otherwise true
-    bool read_and_continue()
+    inline native_socket_type get_socket() const { return m_socket; }
+
+    void start()
+    {
+        m_thread = thread{std::bind(&po_peer::operator(), this)};
+    }
+
+    void operator()()
     {
         static constexpr size_t wfp_size =   sizeof(std::uint32_t)
                                            + process_information::node_id_size;
-        bool read_more = false;
-        do
+        auto nm = singleton_manager::get_network_manager();
+        auto meta_msg = uniform_typeid<addressed_message>();
+        buffer<s_chunk_size, s_max_buffer_size> rdbuf;
+        auto guard = make_scope_guard([&]()
+        {
+            send2po(atom("RM_PEER"), m_socket);
+        });
+        for (;;)
         {
-            read_more = false;
             switch (m_state)
             {
                 case wait_for_process_info:
                 {
-                    if (m_rdbuf.final_size() != wfp_size)
-                    {
-                        m_rdbuf.reset(wfp_size);
-                    }
-                    if (m_rdbuf.append_from(m_socket, s_rdflag) == false)
+                    if (rdbuf.final_size() != wfp_size)
                     {
-                        return false;
+                        rdbuf.reset(wfp_size);
                     }
-                    if (m_rdbuf.ready() == false)
+                    if (rdbuf.append_from(m_socket) == false)
                     {
-                        break;
+                        DEBUG("m_rdbuf.append_from() failed");
+                        return;
                     }
+                    CPPA_REQUIRE(rdbuf.ready());
                     std::uint32_t process_id;
-                    memcpy(&process_id, m_rdbuf.data(), sizeof(std::uint32_t));
+                    memcpy(&process_id, rdbuf.data(), sizeof(std::uint32_t));
                     process_information::node_id_type node_id;
-                    memcpy(node_id.data(), m_rdbuf.data() + sizeof(std::uint32_t),
+                    memcpy(node_id.data(), rdbuf.data() + sizeof(std::uint32_t),
                            process_information::node_id_size);
                     m_peer.reset(new process_information(process_id, node_id));
                     // inform mailman about new peer
-                    mailman_queue().push_back(new mailman_job(m_socket, m_peer));
-                    m_rdbuf.reset();
+                    nm->send_to_mailman(make_any_tuple(m_socket, m_peer));
+                    rdbuf.reset();
                     m_state = wait_for_msg_size;
                     DEBUG("pinfo read: " << m_peer->process_id()
                           << "@" << to_string(m_peer->node_id()));
                     // fall through and try to read more from socket
                 }
                 case wait_for_msg_size:
                 {
-                    if (m_rdbuf.final_size() != sizeof(std::uint32_t))
+                    if (rdbuf.final_size() != sizeof(std::uint32_t))
                     {
-                        m_rdbuf.reset(sizeof(std::uint32_t));
+                        rdbuf.reset(sizeof(std::uint32_t));
                     }
-                    if (!m_rdbuf.append_from(m_socket, s_rdflag)) return false;
-                    if (m_rdbuf.ready() == false)
+                    if (!rdbuf.append_from(m_socket))
                     {
-                        break;
+                        DEBUG("m_rdbuf.append_from() failed");
+                        return;
                     }
+                    CPPA_REQUIRE(rdbuf.ready());
                     // read and set message size
                     std::uint32_t msg_size;
-                    memcpy(&msg_size, m_rdbuf.data(), sizeof(std::uint32_t));
-                    m_rdbuf.reset(msg_size);
+                    memcpy(&msg_size, rdbuf.data(), sizeof(std::uint32_t));
+                    rdbuf.reset(msg_size);
                     m_state = read_message;
                     // fall through and try to read more from socket
                 }
                 case read_message:
                 {
-                    if (!m_rdbuf.append_from(m_socket, s_rdflag))
-                    {
-                        // could not read from socket
-                        return false;
-                    }
-                    if (m_rdbuf.ready() == false)
+                    if (!rdbuf.append_from(m_socket))
                     {
-                        // wait for new data
-                        break;
+                        DEBUG("m_rdbuf.append_from() failed");
+                        return;
                     }
+                    CPPA_REQUIRE(rdbuf.ready());
                     addressed_message msg;
-                    binary_deserializer bd(m_rdbuf.data(), m_rdbuf.size());
+                    binary_deserializer bd(rdbuf.data(), rdbuf.size());
                     try
                     {
-                        m_meta_msg->deserialize(&msg, &bd);
+                        meta_msg->deserialize(&msg, &bd);
                     }
                     catch (std::exception& e)
                     {
                         // unable to deserialize message (format error)
                         DEBUG(to_uniform_name(typeid(e)) << ": " << e.what());
-                        return false;
+                        return;
                     }
                     auto& content = msg.content();
                     DEBUG("<-- " << to_string(msg));
-                    // intercept "MONITOR" messages
-                    if (   content.size() == 1
-                        && t_atom_actor_ptr_types[0] == content.type_at(0)
-                        && content.get_as<atom_value>(0) == atom("MONITOR"))
-                    {
-                        auto receiver = msg.receiver().downcast<actor>();
-                        CPPA_REQUIRE(receiver.get() != nullptr);
-                        if (!receiver)
-                        {
-                            DEBUG("empty receiver");
-                        }
-                        else if (receiver->parent_process() == *process_information::get())
-                        {
-                            //cout << pinfo << " ':Monitor'; actor id = "
-                            //     << sender->id() << endl;
-                            // local actor?
-                            // this message was send from a proxy
-                            receiver->attach_functor([=](std::uint32_t reason)
-                            {
-                                any_tuple kmsg = make_cow_tuple(atom("KILL_PROXY"),
-                                                                reason);
-                                auto mjob = new detail::mailman_job(m_peer,
-                                                                    receiver,
-                                                                    receiver,
-                                                                    kmsg);
-                                detail::mailman_queue().push_back(mjob);
-                            });
-                        }
-                        else
-                        {
-                            DEBUG(":Monitor received for an remote actor");
-                        }
-                    }
-                    // intercept "LINK" messages
-                    else if (   content.size() == 2
-                             && t_atom_actor_ptr_types[0] == content.type_at(0)
-                             && t_atom_actor_ptr_types[1] == content.type_at(1)
-                             && content.get_as<atom_value>(0) == atom("LINK"))
-                    {
-                        CPPA_REQUIRE(msg.sender()->is_proxy());
-                        auto whom = msg.sender().downcast<actor_proxy>();
-                        auto to = content.get_as<actor_ptr>(1);
-                        if ((whom) && (to)) whom->local_link_to(to);
-                    }
-                    // intercept "UNLINK" messages
-                    else if (   content.size() == 2
-                             && t_atom_actor_ptr_types[0] == content.type_at(0)
-                             && t_atom_actor_ptr_types[1] == content.type_at(1)
-                             && content.get_as<atom_value>(0) == atom("UNLINK"))
-                    {
-                        CPPA_REQUIRE(msg.sender()->is_proxy());
-                        auto whom = msg.sender().downcast<actor_proxy>();
-                        auto from = content.get_as<actor_ptr>(1);
-                        if ((whom) && (from)) whom->local_unlink_from(from);
-                    }
-                    else
-                    {
-                        if (msg.receiver())
-                        {
-                            msg.receiver()->enqueue(msg.sender().get(),
-                                                    std::move(msg.content()));
-                        }
-                        else
-                        {
-                            DEBUG("empty receiver");
-                        }
-                    }
-                    m_rdbuf.reset();
+                    match(content)
+                    (
+                        on(atom("MONITOR")) >> [&]()
+                        {
+                            auto receiver = msg.receiver().downcast<actor>();
+                            CPPA_REQUIRE(receiver.get() != nullptr);
+                            if (!receiver)
+                            {
+                                DEBUG("empty receiver");
+                            }
+                            else if (receiver->parent_process() == *process_information::get())
+                            {
+                                // this message was send from a proxy
+                                receiver->attach_functor([=](std::uint32_t reason)
+                                {
+                                    addressed_message kmsg{receiver, receiver,
+                                        make_any_tuple(atom("KILL_PROXY"), reason)};
+                                    nm->send_to_mailman(make_any_tuple(m_peer, kmsg));
+                                });
+                            }
+                            else
+                            {
+                                DEBUG("MONITOR received for a remote actor");
+                            }
+                        },
+                        on(atom("LINK"), arg_match) >> [&](actor_ptr ptr)
+                        {
+                            if (msg.sender()->is_proxy() == false)
+                            {
+                                DEBUG("msg.sender() is not a proxy");
+                                return;
+                            }
+                            auto whom = msg.sender().downcast<actor_proxy>();
+                            if ((whom) && (ptr)) whom->local_link_to(ptr);
+                        },
+                        on(atom("UNLINK"), arg_match) >> [](actor_ptr ptr)
+                        {
+                            if (ptr->is_proxy() == false)
+                            {
+                                DEBUG("msg.sender() is not a proxy");
+                                return;
+                            }
+                            auto whom = ptr.downcast<actor_proxy>();
+                            if ((whom) && (ptr)) whom->local_unlink_from(ptr);
+                        },
+                        others() >> [&]()
+                        {
+                            if (msg.receiver())
+                            {
+                                msg.receiver()->enqueue(msg.sender().get(),
+                                                        std::move(msg.content()));
+                            }
+                            else
+                            {
+                                DEBUG("empty receiver");
+                            }
+                        }
+                    );
+                    rdbuf.reset();
                     m_state = wait_for_msg_size;
-                    read_more = true;
                     break;
                 }
                 default:
                 {
-                    throw std::logic_error("illegal state");
+                    CPPA_CRITICAL("illegal state");
                 }
             }
         }
-        while (read_more);
-        return true;
     }

 };

...
@@ -420,17 +328,16 @@ class po_doorman
     // server socket
     native_socket_type m_socket;
     actor_ptr published_actor;
-    std::list<po_peer>* m_peers;
     // caches process_information::get()
     process_information_ptr m_pself;
+    thread m_thread;

  public:

-    po_doorman(post_office_msg::add_server_socket& assm,
-               std::list<po_peer>* peers)
-        : m_socket(assm.server_sockfd)
-        , published_actor(assm.published_actor)
-        , m_peers(peers)
+    po_doorman(native_socket_type fd, actor_ptr mactor)
+        : m_socket(fd)
+        , published_actor(std::move(mactor))
         , m_pself(process_information::get())
     {
    }

...

@@ -438,330 +345,114 @@ class po_doorman
     ~po_doorman()
     {
-        if (m_socket != -1) closesocket(m_socket);
+        closesocket(m_socket);
+        m_thread.join();
     }

-    po_doorman(po_doorman&& other)
-        : m_socket(other.m_socket)
-        , published_actor(std::move(other.published_actor))
-        , m_peers(other.m_peers)
-        , m_pself(process_information::get())
-    {
-        other.m_socket = -1;
-    }
-
-    inline native_socket_type get_socket() const
+    void start()
     {
-        return m_socket;
+        m_thread = thread{std::bind(&po_doorman::operator(), this)};
     }

-    // @returns false if an error occured; otherwise true
-    bool read_and_continue()
+    void operator()()
     {
         sockaddr addr;
         socklen_t addrlen;
-        memset(&addr, 0, sizeof(addr));
-        memset(&addrlen, 0, sizeof(addrlen));
-        auto sfd = ::accept(m_socket, &addr, &addrlen);
-        if (sfd < 0)
+        for (;;)
         {
-            switch (errno)
+            memset(&addr, 0, sizeof(addr));
+            memset(&addrlen, 0, sizeof(addrlen));
+            auto sfd = ::accept(m_socket, &addr, &addrlen);
+            if (sfd < 0)
             {
-                case EAGAIN:
-#               if EAGAIN != EWOULDBLOCK
-                case EWOULDBLOCK:
-#               endif
-                {
-                    // just try again
-                    return true;
-                }
-                default: return false;
+                DEBUG("accept failed (actor unpublished?)");
+                return;
             }
+            auto id = published_actor->id();
+            std::uint32_t process_id = m_pself->process_id();
+            ::send(sfd, &id, sizeof(std::uint32_t), 0);
+            ::send(sfd, &process_id, sizeof(std::uint32_t), 0);
+            ::send(sfd, m_pself->node_id().data(), m_pself->node_id().size(), 0);
+            send2po(atom("ADD_PEER"), sfd, process_information_ptr{});
+            DEBUG("socket accepted; published actor: " << id);
         }
-        auto id = published_actor->id();
-        std::uint32_t process_id = m_pself->process_id();
-        ::send(sfd, &id, sizeof(std::uint32_t), 0);
-        ::send(sfd, &process_id, sizeof(std::uint32_t), 0);
-        ::send(sfd, m_pself->node_id().data(), m_pself->node_id().size(), 0);
-        m_peers->push_back(po_peer(sfd, m_socket));
-        DEBUG("socket accepted; published actor: " << id);
-        return true;
     }

 };

-inline bool po_peer::parent_exited(const po_doorman& pod)
-{
-    if (m_parent_socket == pod.get_socket())
-    {
-        m_parent_socket = -1;
-        return true;
-    }
-    return false;
-}
-// starts and stops mailman_loop
-struct mailman_worker
-{
-    thread m_thread;
-    mailman_worker() : m_thread(mailman_loop)
-    {
-    }
-    ~mailman_worker()
-    {
-        mailman_queue().push_back(mailman_job::kill_job());
-        m_thread.join();
-    }
-};
-
-void post_office_loop(int pipe_read_handle, int pipe_write_handle)
+void post_office_loop()
 {
-    mailman_worker mworker;
-    // map of all published actors (actor_id => list<doorman>)
-    std::map<std::uint32_t, std::list<po_doorman> > doormen;
-    // list of all connected peers
+    bool done = false;
+    // map of all published actors
+    std::map<actor_id, std::list<po_doorman> > doormen;
+    // list of all peers to which we established a connection via remote_actor()
     std::list<po_peer> peers;
-    // readset for select()
-    fd_set readset;
-    // maximum number of all socket descriptors for select()
-    int maxfd = 0;
-    // initialize variables for select()
-    FD_ZERO(&readset);
-    maxfd = pipe_read_handle;
-    FD_SET(pipe_read_handle, &readset);
-    // keeps track about what peer we are iterating at the moment
-    po_peer* selected_peer = nullptr;
-    // our event queue
-    auto& msg_queue = singleton_manager::get_network_manager()
-                      ->post_office_queue();
-    auto pself = process_information::get();
-    // needed for lookups in our proxy cache
-    actor_proxy_cache::key_tuple proxy_cache_key(0, // set on lookup
-                                                 pself->process_id(),
-                                                 pself->node_id());
-    // initialize proxy cache
-    get_actor_proxy_cache().set_new_proxy_callback([&](actor_proxy_ptr& pptr)
-    {
-        DEBUG("new_proxy_callback, actor id = " << pptr->id());
-        // it's ok to access objects on the stack, since this callback
-        // is guaranteed to be executed in the same thread
-        if (selected_peer == nullptr)
-        {
-            throw std::logic_error("selected_peer == nullptr");
-        }
-        pptr->enqueue(nullptr, make_cow_tuple(atom("MONITOR")));
-        selected_peer->add_child(pptr);
-        auto aid = pptr->id();
-        auto pptr_copy = pptr;
-        pptr->attach_functor([&msg_queue, aid, pipe_write_handle, pptr_copy]
-                             (std::uint32_t)
-        {
-            // this callback is not guaranteed to be executed in the same thread
-            msg_queue.push_back(new post_office_msg(pptr_copy));
-            pipe_msg msg = {rd_queue_event, 0};
-            if (write(pipe_write_handle, msg, pipe_msg_size) != (int) pipe_msg_size)
-            {
-                cerr << "FATAL: cannot write to pipe" << endl;
-                abort();
-            }
-        });
-    });
-    for (;;)
-    {
-        if (select(maxfd + 1, &readset, nullptr, nullptr, nullptr) <= 0)
-        {
-            // must not happen
-            perror("select()");
-            exit(3);
-        }
-        // iterate over all peers and remove peers on errors
-        peers.remove_if([&](po_peer& peer) -> bool
-        {
-            if (FD_ISSET(peer.get_socket(), &readset))
-            {
-                selected_peer = &peer;
-                return peer.read_and_continue() == false;
-            }
-            return false;
-        });
-        selected_peer = nullptr;
-        // iterate over all doormen (accept new connections)
-        // and remove doormen on errors
-        for (auto& kvp : doormen)
-        {
-            // iterate over all doormen and remove doormen on error
-            kvp.second.remove_if([&](po_doorman& doorman) -> bool
-            {
-                return    FD_ISSET(doorman.get_socket(), &readset)
-                       && doorman.read_and_continue() == false;
-            });
-        }
-        // read events from pipe
-        if (FD_ISSET(pipe_read_handle, &readset))
-        {
-            pipe_msg pmsg;
-            //memcpy(pmsg, pipe_msg_buf.data(), pipe_msg_buf.size());
-            //pipe_msg_buf.clear();
-            if (::read(pipe_read_handle, &pmsg, pipe_msg_size) != (int) pipe_msg_size)
-            {
-                cerr << "FATAL: cannot read from pipe" << endl;
-                abort();
-            }
-            switch (pmsg[0])
-            {
-                case rd_queue_event:
-                {
-                    DEBUG("rd_queue_event");
-                    std::unique_ptr<post_office_msg> pom{msg_queue.try_pop()};
-                    CPPA_REQUIRE(pom.get() != nullptr);
-                    if (pom->is_add_peer_msg())
-                    {
-                        DEBUG("add_peer_msg");
-                        auto& apm = pom->as_add_peer_msg();
-                        actor_proxy_ptr pptr = apm.first_peer_actor;
-                        peers.push_back(po_peer(apm));
-                        selected_peer = &(peers.back());
-                        if (pptr)
-                        {
-                            DEBUG("proxy added via post_office_msg");
-                            get_actor_proxy_cache().add(pptr);
-                        }
-                        selected_peer = nullptr;
-                    }
-                    else if (pom->is_add_server_socket_msg())
-                    {
-                        DEBUG("add_server_socket_msg");
-                        auto& assm = pom->as_add_server_socket_msg();
-                        auto& pactor = assm.published_actor;
-                        if (pactor)
-                        {
-                            auto aid = pactor->id();
-                            auto callback = [aid](std::uint32_t)
-                            {
-                                DEBUG("call post_office_unpublish() ...");
-                                post_office_unpublish(aid);
-                            };
-                            if (pactor->attach_functor(std::move(callback)))
-                            {
-                                auto& dm = doormen[aid];
-                                dm.push_back(po_doorman(assm, &peers));
-                                DEBUG("new doorman");
-                            }
-                        }
-                        else
-                        {
-                            DEBUG("nullptr published");
-                        }
-                    }
-                    else if (pom->is_proxy_exited_msg())
-                    {
-                        DEBUG("proxy_exited_msg");
-                        auto pptr = std::move(pom->as_proxy_exited_msg().proxy_ptr);
-                        if (pptr)
-                        {
-                            // get parent of pptr
-                            auto i = peers.begin();
-                            auto end = peers.end();
-                            DEBUG("search parent of exited proxy");
-                            while (i != end)
-                            {
-                                if (i->remove_child(pptr))
-                                {
-                                    DEBUG("found parent of proxy");
-                                    if (i->children_count() == 0)
-                                    {
-                                        // disconnect peer if we don't know any
-                                        // actor of it and if the published
-                                        // actor already exited
-                                        // (this is the case if the peer doesn't
-                                        // have a parent)
-                                        if (i->has_parent() == false)
-                                        {
-                                            DEBUG("removed peer");
-                                            peers.erase(i);
-                                        }
-                                    }
-                                    i = end; // done
-                                }
-                                else
-                                {
-                                    DEBUG("... next iteration");
-                                    ++i; // next iteration
-                                }
-                            }
-                        }
-                        else DEBUG("pptr == nullptr");
-                    }
-                    break;
-                }
-                case unpublish_actor_event:
-                {
-                    DEBUG("unpublish_actor_event");
-                    auto kvp = doormen.find(pmsg[1]);
-                    if (kvp != doormen.end())
-                    {
-                        DEBUG("erase doorman from map");
-                        for (po_doorman& dm : kvp->second)
-                        {
-                            // remove peers with no children and no parent
-                            // (that are peers that connected to an already
-                            // exited actor and where we don't know any
-                            // actor from)
-                            peers.remove_if([&](po_peer& ppeer)
-                            {
-                                return    ppeer.parent_exited(dm)
-                                       && ppeer.children_count() == 0;
-                            });
-                        }
-                        doormen.erase(kvp);
-                    }
-                    break;
-                }
-                case close_socket_event:
-                {
-                    DEBUG("close_socket_event");
-                    auto sockfd = static_cast<native_socket_type>(pmsg[1]);
-                    auto end = peers.end();
-                    auto i = std::find_if(peers.begin(), end,
-                                          [sockfd](po_peer& peer) -> bool
-                    {
-                        return peer.get_socket() == sockfd;
-                    });
-                    if (i != end) peers.erase(i);
-                    break;
-                }
-                case shutdown_event:
-                {
-                    // goodbye
-                    return;
-                }
-                default:
-                {
-                    std::ostringstream oss;
-                    oss << "unexpected event type: " << pmsg[0];
-                    throw std::logic_error(oss.str());
-                }
-            }
-        }
-        // recalculate readset
-        FD_ZERO(&readset);
-        FD_SET(pipe_read_handle, &readset);
-        maxfd = pipe_read_handle;
-        for (po_peer& pd : peers)
-        {
-            auto fd = pd.get_socket();
-            maxfd = std::max(maxfd, fd);
-            FD_SET(fd, &readset);
-        }
-        // iterate over key-value (actor id / doormen) pairs
-        for (auto& kvp : doormen)
-        {
-            // iterate over values (doormen)
-            for (auto& dm : kvp.second)
-            {
-                auto fd = dm.get_socket();
-                maxfd = std::max(maxfd, fd);
-                FD_SET(fd, &readset);
-            }
-        }
-    }
+    do_receive
+    (
+        on(atom("ADD_PEER"), arg_match) >> [&](native_socket_type fd,
+                                               process_information_ptr piptr)
+        {
+            DEBUG("add_peer_msg");
+            peers.emplace_back(fd, std::move(piptr));
+            peers.back().start();
+        },
+        on(atom("RM_PEER"), arg_match) >> [&](native_socket_type fd)
+        {
+            DEBUG("rm_peer_msg");
+            auto i = std::find_if(peers.begin(), peers.end(), [fd](po_peer& pp)
+            {
+                return pp.get_socket() == fd;
+            });
+            if (i != peers.end()) peers.erase(i);
+        },
+        on(atom("ADD_PROXY"), arg_match) >> [&](actor_proxy_ptr)
+        {
+            DEBUG("add_proxy_msg");
+        },
+        on(atom("RM_PROXY"), arg_match) >> [&](actor_proxy_ptr pptr)
+        {
+            DEBUG("rm_proxy_msg");
+            CPPA_REQUIRE(pptr.get() != nullptr);
+            get_actor_proxy_cache().erase(pptr);
+        },
+        on(atom("PUBLISH"), arg_match) >> [&](native_socket_type sockfd,
+                                              actor_ptr whom)
+        {
+            DEBUG("unpublish_actor_event");
+            CPPA_REQUIRE(whom.get() != nullptr);
+            auto aid = whom->id();
+            auto callback = [aid](std::uint32_t)
+            {
+                send2po(atom("UNPUBLISH"), aid);
+            };
+            if (whom->attach_functor(std::move(callback)))
+            {
+                auto& ls = doormen[aid];
+                ls.emplace_back(sockfd, whom);
+                ls.back().start();
+                DEBUG("new doorman");
+            }
+            else
+            {
+                closesocket(sockfd);
+            }
+        },
+        on(atom("UNPUBLISH"), arg_match) >> [&](actor_id whom)
+        {
+            DEBUG("unpublish_actor_event");
+            doormen.erase(whom);
+        },
+        on(atom("DONE")) >> [&]()
+        {
+            done = true;
+        },
+        others() >> []()
+        {
+            std::string str = "unexpected message in post_office: ";
+            str += to_string(self->last_dequeued());
+            CPPA_CRITICAL(str.c_str());
+        }
+    )
+    .until(gref(done));
 }
 /******************************************************************************

...

@@ -770,45 +461,25 @@
  ******************************************************************************/

 void post_office_add_peer(native_socket_type a0,
-                          const process_information_ptr& a1,
-                          const actor_proxy_ptr& a2,
-                          std::unique_ptr<attachable>&& a3)
+                          process_information_ptr const& a1)
 {
-    auto nm = singleton_manager::get_network_manager();
-    nm->post_office_queue()._push_back(new post_office_msg(a0, a1, a2,
-                                                           std::move(a3)));
-    CPPA_MEMORY_BARRIER();
-    pipe_msg msg = {rd_queue_event, 0};
-    nm->write_to_pipe(msg);
+    send2po(atom("ADD_PEER"), a0, a1);
 }

 void post_office_publish(native_socket_type server_socket,
-                         const actor_ptr& published_actor)
+                         actor_ptr const& published_actor)
 {
     DEBUG("post_office_publish(" << published_actor->id() << ")");
-    auto nm = singleton_manager::get_network_manager();
-    nm->post_office_queue()._push_back(new post_office_msg(server_socket,
-                                                           published_actor));
-    CPPA_MEMORY_BARRIER();
-    pipe_msg msg = {rd_queue_event, 0};
-    nm->write_to_pipe(msg);
+    send2po(atom("PUBLISH"), server_socket, published_actor);
 }

 void post_office_unpublish(actor_id whom)
 {
     DEBUG("post_office_unpublish(" << whom << ")");
-    auto nm = singleton_manager::get_network_manager();
-    CPPA_MEMORY_BARRIER();
-    pipe_msg msg = {unpublish_actor_event, whom};
-    nm->write_to_pipe(msg);
+    send2po(atom("UNPUBLISH"), whom);
 }

 void post_office_close_socket(native_socket_type sfd)
 {
-    auto nm = singleton_manager::get_network_manager();
-    CPPA_MEMORY_BARRIER();
-    pipe_msg msg = {close_socket_event, static_cast<std::uint32_t>(sfd)};
-    nm->write_to_pipe(msg);
+    send2po(atom("RM_PEER"), sfd);
 }

 } } // namespace cppa::detail
src/singleton_manager.cpp  (view file @ b61316b9)

@@ -181,10 +181,7 @@ network_manager* singleton_manager::get_network_manager()
     {
         scheduler* s = new thread_pool_scheduler;
         // set_scheduler sets s_network_manager
-        if (set_scheduler(s) == false)
-        {
-            //delete s;
-        }
+        set_scheduler(s);
         return get_network_manager();
     }
     return result;

...
src/thread_pool_scheduler.cpp  (view file @ b61316b9)

@@ -263,21 +263,21 @@ actor_ptr thread_pool_scheduler::spawn(scheduled_actor* what)
 actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
                                        scheduling_hint hint)
 {
-    if (hint == detached)
+    if (hint == scheduled)
     {
-        return mock_scheduler::spawn(std::move(what));
+        auto new_actor = new yielding_actor(std::move(what));
+        return spawn_impl(new_actor->attach_to_scheduler(this));
     }
     else
     {
-        auto new_actor = new yielding_actor(std::move(what));
-        return spawn_impl(new_actor->attach_to_scheduler(this));
+        return mock_scheduler::spawn_impl(std::move(what));
     }
 }
 #else
 actor_ptr thread_pool_scheduler::spawn(std::function<void()> what,
-                                       scheduling_hint)
+                                       scheduling_hint hint)
 {
-    return mock_scheduler::spawn(what);
+    return mock_scheduler::spawn(what, hint);
 }
 #endif

...
src/unicast_network.cpp  (view file @ b61316b9)

@@ -53,6 +53,7 @@
 #include "cppa/detail/post_office.hpp"
 #include "cppa/detail/native_socket.hpp"
 #include "cppa/detail/actor_registry.hpp"
+#include "cppa/detail/network_manager.hpp"
 #include "cppa/detail/actor_proxy_cache.hpp"
 #include "cppa/detail/singleton_manager.hpp"

...

@@ -132,10 +133,6 @@ void publish(actor_ptr& whom, std::uint16_t port)
     {
         throw network_error("unable to get socket flags");
     }
-    if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1)
-    {
-        throw network_error("unable to set socket to nonblocking");
-    }
     if (bind(sockfd, (struct sockaddr*) &serv_addr, sizeof(serv_addr)) < 0)
     {
         throw bind_failure(errno);

...

@@ -191,10 +188,11 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
     read_from_socket(sockfd, peer_node_id.data(), peer_node_id.size());
     auto peer_pinf = new process_information(peer_pid, peer_node_id);
     process_information_ptr pinfptr(peer_pinf);
-    actor_proxy_ptr result(new actor_proxy(remote_actor_id, pinfptr));
-    detail::mailman_queue().push_back(new detail::mailman_job(sockfd, pinfptr));
-    detail::post_office_add_peer(sockfd, pinfptr, result,
-                                 std::unique_ptr<attachable>());
+    auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(),
+                               pinfptr->node_id());
+    auto result = detail::get_actor_proxy_cache().get(key);
+    detail::singleton_manager::get_network_manager()
+    ->send_to_mailman(make_any_tuple(sockfd, pinfptr));
+    detail::post_office_add_peer(sockfd, pinfptr);
     //auto ptr = get_scheduler()->register_hidden_context();
     return result;
 }

...