Commit 3d7e4d06 authored Apr 18, 2012 by neverlord

    policy based message handling

parent 08de1c38

Showing 14 changed files with 419 additions and 371 deletions
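The gist of the change: instead of every actor class hand-rolling its own dequeue loop over a mailbox cache, message handling is factored into a reusable nestable_invoke_policy<FilterPolicy> template, and each actor contributes only a small filter_policy functor. A minimal sketch of that policy-based pattern (illustrative names only, not the actual libcppa types):

    #include <iostream>
    #include <string>

    // A "filter policy" decides which messages the generic invoker discards.
    struct drop_empty
    {
        bool operator()(const std::string& msg) const { return msg.empty(); }
    };

    // The generic invoker owns the loop; the policy customizes filtering.
    template<class FilterPolicy>
    struct invoker
    {
        FilterPolicy filter;
        template<class Fun>
        bool invoke(const std::string& msg, Fun fun)
        {
            if (filter(msg)) return false; // dropped by policy
            return fun(msg);               // handled by behavior
        }
    };

    int main()
    {
        invoker<drop_empty> inv;
        auto print = [](const std::string& s) { std::cout << s << '\n'; return true; };
        inv.invoke("", print);      // filtered out
        inv.invoke("hello", print); // printed
    }

The payoff, as the files below show, is that converted_thread_context, yielding_actor, and abstract_event_based_actor all shed their duplicated dq/dequeue_impl machinery.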
cppa.files                                  +1   -0
cppa/abstract_actor.hpp                     +3   -6
cppa/abstract_event_based_actor.hpp         +13  -42
cppa/detail/abstract_scheduled_actor.hpp    +2   -3
cppa/detail/converted_thread_context.hpp    +35  -13
cppa/detail/nestable_invoke_policy.hpp      +170 -0
cppa/detail/scheduled_actor_dummy.hpp       +1   -1
cppa/detail/yielding_actor.hpp              +51  -24
cppa/intrusive/single_reader_queue.hpp      +27  -83
src/abstract_event_based_actor.cpp          +60  -47
src/converted_thread_context.cpp            +20  -74
src/mailman.cpp                             +1   -1
src/scheduler.cpp                           +3   -4
src/yielding_actor.cpp                      +32  -73
cppa.files
@@ -267,3 +267,4 @@ cppa/match_expr.hpp
 cppa/detail/pseudo_tuple.hpp
 cppa/detail/recursive_queue_node.hpp
 cppa/detail/scheduled_actor_dummy.hpp
+cppa/detail/nestable_invoke_policy.hpp
cppa/abstract_actor.hpp
@@ -56,8 +56,7 @@ namespace cppa {
  * @tparam Base Either {@link cppa::actor actor}
  *         or {@link cppa::local_actor local_actor}.
  */
-template<class Base,
-         class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node>>
+template<class Base>
 class abstract_actor : public Base
 {
@@ -66,10 +65,8 @@ class abstract_actor : public Base
 public:

-    typedef MailboxType mailbox_type;
-    typedef typename mailbox_type::value_type mailbox_element;
-    typedef typename mailbox_type::cache_type mailbox_cache_type;
-    typedef typename mailbox_cache_type::value_type mailbox_cache_element;
+    typedef detail::recursive_queue_node mailbox_element;
+    typedef intrusive::single_reader_queue<mailbox_element> mailbox_type;

     bool attach(attachable* ptr) // override
     {
cppa/abstract_event_based_actor.hpp
@@ -44,49 +44,13 @@
 namespace cppa {

-struct vec_append
-{
-    inline std::vector<detail::recursive_queue_node>::iterator
-    operator()(std::vector<detail::recursive_queue_node>& result,
-               detail::recursive_queue_node* e) const
-    {
-        std::vector<std::unique_ptr<detail::recursive_queue_node>> tmp;
-        while (e)
-        {
-            auto next = e->next;
-            tmp.emplace_back(e);
-            e = next;
-        }
-        auto old_size = result.size();
-        for (auto i = tmp.rbegin(); i != tmp.rend(); ++i)
-        {
-            result.emplace_back(std::move(*(*i)));
-        }
-        return result.begin() + old_size;
-    }
-};
-
 /**
  * @brief Base class for all event-based actor implementations.
  */
-class abstract_event_based_actor
-    : public detail::abstract_scheduled_actor<
-                 intrusive::single_reader_queue<detail::recursive_queue_node,
-                                                std::vector<detail::recursive_queue_node>,
-                                                vec_append>>
+class abstract_event_based_actor : public detail::abstract_scheduled_actor
 {
-    typedef detail::abstract_scheduled_actor<
-                intrusive::single_reader_queue<detail::recursive_queue_node,
-                                               std::vector<detail::recursive_queue_node>,
-                                               vec_append>>
-            super;
+    typedef detail::abstract_scheduled_actor super;

 public:
@@ -108,6 +72,17 @@ class abstract_event_based_actor
 protected:

+    std::vector<std::unique_ptr<detail::recursive_queue_node>> m_cache;
+
+    enum handle_message_result
+    {
+        drop_msg,
+        msg_handled,
+        cache_msg
+    };
+
+    auto handle_message(mailbox_element& iter) -> handle_message_result;
+
     abstract_event_based_actor();

     // ownership flag + pointer
@@ -157,10 +132,6 @@ class abstract_event_based_actor
         receive(std::forward<Args>(args)...);
     }

-private:
-
-    bool handle_message(mailbox_element& iter);
-
 };

 } // namespace cppa
cppa/detail/abstract_scheduled_actor.hpp
@@ -47,11 +47,10 @@
 namespace cppa { namespace detail {

 // A spawned, scheduled Actor.
-template<class MailboxType = intrusive::single_reader_queue<detail::recursive_queue_node>>
-class abstract_scheduled_actor : public abstract_actor<scheduled_actor, MailboxType>
+class abstract_scheduled_actor : public abstract_actor<scheduled_actor>
 {
-    typedef abstract_actor<scheduled_actor, MailboxType> super;
+    typedef abstract_actor<scheduled_actor> super;

 protected:
cppa/detail/converted_thread_context.hpp
@@ -49,6 +49,7 @@
 #include "cppa/exit_reason.hpp"
 #include "cppa/abstract_actor.hpp"
 #include "cppa/intrusive/singly_linked_list.hpp"
+#include "cppa/detail/nestable_invoke_policy.hpp"

 namespace cppa { namespace detail {

@@ -60,6 +61,36 @@ class converted_thread_context : public abstract_actor<local_actor>
     typedef abstract_actor<local_actor> super;

+    struct filter_policy;
+    friend struct filter_policy;
+
+    struct filter_policy
+    {
+        converted_thread_context* m_parent;
+
+        inline filter_policy(converted_thread_context* ptr) : m_parent(ptr) { }
+
+        inline bool operator()(any_tuple const& msg)
+        {
+            if (m_parent->m_trap_exit == false && matches(msg, m_parent->m_exit_msg_pattern))
+            {
+                auto reason = msg.get_as<std::uint32_t>(1);
+                if (reason != exit_reason::normal)
+                {
+                    m_parent->quit(reason);
+                }
+                return true;
+            }
+            return false;
+        }
+    };
+
 public:

     converted_thread_context();

@@ -75,7 +106,7 @@ class converted_thread_context : public abstract_actor<local_actor>
     void dequeue(behavior& rules); //override

-    void dequeue(partial_function& rules)  ; //override
+    void dequeue(partial_function& rules); //override

     inline decltype(m_mailbox)& mailbox()
     {
@@ -84,20 +115,11 @@ class converted_thread_context : public abstract_actor<local_actor>
 private:

-    //typedef intrusive::singly_linked_list<queue_node> queue_node_buffer;
-    enum throw_on_exit_result
-    {
-        not_an_exit_signal,
-        normal_exit_signal
-    };
-    // returns true if node->msg was accepted by rules
-    bool dq(mailbox_element& node, partial_function& rules);
-    throw_on_exit_result throw_on_exit(any_tuple const& msg);
+    // a list is safe to use in a nested receive
+    typedef std::unique_ptr<recursive_queue_node> queue_node_ptr;

     pattern<atom_value, std::uint32_t> m_exit_msg_pattern;
+    nestable_invoke_policy<filter_policy> m_invoke;

 };
cppa/detail/nestable_invoke_policy.hpp  0 → 100644
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011, 2012 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation, either version 3 of the License *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#ifndef NESTABLE_INVOKE_POLICY_HPP
#define NESTABLE_INVOKE_POLICY_HPP
#include <list>
#include <memory>
#include "cppa/behavior.hpp"
#include "cppa/local_actor.hpp"
#include "cppa/partial_function.hpp"
#include "cppa/detail/recursive_queue_node.hpp"
namespace cppa { namespace detail {

template<class FilterPolicy>
class nestable_invoke_policy
{

 public:

    typedef std::unique_ptr<recursive_queue_node> queue_node_ptr;

    template<typename... Args>
    nestable_invoke_policy(local_actor* parent, Args&&... args)
        : m_last_dequeued(parent->last_dequeued())
        , m_last_sender(parent->last_sender())
        , m_filter_policy(std::forward<Args>(args)...)
    {
    }

    template<typename... Args>
    bool invoke_from_cache(partial_function& fun, Args... args)
    {
        auto i = m_cache.begin();
        auto e = m_cache.end();
        while (i != e)
        {
            switch (handle_message(*(*i), fun, args...))
            {
                case hm_drop_msg:
                {
                    i = m_cache.erase(i);
                    break;
                }
                case hm_success:
                {
                    m_cache.erase(i);
                    return true;
                }
                case hm_skip_msg:
                case hm_cache_msg:
                {
                    ++i;
                    break;
                }
                default: exit(7); // illegal state
            }
        }
        return false;
    }

    template<typename... Args>
    bool invoke(queue_node_ptr& ptr, partial_function& fun, Args... args)
    {
        switch (handle_message(*ptr, fun, args...))
        {
            case hm_drop_msg:
            {
                break;
            }
            case hm_success:
            {
                return true; // done
            }
            case hm_cache_msg:
            {
                m_cache.push_back(std::move(ptr));
                break;
            }
            case hm_skip_msg:
            default:
            {
                exit(7); // illegal state
            }
        }
        return false;
    }

 private:

    enum handle_message_result
    {
        hm_timeout_msg,
        hm_skip_msg,
        hm_drop_msg,
        hm_cache_msg,
        hm_success
    };

    any_tuple& m_last_dequeued;
    actor_ptr& m_last_sender;
    FilterPolicy m_filter_policy;
    std::list<queue_node_ptr> m_cache;

    template<typename... Args>
    handle_message_result handle_message(recursive_queue_node& node,
                                         partial_function& fun,
                                         Args... args)
    {
        if (node.marked)
        {
            return hm_skip_msg;
        }
        if (m_filter_policy(node.msg, args...))
        {
            return hm_drop_msg;
        }
        std::swap(m_last_dequeued, node.msg);
        std::swap(m_last_sender, node.sender);
        {
            typename recursive_queue_node::guard qguard{&node};
            if (fun(m_last_dequeued))
            {
                // client calls erase(iter)
                qguard.release();
                m_last_dequeued.reset();
                m_last_sender.reset();
                return hm_success;
            }
        }
        // no match (restore members)
        std::swap(m_last_dequeued, node.msg);
        std::swap(m_last_sender, node.sender);
        return hm_cache_msg;
    }

};

} } // namespace cppa::detail
#endif // NESTABLE_INVOKE_POLICY_HPP
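For readers skimming the new policy class: handle_message classifies each mailbox node as skipped, dropped, handled, or to-be-cached, and invoke_from_cache replays cached nodes later. A self-contained toy model of that state machine (hypothetical types, not the real libcppa API):

    #include <iostream>
    #include <list>
    #include <memory>
    #include <string>

    enum result { drop_msg, cache_msg, success };

    struct message { std::string text; };

    // "Behavior": handles messages it matches, leaves the rest for later.
    static bool behavior(const message& m) { return m.text == "ping"; }

    // "Filter policy": discards system messages before the behavior sees them.
    static bool filter(const message& m) { return m.text == ":Exit"; }

    static result handle(const message& m)
    {
        if (filter(m)) return drop_msg;
        return behavior(m) ? success : cache_msg;
    }

    int main()
    {
        std::list<std::unique_ptr<message>> cache;
        for (auto* txt : {":Exit", "pong", "ping"})
        {
            auto msg = std::make_unique<message>(message{txt});
            switch (handle(*msg))
            {
                case drop_msg: break;                                   // discarded
                case cache_msg: cache.push_back(std::move(msg)); break; // retry later
                case success: std::cout << "handled " << msg->text << '\n'; break;
            }
        }
        std::cout << cache.size() << " message(s) cached\n"; // 1 ("pong")
    }

Caching unmatched messages (rather than erasing them from a shared mailbox cache) is what makes the invoke loop safe to nest inside another receive.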
cppa/detail/scheduled_actor_dummy.hpp
@@ -35,7 +35,7 @@
 namespace cppa { namespace detail {

-struct scheduled_actor_dummy : abstract_scheduled_actor<>
+struct scheduled_actor_dummy : abstract_scheduled_actor
 {
     void resume(util::fiber*, scheduler::callback*);
     void quit(std::uint32_t);
cppa/detail/yielding_actor.hpp
@@ -41,11 +41,12 @@
 #include "cppa/pattern.hpp"

 #include "cppa/detail/yield_interface.hpp"
+#include "cppa/detail/nestable_invoke_policy.hpp"
 #include "cppa/detail/abstract_scheduled_actor.hpp"

 namespace cppa { namespace detail {

-class yielding_actor : public abstract_scheduled_actor<>
+class yielding_actor : public abstract_scheduled_actor
 {
     typedef abstract_scheduled_actor super;
@@ -55,10 +56,55 @@ class yielding_actor : public abstract_scheduled_actor<>
     static void run(void* _this);

     void exec_loop_stack();

     void yield_until_not_empty();

+    struct filter_policy;
+    friend struct filter_policy;
+
+    struct filter_policy
+    {
+        yielding_actor* m_parent;
+
+        inline filter_policy(yielding_actor* parent) : m_parent(parent) { }
+
+        inline bool operator()(any_tuple const& msg)
+        {
+            return m_parent->filter_msg(msg) != ordinary_message;
+        }
+
+        inline bool operator()(any_tuple const& msg, behavior* bhvr, bool* timeout_occured)
+        {
+            switch (m_parent->filter_msg(msg))
+            {
+                case normal_exit_signal:
+                {
+                    return m_parent->m_trap_exit == false;
+                }
+                case timeout_message:
+                {
+                    bhvr->handle_timeout();
+                    *timeout_occured = true;
+                    return true;
+                }
+                case expired_timeout_message:
+                {
+                    return true;
+                }
+                case ordinary_message:
+                {
+                    return false;
+                }
+                default: exit(7); // illegal state
+            }
+            return false;
+        }
+    };
+
 public:

     yielding_actor(std::function<void()> fun);
@@ -71,28 +117,9 @@ class yielding_actor : public abstract_scheduled_actor<>
 private:

-    template<typename Fun>
-    void dequeue_impl(Fun rm_fun)
-    {
-        auto& mbox_cache = m_mailbox.cache();
-        auto mbox_end = mbox_cache.end();
-        auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
-        while (iter == mbox_end)
-        {
-            yield_until_not_empty();
-            iter = std::find_if(m_mailbox.try_fetch_more(), mbox_end, rm_fun);
-        }
-        mbox_cache.erase(iter);
-    }
-
-    enum dq_result
-    {
-        dq_done,
-        dq_indeterminate,
-        dq_timeout_occured
-    };
-
     typedef std::unique_ptr<recursive_queue_node> queue_node_ptr;

-    auto dq(mailbox_element& node, partial_function& rules) -> dq_result;
+    nestable_invoke_policy<filter_policy> m_invoke;

 };
cppa/intrusive/single_reader_queue.hpp
@@ -40,41 +40,12 @@
 namespace cppa { namespace intrusive {

-template<typename List>
-struct default_list_append
-{
-    template<typename T>
-    typename List::iterator operator()(List& l, T* e)
-    {
-        CPPA_REQUIRE(e != nullptr);
-        // temporary list to convert LIFO to FIFO order
-        List tmp;
-        // public_tail (e) has LIFO order,
-        // but private_head requires FIFO order
-        while (e)
-        {
-            // next iteration element
-            T* next = e->next;
-            // insert e to private cache (convert to LIFO order)
-            tmp.emplace_front(e);
-            e = next;
-        }
-        CPPA_REQUIRE(tmp.empty() == false);
-        auto result = tmp.begin();
-        l.splice(l.end(), tmp);
-        return result;
-    }
-};
-
 /**
  * @brief An intrusive, thread safe queue implementation.
  * @note For implementation details see
  *       http://libcppa.blogspot.com/2011/04/mailbox-part-1.html
  */
-template<typename T,
-         class CacheType = std::list<std::unique_ptr<T>>,
-         class CacheAppend = default_list_append<std::list<std::unique_ptr<T>>>>
+template<typename T>
 class single_reader_queue
 {
@@ -85,36 +56,30 @@ class single_reader_queue
     typedef T value_type;
     typedef value_type* pointer;

-    typedef CacheType cache_type;
-    typedef typename cache_type::value_type cache_value_type;
-    typedef typename cache_type::iterator cache_iterator;
-
     /**
      * @warning call only from the reader (owner)
      */
-    cache_value_type pop()
+    pointer pop()
     {
         wait_for_data();
-        cache_value_type result;
-        take_head(result);
-        return result;
+        return take_head();
     }

     /**
      * @warning call only from the reader (owner)
      */
-    bool try_pop(cache_value_type& result)
+    pointer try_pop()
    {
-        return take_head(result);
+        return take_head();
     }

     /**
      * @warning call only from the reader (owner)
      */
     template<typename TimePoint>
-    bool try_pop(cache_value_type& result, TimePoint const& abs_time)
+    pointer try_pop(TimePoint const& abs_time)
     {
-        return (timed_wait_for_data(abs_time)) ? take_head(result) : false;
+        return (timed_wait_for_data(abs_time)) ? take_head() : nullptr;
     }

     // returns true if the queue was empty
@@ -156,8 +121,6 @@ class single_reader_queue
         }
     }

-    inline cache_type& cache() { return m_cache; }
-
     inline bool can_fetch_more() const
     {
         return m_stack.load() != nullptr;
@@ -168,7 +131,7 @@ class single_reader_queue
      */
     inline bool empty() const
     {
-        return m_cache.empty() && m_stack.load() == nullptr;
+        return !m_head && m_stack.load() == nullptr;
     }
@@ -179,7 +142,7 @@ class single_reader_queue
         return !empty();
     }

-    single_reader_queue() : m_stack(nullptr)
+    single_reader_queue() : m_stack(nullptr), m_head(nullptr)
     {
     }
@@ -189,37 +152,13 @@ class single_reader_queue
         (void) fetch_new_data();
     }

-    cache_iterator try_fetch_more()
-    {
-        cache_iterator result = m_cache.end();
-        fetch_new_data(&result);
-        return result;
-    }
-
-    template<typename TimePoint>
-    cache_iterator try_fetch_more(TimePoint const& abs_time)
-    {
-        cache_iterator result = m_cache.end();
-        if (timed_wait_for_data(abs_time)) fetch_new_data(&result);
-        return result;
-    }
-
-    cache_iterator fetch_more()
-    {
-        cache_iterator result = m_cache.end();
-        wait_for_data();
-        fetch_new_data(&result);
-        return result;
-    }
-
 private:

     // exposed to "outside" access
     std::atomic<pointer> m_stack;

     // accessed only by the owner
-    cache_type m_cache;
-    CacheAppend m_append;
+    pointer m_head;

     // locked on enqueue/dequeue operations to/from an empty list
     detail::mutex m_mtx;
@@ -228,7 +167,7 @@ class single_reader_queue
     template<typename TimePoint>
     bool timed_wait_for_data(TimePoint const& timeout)
     {
-        if (m_cache.empty() && !(m_stack.load()))
+        if (empty())
         {
             lock_type guard(m_mtx);
             while (!(m_stack.load()))
@@ -244,7 +183,7 @@ class single_reader_queue
     void wait_for_data()
     {
-        if (m_cache.empty() && !(m_stack.load()))
+        if (empty())
         {
             lock_type guard(m_mtx);
             while (!(m_stack.load()))
                 m_cv.wait(guard);
@@ -252,16 +191,21 @@ class single_reader_queue
     }

     // atomically sets m_stack to nullptr and enqueues all elements to the cache
-    bool fetch_new_data(cache_iterator* iter = nullptr)
+    bool fetch_new_data()
     {
+        CPPA_REQUIRE(m_head == nullptr);
         pointer e = m_stack.load();
         while (e)
         {
             if (m_stack.compare_exchange_weak(e, 0))
             {
-                auto i = m_append(m_cache, e);
-                if (iter) *iter = i;
+                while (e)
+                {
+                    auto next = e->next;
+                    e->next = m_head;
+                    m_head = e;
+                    e = next;
+                }
                 return true;
             }
             // next iteration
         }
@@ -269,15 +213,15 @@ class single_reader_queue
         return false;
     }

-    bool take_head(cache_value_type& result)
+    pointer take_head()
     {
-        if (!m_cache.empty() || fetch_new_data())
+        if (m_head != nullptr || fetch_new_data())
         {
-            result = std::move(m_cache.front());
-            m_cache.pop_front();
-            return true;
+            auto result = m_head;
+            m_head = m_head->next;
+            return result;
         }
-        return false;
+        return nullptr;
     }

 };
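With the cache policy gone, single_reader_queue reduces to the classic single-consumer mailbox: writers push onto an atomic LIFO stack, and the owning reader detaches the whole stack at once, reversing it into a private FIFO list (m_head). A self-contained sketch of that technique (simplified; no blocking, plain int payloads instead of the real node type):

    #include <atomic>
    #include <iostream>

    struct node
    {
        int value;
        node* next = nullptr;
    };

    struct single_reader_queue
    {
        std::atomic<node*> m_stack{nullptr};
        node* m_head = nullptr; // touched only by the single reader

        void push(node* n) // any thread
        {
            n->next = m_stack.load();
            while (!m_stack.compare_exchange_weak(n->next, n)) { }
        }

        node* try_pop() // reader only
        {
            if (!m_head) fetch_new_data();
            if (!m_head) return nullptr;
            auto result = m_head;
            m_head = m_head->next;
            return result;
        }

        void fetch_new_data() // detach LIFO stack, reverse into FIFO m_head
        {
            node* e = m_stack.exchange(nullptr);
            while (e)
            {
                auto next = e->next;
                e->next = m_head;
                m_head = e;
                e = next;
            }
        }
    };

    int main()
    {
        single_reader_queue q;
        node a{1}, b{2}, c{3};
        q.push(&a); q.push(&b); q.push(&c);
        while (auto* n = q.try_pop()) std::cout << n->value << ' '; // 1 2 3
        std::cout << '\n';
    }

Because only the reader ever touches m_head, the reversal needs no synchronization; the only contended operation is the compare-exchange on m_stack.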
src/abstract_event_based_actor.cpp
@@ -53,17 +53,15 @@ void abstract_event_based_actor::dequeue(partial_function&)
     quit(exit_reason::unallowed_function_call);
 }

-bool abstract_event_based_actor::handle_message(mailbox_element& node)
+auto abstract_event_based_actor::handle_message(mailbox_element& node) -> handle_message_result
 {
     CPPA_REQUIRE(m_loop_stack.empty() == false);
-    if (node.marked) return false;
     auto& bhvr = *(m_loop_stack.back());
     switch (filter_msg(node.msg))
     {
         case normal_exit_signal:
         case expired_timeout_message:
-            node.marked = true;
-            return false;
+            return drop_msg;
         case timeout_message:
             m_has_pending_timeout_request = false;
@@ -74,7 +72,7 @@ bool abstract_event_based_actor::handle_message(mailbox_element& node)
                 auto& next_bhvr = *(m_loop_stack.back());
                 request_timeout(next_bhvr.timeout());
             }
-            return true;
+            return msg_handled;
         default:
             break;
@@ -87,76 +85,91 @@ bool abstract_event_based_actor::handle_message(mailbox_element& node)
     ++m_active_timeout_id;
     if ((bhvr.get_partial_function())(m_last_dequeued))
     {
-        node.marked = true;
         m_last_dequeued.reset();
         m_last_sender.reset();
         // we definitely don't have a pending timeout now
         m_has_pending_timeout_request = false;
-        return true;
+        return msg_handled;
     }
     // no match, restore members
     --m_active_timeout_id;
     std::swap(m_last_dequeued, node.msg);
     std::swap(m_last_sender, node.sender);
-    return false;
+    return cache_msg;
 }

 void abstract_event_based_actor::resume(util::fiber*, scheduler::callback* cb)
 {
     self.set(this);
-    auto& mbox_cache = m_mailbox.cache();
-    auto pos = mbox_cache.end();
     try
     {
         for (;;)
         {
             if (m_loop_stack.empty())
             {
                 cleanup(exit_reason::normal);
                 m_state.store(abstract_scheduled_actor::done);
                 m_loop_stack.clear();
                 on_exit();
                 cb->exec_done();
                 return;
             }
-            while (pos == mbox_cache.end())
+            std::unique_ptr<detail::recursive_queue_node> e{m_mailbox.try_pop()};
+            if (!e)
             {
-                // try fetch more
-                if (m_mailbox.can_fetch_more() == false)
+                m_state.store(abstract_scheduled_actor::about_to_block);
+                CPPA_MEMORY_BARRIER();
+                if (m_mailbox.can_fetch_more() == false)
                 {
-                    // sweep marked elements
-                    auto new_end = std::remove_if(mbox_cache.begin(), mbox_cache.end(),
-                                                  [](detail::recursive_queue_node const& n)
-                                                  { return n.marked; });
-                    mbox_cache.resize(std::distance(mbox_cache.begin(), new_end));
-                    m_state.store(abstract_scheduled_actor::about_to_block);
-                    CPPA_MEMORY_BARRIER();
-                    if (m_mailbox.can_fetch_more() == false)
+                    switch (compare_exchange_state(abstract_scheduled_actor::about_to_block,
+                                                   abstract_scheduled_actor::blocked))
                     {
-                        switch (compare_exchange_state(abstract_scheduled_actor::about_to_block,
-                                                       abstract_scheduled_actor::blocked))
+                        case abstract_scheduled_actor::ready:
                         {
-                            case abstract_scheduled_actor::ready:
-                            {
-                                // someone preempt us, set position to new end()
-                                pos = mbox_cache.end();
-                                break;
-                            }
-                            case abstract_scheduled_actor::blocked:
-                            {
-                                return;
-                            }
-                            default: exit(7); // illegal state
-                        };
+                            break;
+                        }
+                        case abstract_scheduled_actor::blocked:
+                        {
+                            return;
+                        }
+                        default: exit(7); // illegal state
+                    };
                 }
-                pos = m_mailbox.try_fetch_more();
             }
-            pos = std::find_if(pos, mbox_cache.end(),
-                               [&](mailbox_element& e) { return handle_message(e); });
-            if (pos != mbox_cache.end())
+            else
             {
-                // handled a message, scan mailbox from start again
-                pos = mbox_cache.begin();
+                switch (handle_message(*e))
+                {
+                    case drop_msg:
+                    {
+                        break; // nop
+                    }
+                    case msg_handled:
+                    {
+                        // try to match cached messages
+                        auto i = m_cache.begin();
+                        while (i != m_cache.end() && !m_loop_stack.empty())
+                        {
+                            switch (handle_message(*(*i)))
+                            {
+                                case drop_msg:
+                                {
+                                    i = m_cache.erase(i);
+                                    break;
+                                }
+                                case msg_handled:
+                                {
+                                    m_cache.erase(i);
+                                    i = m_cache.begin();
+                                    break;
+                                }
+                                case cache_msg:
+                                {
+                                    ++i;
+                                    break;
+                                }
+                                default: exit(7); // illegal state
+                            }
+                        }
+                        break;
+                    }
+                    case cache_msg:
+                    {
+                        m_cache.push_back(std::move(e));
+                        break;
+                    }
+                    default: exit(7); // illegal state
+                }
             }
         }
     }
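The rewritten resume() pops one node at a time; when try_pop() comes back empty, the actor announces about_to_block, re-checks the mailbox, and tries to CAS itself into blocked, with a concurrent enqueue flipping the state back to ready. A standalone model of that handshake (hypothetical single-file code, not the libcppa implementation; the real producer side also reschedules a fully blocked actor):

    #include <atomic>
    #include <iostream>

    enum actor_state { ready, about_to_block, blocked, done };

    std::atomic<actor_state> m_state{ready};
    std::atomic<bool> mailbox_nonempty{false};

    // Called by the actor when try_pop() returned nothing.
    bool try_block()
    {
        m_state.store(about_to_block);
        if (!mailbox_nonempty.load()) // re-check after publishing intent
        {
            auto expected = about_to_block;
            if (m_state.compare_exchange_strong(expected, blocked))
                return true;          // really blocked; scheduler may park us
            // CAS failed: a producer flipped us back to 'ready'
        }
        return false;                 // keep running, new mail arrived
    }

    // Called by a producer after enqueueing a message.
    void on_enqueue()
    {
        mailbox_nonempty.store(true);
        auto expected = about_to_block;
        m_state.compare_exchange_strong(expected, ready); // wake a blocking actor
    }

    int main()
    {
        on_enqueue();                     // mail arrives first
        std::cout << try_block() << '\n'; // 0: must not block
        mailbox_nonempty.store(false);
        std::cout << try_block() << '\n'; // 1: safe to block
    }

The store/re-check/CAS ordering is what closes the race between "mailbox looked empty" and "a message arrived just now": whichever side loses the compare-exchange defers to the other.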
src/converted_thread_context.cpp
@@ -29,6 +29,7 @@
 #include <memory>
 #include <iostream>
 #include <algorithm>

 #include "cppa/self.hpp"
@@ -40,7 +41,7 @@
 namespace cppa { namespace detail {

 converted_thread_context::converted_thread_context()
-    : m_exit_msg_pattern(atom(":Exit"))
+    : m_exit_msg_pattern(atom(":Exit")), m_invoke(this, this)
 {
 }
@@ -68,93 +69,38 @@ void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
     m_mailbox.push_back(fetch_node(sender, msg));
 }

-void converted_thread_context::dequeue(partial_function& rules) /*override*/
+void converted_thread_context::dequeue(partial_function& fun) // override
 {
-    auto rm_fun = [&](mailbox_cache_element& node) { return dq(*node, rules); };
-    auto& mbox_cache = m_mailbox.cache();
-    auto mbox_end = mbox_cache.end();
-    auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
-    while (iter == mbox_end)
+    if (m_invoke.invoke_from_cache(fun) == false)
     {
-        iter = std::find_if(m_mailbox.fetch_more(), mbox_end, rm_fun);
+        queue_node_ptr e{m_mailbox.pop()};
+        while (m_invoke.invoke(e, fun) == false)
+        {
+            e.reset(m_mailbox.pop());
+        }
     }
-    mbox_cache.erase(iter);
 }

-void converted_thread_context::dequeue(behavior& rules) /*override*/
+void converted_thread_context::dequeue(behavior& bhvr) // override
 {
-    if (rules.timeout().valid())
+    auto& fun = bhvr.get_partial_function();
+    if (bhvr.timeout().valid() == false)
     {
-        auto timeout = now();
-        timeout += rules.timeout();
-        auto rm_fun = [&](mailbox_cache_element& node)
-        {
-            return dq(*node, rules.get_partial_function());
-        };
-        auto& mbox_cache = m_mailbox.cache();
-        auto mbox_end = mbox_cache.end();
-        auto iter = std::find_if(mbox_cache.begin(), mbox_end, rm_fun);
-        while (iter == mbox_end)
-        {
-            auto next = m_mailbox.try_fetch_more(timeout);
-            if (next == mbox_end)
-            {
-                rules.handle_timeout();
-                return;
-            }
-            iter = std::find_if(next, mbox_end, rm_fun);
-        }
-        mbox_cache.erase(iter);
+        dequeue(fun);
+        return;
     }
-    else
+    if (m_invoke.invoke_from_cache(fun) == false)
     {
-        converted_thread_context::dequeue(rules.get_partial_function());
+        auto timeout = now();
+        timeout += bhvr.timeout();
+        queue_node_ptr e{m_mailbox.try_pop(timeout)};
+        while (e)
+        {
+            if (m_invoke.invoke(e, fun)) return;
+            else e.reset(m_mailbox.try_pop(timeout));
+        }
+        bhvr.handle_timeout();
     }
 }

-converted_thread_context::throw_on_exit_result
-converted_thread_context::throw_on_exit(any_tuple const& msg)
-{
-    if (matches(msg, m_exit_msg_pattern))
-    {
-        auto reason = msg.get_as<std::uint32_t>(1);
-        if (reason != exit_reason::normal)
-        {
-            // throws
-            quit(reason);
-        }
-        else
-        {
-            return normal_exit_signal;
-        }
-    }
-    return not_an_exit_signal;
-}
-
-bool converted_thread_context::dq(mailbox_element& node, partial_function& rules)
-{
-    if (m_trap_exit == false && throw_on_exit(node.msg) == normal_exit_signal)
-    {
-        return false;
-    }
-    std::swap(m_last_dequeued, node.msg);
-    std::swap(m_last_sender, node.sender);
-    {
-        mailbox_element::guard qguard{&node};
-        if (rules(m_last_dequeued))
-        {
-            // client calls erase(iter)
-            qguard.release();
-            m_last_dequeued.reset();
-            m_last_sender.reset();
-            return true;
-        }
-    }
-    // no match (restore members)
-    std::swap(m_last_dequeued, node.msg);
-    std::swap(m_last_sender, node.sender);
-    return false;
-}
-
 } } // namespace cppa::detail
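Worth noting in the rewritten dequeue(behavior&): the absolute deadline is computed once via now() + bhvr.timeout(), so every retry of try_pop(timeout) shares the same deadline instead of restarting the timer per message. A standalone model of that loop (assumed standard-library types; not the libcppa queue itself):

    #include <chrono>
    #include <condition_variable>
    #include <deque>
    #include <iostream>
    #include <mutex>
    #include <optional>

    std::mutex mtx;
    std::condition_variable cv;
    std::deque<int> mailbox;

    template<typename TimePoint>
    std::optional<int> try_pop(TimePoint const& abs_time)
    {
        std::unique_lock<std::mutex> guard{mtx};
        if (!cv.wait_until(guard, abs_time, [] { return !mailbox.empty(); }))
            return std::nullopt; // shared deadline hit
        int msg = mailbox.front();
        mailbox.pop_front();
        return msg;
    }

    bool matches(int msg) { return msg == 42; }

    int main()
    {
        auto timeout = std::chrono::steady_clock::now() + std::chrono::milliseconds(50);
        for (auto e = try_pop(timeout); e; e = try_pop(timeout))
        {
            if (matches(*e)) { std::cout << "handled\n"; return 0; }
            // non-matching messages would go to the policy's cache here
        }
        std::cout << "timeout\n"; // reached once the deadline expires
    }

Handing try_pop an absolute time point rather than a duration is what keeps a stream of non-matching messages from postponing the behavior's timeout indefinitely.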
src/mailman.cpp
@@ -103,7 +103,7 @@ void mailman_loop()
     std::map<process_information, native_socket_type> peers;
     for (;;)
     {
-        job = mqueue.pop();
+        job.reset(mqueue.pop());
         if (job->is_send_job())
         {
             mailman_send_job& sjob = job->send_job();
src/scheduler.cpp
@@ -100,7 +100,7 @@ struct scheduler_helper
 void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
 {
     typedef abstract_actor<local_actor> impl_type;
-    typedef impl_type::mailbox_type::cache_value_type queue_node_ptr;
+    typedef std::unique_ptr<detail::recursive_queue_node> queue_node_ptr;
     // setup & local variables
     self.set(m_self.get());
     auto& queue = m_self->mailbox();
@@ -142,7 +142,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
     {
         if (messages.empty())
         {
-            msg_ptr = queue.pop();
+            msg_ptr.reset(queue.pop());
         }
         else
         {
@@ -167,8 +167,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
             // wait for next message or next timeout
             if (it != messages.end())
             {
-                msg_ptr.reset();
-                queue.try_pop(msg_ptr, it->first);
+                msg_ptr.reset(queue.try_pop(it->first));
             }
         }
     }
src/yielding_actor.cpp
@@ -42,6 +42,7 @@ namespace cppa { namespace detail {
 yielding_actor::yielding_actor(std::function<void()> fun)
     : m_fiber(&yielding_actor::run, this)
     , m_behavior(fun)
+    , m_invoke(this, this)
 {
 }
@@ -88,37 +89,46 @@ void yielding_actor::yield_until_not_empty()

 void yielding_actor::dequeue(partial_function& fun)
 {
-    auto rm_fun = [&](mailbox_cache_element& node)
-    {
-        return dq(*node, fun) == dq_done;
-    };
-    dequeue_impl(rm_fun);
+    if (m_invoke.invoke_from_cache(fun) == false)
+    {
+        for (;;)
+        {
+            queue_node_ptr e{m_mailbox.try_pop()};
+            while (!e)
+            {
+                yield_until_not_empty();
+                e.reset(m_mailbox.try_pop());
+            }
+            if (m_invoke.invoke(e, fun)) return;
+        }
+    }
 }

 void yielding_actor::dequeue(behavior& bhvr)
 {
-    if (bhvr.timeout().valid())
+    auto& fun = bhvr.get_partial_function();
+    if (bhvr.timeout().valid() == false)
     {
-        request_timeout(bhvr.timeout());
-        auto rm_fun = [&](mailbox_cache_element& node) -> bool
-        {
-            switch (dq(*node, bhvr.get_partial_function()))
-            {
-                case dq_timeout_occured:
-                    bhvr.handle_timeout();
-                    return true;
-                case dq_done:
-                    return true;
-                default:
-                    return false;
-            }
-        };
-        dequeue_impl(rm_fun);
+        dequeue(fun);
+        return;
     }
-    else
+    if (m_invoke.invoke_from_cache(fun) == false)
     {
-        // suppress virtual function call
-        yielding_actor::dequeue(bhvr.get_partial_function());
+        request_timeout(bhvr.timeout());
+        bool timeout_occured = false;
+        for (;;)
+        {
+            queue_node_ptr e{m_mailbox.try_pop()};
+            while (!e)
+            {
+                yield_until_not_empty();
+                e.reset(m_mailbox.try_pop());
+            }
+            if (m_invoke.invoke(e, fun, &bhvr, &timeout_occured) || timeout_occured)
+            {
+                return;
+            }
+        }
     }
 }
@@ -169,57 +179,6 @@ void yielding_actor::resume(util::fiber* from, scheduler::callback* callback)
     }
 }

-auto yielding_actor::dq(mailbox_element& node, partial_function& fun) -> dq_result
-{
-    CPPA_REQUIRE(node.msg.cvals().get() != nullptr);
-    if (node.marked) return dq_indeterminate;
-    switch (filter_msg(node.msg))
-    {
-        case normal_exit_signal:
-        case expired_timeout_message:
-        {
-            // skip message
-            return dq_indeterminate;
-        }
-        case timeout_message:
-        {
-            // m_active_timeout_id is already invalid
-            m_has_pending_timeout_request = false;
-            return dq_timeout_occured;
-        }
-        default: break;
-    }
-    std::swap(m_last_dequeued, node.msg);
-    std::swap(m_last_sender, node.sender);
-    //m_last_dequeued = node.msg;
-    //m_last_sender = node.sender;
-    // make sure no timeout is handled incorrectly in a nested receive
-    ++m_active_timeout_id;
-    // lifetime scope of qguard
-    {
-        // make sure nested receives do not process this node again
-        mailbox_element::guard qguard{&node};
-        // try to invoke given function
-        if (fun(m_last_dequeued))
-        {
-            // client erases node later (keep it marked until it's removed)
-            qguard.release();
-            // this members are only valid during invocation
-            m_last_dequeued.reset();
-            m_last_sender.reset();
-            // we definitely don't have a pending timeout now
-            m_has_pending_timeout_request = false;
-            return dq_done;
-        }
-    }
-    // no match, restore members
-    --m_active_timeout_id;
-    std::swap(m_last_dequeued, node.msg);
-    std::swap(m_last_sender, node.sender);
-    return dq_indeterminate;
-}
-
 } } // namespace cppa::detail

 #else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING