cpp-libs / Actor Framework / Commits

Commit 26d23666
Authored Apr 14, 2023 by Dominik Charousset
Parent: 6742ddcd

Fix demand signaling in the merge operator

Showing 4 changed files with 229 additions and 151 deletions (+229 / -151)
libcaf_core/caf/defaults.hpp         +10   -0
libcaf_core/caf/flow/op/merge.hpp    +188  -148
libcaf_core/test/flow/mixed.cpp      +27   -0
libcaf_core/test/flow/op/merge.cpp   +4    -3
libcaf_core/caf/defaults.hpp

@@ -141,10 +141,20 @@ constexpr auto network_backend = std::string_view{"default"};

 namespace caf::defaults::flow {

+/// Defines how much demand should accumulate before signaling demand upstream.
+/// A minimum demand is used by operators such as `observe_on` to avoid overly
+/// frequent signaling across asynchronous barriers.
+constexpr auto min_demand = size_t{8};
+
 /// Defines how many items a single batch may contain.
 constexpr auto batch_size = size_t{32};

 /// Limits how many items an operator buffers internally.
 constexpr auto buffer_size = size_t{128};

+/// Limits the number of concurrent subscriptions for operators such as `merge`.
+constexpr auto max_concurrent = size_t{8};
+
 } // namespace caf::defaults::flow

 namespace caf::defaults::net {
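As a quick illustration of the new tuning constants, here is a minimal sketch of how an operator might combine `min_demand` and `buffer_size` when deciding how much demand to signal upstream. The helper `next_request` is hypothetical and not part of this commit; only the constants themselves come from the diff above.

    #include "caf/defaults.hpp"

    #include <cstddef>

    // Hypothetical helper (illustration only): signal demand upstream only once
    // at least min_demand items can be requested, and never let the total of
    // in-flight items exceed buffer_size.
    std::size_t next_request(std::size_t in_flight) {
      namespace df = caf::defaults::flow;
      if (in_flight + df::min_demand > df::buffer_size)
        return 0;
      return df::buffer_size - in_flight;
    }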
libcaf_core/caf/flow/op/merge.hpp

@@ -19,62 +19,111 @@

 namespace caf::flow::op {

-/// @relates merge
-template <class T>
-struct merge_input {
-  /// The subscription to this input.
-  subscription sub;
-
-  /// Stores received items until the merge can forward them downstream.
-  std::deque<T> buf;
-};
-
+/// Receives observables from the pre-merge step and merges their inputs for the
+/// observer.
 template <class T>
-class merge_sub : public subscription::impl_base {
+class merge_sub : public subscription::impl_base,
+                  public observer_impl<observable<T>> {
 public:
   // -- member types -----------------------------------------------------------

-  using input_t = merge_input<T>;
-
   using input_key = size_t;

-  using input_ptr = std::unique_ptr<input_t>;
-
-  using input_map = unordered_flat_map<input_key, input_ptr>;
+  using input_map = unordered_flat_map<input_key, subscription>;
+
+  struct item_t {
+    T value;
+    input_key source;
+  };
+
+  using item_queue = std::deque<item_t>;
+
+  // -- constants --------------------------------------------------------------
+
+  /// Limits how many items the merge operator pulls in per input. This is
+  /// deliberately small to make sure that we get reasonably small "batches" of
+  /// items per input to make sure all inputs get their turn.
+  static constexpr size_t default_max_pending_per_input = 8;

   // -- constructors, destructors, and assignment operators --------------------

-  merge_sub(coordinator* ctx, observer<T> out)
-    : ctx_(ctx), out_(std::move(out)) {
+  merge_sub(coordinator* ctx, observer<T> out, size_t max_concurrent,
+            size_t max_pending_per_input = default_max_pending_per_input)
+    : ctx_(ctx),
+      out_(std::move(out)),
+      max_concurrent_(max_concurrent),
+      max_pending_per_input_(max_pending_per_input) {
     // nop
   }

-  // -- input management -------------------------------------------------------
+  // -- implementation of observer_impl ----------------------------------------

-  void subscribe_to(observable<T> what) {
+  void ref_coordinated() const noexcept override {
+    ref();
+  }
+
+  void deref_coordinated() const noexcept override {
+    deref();
+  }
+
+  void on_next(const observable<T>& what) override {
+    CAF_ASSERT(what);
     auto key = next_key_++;
-    inputs_.container().emplace_back(key, std::make_unique<input_t>());
+    inputs_.emplace(key, subscription{});
     using fwd_impl = forwarder<T, merge_sub, size_t>;
     auto fwd = make_counted<fwd_impl>(this, key);
-    what.subscribe(fwd->as_observer());
+    what.pimpl()->subscribe(fwd->as_observer());
   }

-  void subscribe_to(observable<observable<T>> what) {
-    auto key = next_key_++;
-    auto& vec = inputs_.container();
-    vec.emplace_back(key, std::make_unique<input_t>());
-    using fwd_impl = forwarder<observable<T>, merge_sub, size_t>;
-    auto fwd = make_counted<fwd_impl>(this, key);
-    what.subscribe(fwd->as_observer());
+  void on_error(const error& what) override {
+    sub_ = nullptr;
+    if (out_) {
+      if (inputs_.empty() && queue_.empty()) {
+        auto out = std::move(out_);
+        out.on_error(what);
+        return;
+      }
+      err_ = what;
+    }
   }

+  void on_complete() override {
+    sub_ = nullptr;
+    if (out_ && inputs_.empty() && queue_.empty()) {
+      auto out = std::move(out_);
+      out.on_complete();
+    }
+  }
+
+  void on_subscribe(flow::subscription sub) override {
+    if (!sub_ && out_) {
+      sub_ = std::move(sub);
+      if (max_concurrent_ > inputs_.size()) {
+        // Note: the factory might call on_next a couple of times before
+        // subscribing this object to the pre-merge.
+        auto new_demand = max_concurrent_ - inputs_.size();
+        sub_.request(new_demand);
+      }
+    } else {
+      sub.dispose();
+    }
+  }
+
   friend void intrusive_ptr_add_ref(const merge_sub* ptr) noexcept {
     ptr->ref();
   }

   friend void intrusive_ptr_release(const merge_sub* ptr) noexcept {
     ptr->deref();
   }

   // -- callbacks for the forwarders -------------------------------------------

   void fwd_on_subscribe(input_key key, subscription sub) {
     CAF_LOG_TRACE(CAF_ARG(key));
-    if (auto ptr = get(key); ptr && !ptr->sub && out_) {
-      sub.request(max_pending_);
-      ptr->sub = std::move(sub);
+    if (auto ptr = get(key); ptr && !*ptr) {
+      *ptr = std::move(sub);
+      ptr->request(max_pending_per_input_);
     } else {
       sub.dispose();
     }
@@ -82,62 +131,46 @@ public:

   void fwd_on_complete(input_key key) {
     CAF_LOG_TRACE(CAF_ARG(key));
-    if (auto i = inputs_.find(key); i != inputs_.end()) {
-      if (i->second->buf.empty()) {
-        inputs_.erase(i);
-        run_later();
-      } else {
-        i->second->sub = nullptr;
-      }
-    }
+    if (inputs_.erase(key) == 0)
+      return;
+    if (sub_) {
+      if (inputs_.size() < max_concurrent_)
+        sub_.request(1);
+      return;
+    }
+    if (inputs_.empty() && queue_.empty()) {
+      auto out = std::move(out_);
+      out.on_complete();
+    }
   }

   void fwd_on_error(input_key key, const error& what) {
     CAF_LOG_TRACE(CAF_ARG(key) << CAF_ARG(what));
-    if (!err_) {
-      err_ = what;
-      if (!flags_.delay_error) {
-        auto i = inputs_.begin();
-        while (i != inputs_.end()) {
-          auto& input = *i->second;
-          if (auto& sub = input.sub) {
-            auto tmp = std::move(input.sub);
-            tmp.dispose();
-          }
-          if (input.buf.empty())
-            i = inputs_.erase(i);
-          else
-            ++i;
-        }
-      }
-    }
-    fwd_on_complete(key);
+    if (inputs_.erase(key) == 0)
+      return;
+    err_ = what;
+    drop_inputs();
+    if (queue_.empty()) {
+      auto out = std::move(out_);
+      out.on_error(what);
+    }
   }

   void fwd_on_next(input_key key, const T& item) {
     CAF_LOG_TRACE(CAF_ARG(key) << CAF_ARG(item));
     if (auto ptr = get(key)) {
-      if (!flags_.running && demand_ > 0) {
+      if (!running_ && demand_ > 0) {
         CAF_ASSERT(out_.valid());
         --demand_;
+        if (*ptr)
+          ptr->request(1);
         out_.on_next(item);
-        ptr->sub.request(1);
       } else {
-        ptr->buf.push_back(item);
+        queue_.push_back(item_t{item, key});
       }
     }
   }

-  void fwd_on_next(input_key key, const observable<T>& item) {
-    CAF_LOG_TRACE(CAF_ARG(key) << CAF_ARG(item));
-    if (auto ptr = get(key))
-      subscribe_to(item);
-    // Note: we need to double-check that the key still exists here, because
-    // subscribe_on may result in an error (that nukes all inputs).
-    if (auto ptr = get(key))
-      ptr->sub.request(1);
-  }
-
   // -- implementation of subscription_impl ------------------------------------

   bool disposed() const noexcept override {
@@ -146,113 +179,89 @@ public:

   void dispose() override {
     if (out_) {
-      for (auto& kvp : inputs_)
-        if (auto& sub = kvp.second->sub)
-          sub.dispose();
-      inputs_.clear();
-      run_later();
+      drop_inputs();
+      queue_.clear();
+      ctx_->delay_fn([out = std::move(out_)]() mutable { out.on_complete(); });
+      if (sub_)
+        sub_.dispose();
     }
   }

   void request(size_t n) override {
-    CAF_ASSERT(out_.valid());
-    demand_ += n;
-    if (demand_ == n)
-      run_later();
+    if (out_) {
+      demand_ += n;
+      if (demand_ == n)
+        run_later();
+    }
   }

   size_t buffered() const noexcept {
-    return std::accumulate(inputs_.begin(), inputs_.end(), size_t{0},
-                           [](size_t n, auto& kvp) {
-                             return n + kvp.second->buf.size();
-                           });
+    return queue_.size();
   }

 private:
+  void drop_inputs() {
+    input_map inputs;
+    inputs.swap(inputs_);
+    for (auto& [key, sub] : inputs)
+      sub.dispose();
+  }
+
   void run_later() {
-    if (!flags_.running) {
-      flags_.running = true;
+    if (!running_) {
+      running_ = true;
       ctx_->delay_fn([strong_this = intrusive_ptr<merge_sub>{this}] {
         strong_this->do_run();
       });
     }
   }

-  auto next_input() {
-    CAF_ASSERT(!inputs_.empty());
-    auto has_items_at = [this](size_t pos) {
-      auto& vec = inputs_.container();
-      return !vec[pos].second->buf.empty();
-    };
-    auto start = pos_ % inputs_.size();
-    pos_ = (pos_ + 1) % inputs_.size();
-    if (has_items_at(start))
-      return inputs_.begin() + start;
-    while (pos_ != start) {
-      auto p = pos_;
-      pos_ = (pos_ + 1) % inputs_.size();
-      if (has_items_at(p))
-        return inputs_.begin() + p;
-    }
-    return inputs_.end();
+  bool done() const noexcept {
+    return !sub_ && inputs_.empty() && queue_.empty();
   }

   void do_run() {
-    while (out_ && demand_ > 0 && !inputs_.empty()) {
-      if (auto i = next_input(); i != inputs_.end()) {
-        --demand_;
-        auto& buf = i->second->buf;
-        auto tmp = std::move(buf.front());
-        buf.pop_front();
-        if (auto& sub = i->second->sub) {
-          sub.request(1);
-        } else if (buf.empty()) {
-          inputs_.erase(i);
-        }
-        out_.on_next(tmp);
-      } else {
-        break;
-      }
+    while (out_ && demand_ > 0 && !queue_.empty()) {
+      // Fetch the next item.
+      auto [item, key] = std::move(queue_.front());
+      queue_.pop_front();
+      --demand_;
+      // Request a new item from the input if we still have a subscription.
+      if (auto ptr = get(key); ptr && *ptr)
+        ptr->request(1);
+      // Call the observer. This might nuke out_ by calling dispose().
+      out_.on_next(item);
     }
-    if (out_ && inputs_.empty()) {
-      auto tmp = std::move(out_);
+    running_ = false;
+    // Check if we can call it a day.
+    if (out_ && done()) {
+      auto out = std::move(out_);
       if (!err_)
-        tmp.on_complete();
+        out.on_complete();
       else
-        tmp.on_error(err_);
+        out.on_error(err_);
     }
-    flags_.running = false;
   }

   /// Selects an input object by key or returns null.
-  input_t* get(input_key key) {
+  subscription* get(input_key key) {
     if (auto i = inputs_.find(key); i != inputs_.end())
-      return i->second.get();
+      return std::addressof(i->second);
     else
       return nullptr;
   }

-  /// Groups various Boolean flags.
-  struct flags_t {
-    /// Configures whether an error immediately aborts the merging or not.
-    bool delay_error : 1;
-
-    /// Stores whether the merge is currently executing do_run.
-    bool running : 1;
-
-    flags_t() : delay_error(false), running(false) {
-      // nop
-    }
-  };
-
   /// Stores the context (coordinator) that runs this flow.
   coordinator* ctx_;

   /// Stores the first error that occurred on any input.
   error err_;

-  /// Fine-tunes the behavior of the merge.
-  flags_t flags_;
+  /// Subscription to the pre-merger that produces the input observables.
+  subscription sub_;
+
+  /// Stores whether the merge is currently executing do_run.
+  bool running_ = false;

   /// Stores our current demand for items from the subscriber.
   size_t demand_ = 0;
@@ -266,11 +275,17 @@ private:

   /// Associates inputs with ascending keys.
   input_map inputs_;

+  /// Caches items that arrived without having downstream demand.
+  item_queue queue_;
+
   /// Stores the key for the next input.
-  size_t next_key_ = 0;
+  size_t next_key_ = 1;

-  /// Configures how many items we buffer per input.
-  size_t max_pending_ = defaults::flow::buffer_size;
+  size_t max_concurrent_;
+
+  /// Configures how many items we have pending per input at most.
+  size_t max_pending_per_input_;
 };

 template <class T>
@@ -280,8 +295,6 @@ public:

   using super = cold<T>;

-  using input_type = std::variant<observable<T>, observable<observable<T>>>;
-
   // -- constructors, destructors, and assignment operators --------------------

   template <class... Ts, class... Inputs>
@@ -292,25 +305,50 @@ public:

   // -- properties -------------------------------------------------------------

   size_t inputs() const noexcept {
-    return inputs_.size();
+    return plain_inputs_.size() + wrapped_inputs_.size();
   }

   // -- implementation of observable_impl<T> -----------------------------------

   disposable subscribe(observer<T> out) override {
+    using sub_t = merge_sub<T>;
+    using pre_sub_t = merge_sub<observable<T>>;
     // Trivial case: nothing to do.
     if (inputs() == 0) {
       auto ptr = make_counted<empty<T>>(super::ctx_);
       return ptr->subscribe(std::move(out));
-    } else {
-      auto sub = make_counted<merge_sub<T>>(super::ctx_, out);
-      for (auto& input : inputs_)
-        std::visit([&sub](auto& in) { sub->subscribe_to(in); }, input);
-      out.on_subscribe(subscription{sub});
-      return sub->as_disposable();
     }
+    // Simple case: all observables for the merge are available right away.
+    if (wrapped_inputs_.empty()) {
+      auto sub = make_counted<merge_sub<T>>(super::ctx_, out, max_concurrent_);
+      for (auto& input : plain_inputs_)
+        sub->on_next(input);
+      out.on_subscribe(subscription{sub});
+      return sub->as_disposable();
+    }
+    // Complex case: we need a "pre-merge" step to get the observables for the
+    // actual merge operation.
+    auto sub = make_counted<sub_t>(super::ctx_, out, max_concurrent_);
+    for (auto& input : plain_inputs_)
+      sub->on_next(input);
+    auto pre_sub = make_counted<pre_sub_t>(super::ctx(), sub->as_observer(),
+                                           max_concurrent_, 1);
+    for (auto& input : wrapped_inputs_)
+      pre_sub->on_next(input);
+    sub->on_subscribe(subscription{pre_sub});
+    out.on_subscribe(subscription{sub});
+    return sub->as_disposable();
   }

 private:
+  void do_add(observable<observable<T>> in) {
+    wrapped_inputs_.emplace_back(std::move(in));
+  }
+
+  void do_add(observable<T> in) {
+    plain_inputs_.emplace_back(std::move(in));
+  }
+
   template <class Input>
   void add(Input&& x) {
     using input_t = std::decay_t<Input>;

@@ -319,11 +357,13 @@ private:

       add(in);
     } else {
       static_assert(is_observable_v<input_t>);
-      inputs_.emplace_back(std::forward<Input>(x).as_observable());
+      do_add(std::forward<Input>(x).as_observable());
     }
   }

-  std::vector<input_type> inputs_;
+  std::vector<observable<T>> plain_inputs_;
+
+  std::vector<observable<observable<T>>> wrapped_inputs_;
+
+  size_t max_concurrent_ = defaults::flow::max_concurrent;
 };

 } // namespace caf::flow::op
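For context, here is a usage sketch of the operator this change affects: inside a flow-enabled actor, a merge now pulls from at most `defaults::flow::max_concurrent` inputs at a time and keeps at most `max_pending_per_input` items outstanding per input. The exact chaining below (`as_observable`, `merge`) is an assumption for illustration and is not taken from this commit; `iota`, `take`, and `for_each` appear in the test code further down.

    #include "caf/event_based_actor.hpp"
    #include "caf/scheduled_actor/flow.hpp"

    // Usage sketch (assumption, not part of this commit): merge two observables
    // inside an event-based actor; the merge operator bounds its upstream demand.
    void example(caf::event_based_actor* self) {
      auto lhs = self->make_observable().iota(1).take(3).as_observable();
      auto rhs = self->make_observable().iota(10).take(3).as_observable();
      lhs.merge(rhs).for_each([](int x) {
        // Consume merged values in whatever order they arrive.
      });
    }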
libcaf_core/test/flow/mixed.cpp

@@ -12,6 +12,7 @@

 #include "caf/flow/observable.hpp"
 #include "caf/flow/observable_builder.hpp"
 #include "caf/flow/scoped_coordinator.hpp"
+#include "caf/scheduled_actor/flow.hpp"

 using namespace caf;

@@ -68,4 +69,30 @@ SCENARIO("sum up all the multiples of 3 or 5 below 1000") {
   }
 }

+TEST_CASE("GH-1399 regression") {
+  // Original issue: flat_map does not limit the demand it signals upstream.
+  // When running flat_map on an unbound sequence like iota-observable, it
+  // produces an infinite amount of observables without ever giving downstream
+  // operators the opportunity to cut off the flow items.
+  auto worker_fn = []() -> behavior {
+    return {
+      [](int x) { return -x; },
+    };
+  };
+  auto worker = sys.spawn(worker_fn);
+  auto results = std::make_shared<std::vector<int>>();
+  auto run_fn = [worker, results](caf::event_based_actor* self) {
+    self->make_observable()
+      .iota(1)
+      .flat_map([self, worker](int x) {
+        return self->request(worker, infinite, x).as_observable<int32_t>();
+      })
+      .take(10)
+      .for_each([results](int value) { results->push_back(value); });
+  };
+  sys.spawn(run_fn);
+  run();
+  CHECK_EQ(*results, ls(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10));
+}
+
 END_FIXTURE_SCOPE()
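The regression test above exercises the bounded-demand behavior that the reworked merge_sub implements. As a toy model of the bookkeeping the fix introduces (an illustration only, using constants that mirror the diff; none of these names exist in the library):

    #include <cstddef>

    // Toy model (assumption, for illustration only): a merge that runs at most
    // max_concurrent inputs and keeps at most max_pending_per_input outstanding
    // requests per input, instead of signaling unbounded demand upstream.
    struct demand_model {
      std::size_t max_concurrent = 8;        // mirrors defaults::flow::max_concurrent
      std::size_t max_pending_per_input = 8; // mirrors default_max_pending_per_input
      std::size_t active_inputs = 0;

      // How many new inner observables to request from the pre-merge step.
      std::size_t observables_to_request() const {
        return active_inputs < max_concurrent ? max_concurrent - active_inputs : 0;
      }

      // How many items to request when a new input becomes ready.
      std::size_t items_to_request() const {
        return max_pending_per_input;
      }
    };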
libcaf_core/test/flow/op/merge.cpp

@@ -43,8 +43,9 @@ struct fixture : test_coordinator_fixture<> {

   template <class T, class... Ts>
   auto raw_sub(flow::observer<T> out, Ts&&... xs) {
     using flow::observable;
-    auto ptr = make_counted<flow::op::merge_sub<T>>(ctx.get(), out);
-    (ptr->subscribe_to(xs), ...);
+    auto ptr = make_counted<flow::op::merge_sub<T>>(ctx.get(), out,
+                                                    sizeof...(Ts));
+    (ptr->on_next(xs), ...);
     out.on_subscribe(flow::subscription{ptr});
     return ptr;
   }

@@ -54,7 +55,7 @@ struct fixture : test_coordinator_fixture<> {

 BEGIN_FIXTURE_SCOPE(fixture)

-SCENARIO("the merge operator combine inputs") {
+SCENARIO("the merge operator combines inputs") {
   GIVEN("two observables") {
     WHEN("merging them to a single observable") {
       THEN("the observer receives the output of both sources") {