Commit a63af3ea authored Aug 21, 2022 by Dominik Charousset
Re-implement work-stealing queue without spinlocks
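In essence: the old double_ended_queue guarded an intrusive linked list with two std::atomic_flag spinlocks and busy-waited via std::this_thread::yield (plus GCC workarounds for missing yield/sleep_for), while the new implementation keeps a std::list<pointer> behind a single std::mutex and uses a std::condition_variable so that a worker facing an empty queue can block with a timeout instead of spinning. A minimal, self-contained sketch of that blocking pattern, with illustrative names rather than code taken from this commit:

#include <chrono>
#include <condition_variable>
#include <list>
#include <mutex>

// Illustrative only: a minimal blocking deque in the style the commit adopts.
template <class T>
class blocking_deque {
public:
  // Producer side: enqueue and wake one sleeping consumer if the deque was empty.
  void push_back(T* item) {
    bool do_notify = false;
    {
      std::unique_lock guard{mtx_};
      do_notify = items_.empty();
      items_.push_back(item);
    }
    if (do_notify)
      cv_.notify_one();
  }

  // Consumer side: block until an item arrives or the relative timeout expires.
  template <class Rep, class Period>
  T* try_pop_front(std::chrono::duration<Rep, Period> rel_timeout) {
    auto abs_timeout = std::chrono::system_clock::now() + rel_timeout;
    std::unique_lock guard{mtx_};
    while (items_.empty())
      if (cv_.wait_until(guard, abs_timeout) == std::cv_status::timeout)
        return nullptr;
    auto* result = items_.front();
    items_.pop_front();
    return result;
  }

private:
  std::mutex mtx_;
  std::condition_variable cv_;
  std::list<T*> items_;
};

The notify-only-when-empty check in push_back mirrors the new append() below: waking a consumer is only necessary when it may be sleeping on an empty deque.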
parent 8a188d75
Showing 2 changed files with 67 additions and 232 deletions
libcaf_core/caf/detail/double_ended_queue.hpp  +56 −175
libcaf_core/caf/policy/work_stealing.hpp  +11 −57
libcaf_core/caf/detail/double_ended_queue.hpp
@@ -6,55 +6,17 @@
 #include "caf/config.hpp"

 #include <atomic>
 #include <cassert>
 #include <chrono>
 #include <condition_variable>
 #include <list>
 #include <mutex>
 #include <thread>

-// GCC hack
-#if defined(CAF_GCC) && !defined(_GLIBCXX_USE_SCHED_YIELD)
-#  include <time.h>
-namespace std {
-namespace this_thread {
-namespace {
-inline void yield() noexcept {
-  timespec req;
-  req.tv_sec = 0;
-  req.tv_nsec = 1;
-  nanosleep(&req, nullptr);
-}
-} // namespace
-} // namespace this_thread
-} // namespace std
-#endif
-
-// another GCC hack
-#if defined(CAF_GCC) && !defined(_GLIBCXX_USE_NANOSLEEP)
-#  include <time.h>
-namespace std {
-namespace this_thread {
-namespace {
-template <class Rep, typename Period>
-inline void sleep_for(const chrono::duration<Rep, Period>& rt) {
-  auto sec = chrono::duration_cast<chrono::seconds>(rt);
-  auto nsec = chrono::duration_cast<chrono::nanoseconds>(rt - sec);
-  timespec req;
-  req.tv_sec = sec.count();
-  req.tv_nsec = nsec.count();
-  nanosleep(&req, nullptr);
-}
-} // namespace
-} // namespace this_thread
-} // namespace std
-#endif
-
 namespace caf::detail {

 /*
- * A thread-safe double-ended queue based on http://drdobbs.com/cpp/211601363.
- * This implementation is optimized for FIFO, i.e., it supports fast insertion
- * at the end and fast removal from the beginning. As long as the queue is
- * only used for FIFO operations, readers do not block writers and vice versa.
+ * A thread-safe, double-ended queue for work-stealing.
  */
 template <class T>
 class double_ended_queue {
@@ -67,163 +29,82 @@ public:
   using pointer = value_type*;
   using const_pointer = const value_type*;

-  class node {
-  public:
-    pointer value;
-    std::atomic<node*> next;
-    explicit node(pointer val) : value(val), next(nullptr) {
-      // nop
-    }
-
-  private:
-    static constexpr size_type payload_size
-      = sizeof(pointer) + sizeof(std::atomic<node*>);
-    static constexpr size_type cline_size = CAF_CACHE_LINE_SIZE;
-    static constexpr size_type pad_size
-      = (cline_size * ((payload_size / cline_size) + 1)) - payload_size;
-    // avoid false sharing
-    static_assert(pad_size > 0, "invalid padding size calculated");
-    char pad[pad_size];
-  };
-
-  using unique_node_ptr = std::unique_ptr<node>;
-
-  static_assert(sizeof(node*) < CAF_CACHE_LINE_SIZE,
-                "sizeof(node*) >= CAF_CACHE_LINE_SIZE");
-
-  double_ended_queue() {
-    head_lock_.clear();
-    tail_lock_.clear();
-    auto ptr = new node(nullptr);
-    head_ = ptr;
-    tail_ = ptr;
-  }
-
-  ~double_ended_queue() {
-    auto ptr = head_.load();
-    while (ptr) {
-      unique_node_ptr tmp{ptr};
-      ptr = tmp->next.load();
-    }
-  }
-
-  // acquires only one lock
-  void append(pointer value) {
-    CAF_ASSERT(value != nullptr);
-    auto* tmp = new node(value);
-    lock_guard guard(tail_lock_);
-    // publish & swing last forward
-    tail_.load()->next = tmp;
-    tail_ = tmp;
-  }
-
-  // acquires both locks
-  void prepend(pointer value) {
-    CAF_ASSERT(value != nullptr);
-    auto* tmp = new node(value);
-    node* first = nullptr;
-    // acquire both locks since we might touch last_ too
-    lock_guard guard1(head_lock_);
-    lock_guard guard2(tail_lock_);
-    first = head_.load();
-    CAF_ASSERT(first != nullptr);
-    auto next = first->next.load();
-    // first_ always points to a dummy with no value,
-    // hence we put the new element second
-    if (next) {
-      CAF_ASSERT(first != tail_);
-      tmp->next = next;
-    } else {
-      // queue is empty
-      CAF_ASSERT(first == tail_);
-      tail_ = tmp;
-    }
-    first->next = tmp;
-  }
-
-  // acquires only one lock, returns nullptr on failure
-  pointer take_head() {
-    unique_node_ptr first;
-    pointer result = nullptr;
-    { // lifetime scope of guard
-      lock_guard guard(head_lock_);
-      first.reset(head_.load());
-      node* next = first->next;
-      if (next == nullptr) {
-        // queue is empty
-        first.release();
-        return nullptr;
-      }
-      // take it out of the node & swing first forward
-      result = next->value;
-      next->value = nullptr;
-      head_ = next;
-    }
-    return result;
-  }
-
-  // acquires both locks, returns nullptr on failure
-  pointer take_tail() {
-    pointer result = nullptr;
-    unique_node_ptr last;
-    { // lifetime scope of guards
-      lock_guard guard1(head_lock_);
-      lock_guard guard2(tail_lock_);
-      CAF_ASSERT(head_ != nullptr);
-      last.reset(tail_.load());
-      if (last.get() == head_.load()) {
-        last.release();
-        return nullptr;
-      }
-      result = last->value;
-      tail_ = find_predecessor(last.get());
-      CAF_ASSERT(tail_ != nullptr);
-      tail_.load()->next = nullptr;
-    }
-    return result;
-  }
-
-  // does not lock
-  bool empty() const {
-    // atomically compares first and last pointer without locks
-    return head_.load() == tail_.load();
-  }
-
-private:
-  // precondition: *both* locks acquired
-  node* find_predecessor(node* what) {
-    for (auto i = head_.load(); i != nullptr; i = i->next) {
-      if (i->next == what) {
-        return i;
-      }
-    }
-    return nullptr;
-  }
-
-  // guarded by head_lock_
-  std::atomic<node*> head_;
-  char pad1_[CAF_CACHE_LINE_SIZE - sizeof(node*)];
-  // guarded by tail_lock_
-  std::atomic<node*> tail_;
-  char pad2_[CAF_CACHE_LINE_SIZE - sizeof(node*)];
-  // enforce exclusive access
-  std::atomic_flag head_lock_;
-  std::atomic_flag tail_lock_;
-
-  class lock_guard {
-  public:
-    explicit lock_guard(std::atomic_flag& lock) : lock_(lock) {
-      while (lock.test_and_set(std::memory_order_acquire)) {
-        std::this_thread::yield();
-      }
-    }
-    ~lock_guard() {
-      lock_.clear(std::memory_order_release);
-    }
-
-  private:
-    std::atomic_flag& lock_;
-  };
+  // -- for the owner ----------------------------------------------------------
+
+  void prepend(pointer value) {
+    CAF_ASSERT(value != nullptr);
+    std::unique_lock guard{mtx_};
+    items_.push_front(value);
+  }
+
+  pointer try_take_head() {
+    std::unique_lock guard{mtx_};
+    if (!items_.empty()) {
+      auto* result = items_.front();
+      items_.pop_front();
+      return result;
+    }
+    return nullptr;
+  }
+
+  template <class Duration>
+  pointer try_take_head(Duration rel_timeout) {
+    auto abs_timeout = std::chrono::system_clock::now() + rel_timeout;
+    std::unique_lock guard{mtx_};
+    while (items_.empty()) {
+      if (cv_.wait_until(guard, abs_timeout) == std::cv_status::timeout) {
+        return nullptr;
+      }
+    }
+    auto* result = items_.front();
+    items_.pop_front();
+    return result;
+  }
+
+  pointer take_head() {
+    std::unique_lock guard{mtx_};
+    while (items_.empty()) {
+      cv_.wait(guard);
+    }
+    auto* result = items_.front();
+    items_.pop_front();
+    return result;
+  }
+
+  // Unsafe, since it does not wake up a currently sleeping worker.
+  void unsafe_append(pointer value) {
+    std::unique_lock guard{mtx_};
+    items_.push_back(value);
+  }
+
+  // -- for others -------------------------------------------------------------
+
+  void append(pointer value) {
+    bool do_notify = false;
+    {
+      std::unique_lock guard{mtx_};
+      do_notify = items_.empty();
+      items_.push_back(value);
+    }
+    if (do_notify) {
+      cv_.notify_one();
+    }
+  }
+
+  pointer try_take_tail() {
+    std::unique_lock guard{mtx_};
+    if (!items_.empty()) {
+      auto* result = items_.back();
+      items_.pop_back();
+      return result;
+    }
+    return nullptr;
+  }
+
+private:
+  std::mutex mtx_;
+  std::condition_variable cv_;
+  std::list<pointer> items_;
 };

 } // namespace caf::detail
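For orientation, a hypothetical usage sketch of the new interface (not part of the commit): the owning worker drains from the head, optionally blocking with a timeout, while other threads hand work over via append(), which wakes a sleeping owner, and steal from the tail via try_take_tail().

// Hypothetical usage sketch; assumes the double_ended_queue declared above.
#include <chrono>
#include <thread>

#include "caf/detail/double_ended_queue.hpp"

struct job { int id; };

void example() {
  caf::detail::double_ended_queue<job> queue;
  // Another scheduler thread hands work to this worker; append() notifies the
  // condition variable if the worker is blocked in take_head() or
  // try_take_head(timeout).
  std::thread producer{[&queue] {
    for (int i = 0; i < 10; ++i)
      queue.append(new job{i});
  }};
  // The owning worker drains from the head, blocking up to 10 ms per attempt;
  // a thief on another worker would call queue.try_take_tail() instead.
  for (int consumed = 0; consumed < 10;) {
    if (auto* j = queue.try_take_head(std::chrono::milliseconds{10})) {
      delete j;
      ++consumed;
    }
  }
  producer.join();
}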
libcaf_core/caf/policy/work_stealing.hpp
@@ -39,13 +39,6 @@ public:
     timespan sleep_duration;
   };

-  // what is needed to implement the waiting strategy.
-  struct wait_strategy {
-    std::mutex lock;
-    std::condition_variable cv;
-    bool sleeping{false};
-  };
-
   // The coordinator has only a counter for round-robin enqueue to its workers.
   struct coordinator_data {
     explicit coordinator_data(scheduler::abstract_coordinator*)
@@ -68,7 +61,6 @@ public:
     std::default_random_engine rengine;
     std::uniform_int_distribution<size_t> uniform;
     std::array<poll_strategy, 3> strategies;
-    wait_strategy waitdata;
   };

   // Goes on a raid in quest for a shiny new job.
@@ -84,7 +76,7 @@ public:
     if (victim == self->id())
       victim = p->num_workers() - 1;
     // steal oldest element from the victim's queue
-    return d(p->worker_by_id(victim)).queue.take_tail();
+    return d(p->worker_by_id(victim)).queue.try_take_tail();
   }

   template <class Coordinator>
@@ -96,14 +88,6 @@ public:
   template <class Worker>
   void external_enqueue(Worker* self, resumable* job) {
     d(self).queue.append(job);
-    auto& lock = d(self).waitdata.lock;
-    auto& cv = d(self).waitdata.cv;
-    { // guard scope
-      std::unique_lock<std::mutex> guard(lock);
-      // check if the worker is sleeping
-      if (d(self).waitdata.sleeping && !d(self).queue.empty())
-        cv.notify_one();
-    }
   }

   template <class Worker>
@@ -115,7 +99,7 @@ public:
   void resume_job_later(Worker* self, resumable* job) {
     // job has voluntarily released the CPU to let others run instead
     // this means we are going to put this job to the very end of our queue
-    d(self).queue.append(job);
+    d(self).queue.unsafe_append(job);
   }

   template <class Worker>
@@ -125,67 +109,37 @@ public:
     // polling, then we relax our polling a bit and wait 50 us between
     // dequeue attempts
     auto& strategies = d(self).strategies;
-    resumable* job = nullptr;
+    auto* job = d(self).queue.try_take_head();
+    if (job)
+      return job;
     for (size_t k = 0; k < 2; ++k) { // iterate over the first two strategies
       for (size_t i = 0; i < strategies[k].attempts;
            i += strategies[k].step_size) {
-        job = d(self).queue.take_head();
-        if (job)
-          return job;
         // try to steal every X poll attempts
         if ((i % strategies[k].steal_interval) == 0) {
           job = try_steal(self);
           if (job)
             return job;
         }
-        if (strategies[k].sleep_duration.count() > 0) {
-#ifdef CAF_MSVC
-          // Windows cannot sleep less than 1000 us, so timeout is converted to
-          // 0 inside sleep_for(), but Sleep(0) is dangerous so replace it with
-          // yield()
-          if (strategies[k].sleep_duration.count() < 1000)
-            std::this_thread::yield();
-          else
-            std::this_thread::sleep_for(strategies[k].sleep_duration);
-#else
-          std::this_thread::sleep_for(strategies[k].sleep_duration);
-#endif
-        }
+        // wait for some work to appear
+        job = d(self).queue.try_take_head(strategies[k].sleep_duration);
+        if (job)
+          return job;
       }
     }
     // we assume pretty much nothing is going on so we can relax polling
     // and falling to sleep on a condition variable whose timeout is the one
     // of the relaxed polling strategy
     auto& relaxed = strategies[2];
-    auto& sleeping = d(self).waitdata.sleeping;
-    auto& lock = d(self).waitdata.lock;
-    auto& cv = d(self).waitdata.cv;
-    bool notimeout = true;
-    size_t i = 1;
     do {
-      { // guard scope
-        std::unique_lock<std::mutex> guard(lock);
-        sleeping = true;
-        if (!cv.wait_for(guard, relaxed.sleep_duration,
-                         [&] { return !d(self).queue.empty(); }))
-          notimeout = false;
-        sleeping = false;
-      }
-      if (notimeout) {
-        job = d(self).queue.take_head();
-      } else {
-        notimeout = true;
-        if ((i % relaxed.steal_interval) == 0)
-          job = try_steal(self);
-      }
-      ++i;
+      job = d(self).queue.try_take_head(relaxed.sleep_duration);
     } while (job == nullptr);
     return job;
   }

   template <class Worker, class UnaryFunction>
   void foreach_resumable(Worker* self, UnaryFunction f) {
-    auto next = [&] { return d(self).queue.take_head(); };
+    auto next = [&] { return d(self).queue.try_take_head(); };
     for (auto job = next(); job != nullptr; job = next()) {
       f(job);
     }
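With these changes, the dequeue path in work_stealing reduces to escalating backoff: poll the own queue, periodically attempt a steal, and otherwise block on the queue with the current strategy's sleep_duration as the wait timeout. A condensed sketch of that control flow; own_queue and try_steal are hypothetical stand-ins for d(self).queue and the policy's try_steal(self), not the commit's code verbatim:

#include <array>
#include <chrono>
#include <cstddef>

// Hypothetical stand-ins so the sketch compiles on its own; in CAF these are
// resumable*, d(self).queue, and the policy's poll_strategy / try_steal().
struct resumable;
struct poll_strategy {
  std::size_t attempts;
  std::size_t step_size;
  std::size_t steal_interval;
  std::chrono::microseconds sleep_duration;
};

template <class Queue, class Steal>
resumable* dequeue_sketch(Queue& own_queue,
                          std::array<poll_strategy, 3>& strategies,
                          Steal try_steal) {
  // Fast path: the own queue already has work.
  if (auto* job = own_queue.try_take_head())
    return job;
  // Phase 1 and 2: aggressive, then moderate polling with periodic stealing.
  for (std::size_t k = 0; k < 2; ++k) {
    for (std::size_t i = 0; i < strategies[k].attempts;
         i += strategies[k].step_size) {
      if ((i % strategies[k].steal_interval) == 0)
        if (auto* job = try_steal())
          return job;
      // Instead of sleep_for() plus re-polling, block on the queue itself,
      // using the strategy's sleep_duration as the wait timeout.
      if (auto* job = own_queue.try_take_head(strategies[k].sleep_duration))
        return job;
    }
  }
  // Phase 3: relaxed polling, sleeping on the queue's condition variable.
  auto& relaxed = strategies[2];
  for (;;)
    if (auto* job = own_queue.try_take_head(relaxed.sleep_duration))
      return job;
}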