Commit 8bcc85aa authored by neverlord

prefetch optimization

parent d4fdf6a7
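The commit message says only "prefetch optimization"; the substance is in the abstract_actor changes further down: each actor embeds a small array of preallocated mailbox nodes plus an atomic free list (m_prefetched). Senders pop a node from that list instead of calling new for every message, and the receiving side pushes the node back instead of deleting it, so steady-state messaging causes no heap traffic. A minimal standalone sketch of that pattern follows (not libcppa code; all names are invented, and like the commit it assumes the pool outlives every node handed out):

#include <atomic>
#include <cassert>

struct node
{
    node* next = nullptr;
    std::atomic<node*>* owner = nullptr; // free list this node returns to
    int payload = 0;
};

struct node_pool
{
    node storage[10];                    // preallocated, owned by the pool
    std::atomic<node*> free_list{nullptr};

    node_pool()
    {
        // chain the embedded nodes into the initial free list
        for (auto& n : storage)
        {
            n.owner = &free_list;
            n.next = free_list.load();
            free_list.store(&n);
        }
    }

    // lock-free pop (Treiber stack); falls back to the heap when empty
    node* fetch()
    {
        node* result = free_list.load();
        while (result)
        {
            if (free_list.compare_exchange_weak(result, result->next))
            {
                result->next = nullptr;
                return result;
            }
        }
        return new node; // heap node: owner stays nullptr
    }

    // lock-free push back onto the owning free list, or plain delete
    static void release(node* n)
    {
        if (n->owner == nullptr) { delete n; return; }
        n->next = n->owner->load();
        while (!n->owner->compare_exchange_weak(n->next, n)) { }
    }
};

int main()
{
    node_pool pool;
    node* a = pool.fetch();
    a->payload = 42;
    node_pool::release(a);  // recycled, not deleted
    node* b = pool.fetch();
    assert(a == b);         // single-threaded: the same node comes back
    node_pool::release(b);
    return 0;
}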
......@@ -178,6 +178,7 @@ nobase_library_include_HEADERS = \
  cppa/util/compare_tuples.hpp \
  cppa/util/concat_type_lists.hpp \
  cppa/util/conjunction.hpp \
+ cppa/util/default_deallocator.hpp \
  cppa/util/disable_if.hpp \
  cppa/util/disjunction.hpp \
  cppa/util/duration.hpp \
......
......@@ -4,122 +4,240 @@ import akka.actor.Actor.actorOf
import scala.annotation.tailrec

case class Token(value: Int)
-case class Init(ringSize: Int, repetitions: Int)
+case class Init(ringSize: Int, initialTokenValue: Int, repetitions: Int)
case class Calc(value: Long)
case class Factors(values: List[Long])
case object Done
+case object MasterExited

object global {
    final val taskN: Long = 86028157l * 329545133
    final val factor1: Long = 86028157
    final val factor2: Long = 329545133
-   final val numMessages = 1000
+   final val factors = List(factor2, factor1)
    val latch = new java.util.concurrent.CountDownLatch(1)
    def checkFactors(f: List[Long]) {
-       assert(f.length == 2 && f(1) == factor1 && f(2) == factor2)
+       assert(f equals factors)
    }
+   @tailrec final def fac(n: Long, m: Long, interim: List[Long]): List[Long] = {
+       if (n == m) m :: interim
+       else if ((n % m) == 0) fac(n/m, m, m :: interim)
+       else fac(n, if (m == 2) 3 else m + 2, interim)
+   }
    def factorize(arg: Long): List[Long] = {
-       var n = arg
-       if (n <= 3)
-           return List[Long](n)
-       var result = new scala.collection.mutable.LinkedList[Long]
-       var d: Long = 2
-       while (d < n) {
-           if ((n % d) == 0) {
-               result :+ d
-               n = n / d
-           }
-           else
-               d = if (d == 2) 3 else d + 2
-       }
-       (result :+ d) toList
+       if (arg <= 3) List(arg)
+       else fac(arg, 2, List())
    }
}

-class ThreadedWorker extends Actor {
-   override def act() {
-       receive {
-           case Calc(value) =>
-               reply(global.factorize(value))
-               act()
-       }
-   }
-}
+class ThreadedWorker(supervisor: Actor) extends Actor {
+   override def act() = receive {
+       case Calc(value) => supervisor ! Factors(global.factorize(value)); act
+       case Done => // recursion ends
+   }
+}

-class ThreadedChainLink(next: Actor) extends Actor {
-   override def act() {
-       receive {
-           case Token(value) =>
-               next ! Token(value)
-               if (value > 0) act
-       }
-   }
-}
+class ThreadedChainLink(next: Actor) extends Actor {
+   override def act() = {
+       var done = false
+       while (done == false)
+           receive {
+               // forward the token; terminate once the final token
+               // (value 0) has passed through
+               case Token(value) => next ! Token(value); if (value == 0) done = true
+           }
+   }
+}

-class ThreadedChainMaster extends Actor {
-   @tailrec final def newRing(a: Actor, i: Int): Actor = {
-       val next = (new ThreadedChainLink(a)).start
-       if (i > 0) newRing(next, i-1) else next
-   }
-   override def act() {
-       val worker = (new ThreadedWorker).start
-       receive {
-           case Init(rsize, iterations) =>
-               var remainingFactors = 0
-               for (_ <- 0 until iterations) {
-                   val next = newRing(this, rsize)
-                   remainingFactors += 1
-                   worker ! Calc(global.taskN)
-                   next ! Token(global.numMessages)
-                   var done = false
-                   while (done == false)
-                       receive {
-                           case Token(value) =>
-                               if (value > 0) next ! Token(value-1) else done = true
-                           case Factors(f) =>
-                               global.checkFactors(f)
-                               remainingFactors -= 1
-                       }
-               }
-               while (remainingFactors > 0)
-                   receive {
-                       case Factors(f) =>
-                           global.checkFactors(f)
-                           remainingFactors -= 1
-                   }
-       }
-   }
-}
+class ThreadedChainMaster(supervisor: Actor) extends Actor {
+   val worker = (new ThreadedWorker(supervisor)).start
+   @tailrec final def newRing(next: Actor, rsize: Int): Actor = {
+       if (rsize == 0) next
+       else newRing((new ThreadedChainLink(next)).start, rsize-1)
+   }
+   override def act() = receive {
+       case Init(rsize, initialTokenValue, repetitions) =>
+           for (_ <- 0 until repetitions) {
+               worker ! Calc(global.taskN)
+               val next = newRing(this, rsize-1)
+               next ! Token(initialTokenValue)
+               var ringDone = false
+               while (ringDone == false) {
+                   receive {
+                       case Token(0) => ringDone = true
+                       case Token(value) => next ! Token(value-1)
+                   }
+               }
+           }
+           worker ! Done
+           supervisor ! MasterExited
+   }
+}

+class ThreadedSupervisor(numMessages: Int) extends Actor {
+   override def act() = for (_ <- 0 until numMessages) {
+       receive {
+           case Factors(f) => global.checkFactors(f)
+           case MasterExited =>
+       }
+   }
+}

+class ThreadlessWorker(supervisor: Actor) extends Actor {
+   override def act() = react {
+       case Calc(value) => supervisor ! Factors(global.factorize(value)); act
+       case Done => // recursion ends
+   }
+}

+class ThreadlessChainLink(next: Actor) extends Actor {
+   override def act() = react {
+       case Token(value) => next ! Token(value); if (value > 0) act
+   }
+}

+class ThreadlessChainMaster(supervisor: Actor) extends Actor {
+   val worker = (new ThreadlessWorker(supervisor)).start
+   @tailrec final def newRing(next: Actor, rsize: Int): Actor = {
+       if (rsize == 0) next
+       else newRing((new ThreadlessChainLink(next)).start, rsize-1)
+   }
+   var initialTokenValue = 0
+   var repetitions = 0
+   var iteration = 0
+   var rsize = 0
+   var next: Actor = null
+   def rloop(): Nothing = react {
+       case Token(0) =>
+           iteration += 1
+           if (iteration < repetitions) {
+               worker ! Calc(global.taskN)
+               next = newRing(this, rsize-1)
+               next ! Token(initialTokenValue)
+               rloop
+           }
+           else {
+               worker ! Done
+               supervisor ! MasterExited
+           }
+       case Token(value) => next ! Token(value-1) ; rloop
+   }
+   override def act() = react {
+       case Init(rs, itv, rep) =>
+           rsize = rs ; initialTokenValue = itv ; repetitions = rep
+           worker ! Calc(global.taskN)
+           next = newRing(this, rsize-1)
+           next ! Token(initialTokenValue)
+           rloop
+   }
+}

+class ThreadlessSupervisor(numMessages: Int) extends Actor {
+   def rcv(remaining: Int): Nothing = react {
+       case Factors(f) => global.checkFactors(f); if (remaining > 1) rcv(remaining-1)
+       case MasterExited => if (remaining > 1) rcv(remaining-1)
+   }
+   override def act() = rcv(numMessages)
+}

+class AkkaWorker(supervisor: akka.actor.ActorRef) extends akka.actor.Actor {
+   def receive = {
+       case Calc(value) => supervisor ! Factors(global.factorize(value))
+       case Done => self.exit
+   }
+}

+class AkkaChainLink(next: akka.actor.ActorRef) extends akka.actor.Actor {
+   def receive = {
+       case Token(value) => next ! Token(value); if (value == 0) self.exit
+   }
+}

+class AkkaChainMaster(supervisor: akka.actor.ActorRef) extends akka.actor.Actor {
+   var initialTokenValue = 0
+   var repetitions = 0
+   var iteration = 0
+   var rsize = 0
+   var next: akka.actor.ActorRef = null
+   val worker = actorOf(new AkkaWorker(supervisor)).start
+   @tailrec final def newRing(next: akka.actor.ActorRef, rsize: Int): akka.actor.ActorRef = {
+       if (rsize == 0) next
+       else newRing(actorOf(new AkkaChainLink(next)).start, rsize-1)
+   }
+   def initialized: Receive = {
+       case Token(0) =>
+           iteration += 1
+           if (iteration < repetitions) {
+               worker ! Calc(global.taskN)
+               next = newRing(self, rsize-1)
+               next ! Token(initialTokenValue)
+           }
+           else {
+               worker ! Done
+               supervisor ! MasterExited
+               self.exit
+           }
+       case Token(value) => next ! Token(value-1)
+   }
+   def receive = {
+       case Init(rs, itv, rep) =>
+           rsize = rs ; initialTokenValue = itv ; repetitions = rep
+           worker ! Calc(global.taskN)
+           next = newRing(self, rsize-1)
+           next ! Token(initialTokenValue)
+           become(initialized)
+   }
+}

+class AkkaSupervisor(numMessages: Int) extends akka.actor.Actor {
+   var i = 0
+   def inc() {
+       i += 1
+       if (i == numMessages) {
+           global.latch.countDown
+           self.exit
+       }
+   }
+   def receive = {
+       case Factors(f) => global.checkFactors(f); inc
+       case MasterExited => inc
+   }
+}

object MixedCase {
-   def usage() {
-       Console println "usage: (threaded|threadless|akka) (ring_size) (repetitions)"
-   }
+   def usage(): Nothing = {
+       Console println "usage: ('threaded'|'threadless'|'akka') (num rings) (ring size) (initial token value) (repetitions)"
+       System.exit(1) // why doesn't exit return Nothing?
+       throw new RuntimeException("")
+   }
    def main(args: Array[String]) = {
-       if (args.size != 3) {
-           usage
-           throw new IllegalArgumentException("")
-       }
-       val ringSize = args(1).toInt
-       val repetitions = args(2).toInt
-       val impl = List("threaded", "threadless", "akka").indexOf(args(0))
-       if (impl == -1) {
-           usage
-       }
-       else if (impl == 0) {
-           System.setProperty("actors.maxPoolSize", ((11 * ringSize) + 10).toString)
-           for (_ <- 0 until 10) {
-               val a = (new ThreadedChainMaster()).start
-               a ! Init(ringSize, repetitions)
-           }
-       }
-       /*else {
-           val rcvRef = actorOf(new AkkaReceiver(threads*msgs)).start
-           for (i <- 0 until threads)
-               (new java.lang.Thread {
-                   override def run() { for (_ <- 0 until msgs) rcvRef ! Msg }
-               }).start
-       }*/
+       if (args.size != 5) usage
+       val numRings = args(1).toInt
+       val ringSize = args(2).toInt
+       val initialTokenValue = args(3).toInt
+       val repetitions = args(4).toInt
+       val initMsg = Init(ringSize, initialTokenValue, repetitions)
+       val numMessages = (numRings + (numRings * repetitions))
+       val impl = args(0)
+       if (impl == "threaded") {
+           //System.setProperty("actors.maxPoolSize", ((11 * ringSize) + 10).toString)
+           val s = (new ThreadedSupervisor(numMessages)).start
+           for (_ <- 0 until numRings)
+               (new ThreadedChainMaster(s)).start ! initMsg
+       }
+       else if (impl == "threadless") {
+           val s = (new ThreadlessSupervisor(numMessages)).start
+           for (_ <- 0 until numRings)
+               (new ThreadlessChainMaster(s)).start ! initMsg
+       }
+       else if (impl == "akka") {
+           val s = actorOf(new AkkaSupervisor(numMessages)).start
+           for (_ <- 0 until numRings)
+               actorOf(new AkkaChainMaster(s)).start ! initMsg
+           global.latch.await
+       }
+       else usage
    }
}
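One detail worth spelling out from the code above: the supervisors stop after numMessages = numRings + numRings * repetitions messages, because each ring's worker reports one Factors result per repetition and each master reports exactly one MasterExited/masterdone. A trivial standalone check of that accounting (C++ to match the other sketches on this page; the numbers are arbitrary):

#include <cassert>

int main()
{
    const int num_rings = 10;
    const int repetitions = 5;
    const int factor_reports = num_rings * repetitions; // one Factors per repetition per ring
    const int master_reports = num_rings;               // one exit message per ring
    assert(factor_reports + master_reports == num_rings + (num_rings * repetitions));
    return 0;
}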
#!/bin/bash
read -r cmd
export JAVA_OPTS="-Xmx4096M"
-/usr/bin/time -p -f "%e" $cmd 2>&1 #| grep "^real" | grep -o -P "[0-9]*(\.[0-9]*)?"
+if [[ $(uname) == "Darwin" ]] ; then
+    /usr/bin/time -p $cmd 2>&1
+else
+    /usr/bin/time -p -f "%e" $cmd 2>&1
+fi
......@@ -46,7 +46,6 @@ typedef std::vector<uint64_t> factors;
using namespace cppa;
-constexpr int s_num_messages = 1000;
constexpr uint64_t s_task_n = uint64_t(86028157)*329545133;
constexpr uint64_t s_factor1 = 86028157;
constexpr uint64_t s_factor2 = 329545133;
......@@ -54,17 +53,14 @@ constexpr uint64_t s_factor2 = 329545133;
factors factorize(uint64_t n)
{
    factors result;
-   uint64_t d = 2;
    if (n <= 3)
    {
        result.push_back(n);
        return std::move(result);
    }
-   // while the factor being tested
-   // is lower than the number to factorize
+   uint64_t d = 2;
    while (d < n)
    {
-       // if valid prime
        if ((n % d) == 0)
        {
            result.push_back(d);
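For reference (not part of the commit, and the rest of check_factors is elided in this diff): the benchmark's workload factorizes s_task_n, whose only prime factors are s_factor1 and s_factor2, so trial division returns them smallest-first. A standalone check of that claim using the same scheme as factorize() above; note it deliberately burns CPU for a few seconds, exactly like the benchmark task:

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    const std::uint64_t task_n = std::uint64_t(86028157) * 329545133;
    std::vector<std::uint64_t> result;
    std::uint64_t n = task_n;
    std::uint64_t d = 2;
    while (d < n)                       // same loop shape as factorize()
    {
        if (n % d == 0) { result.push_back(d); n /= d; }
        else d = (d == 2) ? 3 : d + 2;
    }
    result.push_back(n);                // the remaining n is the last prime factor
    assert(result.size() == 2);
    assert(result[0] == 86028157 && result[1] == 329545133);
    return 0;
}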
......@@ -88,14 +84,19 @@ void check_factors(factors const& vec)
struct fsm_worker : fsm_actor<fsm_worker>
{
+   actor_ptr mc;
    behavior init_state;
-   fsm_worker()
+   fsm_worker(actor_ptr const& msgcollector) : mc(msgcollector)
    {
        init_state =
        (
            on<atom("calc"), uint64_t>() >> [=](uint64_t what)
            {
-               reply(atom("result"), factorize(what));
+               send(mc, atom("result"), factorize(what));
            },
+           on(atom("done")) >> [=]()
+           {
+               become_void();
+           }
        );
    }
......@@ -111,15 +112,8 @@ struct fsm_chain_link : fsm_actor<fsm_chain_link>
    (
        on<atom("token"), int>() >> [=](int v)
        {
-           send(next, atom("token"), v);
-           if (v == 0)
-           {
-               become_void();
-           }
-       },
-       on(atom("done")) >> [=]()
-       {
-           become_void();
+           // forward the received message itself instead of building a copy
+           next->enqueue(nullptr, std::move(last_received()));
+           if (v == 0) become_void();
        }
    );
}
......@@ -128,84 +122,80 @@ struct fsm_chain_link : fsm_actor<fsm_chain_link>
struct fsm_chain_master : fsm_actor<fsm_chain_master>
{
    int iteration;
+   actor_ptr mc;
    actor_ptr next;
    actor_ptr worker;
    behavior init_state;
-   int remainig_results;
-   void new_ring(int ring_size)
+   void new_ring(int ring_size, int initial_token_value)
    {
        send(worker, atom("calc"), s_task_n);
-       ++remainig_results;
        next = self;
        for (int i = 1; i < ring_size; ++i)
        {
            next = spawn(new fsm_chain_link(next));
        }
-       send(next, atom("token"), s_num_messages);
+       send(next, atom("token"), initial_token_value);
    }
-   fsm_chain_master() : iteration(0)
+   fsm_chain_master(actor_ptr msgcollector) : iteration(0), mc(msgcollector)
    {
-       remainig_results = 0;
-       worker = spawn(new fsm_worker);
+       worker = spawn(new fsm_worker(msgcollector));
        init_state =
        (
-           on<atom("init"), int, int>() >> [=](int ring_size, int repetitions)
+           on<atom("init"), int, int, int>() >> [=](int rs, int itv, int n)
            {
                iteration = 0;
-               new_ring(ring_size);
+               new_ring(rs, itv);
                become
                (
                    on<atom("token"), int>() >> [=](int v)
                    {
                        if (v == 0)
                        {
-                           if (++iteration < repetitions)
+                           if (++iteration < n)
                            {
-                               new_ring(ring_size);
+                               new_ring(rs, itv);
                            }
                            else
                            {
-                               send(worker, atom(":Exit"),
-                                    exit_reason::user_defined);
-                               if (remainig_results == 0)
-                               {
-                                   become_void();
-                               }
-                               else
-                               {
-                                   become
-                                   (
-                                       on<atom("result"), factors>() >> [=](factors const& vec)
-                                       {
-                                           check_factors(vec);
-                                           --remainig_results;
-                                           if (remainig_results == 0)
-                                           {
-                                               send(worker, atom("done"));
-                                               become_void();
-                                           }
-                                       }
-                                   );
-                               }
+                               send(worker, atom("done"));
+                               send(mc, atom("masterdone"));
+                               become_void();
                            }
                        }
                        else
                        {
                            send(next, atom("token"), v - 1);
                        }
-                   },
-                   on<atom("result"), factors>() >> [=](factors const& vec)
-                   {
-                       check_factors(vec);
-                       --remainig_results;
                    }
                );
            }
        );
    }
};

+struct fsm_supervisor : fsm_actor<fsm_supervisor>
+{
+    int left;
+    behavior init_state;
+    fsm_supervisor(int num_msgs) : left(num_msgs)
+    {
+        init_state =
+        (
+            on(atom("masterdone")) >> [=]()
+            {
+                if (--left == 0) become_void();
+            },
+            on<atom("result"), factors>() >> [=](factors const& vec)
+            {
+                check_factors(vec);
+                if (--left == 0) become_void();
+            }
+        );
+    }
+};

void tst();
void chain_link(actor_ptr next)
{
......@@ -214,7 +204,7 @@ void chain_link(actor_ptr next)
    (
        on<atom("token"), int>() >> [&](int v)
        {
-           send(next, atom("token"), v);
+           next->enqueue(nullptr, std::move(last_received()));
            if (v == 0)
            {
                done = true;
......@@ -224,16 +214,17 @@ void chain_link(actor_ptr next)
.until([&]() { return done == true; });
}
-void chain_master()
+void chain_master(actor_ptr msgcollector)
{
-   auto worker = spawn([]()
+   auto worker = spawn([=]()
    {
+       actor_ptr mc = msgcollector;
        bool done = false;
        do_receive
        (
-           on<atom("calc"), uint64_t>() >> [](uint64_t what)
+           on<atom("calc"), uint64_t>() >> [&](uint64_t what)
            {
-               reply(atom("result"), factorize(what));
+               send(mc, atom("result"), factorize(what));
            },
            on(atom("done")) >> [&]()
            {
......@@ -242,62 +233,80 @@ void chain_master()
        )
        .until([&]() { return done == true; });
    });
-   actor_ptr next;
-   int remaining_results = 0;
-   auto new_ring = [&](int ring_size)
+   auto new_ring = [&](int ring_size, int initial_token_value) -> actor_ptr
    {
        send(worker, atom("calc"), s_task_n);
-       next = self;
+       actor_ptr next = self;
        for (int i = 1; i < ring_size; ++i)
        {
            next = spawn(chain_link, next);
        }
-       send(next, atom("token"), s_num_messages);
-       ++remaining_results;
+       send(next, atom("token"), initial_token_value);
+       return next;
    };
    receive
    (
-       on<atom("init"), int, int>() >> [&](int ring_size, int repetitions)
+       on<atom("init"), int, int, int>() >> [&](int rs, int itv, int n)
        {
            int iteration = 0;
-           new_ring(ring_size);
+           auto next = new_ring(rs, itv);
            do_receive
            (
                on<atom("token"), int>() >> [&](int v)
                {
                    if (v == 0)
                    {
-                       if (++iteration < repetitions)
+                       if (++iteration < n)
                        {
-                           new_ring(ring_size);
+                           next = new_ring(rs, itv);
                        }
                    }
                    else
                    {
                        send(next, atom("token"), v - 1);
                    }
-               },
-               on<atom("result"), factors>() >> [&](factors const& vec)
-               {
-                   --remaining_results;
-                   check_factors(vec);
                }
            )
-           .until([&]() { return iteration == repetitions
-                              && remaining_results == 0; });
+           .until([&]() { return iteration == n; });
        }
    );
+   send(msgcollector, atom("masterdone"));
    send(worker, atom("done"));
}

+void supervisor(int num_msgs)
+{
+    do_receive
+    (
+        on(atom("masterdone")) >> [&]()
+        {
+            --num_msgs;
+        },
+        on<atom("result"), factors>() >> [&](factors const& vec)
+        {
+            --num_msgs;
+            check_factors(vec);
+        }
+    )
+    .until([&]() { return num_msgs == 0; });
+}
template<typename F>
-void run_test(F&& spawn_impl, int ring_size, int repetitions)
+void run_test(F&& spawn_impl,
+              int num_rings, int ring_size,
+              int initial_token_value, int repetitions)
{
    std::vector<actor_ptr> masters; // of the universe
-   for (int i = 0; i < 10; ++i)
+   // each master sends one masterdone message and one
+   // factorization is calculated per repetition
+   //auto supermaster = spawn(supervisor, num_rings+repetitions);
+   for (int i = 0; i < num_rings; ++i)
    {
        masters.push_back(spawn_impl());
-       send(masters.back(), atom("init"), ring_size, repetitions);
+       send(masters.back(), atom("init"),
+            ring_size,
+            initial_token_value,
+            repetitions);
    }
    await_all_others_done();
}
......@@ -329,20 +338,25 @@ T rd(char const* cstr)
int main(int argc, char** argv)
{
    announce<factors>();
-   if (argc == 4)
+   if (argc == 6)
    {
-       int ring_size = rd<int>(argv[2]);
-       int repetitions = rd<int>(argv[3]);
+       int num_rings = rd<int>(argv[2]);
+       int ring_size = rd<int>(argv[3]);
+       int initial_token_value = rd<int>(argv[4]);
+       int repetitions = rd<int>(argv[5]);
+       int num_msgs = num_rings + (num_rings * repetitions);
        if (strcmp(argv[1], "event-based") == 0)
        {
-           run_test([]() { return spawn(new fsm_chain_master); },
-                    ring_size, repetitions);
+           auto mc = spawn(new fsm_supervisor(num_msgs));
+           run_test([&]() { return spawn(new fsm_chain_master(mc)); },
+                    num_rings, ring_size, initial_token_value, repetitions);
            return 0;
        }
        else if (strcmp(argv[1], "stacked") == 0)
        {
-           run_test([]() { return spawn(chain_master); },
-                    ring_size, repetitions);
+           auto mc = spawn(supervisor, num_msgs);
+           run_test([&]() { return spawn(chain_master, mc); },
+                    num_rings, ring_size, initial_token_value, repetitions);
            return 0;
        }
    }
......
#!/bin/bash
-for i in *.scala; do
-    echo "scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar \"$i\""
-    scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar "$i"
-done
+if [[ $# -eq 0 ]] ; then
+    for i in *.scala; do
+        echo "scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar \"$i\""
+        scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar "$i"
+    done
+elif [[ $# -eq 1 ]] ; then
+    echo "scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar \"$1.scala\""
+    scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar "$1.scala"
+fi
echo done
......@@ -247,3 +247,4 @@ src/receive.cpp
benchmarks/actor_creation.cpp
benchmarks/mailbox_performance.cpp
benchmarks/mixed_case.cpp
+cppa/util/default_deallocator.hpp
......@@ -68,26 +68,107 @@ class abstract_actor : public Base
std::vector<attachable_ptr> m_attachables;
-protected:
+public:

+   class queue_node_ptr;
+   struct queue_node_deallocator;

    struct queue_node
    {
+       friend class abstract_actor;
+       friend class queue_node_ptr;
+       friend struct queue_node_deallocator;

        queue_node* next;
+       std::atomic<queue_node*>* owner; // free list this node returns to
        actor_ptr sender;
        any_tuple msg;

+    private: // you have to be a friend to create or destroy a node
+       inline ~queue_node() { }
+       queue_node() : next(nullptr), owner(nullptr) { }
        queue_node(actor* from, any_tuple&& content)
-           : next(nullptr), sender(from), msg(std::move(content))
+           : next(nullptr), owner(nullptr), sender(from), msg(std::move(content))
        {
        }
        queue_node(actor* from, any_tuple const& content)
-           : next(nullptr), sender(from), msg(content)
+           : next(nullptr), owner(nullptr), sender(from), msg(content)
        {
        }
    };
+   struct queue_node_deallocator
+   {
+       inline void operator()(queue_node* ptr)
+       {
+           if (ptr)
+           {
+               if (ptr->owner != nullptr)
+               {
+                   // preallocated node: drop heavy members, then push it
+                   // back onto the owning actor's free list (lock-free)
+                   ptr->sender.reset();
+                   ptr->msg = any_tuple();
+                   auto owner = ptr->owner;
+                   ptr->next = owner->load();
+                   for (;;)
+                   {
+                       if (owner->compare_exchange_weak(ptr->next, ptr)) return;
+                   }
+               }
+               else
+               {
+                   // heap-allocated fallback node
+                   delete ptr;
+               }
+           }
+       }
+   };
-   util::single_reader_queue<queue_node> m_mailbox;

+   class queue_node_ptr
+   {
+       queue_node* m_ptr;
+       queue_node_deallocator d;
+    public:
+       inline queue_node_ptr(queue_node* ptr = nullptr) : m_ptr(ptr)
+       {
+       }
+       inline queue_node_ptr(queue_node_ptr&& other) : m_ptr(other.m_ptr)
+       {
+           other.m_ptr = nullptr;
+       }
+       inline ~queue_node_ptr()
+       {
+           d(m_ptr);
+       }
+       inline queue_node* operator->() { return m_ptr; }
+       queue_node* release()
+       {
+           auto result = m_ptr;
+           m_ptr = nullptr;
+           return result;
+       }
+       inline void reset(queue_node* ptr = nullptr)
+       {
+           d(m_ptr);
+           m_ptr = ptr;
+       }
+       inline operator bool() const { return m_ptr != nullptr; }
+   };

+ protected:

+   // ten nodes are embedded in the actor itself and recycled through
+   // m_prefetched, so steady-state messaging needs no heap allocations
+   queue_node m_prefetched_nodes[10];
+   std::atomic<queue_node*> m_prefetched;
+   util::single_reader_queue<queue_node,queue_node_deallocator> m_mailbox;
private:
......@@ -129,10 +210,35 @@ class abstract_actor : public Base
protected:
+   template<typename T>
+   queue_node* fetch_node(actor* sender, T&& msg)
+   {
+       // try to pop a recycled node from the free list ...
+       queue_node* result = m_prefetched.load();
+       while (result)
+       {
+           queue_node* next = result->next;
+           if (m_prefetched.compare_exchange_weak(result, next))
+           {
+               result->next = nullptr;
+               result->sender.reset(sender);
+               result->msg = std::forward<T>(msg);
+               return result;
+           }
+       }
+       // ... and fall back to the heap if all nodes are in flight
+       return new queue_node(sender, std::forward<T>(msg));
+   }

    template<typename... Args>
    abstract_actor(Args&&... args) : Base(std::forward<Args>(args)...)
                                   , m_exit_reason(exit_reason::not_exited)
    {
+       // chain the embedded nodes into the initial free list
+       for (int i = 0; i < 9; ++i)
+       {
+           m_prefetched_nodes[i].next = &(m_prefetched_nodes[i+1]);
+           m_prefetched_nodes[i].owner = &m_prefetched;
+       }
+       m_prefetched_nodes[9].owner = &m_prefetched;
+       m_prefetched.store(m_prefetched_nodes);
    }
void cleanup(std::uint32_t reason)
......
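A side note on the hand-rolled queue_node_ptr above: it is essentially a move-only RAII handle whose "deleter" recycles instead of deleting. In C++11 terms the same shape could be expressed as a std::unique_ptr with a custom deleter type, roughly as sketched here (simplified, invented names); a dedicated class was presumably preferred to keep the header self-contained on the compilers of the time:

#include <memory>

struct node { /* next, owner, sender, msg, ... */ };

struct pool_deleter
{
    void operator()(node* ptr) const
    {
        // a real deleter would push ptr back onto its owner's free list,
        // as queue_node_deallocator does above; plain delete as fallback
        delete ptr;
    }
};

using node_ptr = std::unique_ptr<node, pool_deleter>;

int main()
{
    node_ptr p(new node); // released through pool_deleter at scope exit
    return 0;
}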
......@@ -79,13 +79,13 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
 private:

-   void handle_message(std::unique_ptr<queue_node>& node,
+   void handle_message(queue_node_ptr& node,
                        invoke_rules& behavior);

-   void handle_message(std::unique_ptr<queue_node>& node,
+   void handle_message(queue_node_ptr& node,
                        timed_invoke_rules& behavior);

-   void handle_message(std::unique_ptr<queue_node>& node);
+   void handle_message(queue_node_ptr& node);

 protected:
......
......@@ -65,7 +65,8 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
    typedef abstract_actor super;
    typedef super::queue_node queue_node;
-   typedef util::singly_linked_list<queue_node> queue_node_buffer;
+   typedef util::singly_linked_list<queue_node,super::queue_node_deallocator>
+           queue_node_buffer;

    enum dq_result
    {
......@@ -84,7 +85,7 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>

    filter_result filter_msg(any_tuple const& msg);

-   dq_result dq(std::unique_ptr<queue_node>& node,
+   dq_result dq(queue_node_ptr& node,
                 invoke_rules_base& rules,
                 queue_node_buffer& buffer);
......
......@@ -61,6 +61,7 @@ class converted_thread_context : public abstract_actor<local_actor>
    typedef abstract_actor<local_actor> super;
    typedef super::queue_node queue_node;
+   typedef super::queue_node_ptr queue_node_ptr;

 public:

......@@ -79,14 +80,15 @@ class converted_thread_context : public abstract_actor<local_actor>

    void dequeue(timed_invoke_rules& rules) /*override*/;

-   inline util::single_reader_queue<queue_node>& mailbox()
+   inline decltype(m_mailbox)& mailbox()
    {
        return m_mailbox;
    }

 private:

-   typedef util::singly_linked_list<queue_node> queue_node_buffer;
+   typedef util::singly_linked_list<queue_node,super::queue_node_deallocator>
+           queue_node_buffer;

    enum throw_on_exit_result
    {
......@@ -95,7 +97,7 @@ class converted_thread_context : public abstract_actor<local_actor>
    };

    // returns true if node->msg was accepted by rules
-   bool dq(std::unique_ptr<queue_node>& node,
+   bool dq(queue_node_ptr& node,
            invoke_rules_base& rules,
            queue_node_buffer& buffer);
......
......@@ -49,7 +49,8 @@ class yielding_actor : public abstract_scheduled_actor
    typedef abstract_scheduled_actor super;
    typedef super::queue_node queue_node;
-   typedef util::singly_linked_list<queue_node> queue_node_buffer;
+   typedef super::queue_node_ptr queue_node_ptr;
+   typedef super::queue_node_buffer queue_node_buffer;

    util::fiber m_fiber;
    scheduled_actor* m_behavior;
......
+#ifndef DEFAULT_DEALLOCATOR_HPP
+#define DEFAULT_DEALLOCATOR_HPP
+
+namespace cppa { namespace util {
+
+template<typename T>
+struct default_deallocator
+{
+    inline void operator()(T* ptr) { delete ptr; }
+};
+
+} } // namespace cppa::util
+
+#endif // DEFAULT_DEALLOCATOR_HPP
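The point of this new Deallocator template parameter (wired into single_reader_queue and singly_linked_list below) is that the destruction policy becomes a compile-time customization point: the container calls d(ptr) and the policy decides between plain delete and recycling. A self-contained illustration with invented names, simplified from those containers:

#include <cstdio>

template<typename T>
struct default_deallocator
{
    inline void operator()(T* ptr) { delete ptr; }
};

// simplified stand-in for single_reader_queue / singly_linked_list
template<typename T, class Deallocator = default_deallocator<T>>
class intrusive_list
{
    Deallocator d;
    T* m_head = nullptr;

 public:

    void push_front(T* n) { n->next = m_head; m_head = n; }

    ~intrusive_list()
    {
        while (m_head)
        {
            T* next = m_head->next;
            d(m_head); // the policy decides: delete or recycle
            m_head = next;
        }
    }
};

struct item { item* next = nullptr; };

struct loud_deallocator
{
    void operator()(item* ptr) { std::puts("recycling a node"); delete ptr; }
};

int main()
{
    intrusive_list<item, loud_deallocator> list;
    list.push_front(new item);
    return 0; // destructor invokes loud_deallocator instead of plain delete
}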
......@@ -34,17 +34,19 @@
#include <atomic>

#include "cppa/detail/thread.hpp"
+#include "cppa/util/default_deallocator.hpp"

namespace cppa { namespace util {

/**
 * @brief An intrusive, thread safe queue implementation.
 */
-template<typename T>
+template<typename T, class Deallocator = default_deallocator<T> >
class single_reader_queue
{

    typedef detail::unique_lock<detail::mutex> lock_type;

+   Deallocator d;

 public:

......@@ -170,7 +172,8 @@ class single_reader_queue
        {
            element_type* tmp = e;
            e = e->next;
-           delete tmp;
+           d(tmp);
+           //delete tmp;
        }
    }
......
......@@ -32,13 +32,15 @@
#define SINGLY_LINKED_LIST_HPP

#include <utility>
+#include "cppa/util/default_deallocator.hpp"

namespace cppa { namespace util {

-template<typename T>
+template<typename T, class Deallocator = default_deallocator<T> >
class singly_linked_list
{
+   Deallocator d;
    T* m_head;
    T* m_tail;

......@@ -82,7 +84,7 @@ class singly_linked_list
        while (m_head)
        {
            T* next = m_head->next;
-           delete m_head;
+           d(m_head);
            m_head = next;
        }
        m_head = m_tail = nullptr;
......
......@@ -49,14 +49,14 @@ void abstract_event_based_actor::dequeue(timed_invoke_rules&)
    quit(exit_reason::unallowed_function_call);
}

-void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& node,
+void abstract_event_based_actor::handle_message(queue_node_ptr& node,
                                                invoke_rules& behavior)
{
    // no need to handle result
    (void) dq(node, behavior, m_buffer);
}

-void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& node,
+void abstract_event_based_actor::handle_message(queue_node_ptr& node,
                                                timed_invoke_rules& behavior)
{
    switch (dq(node, behavior, m_buffer))
......@@ -83,7 +83,7 @@ void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& nod
    }
}

-void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& node)
+void abstract_event_based_actor::handle_message(queue_node_ptr& node)
{
    auto& bhvr = m_loop_stack.top();
    if (bhvr.is_left())
......@@ -107,7 +107,7 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
        callback->exec_done();
    };

-   std::unique_ptr<queue_node> node;
+   queue_node_ptr node;
    for (;;)
    //do
    {
......
......@@ -91,12 +91,14 @@ void abstract_scheduled_actor::enqueue_node(queue_node* node)
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple&& msg)
{
-   enqueue_node(new queue_node(sender, std::move(msg)));
+   enqueue_node(fetch_node(sender, std::move(msg)));
+   //enqueue_node(new queue_node(sender, std::move(msg)));
}

void abstract_scheduled_actor::enqueue(actor* sender, any_tuple const& msg)
{
-   enqueue_node(new queue_node(sender, msg));
+   enqueue_node(fetch_node(sender, msg));
+   //enqueue_node(new queue_node(sender, msg));
}

int abstract_scheduled_actor::compare_exchange_state(int expected,
......@@ -146,7 +148,7 @@ auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result
    return ordinary_message;
}

-auto abstract_scheduled_actor::dq(std::unique_ptr<queue_node>& node,
+auto abstract_scheduled_actor::dq(queue_node_ptr& node,
                                  invoke_rules_base& rules,
                                  queue_node_buffer& buffer) -> dq_result
{
......
......@@ -61,18 +61,20 @@ void converted_thread_context::cleanup(std::uint32_t reason)
void converted_thread_context::enqueue(actor* sender, any_tuple&& msg)
{
-   m_mailbox.push_back(new queue_node(sender, std::move(msg)));
+   m_mailbox.push_back(fetch_node(sender, std::move(msg)));
+   //m_mailbox.push_back(new queue_node(sender, std::move(msg)));
}

void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
{
-   m_mailbox.push_back(new queue_node(sender, msg));
+   m_mailbox.push_back(fetch_node(sender, msg));
+   //m_mailbox.push_back(new queue_node(sender, msg));
}

void converted_thread_context::dequeue(invoke_rules& rules) /*override*/
{
    queue_node_buffer buffer;
-   std::unique_ptr<queue_node> node(m_mailbox.pop());
+   queue_node_ptr node(m_mailbox.pop());
    while (dq(node, rules, buffer) == false)
    {
        node.reset(m_mailbox.pop());
......@@ -84,7 +86,7 @@ void converted_thread_context::dequeue(timed_invoke_rules& rules) /*override*/
    auto timeout = now();
    timeout += rules.timeout();
    queue_node_buffer buffer;
-   std::unique_ptr<queue_node> node(m_mailbox.try_pop());
+   queue_node_ptr node(m_mailbox.try_pop());
    do
    {
        while (!node)
......@@ -119,7 +121,7 @@ converted_thread_context::throw_on_exit(const any_tuple& msg)
    return not_an_exit_signal;
}

-bool converted_thread_context::dq(std::unique_ptr<queue_node>& node,
+bool converted_thread_context::dq(queue_node_ptr& node,
                                  invoke_rules_base& rules,
                                  queue_node_buffer& buffer)
{
......
......@@ -99,11 +99,13 @@ struct scheduler_helper
void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{
+   typedef abstract_actor<local_actor>::queue_node_ptr queue_node_ptr;
    // setup & local variables
    self.set(m_self.get());
    auto& queue = m_self->mailbox();
-   std::multimap<decltype(detail::now()), decltype(queue.pop())> messages;
-   decltype(queue.pop()) msg_ptr = nullptr;
+   std::multimap<decltype(detail::now()), queue_node_ptr> messages;
+   queue_node_ptr msg_ptr;
+   //decltype(queue.pop()) msg_ptr = nullptr;
    decltype(detail::now()) now;
    bool done = false;
    // message handling rules
......@@ -117,8 +119,6 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
            timeout += d;
            messages.insert(std::make_pair(std::move(timeout),
                                           std::move(msg_ptr)));
-           // do not delete this msg_ptr (now)
-           msg_ptr = nullptr;
        },
        on<atom(":_DIE")>() >> [&]()
        {
......@@ -128,11 +128,11 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
    // loop
    while (!done)
    {
-       while (msg_ptr == nullptr)
+       while (!msg_ptr)
        {
            if (messages.empty())
            {
-               msg_ptr = queue.pop();
+               msg_ptr.reset(queue.pop());
            }
            else
            {
......@@ -141,7 +141,8 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
                auto it = messages.begin();
                while (it != messages.end() && (it->first) <= now)
                {
-                   auto ptr = it->second;
+                   abstract_actor<local_actor>::queue_node_ptr ptr(std::move(it->second));
+                   //auto ptr = it->second;
                    auto whom = const_cast<actor_ptr*>(
                                    reinterpret_cast<actor_ptr const*>(
                                        ptr->msg.at(1)));
......@@ -152,7 +153,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
                    }
                    messages.erase(it);
                    it = messages.begin();
-                   delete ptr;
+                   //delete ptr;
                }
                // wait for next message or next timeout
                if (it != messages.end())
......@@ -162,7 +163,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
            }
        }
        handle_msg(msg_ptr->msg);
-       delete msg_ptr;
+       //delete msg_ptr;
+       msg_ptr.reset(); // release the handled node, as the old delete did
    }
}
......
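What the time_emitter changes amount to: once the timed messages are held as RAII handles inside the multimap, erasing an entry (or letting msg_ptr go out of scope) releases the node through its deallocator, which is why the two delete statements could be commented out. A minimal illustration of the same ownership pattern using std::unique_ptr (not libcppa code):

#include <map>
#include <memory>

int main()
{
    std::multimap<int, std::unique_ptr<int>> messages;
    std::unique_ptr<int> msg(new int(42));
    messages.insert(std::make_pair(1, std::move(msg))); // map owns the node now
    messages.erase(messages.begin());                   // no manual delete needed
    return 0;
}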
......@@ -163,11 +163,13 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
    //size_t num_workers = std::max<size_t>(thread::hardware_concurrency(), 2);
    // init with 2 threads per core but no less than 4
    size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 4);
-   auto new_worker = [&]()
+   size_t max_workers = num_workers * 4;
+   auto new_worker = [&]() -> worker*
    {
        worker_ptr wptr(new worker(&wqueue, jqueue));
        wptr->start();
        workers.push_back(std::move(wptr));
+       return workers.back().get();
    };
    for (size_t i = 0; i < num_workers; ++i)
    {
......@@ -185,18 +187,29 @@
    }
    else
    {
-       // fetch next idle worker (wait up to 500ms)
-       //worker* w = nullptr;
-       //auto timeout = now();
-       //timeout += std::chrono::milliseconds(500);
-       /*while (!w)
-       {
-           w = wqueue.try_pop(timeout);
-           // all workers are blocked since 500ms, start a new one
-           if (!w)
-           {
-               new_worker();
-           }
-       }*/
+       /*
+       // fetch next idle worker
+       worker* w = nullptr;
+       if (num_workers < max_workers)
+       {
+           w = wqueue.try_pop();
+           if (!w)
+           {
+               // fetch next idle worker (wait up to 500ms)
+               timeout = now();
+               timeout += std::chrono::milliseconds(500);
+               w = wqueue.try_pop(timeout);
+               // all workers are blocked since 500ms, start a new one
+               if (!w)
+               {
+                   w = new_worker();
+                   ++num_workers;
+               }
+           }
+       }
+       else
+       {
+           w = wqueue.pop();
+       }
+       */
        worker* w = wqueue.pop();
......
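The commented-out block above sketches a policy this commit leaves disabled: when no worker becomes idle within 500ms, grow the pool up to max_workers (four times the initial size). A standalone rendering of that policy under invented names, using a plain mutex/condition-variable queue rather than libcppa's wait queue:

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>

struct worker { /* ... */ };

class idle_queue
{
    std::mutex mtx;
    std::condition_variable cv;
    std::deque<worker*> idle;

 public:

    void push(worker* w)
    {
        { std::lock_guard<std::mutex> guard(mtx); idle.push_back(w); }
        cv.notify_one();
    }

    // wait up to the given timeout for an idle worker; nullptr on timeout
    worker* try_pop_for(std::chrono::milliseconds timeout)
    {
        std::unique_lock<std::mutex> guard(mtx);
        if (!cv.wait_for(guard, timeout, [this] { return !idle.empty(); }))
        {
            return nullptr;
        }
        worker* w = idle.front();
        idle.pop_front();
        return w;
    }
};

worker* acquire_worker(idle_queue& q, std::size_t& num_workers, std::size_t max_workers)
{
    for (;;)
    {
        worker* w = q.try_pop_for(std::chrono::milliseconds(500));
        if (w) return w;
        if (num_workers < max_workers)
        {
            ++num_workers;
            return new worker; // stand-in for new_worker() above
        }
        // at the cap: keep waiting for an existing worker to become idle
    }
}

int main()
{
    idle_queue q;
    std::size_t num_workers = 0;
    worker* w = acquire_worker(q, num_workers, 4); // queue empty: grows the pool
    delete w;
    return 0;
}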
......@@ -95,7 +95,7 @@ void yielding_actor::dequeue(invoke_rules& rules)
{
    queue_node_buffer buffer;
    yield_until_not_empty();
-   std::unique_ptr<queue_node> node(m_mailbox.pop());
+   queue_node_ptr node(m_mailbox.pop());
    while (dq(node, rules, buffer) != dq_done)
    {
        yield_until_not_empty();
......@@ -115,7 +115,7 @@ void yielding_actor::dequeue(timed_invoke_rules& rules)
    //    request_timeout(rules.timeout());
    //}
    yield_until_not_empty();
-   std::unique_ptr<queue_node> node(m_mailbox.pop());
+   queue_node_ptr node(m_mailbox.pop());
    switch (dq(node, rules, buffer))
    {
        case dq_done:
......