Commit 8bcc85aa authored by neverlord's avatar neverlord

prefetch optimization

parent d4fdf6a7
...@@ -178,6 +178,7 @@ nobase_library_include_HEADERS = \ ...@@ -178,6 +178,7 @@ nobase_library_include_HEADERS = \
cppa/util/compare_tuples.hpp \ cppa/util/compare_tuples.hpp \
cppa/util/concat_type_lists.hpp \ cppa/util/concat_type_lists.hpp \
cppa/util/conjunction.hpp \ cppa/util/conjunction.hpp \
cppa/util/default_deallocator.hpp \
cppa/util/disable_if.hpp \ cppa/util/disable_if.hpp \
cppa/util/disjunction.hpp \ cppa/util/disjunction.hpp \
cppa/util/duration.hpp \ cppa/util/duration.hpp \
......
This diff is collapsed.
#!/bin/bash #!/bin/bash
read -r cmd read -r cmd
export JAVA_OPTS="-Xmx4096M" export JAVA_OPTS="-Xmx4096M"
/usr/bin/time -p -f "%e" $cmd 2>&1 #| grep "^real" | grep -o -P "[0-9]*(\.[0-9]*)?" if [[ $(uname) == "Darwin" ]] ; then
/usr/bin/time -p $cmd 2>&1
else
/usr/bin/time -p -f "%e" $cmd 2>&1
fi
This diff is collapsed.
#!/bin/bash #!/bin/bash
for i in *.scala; do if [[ $# -eq 0 ]] ; then
echo "scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar \"$i\"" for i in *.scala; do
scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar "$i" echo "scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar \"$i\""
done scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar "$i"
done
elif [[ $# -eq 1 ]] ; then
echo "scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar \"$1.scala\""
scalac -cp ../../akka-microkernel-1.2/lib/akka/akka-actor-1.2.jar "$1.scala"
fi
echo done echo done
...@@ -247,3 +247,4 @@ src/receive.cpp ...@@ -247,3 +247,4 @@ src/receive.cpp
benchmarks/actor_creation.cpp benchmarks/actor_creation.cpp
benchmarks/mailbox_performance.cpp benchmarks/mailbox_performance.cpp
benchmarks/mixed_case.cpp benchmarks/mixed_case.cpp
cppa/util/default_deallocator.hpp
...@@ -68,26 +68,107 @@ class abstract_actor : public Base ...@@ -68,26 +68,107 @@ class abstract_actor : public Base
std::vector<attachable_ptr> m_attachables; std::vector<attachable_ptr> m_attachables;
protected: public:
class queue_node_ptr;
struct queue_node_deallocator;
struct queue_node struct queue_node
{ {
friend class abstract_actor;
friend class queue_node_ptr;
friend struct queue_node_deallocator;
queue_node* next; queue_node* next;
std::atomic<queue_node*>* owner;
actor_ptr sender; actor_ptr sender;
any_tuple msg; any_tuple msg;
private: // you have to be a friend to create or destroy a node
inline ~queue_node() { }
queue_node() : next(nullptr), owner(nullptr) { }
queue_node(actor* from, any_tuple&& content) queue_node(actor* from, any_tuple&& content)
: next(nullptr), sender(from), msg(std::move(content)) : next(nullptr), owner(nullptr), sender(from), msg(std::move(content))
{ {
} }
queue_node(actor* from, any_tuple const& content) queue_node(actor* from, any_tuple const& content)
: next(nullptr), sender(from), msg(content) : next(nullptr), owner(nullptr), sender(from), msg(content)
{ {
} }
}; };
util::single_reader_queue<queue_node> m_mailbox; struct queue_node_deallocator
{
inline void operator()(queue_node* ptr)
{
if (ptr)
{
if (ptr->owner != nullptr)
{
ptr->sender.reset();
ptr->msg = any_tuple();
auto owner = ptr->owner;
ptr->next = owner->load();
for (;;)
{
if (owner->compare_exchange_weak(ptr->next, ptr)) return;
}
}
else
{
delete ptr;
}
}
}
};
class queue_node_ptr
{
queue_node* m_ptr;
queue_node_deallocator d;
public:
inline queue_node_ptr(queue_node* ptr = nullptr) : m_ptr(ptr)
{
}
inline queue_node_ptr(queue_node_ptr&& other) : m_ptr(other.m_ptr)
{
other.m_ptr = nullptr;
}
inline ~queue_node_ptr()
{
d(m_ptr);
}
inline queue_node* operator->() { return m_ptr; }
queue_node* release()
{
auto result = m_ptr;
m_ptr = nullptr;
return result;
}
inline void reset(queue_node* ptr = nullptr)
{
d(m_ptr);
m_ptr = ptr;
}
inline operator bool() const { return m_ptr != nullptr; }
};
protected:
queue_node m_prefetched_nodes[10];
std::atomic<queue_node*> m_prefetched;
util::single_reader_queue<queue_node,queue_node_deallocator> m_mailbox;
private: private:
...@@ -129,10 +210,35 @@ class abstract_actor : public Base ...@@ -129,10 +210,35 @@ class abstract_actor : public Base
protected: protected:
// Obtains a queue_node for a new mailbox entry: tries to reuse one of the
// actor's preallocated nodes via a lock-free pop from the m_prefetched
// stack, and heap-allocates only when the pool is empty.
// T&& is a forwarding reference, so any_tuple is accepted by lvalue
// (copied) or rvalue (moved), matching the two enqueue() overloads.
template<typename T>
queue_node* fetch_node(actor* sender, T&& msg)
{
queue_node* result = m_prefetched.load();
while (result)
{
// NOTE(review): reading result->next here while other threads can
// concurrently pop and push nodes leaves a classic ABA window on the
// CAS below -- confirm the producers/consumer pattern on m_prefetched
// rules this out, or use a versioned/tagged head pointer.
queue_node* next = result->next;
if (m_prefetched.compare_exchange_weak(result, next))
{
// pop succeeded: detach the node and fill in the new message
result->next = nullptr;
result->sender.reset(sender);
result->msg = std::forward<T>(msg);
return result;
}
// on failure compare_exchange_weak reloaded 'result'; retry
}
// pool exhausted: fall back to the heap; such nodes keep owner == nullptr
// (set by the queue_node constructor), so the deallocator deletes them
return new queue_node(sender, std::forward<T>(msg));
}
template<typename... Args> template<typename... Args>
abstract_actor(Args&&... args) : Base(std::forward<Args>(args)...) abstract_actor(Args&&... args) : Base(std::forward<Args>(args)...)
, m_exit_reason(exit_reason::not_exited) , m_exit_reason(exit_reason::not_exited)
{ {
for (int i = 0; i < 9; ++i)
{
m_prefetched_nodes[i].next = &(m_prefetched_nodes[i+1]);
m_prefetched_nodes[i].owner = &m_prefetched;
}
m_prefetched_nodes[9].owner = &m_prefetched;
m_prefetched.store(m_prefetched_nodes);
} }
void cleanup(std::uint32_t reason) void cleanup(std::uint32_t reason)
......
...@@ -79,13 +79,13 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor ...@@ -79,13 +79,13 @@ class abstract_event_based_actor : public detail::abstract_scheduled_actor
private: private:
void handle_message(std::unique_ptr<queue_node>& node, void handle_message(queue_node_ptr& node,
invoke_rules& behavior); invoke_rules& behavior);
void handle_message(std::unique_ptr<queue_node>& node, void handle_message(queue_node_ptr& node,
timed_invoke_rules& behavior); timed_invoke_rules& behavior);
void handle_message(std::unique_ptr<queue_node>& node); void handle_message(queue_node_ptr& node);
protected: protected:
......
...@@ -65,7 +65,8 @@ class abstract_scheduled_actor : public abstract_actor<local_actor> ...@@ -65,7 +65,8 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
typedef abstract_actor super; typedef abstract_actor super;
typedef super::queue_node queue_node; typedef super::queue_node queue_node;
typedef util::singly_linked_list<queue_node> queue_node_buffer; typedef util::singly_linked_list<queue_node,super::queue_node_deallocator>
queue_node_buffer;
enum dq_result enum dq_result
{ {
...@@ -84,7 +85,7 @@ class abstract_scheduled_actor : public abstract_actor<local_actor> ...@@ -84,7 +85,7 @@ class abstract_scheduled_actor : public abstract_actor<local_actor>
filter_result filter_msg(any_tuple const& msg); filter_result filter_msg(any_tuple const& msg);
dq_result dq(std::unique_ptr<queue_node>& node, dq_result dq(queue_node_ptr& node,
invoke_rules_base& rules, invoke_rules_base& rules,
queue_node_buffer& buffer); queue_node_buffer& buffer);
......
...@@ -61,6 +61,7 @@ class converted_thread_context : public abstract_actor<local_actor> ...@@ -61,6 +61,7 @@ class converted_thread_context : public abstract_actor<local_actor>
typedef abstract_actor<local_actor> super; typedef abstract_actor<local_actor> super;
typedef super::queue_node queue_node; typedef super::queue_node queue_node;
typedef super::queue_node_ptr queue_node_ptr;
public: public:
...@@ -79,14 +80,15 @@ class converted_thread_context : public abstract_actor<local_actor> ...@@ -79,14 +80,15 @@ class converted_thread_context : public abstract_actor<local_actor>
void dequeue(timed_invoke_rules& rules) /*override*/; void dequeue(timed_invoke_rules& rules) /*override*/;
inline util::single_reader_queue<queue_node>& mailbox() inline decltype(m_mailbox)& mailbox()
{ {
return m_mailbox; return m_mailbox;
} }
private: private:
typedef util::singly_linked_list<queue_node> queue_node_buffer; typedef util::singly_linked_list<queue_node,super::queue_node_deallocator>
queue_node_buffer;
enum throw_on_exit_result enum throw_on_exit_result
{ {
...@@ -95,7 +97,7 @@ class converted_thread_context : public abstract_actor<local_actor> ...@@ -95,7 +97,7 @@ class converted_thread_context : public abstract_actor<local_actor>
}; };
// returns true if node->msg was accepted by rules // returns true if node->msg was accepted by rules
bool dq(std::unique_ptr<queue_node>& node, bool dq(queue_node_ptr& node,
invoke_rules_base& rules, invoke_rules_base& rules,
queue_node_buffer& buffer); queue_node_buffer& buffer);
......
...@@ -49,7 +49,8 @@ class yielding_actor : public abstract_scheduled_actor ...@@ -49,7 +49,8 @@ class yielding_actor : public abstract_scheduled_actor
typedef abstract_scheduled_actor super; typedef abstract_scheduled_actor super;
typedef super::queue_node queue_node; typedef super::queue_node queue_node;
typedef util::singly_linked_list<queue_node> queue_node_buffer; typedef super::queue_node_ptr queue_node_ptr;
typedef super::queue_node_buffer queue_node_buffer;
util::fiber m_fiber; util::fiber m_fiber;
scheduled_actor* m_behavior; scheduled_actor* m_behavior;
......
#ifndef DEFAULT_DEALLOCATOR_HPP
#define DEFAULT_DEALLOCATOR_HPP

namespace cppa { namespace util {

/**
 * @brief Default deallocation policy used by single_reader_queue and
 *        singly_linked_list: destroys @p ptr with @c delete.
 * @tparam T Element type; must have an accessible destructor.
 */
template<typename T>
struct default_deallocator
{
    // stateless, so the call operator can be const;
    // delete on a null pointer is a well-defined no-op
    inline void operator()(T* ptr) const { delete ptr; }
};

} } // namespace cppa::util

#endif // DEFAULT_DEALLOCATOR_HPP
...@@ -34,17 +34,19 @@ ...@@ -34,17 +34,19 @@
#include <atomic> #include <atomic>
#include "cppa/detail/thread.hpp" #include "cppa/detail/thread.hpp"
#include "cppa/util/default_deallocator.hpp"
namespace cppa { namespace util { namespace cppa { namespace util {
/** /**
* @brief An intrusive, thread safe queue implementation. * @brief An intrusive, thread safe queue implementation.
*/ */
template<typename T> template<typename T, class Deallocator = default_deallocator<T> >
class single_reader_queue class single_reader_queue
{ {
typedef detail::unique_lock<detail::mutex> lock_type; typedef detail::unique_lock<detail::mutex> lock_type;
Deallocator d;
public: public:
...@@ -170,7 +172,8 @@ class single_reader_queue ...@@ -170,7 +172,8 @@ class single_reader_queue
{ {
element_type* tmp = e; element_type* tmp = e;
e = e->next; e = e->next;
delete tmp; d(tmp);
//delete tmp;
} }
} }
......
...@@ -32,13 +32,15 @@ ...@@ -32,13 +32,15 @@
#define SINGLY_LINKED_LIST_HPP #define SINGLY_LINKED_LIST_HPP
#include <utility> #include <utility>
#include "cppa/util/default_deallocator.hpp"
namespace cppa { namespace util { namespace cppa { namespace util {
template<typename T> template<typename T, class Deallocator = default_deallocator<T> >
class singly_linked_list class singly_linked_list
{ {
Deallocator d;
T* m_head; T* m_head;
T* m_tail; T* m_tail;
...@@ -82,7 +84,7 @@ class singly_linked_list ...@@ -82,7 +84,7 @@ class singly_linked_list
while (m_head) while (m_head)
{ {
T* next = m_head->next; T* next = m_head->next;
delete m_head; d(m_head);
m_head = next; m_head = next;
} }
m_head = m_tail = nullptr; m_head = m_tail = nullptr;
......
...@@ -49,14 +49,14 @@ void abstract_event_based_actor::dequeue(timed_invoke_rules&) ...@@ -49,14 +49,14 @@ void abstract_event_based_actor::dequeue(timed_invoke_rules&)
quit(exit_reason::unallowed_function_call); quit(exit_reason::unallowed_function_call);
} }
void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& node, void abstract_event_based_actor::handle_message(queue_node_ptr& node,
invoke_rules& behavior) invoke_rules& behavior)
{ {
// no need to handle result // no need to handle result
(void) dq(node, behavior, m_buffer); (void) dq(node, behavior, m_buffer);
} }
void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& node, void abstract_event_based_actor::handle_message(queue_node_ptr& node,
timed_invoke_rules& behavior) timed_invoke_rules& behavior)
{ {
switch (dq(node, behavior, m_buffer)) switch (dq(node, behavior, m_buffer))
...@@ -83,7 +83,7 @@ void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& nod ...@@ -83,7 +83,7 @@ void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& nod
} }
} }
void abstract_event_based_actor::handle_message(std::unique_ptr<queue_node>& node) void abstract_event_based_actor::handle_message(queue_node_ptr& node)
{ {
auto& bhvr = m_loop_stack.top(); auto& bhvr = m_loop_stack.top();
if (bhvr.is_left()) if (bhvr.is_left())
...@@ -107,7 +107,7 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback) ...@@ -107,7 +107,7 @@ void abstract_event_based_actor::resume(util::fiber*, resume_callback* callback)
callback->exec_done(); callback->exec_done();
}; };
std::unique_ptr<queue_node> node; queue_node_ptr node;
for (;;) for (;;)
//do //do
{ {
......
...@@ -91,12 +91,14 @@ void abstract_scheduled_actor::enqueue_node(queue_node* node) ...@@ -91,12 +91,14 @@ void abstract_scheduled_actor::enqueue_node(queue_node* node)
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple&& msg) void abstract_scheduled_actor::enqueue(actor* sender, any_tuple&& msg)
{ {
enqueue_node(new queue_node(sender, std::move(msg))); enqueue_node(fetch_node(sender, std::move(msg)));
//enqueue_node(new queue_node(sender, std::move(msg)));
} }
void abstract_scheduled_actor::enqueue(actor* sender, any_tuple const& msg) void abstract_scheduled_actor::enqueue(actor* sender, any_tuple const& msg)
{ {
enqueue_node(new queue_node(sender, msg)); enqueue_node(fetch_node(sender, msg));
//enqueue_node(new queue_node(sender, msg));
} }
int abstract_scheduled_actor::compare_exchange_state(int expected, int abstract_scheduled_actor::compare_exchange_state(int expected,
...@@ -146,7 +148,7 @@ auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result ...@@ -146,7 +148,7 @@ auto abstract_scheduled_actor::filter_msg(const any_tuple& msg) -> filter_result
return ordinary_message; return ordinary_message;
} }
auto abstract_scheduled_actor::dq(std::unique_ptr<queue_node>& node, auto abstract_scheduled_actor::dq(queue_node_ptr& node,
invoke_rules_base& rules, invoke_rules_base& rules,
queue_node_buffer& buffer) -> dq_result queue_node_buffer& buffer) -> dq_result
{ {
......
...@@ -61,18 +61,20 @@ void converted_thread_context::cleanup(std::uint32_t reason) ...@@ -61,18 +61,20 @@ void converted_thread_context::cleanup(std::uint32_t reason)
void converted_thread_context::enqueue(actor* sender, any_tuple&& msg) void converted_thread_context::enqueue(actor* sender, any_tuple&& msg)
{ {
m_mailbox.push_back(new queue_node(sender, std::move(msg))); m_mailbox.push_back(fetch_node(sender, std::move(msg)));
//m_mailbox.push_back(new queue_node(sender, std::move(msg)));
} }
void converted_thread_context::enqueue(actor* sender, const any_tuple& msg) void converted_thread_context::enqueue(actor* sender, const any_tuple& msg)
{ {
m_mailbox.push_back(new queue_node(sender, msg)); m_mailbox.push_back(fetch_node(sender, msg));
//m_mailbox.push_back(new queue_node(sender, msg));
} }
void converted_thread_context::dequeue(invoke_rules& rules) /*override*/ void converted_thread_context::dequeue(invoke_rules& rules) /*override*/
{ {
queue_node_buffer buffer; queue_node_buffer buffer;
std::unique_ptr<queue_node> node(m_mailbox.pop()); queue_node_ptr node(m_mailbox.pop());
while (dq(node, rules, buffer) == false) while (dq(node, rules, buffer) == false)
{ {
node.reset(m_mailbox.pop()); node.reset(m_mailbox.pop());
...@@ -84,7 +86,7 @@ void converted_thread_context::dequeue(timed_invoke_rules& rules) /*override*/ ...@@ -84,7 +86,7 @@ void converted_thread_context::dequeue(timed_invoke_rules& rules) /*override*/
auto timeout = now(); auto timeout = now();
timeout += rules.timeout(); timeout += rules.timeout();
queue_node_buffer buffer; queue_node_buffer buffer;
std::unique_ptr<queue_node> node(m_mailbox.try_pop()); queue_node_ptr node(m_mailbox.try_pop());
do do
{ {
while (!node) while (!node)
...@@ -119,7 +121,7 @@ converted_thread_context::throw_on_exit(const any_tuple& msg) ...@@ -119,7 +121,7 @@ converted_thread_context::throw_on_exit(const any_tuple& msg)
return not_an_exit_signal; return not_an_exit_signal;
} }
bool converted_thread_context::dq(std::unique_ptr<queue_node>& node, bool converted_thread_context::dq(queue_node_ptr& node,
invoke_rules_base& rules, invoke_rules_base& rules,
queue_node_buffer& buffer) queue_node_buffer& buffer)
{ {
......
...@@ -99,11 +99,13 @@ struct scheduler_helper ...@@ -99,11 +99,13 @@ struct scheduler_helper
void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{ {
typedef abstract_actor<local_actor>::queue_node_ptr queue_node_ptr;
// setup & local variables // setup & local variables
self.set(m_self.get()); self.set(m_self.get());
auto& queue = m_self->mailbox(); auto& queue = m_self->mailbox();
std::multimap<decltype(detail::now()), decltype(queue.pop())> messages; std::multimap<decltype(detail::now()), queue_node_ptr> messages;
decltype(queue.pop()) msg_ptr = nullptr; queue_node_ptr msg_ptr;
//decltype(queue.pop()) msg_ptr = nullptr;
decltype(detail::now()) now; decltype(detail::now()) now;
bool done = false; bool done = false;
// message handling rules // message handling rules
...@@ -117,8 +119,6 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -117,8 +119,6 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
timeout += d; timeout += d;
messages.insert(std::make_pair(std::move(timeout), messages.insert(std::make_pair(std::move(timeout),
std::move(msg_ptr))); std::move(msg_ptr)));
// do not delete this msg_ptr (now)
msg_ptr = nullptr;
}, },
on<atom(":_DIE")>() >> [&]() on<atom(":_DIE")>() >> [&]()
{ {
...@@ -128,11 +128,11 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -128,11 +128,11 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
// loop // loop
while (!done) while (!done)
{ {
while (msg_ptr == nullptr) while (!msg_ptr)
{ {
if (messages.empty()) if (messages.empty())
{ {
msg_ptr = queue.pop(); msg_ptr.reset(queue.pop());
} }
else else
{ {
...@@ -141,7 +141,8 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -141,7 +141,8 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
auto it = messages.begin(); auto it = messages.begin();
while (it != messages.end() && (it->first) <= now) while (it != messages.end() && (it->first) <= now)
{ {
auto ptr = it->second; abstract_actor<local_actor>::queue_node_ptr ptr(std::move(it->second));
//auto ptr = it->second;
auto whom = const_cast<actor_ptr*>( auto whom = const_cast<actor_ptr*>(
reinterpret_cast<actor_ptr const*>( reinterpret_cast<actor_ptr const*>(
ptr->msg.at(1))); ptr->msg.at(1)));
...@@ -152,7 +153,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -152,7 +153,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
} }
messages.erase(it); messages.erase(it);
it = messages.begin(); it = messages.begin();
delete ptr; //delete ptr;
} }
// wait for next message or next timeout // wait for next message or next timeout
if (it != messages.end()) if (it != messages.end())
...@@ -162,7 +163,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self) ...@@ -162,7 +163,7 @@ void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
} }
} }
handle_msg(msg_ptr->msg); handle_msg(msg_ptr->msg);
delete msg_ptr; //delete msg_ptr;
} }
} }
......
...@@ -163,11 +163,13 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue, ...@@ -163,11 +163,13 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
//size_t num_workers = std::max<size_t>(thread::hardware_concurrency(), 2); //size_t num_workers = std::max<size_t>(thread::hardware_concurrency(), 2);
// init with 2 threads per core but no less than 4 // init with 2 threads per core but no less than 4
size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 4); size_t num_workers = std::max<size_t>(thread::hardware_concurrency() * 2, 4);
auto new_worker = [&]() size_t max_workers = num_workers * 4;
auto new_worker = [&]() -> worker*
{ {
worker_ptr wptr(new worker(&wqueue, jqueue)); worker_ptr wptr(new worker(&wqueue, jqueue));
wptr->start(); wptr->start();
workers.push_back(std::move(wptr)); workers.push_back(std::move(wptr));
return workers.back().get();
}; };
for (size_t i = 0; i < num_workers; ++i) for (size_t i = 0; i < num_workers; ++i)
{ {
...@@ -185,19 +187,30 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue, ...@@ -185,19 +187,30 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
} }
else else
{ {
// fetch next idle worker (wait up to 500ms) /*
//worker* w = nullptr; // fetch next idle worker
//auto timeout = now(); worker* w = nullptr;
//timeout += std::chrono::milliseconds(500); if (num_workers < max_workers)
/*while (!w)
{ {
w = wqueue.try_pop(timeout); w = wqueue.try_pop();
// all workers are blocked since 500ms, start a new one
if (!w) if (!w)
{ {
new_worker(); // fetch next idle worker (wait up to 500ms)
timeout = now();
timeout += std::chrono::milliseconds(500);
w = wqueue.try_pop(timeout);
// all workers are blocked since 500ms, start a new one
if (!w)
{
w = new_worker();
++num_workers;
}
} }
} }
else
{
w = wqueue.pop();
}
*/ */
worker* w = wqueue.pop(); worker* w = wqueue.pop();
// lifetime scope of guard // lifetime scope of guard
......
...@@ -95,7 +95,7 @@ void yielding_actor::dequeue(invoke_rules& rules) ...@@ -95,7 +95,7 @@ void yielding_actor::dequeue(invoke_rules& rules)
{ {
queue_node_buffer buffer; queue_node_buffer buffer;
yield_until_not_empty(); yield_until_not_empty();
std::unique_ptr<queue_node> node(m_mailbox.pop()); queue_node_ptr node(m_mailbox.pop());
while (dq(node, rules, buffer) != dq_done) while (dq(node, rules, buffer) != dq_done)
{ {
yield_until_not_empty(); yield_until_not_empty();
...@@ -115,7 +115,7 @@ void yielding_actor::dequeue(timed_invoke_rules& rules) ...@@ -115,7 +115,7 @@ void yielding_actor::dequeue(timed_invoke_rules& rules)
// request_timeout(rules.timeout()); // request_timeout(rules.timeout());
//} //}
yield_until_not_empty(); yield_until_not_empty();
std::unique_ptr<queue_node> node(m_mailbox.pop()); queue_node_ptr node(m_mailbox.pop());
switch (dq(node, rules, buffer)) switch (dq(node, rules, buffer))
{ {
case dq_done: case dq_done:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment