Commit 00525dd1 authored by neverlord

scheduler::future_send

parent a57456b7
......@@ -13,6 +13,8 @@ namespace cppa { namespace detail {
class blocking_message_queue_impl : public message_queue
{
public:
struct queue_node
{
queue_node* next;
......@@ -20,7 +22,11 @@ class blocking_message_queue_impl : public message_queue
queue_node(const message& from);
};
util::single_reader_queue<queue_node> m_queue;
typedef util::single_reader_queue<queue_node> queue_type;
private:
queue_type m_queue;
protected:
......@@ -44,6 +50,16 @@ class blocking_message_queue_impl : public message_queue
public:
// Mutable access to the underlying single-reader mailbox queue.
// Intended for the owning (reader) side only — see queue_type's
// single-reader contract.
inline queue_type& queue()
{
    return m_queue;
}
// Read-only access to the underlying single-reader mailbox queue.
inline const queue_type& queue() const
{
    return m_queue;
}
virtual void enqueue(const message& msg) /*override*/;
};
......
......@@ -23,11 +23,11 @@ class converted_thread_context : public abstract_actor<context>
typedef abstract_actor<context> super;
public:
// mailbox implementation
blocking_message_queue m_mailbox;
public:
message_queue& mailbox() /*override*/;
const message_queue& mailbox() const /*override*/;
......
#ifndef SCHEDULER_HPP
#define SCHEDULER_HPP
#include <chrono>
#include <memory>
#include <cstdint>
#include "cppa/atom.hpp"
#include "cppa/tuple.hpp"
#include "cppa/actor.hpp"
#include "cppa/attachable.hpp"
#include "cppa/scheduling_hint.hpp"
#include "cppa/util/duration.hpp"
namespace cppa {
// forward declarations
class context;
class actor_behavior;
class scheduler_helper;
context* self();
/**
* @brief
*/
......@@ -21,6 +31,8 @@ class scheduler
scheduler_helper* m_helper;
channel* future_send_helper();
protected:
scheduler();
......@@ -63,6 +75,14 @@ class scheduler
*/
virtual void await_others_done();
/**
 * @brief Sends <tt>data...</tt> to @p whom after the relative delay @p d.
 *
 * The message is not delivered directly; it is wrapped together with the
 * duration into a tuple and enqueued to the scheduler's timed-send helper
 * actor (see future_send_helper()), which delivers the payload once the
 * delay has elapsed.
 *
 * @param whom Receiver of the delayed message (also used as sender here;
 *             the helper re-sends with the original sender attached).
 * @param d    Relative delay, convertible to util::duration
 *             (e.g. a std::chrono duration).
 * @param data Message content; at least one element is required
 *             (enforced at compile time).
 */
template<typename Duration, typename... Data>
void future_send(actor_ptr whom, const Duration& d, const Data&... data)
{
    static_assert(sizeof...(Data) > 0, "no message to send");
    // prefix the payload with the duration so the helper can match on it
    any_tuple tup = make_tuple(util::duration(d), data...);
    future_send_helper()->enqueue(message(whom, whom, tup));
}
};
/**
......
......@@ -37,6 +37,12 @@ class single_reader_queue
return take_head();
}
// Waits until an element is available or the absolute point in time
// @p timeout is reached. Returns the dequeued element, or nullptr if
// the deadline passed without data arriving.
element_type* try_pop(boost::system_time timeout)
{
    if (timed_wait_for_data(timeout))
    {
        return take_head();
    }
    return nullptr;
}
/*
element_type* try_pop(unsigned long ms_timeout)
{
boost::system_time st = boost::get_system_time();
......@@ -47,11 +53,7 @@ class single_reader_queue
}
return nullptr;
}
//element_type* peek()
//{
// return (m_head || fetch_new_data()) ? m_head : nullptr;
//}
*/
/**
* @warning call only from the reader (owner)
......@@ -94,19 +96,9 @@ class single_reader_queue
for (;;)
{
new_element->next = e;
if (!e)
{
if (m_tail.compare_exchange_weak(e, new_element))
{
return true;
}
}
else
{
if (m_tail.compare_exchange_weak(e, new_element))
{
return false;
}
return (e == nullptr);
}
}
}
......
......@@ -5,8 +5,13 @@
#endif
#include <mutex>
#include <iostream>
#include "cppa/on.hpp"
#include "cppa/context.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/to_string.hpp"
#include "cppa/detail/actor_count.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/thread_pool_scheduler.hpp"
......@@ -14,6 +19,8 @@
namespace {
typedef std::uint32_t ui32;
std::atomic<cppa::scheduler*> m_instance;
/*
......@@ -39,19 +46,15 @@ s_cleanup_helper;
namespace cppa {
class scheduler_helper
struct scheduler_helper
{
cppa::intrusive_ptr<cppa::context> m_worker;
static void worker_loop(cppa::intrusive_ptr<cppa::context> m_self);
public:
typedef intrusive_ptr<detail::converted_thread_context> ptr_type;
scheduler_helper() : m_worker(new detail::converted_thread_context)
{
// do NOT increase actor count; worker is "invisible"
boost::thread(&scheduler_helper::worker_loop, m_worker).detach();
boost::thread(&scheduler_helper::time_emitter, m_worker).detach();
}
~scheduler_helper()
......@@ -59,11 +62,97 @@ class scheduler_helper
m_worker->enqueue(message(m_worker, m_worker, atom(":_DIE")));
}
//std::multimap<boost::system_time, cppa::any_tuple> m_messages;
ptr_type m_worker;
private:
static void time_emitter(ptr_type m_self);
};
void scheduler_helper::worker_loop(cppa::intrusive_ptr<cppa::context> m_self)
// Thread entry point of the scheduler's timed-send worker.
// Consumes (util::duration, payload...) tuples produced by
// scheduler::future_send(), stores the payload keyed by its absolute due
// time, and re-sends it to the original sender once that time is reached.
// The loop terminates when an atom(":_DIE") message arrives.
// NOTE(review): presumably runs detached and exclusively owns its mailbox
// reader side — confirm against scheduler_helper's constructor.
void scheduler_helper::time_emitter(scheduler_helper::ptr_type m_self)
{
    // setup & local variables
    set_self(m_self.get()); // install m_self as this thread's context (self())
    auto& queue = m_self->m_mailbox.queue();
    // a scheduled delivery: (receiver, message content)
    typedef std::pair<cppa::actor_ptr, cppa::any_tuple> future_msg;
    // pending deliveries ordered by absolute due time (earliest first)
    std::multimap<boost::system_time, future_msg> messages;
    decltype(queue.pop()) msg_ptr = nullptr; // message currently being handled
    boost::system_time now;
    bool done = false;
    // message handling rules; the duration-handler reads the full message
    // through the captured msg_ptr, not only the matched duration argument
    auto rules =
    (
        on<util::duration, any_type*>() >> [&](util::duration d)
        {
            // drop the leading duration; the remainder is the payload
            any_tuple tup = msg_ptr->msg.content().tail(1);
            if (!tup.empty())
            {
                // calculate timeout (absolute due time = now + d)
                boost::system_time timeout = boost::get_system_time();
                switch (d.unit)
                {
                    case util::time_unit::seconds:
                        timeout += boost::posix_time::seconds(d.count);
                        break;
                    case util::time_unit::milliseconds:
                        timeout += boost::posix_time::milliseconds(d.count);
                        break;
                    case util::time_unit::microseconds:
                        timeout += boost::posix_time::microseconds(d.count);
                        break;
                    default:
                        // unsupported duration type; silently drop the message
                        return;
                }
                future_msg fmsg(msg_ptr->msg.sender(), tup);
                messages.insert(std::make_pair(std::move(timeout),
                                               std::move(fmsg)));
            }
        },
        on<atom(":_DIE")>() >> [&]()
        {
            done = true;
        }
    );
    // loop
    while (!done)
    {
        // fetch the next mailbox message, delivering due payloads meanwhile
        while (msg_ptr == nullptr)
        {
            if (messages.empty())
            {
                // nothing scheduled: block indefinitely for new input
                msg_ptr = queue.pop();
            }
            else
            {
                now = boost::get_system_time();
                // handle timeouts (send messages whose due time has passed)
                auto it = messages.begin();
                while (it != messages.end() && (it->first) <= now)
                {
                    auto& whom = (it->second).first;
                    auto& what = (it->second).second;
                    whom->enqueue(message(whom, whom, what));
                    // erase invalidates it; restart from the new earliest entry
                    messages.erase(it);
                    it = messages.begin();
                }
                // wait for next message, but no longer than until the
                // earliest remaining due time
                if (it != messages.end())
                {
                    msg_ptr = queue.try_pop(it->first);
                }
            }
        }
        rules(msg_ptr->msg.content());
        delete msg_ptr; // queue.pop() transfers ownership of the node
        msg_ptr = nullptr;
    }
}
scheduler::scheduler() : m_helper(new scheduler_helper)
......@@ -75,6 +164,11 @@ scheduler::~scheduler()
delete m_helper;
}
// Returns the channel of the timed-send worker actor; future_send()
// enqueues its (duration, payload) tuples here.
channel* scheduler::future_send_helper()
{
    auto& worker = m_helper->m_worker;
    return worker.get();
}
void scheduler::await_others_done()
{
detail::actor_count::get().wait_until((unchecked_self() == nullptr) ? 0 : 1);
......
......@@ -109,6 +109,7 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
{
workers.push_back(worker_ptr(new worker(&wqueue, jqueue)));
}
boost::system_time timeout;
bool done = false;
// loop
do
......@@ -123,9 +124,11 @@ void thread_pool_scheduler::supervisor_loop(job_queue* jqueue,
{
// fetch waiting worker (wait up to 500ms)
worker* w = nullptr;
timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(500);
while (!w)
{
w = wqueue.try_pop(500);
w = wqueue.try_pop(timeout);
// all workers are blocked since 500ms, start a new one
if (!w)
{
......
#include <chrono>
#include <iostream>
#include <functional>
......@@ -7,6 +8,7 @@
#include "cppa/on.hpp"
#include "cppa/cppa.hpp"
#include "cppa/actor.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/to_string.hpp"
#include "cppa/exit_reason.hpp"
......@@ -29,18 +31,27 @@ size_t test__spawn()
monitor(pong_actor);
link(pong_actor);
int i = 0;
int flags = 0;
get_scheduler()->future_send(self(), std::chrono::seconds(1),
atom("FooBar"));
// wait for :Down and :Exit messages of pong
receive_while([&i]() { return ++i <= 2; })
receive_while([&i]() { return ++i <= 3; })
(
on<atom(":Exit"), std::uint32_t>() >> [&](std::uint32_t reason)
{
CPPA_CHECK_EQUAL(reason, exit_reason::user_defined);
CPPA_CHECK_EQUAL(last_received().sender(), pong_actor);
flags |= 0x01;
},
on<atom(":Down"), std::uint32_t>() >> [&](std::uint32_t reason)
{
CPPA_CHECK_EQUAL(reason, exit_reason::user_defined);
CPPA_CHECK_EQUAL(last_received().sender(), pong_actor);
flags |= 0x02;
},
on<atom("FooBar")>() >> [&]()
{
flags |= 0x04;
},
others() >> [&]()
{
......@@ -49,6 +60,7 @@ size_t test__spawn()
);
// wait for termination of all spawned actors
await_all_others_done();
CPPA_CHECK_EQUAL(flags, 0x07);
// mailbox has to be empty
message msg;
while (try_receive(msg))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment