Commit aa1db2b5 authored by neverlord

maintenance

parent 783cd02d
@@ -35,80 +35,12 @@
#include "utility.hpp"
//#include "boost/threadpool.hpp"
#include "cppa/cppa.hpp"
#include "cppa/match.hpp"
#include "cppa/fsm_actor.hpp"
#include "cppa/detail/mock_scheduler.hpp"
#include "cppa/detail/yielding_actor.hpp"
/*
namespace cppa { namespace detail {
struct pool_job
{
abstract_event_based_actor* ptr;
pool_job(abstract_event_based_actor* mptr) : ptr(mptr) { }
void operator()()
{
struct handler : abstract_scheduled_actor::resume_callback
{
abstract_event_based_actor* job;
handler(abstract_event_based_actor* mjob) : job(mjob) { }
void exec_done()
{
if (!job->deref()) delete job;
dec_actor_count();
}
};
handler h{ptr};
ptr->resume(nullptr, &h);
}
};
class boost_threadpool_scheduler : public scheduler
{
boost::threadpool::thread_pool<pool_job> m_pool;
//boost::threadpool::pool m_pool;
public:
void start()
{
m_pool.size_controller().resize(std::max(num_cores(), 4));
}
void stop()
{
m_pool.wait();
}
void enqueue(abstract_scheduled_actor* what)
{
auto job = static_cast<abstract_event_based_actor*>(what);
boost::threadpool::schedule(m_pool, pool_job{job});
}
actor_ptr spawn(abstract_event_based_actor* what)
{
what->attach_to_scheduler(this);
inc_actor_count();
CPPA_MEMORY_BARRIER();
intrusive_ptr<abstract_event_based_actor> ctx(what);
ctx->ref();
return std::move(ctx);
}
actor_ptr spawn(scheduled_actor* bhvr, scheduling_hint)
{
return mock_scheduler::spawn(bhvr);
}
};
} } // namespace cppa::detail
*/
using std::cout;
using std::cerr;
using std::endl;
@@ -191,7 +123,7 @@ struct fsm_chain_master : fsm_actor<fsm_chain_master>
{
init_state =
(
on<atom("init"), int, int, int>() >> [=](int rs, int itv, int n)
on(atom("init"), arg_match) >> [=](int rs, int itv, int n)
{
worker = spawn(new fsm_worker(msgcollector));
iteration = 0;
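The pattern change above replaces an explicit type list with arg_match, which deduces the message's value types from the callback's signature. A minimal equivalence sketch, using the cppa match DSL exactly as it appears in this benchmark:

// both patterns match a message (atom("init"), int, int, int); with
// arg_match the three int types are deduced from the lambda's parameters
on<atom("init"), int, int, int>() >> [=](int rs, int itv, int n) { /*...*/ },
on(atom("init"), arg_match) >> [=](int rs, int itv, int n) { /*...*/ }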
@@ -287,7 +219,7 @@ void chain_master(actor_ptr msgcollector)
auto worker = spawn(worker_fun, msgcollector);
receive
(
on<atom("init"), int, int, int>() >> [&](int rs, int itv, int n)
on(atom("init"), arg_match) >> [&](int rs, int itv, int n)
{
int iteration = 0;
auto next = new_ring(self, rs);
@@ -337,7 +269,7 @@ void supervisor(int num_msgs)
}
template<typename F>
void run_test(F&& spawn_impl,
void run_test(F spawn_impl,
int num_rings, int ring_size,
int initial_token_value, int repetitions)
{
@@ -358,7 +290,7 @@ void run_test(F&& spawn_impl,
void usage()
{
cout << "usage: mailbox_performance [--boost_pool] "
cout << "usage: mailbox_performance "
"(stacked|event-based) (num rings) (ring size) "
"(initial token value) (repetitions)"
<< endl
@@ -368,46 +300,50 @@ void usage()
enum mode_type { event_based, fiber_based };
// converts str to an option<int>; yields an empty option
// if str is not a valid decimal integer
option<int> _2i(std::string const& str)
{
char* endptr = nullptr;
int result = static_cast<int>(strtol(str.c_str(), &endptr, 10));
if (endptr == nullptr || *endptr != '\0')
{
return {};
}
return result;
}
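_2i serves as a projection in the match expression below: it converts a command-line string to an option<int>, and an empty option makes the enclosing pattern fail. Its expected behavior, sketched:

_2i("42"); // option<int> holding 42
_2i("4x"); // empty option: strtol stops at 'x', so *endptr != '\0'
_2i("");   // caveat: yields 0, since endptr points at the terminating '\0' for empty input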
int main(int argc, char** argv)
{
announce<factors>();
if (argc != 6) usage();
auto iter = argv;
++iter; // argv[0] (app name)
/*
if (argc == 7)
{
if (strcmp(*iter++, "--boost_pool") == 0)
cppa::set_scheduler(new cppa::detail::boost_threadpool_scheduler);
else usage();
}
*/
mode_type mode;
std::string mode_str = *iter++;
if (mode_str == "event-based") mode = event_based;
else if (mode_str == "stacked") mode = fiber_based;
else usage();
int num_rings = rd<int>(*iter++);
int ring_size = rd<int>(*iter++);
int initial_token_value = rd<int>(*iter++);
int repetitions = rd<int>(*iter++);
int num_msgs = num_rings + (num_rings * repetitions);
switch (mode)
// skip argv[0] (app name)
std::vector<std::string> args{argv + 1, argv + argc};
match(args)
(
on(val<std::string>, _2i, _2i, _2i, _2i) >> [](std::string const& mode,
int num_rings,
int ring_size,
int initial_token_value,
int repetitions)
{
case event_based:
int num_msgs = num_rings + (num_rings * repetitions);
if (mode == "event-based")
{
auto mc = spawn(new fsm_supervisor(num_msgs));
run_test([&]() { return spawn(new fsm_chain_master(mc)); },
num_rings, ring_size, initial_token_value, repetitions);
break;
}
case fiber_based:
else if (mode == "stacked")
{
auto mc = spawn(supervisor, num_msgs);
run_test([&]() { return spawn(chain_master, mc); },
num_rings, ring_size, initial_token_value, repetitions);
break;
}
else
{
usage();
}
},
others() >> usage
);
return 0;
}
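The rewritten main() reuses the message-matching DSL for command-line dispatch: val<std::string> forwards the mode string unchanged, the four _2i projections must all parse as integers, and any mismatch falls through to others() >> usage. A hedged usage sketch (argument values invented for illustration):

std::vector<std::string> args{"event-based", "10", "50", "100", "20"};
match(args)
(
    on(val<std::string>, _2i, _2i, _2i, _2i) >> [](std::string const& mode,
                                                   int rings, int ring_size,
                                                   int token, int reps)
    {
        // reached only if all four numeric arguments parsed successfully
    },
    others() >> usage
);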
@@ -32,13 +32,17 @@
#define ACTOR_PROXY_CACHE_HPP
#include <string>
#include <limits>
#include <vector>
#include <functional>
#include "cppa/actor_proxy.hpp"
#include "cppa/process_information.hpp"
#include "cppa/util/shared_spinlock.hpp"
#include "cppa/detail/thread.hpp"
namespace cppa { namespace detail {
class actor_proxy_cache
@@ -46,22 +50,52 @@ class actor_proxy_cache
public:
typedef std::tuple<std::uint32_t, // actor id
actor_proxy_ptr get(actor_id aid, std::uint32_t process_id,
process_information::node_id_type const& node_id);
// @returns true if pptr was successfully removed, false otherwise
bool erase(actor_proxy_ptr const& pptr);
// applies fun to each cached proxy of the process identified by
// (nid, process_id), then removes those entries
template<typename Fun>
void erase_all(process_information::node_id_type const& nid,
std::uint32_t process_id,
Fun fun)
{
key_tuple lb{nid, process_id, std::numeric_limits<actor_id>::min()};
key_tuple ub{nid, process_id, std::numeric_limits<actor_id>::max()};
{
lock_guard<util::shared_spinlock> guard{m_lock};
auto e = m_entries.end();
auto first = m_entries.lower_bound(lb);
if (first != e)
{
auto last = m_entries.upper_bound(ub);
for (auto i = first; i != last; ++i)
{
fun(i->second);
}
m_entries.erase(first, last);
}
}
}
private:
typedef std::tuple<process_information::node_id_type, // node id
std::uint32_t, // process id
process_information::node_id_type> // node id
actor_id> // (remote) actor id
key_tuple;
private:
struct key_tuple_less
{
bool operator()(key_tuple const& lhs, key_tuple const& rhs) const;
};
util::shared_spinlock m_lock;
std::map<key_tuple, actor_proxy_ptr> m_entries;
std::map<key_tuple, actor_proxy_ptr, key_tuple_less> m_entries;
public:
actor_proxy_ptr get_impl(key_tuple const& key);
actor_proxy_ptr get(key_tuple const& key);
// @returns true if pptr was successfully removed, false otherwise
bool erase(actor_proxy_ptr const& pptr);
};
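erase_all exploits the new key ordering: with keys sorted as (node id, process id, actor id), all proxies of one remote process form a contiguous range in the map, bracketed by the minimum and maximum actor id. A self-contained sketch of the same range trick on a plain std::map (key simplified to standard types):

#include <cstdint>
#include <limits>
#include <map>
#include <string>
#include <tuple>

// simplified stand-in for key_tuple: (node id, process id, actor id)
typedef std::tuple<std::string, std::uint32_t, std::uint32_t> key_type;

void erase_peer(std::map<key_type, int>& entries,
                std::string const& node, std::uint32_t pid)
{
    key_type lb{node, pid, std::numeric_limits<std::uint32_t>::min()};
    key_type ub{node, pid, std::numeric_limits<std::uint32_t>::max()};
    // lower_bound/upper_bound bracket exactly the entries of this peer
    entries.erase(entries.lower_bound(lb), entries.upper_bound(ub));
}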
@@ -31,9 +31,7 @@
#ifndef THREAD_HPP
#define THREAD_HPP
//#ifdef __APPLE__
#if 1
#ifdef __APPLE__
#include <boost/thread.hpp>
#include "cppa/util/duration.hpp"
@@ -28,6 +28,8 @@
\******************************************************************************/
#include <cstring>
#include "cppa/atom.hpp"
#include "cppa/any_tuple.hpp"
@@ -40,7 +42,7 @@
#include "cppa/detail/singleton_manager.hpp"
// thread_specific_ptr
#include <boost/thread/tss.hpp>
//#include <boost/thread/tss.hpp>
namespace {
@@ -64,7 +66,15 @@ actor_proxy_cache& get_actor_proxy_cache()
return s_proxy_cache;
}
actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
actor_proxy_ptr actor_proxy_cache::get(actor_id aid,
std::uint32_t process_id,
process_information::node_id_type const& node_id)
{
key_tuple k{node_id, process_id, aid};
return get_impl(k);
}
actor_proxy_ptr actor_proxy_cache::get_impl(key_tuple const& key)
{
// lifetime scope of shared guard
{
@@ -75,7 +85,7 @@ actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
return i->second;
}
}
actor_proxy_ptr result{new actor_proxy(std::get<0>(key), new process_information(std::get<1>(key), std::get<2>(key)))};
actor_proxy_ptr result{new actor_proxy(std::get<2>(key), new process_information(std::get<1>(key), std::get<0>(key)))};
// lifetime scope of exclusive guard
{
lock_guard<util::shared_spinlock> guard{m_lock};
@@ -97,7 +107,7 @@ actor_proxy_ptr actor_proxy_cache::get(key_tuple const& key)
bool actor_proxy_cache::erase(actor_proxy_ptr const& pptr)
{
auto pinfo = pptr->parent_process_ptr();
key_tuple key(pptr->id(), pinfo->process_id(), pinfo->node_id());
key_tuple key(pinfo->node_id(), pinfo->process_id(), pptr->id());
{
lock_guard<util::shared_spinlock> guard{m_lock};
return m_entries.erase(key) > 0;
@@ -105,4 +115,29 @@ bool actor_proxy_cache::erase(actor_proxy_ptr const& pptr)
return false;
}
// lexicographic comparison: node id (bytewise) first, then process id, then actor id
bool actor_proxy_cache::key_tuple_less::operator()(key_tuple const& lhs,
key_tuple const& rhs) const
{
int cmp_res = strncmp(reinterpret_cast<char const*>(std::get<0>(lhs).data()),
reinterpret_cast<char const*>(std::get<0>(rhs).data()),
process_information::node_id_size);
if (cmp_res < 0)
{
return true;
}
else if (cmp_res == 0)
{
if (std::get<1>(lhs) < std::get<1>(rhs))
{
return true;
}
else if (std::get<1>(lhs) == std::get<1>(rhs))
{
return std::get<2>(lhs) < std::get<2>(rhs);
}
}
return false;
}
} } // namespace cppa::detail
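std::tuple's built-in operator< is not usable here because the node id has to be compared bytewise via strncmp, so the comparator spells out the usual lexicographic scheme. An equivalent, more compact formulation under the same key layout:

bool less_than(key_tuple const& lhs, key_tuple const& rhs)
{
    int cmp = strncmp(reinterpret_cast<char const*>(std::get<0>(lhs).data()),
                      reinterpret_cast<char const*>(std::get<0>(rhs).data()),
                      process_information::node_id_size);
    if (cmp != 0) return cmp < 0;             // node ids differ
    if (std::get<1>(lhs) != std::get<1>(rhs)) // same node: compare process ids
        return std::get<1>(lhs) < std::get<1>(rhs);
    return std::get<2>(lhs) < std::get<2>(rhs); // finally: actor ids
}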
@@ -197,6 +197,25 @@ class po_peer : public po_socket_handler
~po_peer()
{
closesocket(m_socket);
if (m_peer)
{
// collect all children (proxies to actors of m_peer)
std::vector<actor_proxy_ptr> children;
children.reserve(20);
get_actor_proxy_cache().erase_all(m_peer->node_id(),
m_peer->process_id(),
[&](actor_proxy_ptr& pptr)
{
children.push_back(std::move(pptr));
});
// kill all proxies
for (actor_proxy_ptr& pptr: children)
{
pptr->enqueue(nullptr,
make_any_tuple(atom("KILL_PROXY"),
exit_reason::remote_link_unreachable));
}
}
}
inline native_socket_type get_socket() const { return m_socket; }
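Note the two-phase teardown above: the erase_all callback runs while the cache's spinlock is held, so it only moves each pointer into a local vector; the KILL_PROXY messages are enqueued afterwards, outside the lock. Schematically (cache is a stand-in name for the proxy cache reference):

std::vector<actor_proxy_ptr> children;
cache.erase_all(node_id, process_id, [&](actor_proxy_ptr& pptr)
{
    children.push_back(std::move(pptr)); // under the spinlock: collect only
});
for (auto& pptr : children) // lock released: safe to enqueue
{
    pptr->enqueue(nullptr, make_any_tuple(atom("KILL_PROXY"),
                                          exit_reason::remote_link_unreachable));
}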
@@ -100,8 +100,7 @@ struct thread_pool_scheduler::worker
{
return result;
}
# if 1
//# ifdef __APPLE__
# ifdef __APPLE__
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(1);
boost::this_thread::sleep(timeout);
@@ -122,8 +121,7 @@ struct thread_pool_scheduler::worker
{
return result;
}
# if 1
//# ifdef __APPLE__
# ifdef __APPLE__
auto timeout = boost::get_system_time();
timeout += boost::posix_time::milliseconds(10);
boost::this_thread::sleep(timeout);
@@ -156,9 +154,6 @@ struct thread_pool_scheduler::worker
handler h;
for (;;)
{
h.job = aggressive_polling();
while (!h.job) h.job = less_aggressive_polling();
/*
h.job = aggressive_polling();
if (!h.job)
{
@@ -168,7 +163,6 @@
h.job = relaxed_polling();
}
}
*/
if (h.job == m_dummy)
{
// dummy of doom received ...
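This hunk re-enables the three-stage polling strategy in place of the two-stage variant: spin first, then probe with ~1 ms sleeps, then fall back to ~10 ms sleeps (the intervals appear in the two hunks above). A sketch of the intended control flow, assuming the polling helpers as declared in this file:

h.job = aggressive_polling();          // spin without sleeping
while (!h.job)
{
    h.job = less_aggressive_polling(); // ~1 ms sleep between probes
    if (!h.job)
    {
        h.job = relaxed_polling();     // ~10 ms sleep between probes
    }
}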
@@ -208,11 +208,13 @@ actor_ptr remote_actor(const char* host, std::uint16_t port)
auto peer_pinf = new process_information(peer_pid, peer_node_id);
process_information_ptr pinfptr(peer_pinf);
auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
//auto key = std::make_tuple(remote_actor_id, pinfptr->process_id(), pinfptr->node_id());
detail::singleton_manager::get_network_manager()
->send_to_mailman(make_any_tuple(sockfd, pinfptr));
detail::post_office_add_peer(sockfd, pinfptr);
return detail::get_actor_proxy_cache().get(key);
return detail::get_actor_proxy_cache().get(remote_actor_id,
pinfptr->process_id(),
pinfptr->node_id());
//auto ptr = get_scheduler()->register_hidden_context();
}
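remote_actor now passes the three key components to the proxy cache directly instead of building a key_tuple at the call site. A hedged usage sketch of the public API (host and port invented for illustration):

// connect to an actor published at the given host and port,
// then talk to it through the returned proxy
actor_ptr whom = remote_actor("localhost", 4242);
send(whom, atom("hello"));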
@@ -240,11 +240,18 @@ class actor_ptr_tinfo : public util::abstract_uniform_type_info<actor_ptr>
}
else
{
/*
actor_proxy_cache::key_tuple key;
std::get<0>(key) = get<std::uint32_t>(ptup[0]);
std::get<1>(key) = get<std::uint32_t>(ptup[1]);
node_id_from_string(nstr, std::get<2>(key));
ptrref = detail::get_actor_proxy_cache().get(key);
*/
process_information::node_id_type nid;
node_id_from_string(nstr, nid);
ptrref = detail::get_actor_proxy_cache().get(get<std::uint32_t>(ptup[0]),
get<std::uint32_t>(ptup[1]),
nid);
}
}
}