Commit bbc45bc2 authored by Dominik Charousset

renamed {util::fiber => detail::cs_thread}

parent e7154b0b
......@@ -153,6 +153,7 @@ set(LIBCPPA_SRC
src/context_switching_resume.cpp
src/continuable.cpp
src/continue_helper.cpp
src/cs_thread.cpp
src/decorated_tuple.cpp
src/actor_proxy.cpp
src/peer.cpp
......@@ -165,7 +166,6 @@ set(LIBCPPA_SRC
src/exception.cpp
src/exit_reason.cpp
src/fd_util.cpp
src/fiber.cpp
src/functor_based_actor.cpp
src/functor_based_blocking_actor.cpp
src/get_root_uuid.cpp
......
......@@ -191,7 +191,7 @@ cppa/util/comparable.hpp
cppa/util/compare_tuples.hpp
cppa/util/dptr.hpp
cppa/util/duration.hpp
cppa/util/fiber.hpp
cppa/detail/cs_thread.hpp
cppa/util/get_mac_addresses.hpp
cppa/util/get_root_uuid.hpp
cppa/util/guard.hpp
......@@ -270,7 +270,7 @@ src/exception.cpp
src/exit_reason.cpp
src/factory.cpp
src/fd_util.cpp
src/fiber.cpp
src/cs_thread.cpp
src/functor_based_actor.cpp
src/functor_based_blocking_actor.cpp
src/get_mac_addresses.cpp
......
......@@ -31,42 +31,29 @@
#ifndef CPPA_FIBER_HPP
#define CPPA_FIBER_HPP
namespace cppa { namespace util {
namespace cppa { namespace detail {
struct fiber_impl;
struct cst_impl;
/**
* @brief A 'lightweight thread' supporting manual context switching.
*/
struct fiber {
// A cooperatively scheduled thread implementation
struct cs_thread {
/**
* @brief Queries whether libcppa was compiled without
* fiber support on this platform.
*/
// Queries whether libcppa was compiled without cs threads on this platform.
static const bool is_disabled_feature;
/**
* @brief Creates a new fiber that stores the context
* of the calling (kernel) thread.
*/
fiber();
// Creates a new cs_thread storing the context of the calling thread.
cs_thread();
/**
* @brief Creates a fiber that executes the given function @p func
* using the argument @p arg1.
*/
fiber(void (*func)(void*), void* arg1);
// Creates a cs_thread that executes @p func(arg1)
cs_thread(void (*func)(void*), void* arg1);
~fiber();
~cs_thread();
/**
* @brief Swaps the context from @p source to @p target.
*/
static void swap(fiber& source, fiber& target);
// Swaps the context from @p source to @p target.
static void swap(cs_thread& source, cs_thread& target);
// pimpl
fiber_impl* m_impl;
cst_impl* m_impl;
};
......
......@@ -4,7 +4,6 @@
#include <type_traits>
#include "cppa/logging.hpp"
#include "cppa/resumable.hpp"
#include "cppa/blocking_actor.hpp"
#include "cppa/mailbox_element.hpp"
......@@ -12,11 +11,7 @@
#include "cppa/util/duration.hpp"
namespace cppa {
namespace util {
struct fiber;
} // namespace util
} // namespace cppa
#include "cppa/detail/resumable.hpp"
namespace cppa {
namespace detail {
......
......@@ -32,12 +32,13 @@
#define CPPA_RESUMABLE_HPP
namespace cppa {
namespace detail {
namespace util {
struct fiber;
} // namespace util
struct cs_thread;
struct resumable {
class resumable {
public:
enum resume_result {
resume_later,
......@@ -50,10 +51,11 @@ struct resumable {
virtual ~resumable();
virtual resume_result resume(util::fiber*) = 0;
virtual resume_result resume(detail::cs_thread*) = 0;
};
} // namespace detail
} // namespace cppa
#endif // CPPA_RESUMABLE_HPP
......@@ -33,13 +33,16 @@
#include <thread>
#include "cppa/resumable.hpp"
#include "cppa/scheduler.hpp"
#include "cppa/util/producer_consumer_list.hpp"
#include "cppa/detail/resumable.hpp"
namespace cppa { namespace detail {
struct cs_thread;
class thread_pool_scheduler : public scheduler {
typedef scheduler super;
......@@ -47,7 +50,7 @@ class thread_pool_scheduler : public scheduler {
public:
struct dummy : resumable {
resume_result resume(util::fiber*) override;
resume_result resume(detail::cs_thread*) override;
};
struct worker;
......
......@@ -33,10 +33,10 @@
#include <string>
#include "cppa/util/fiber.hpp"
namespace cppa { namespace detail {
struct cs_thread;
enum class yield_state : int {
// yield() wasn't called yet
invalid,
......@@ -52,7 +52,7 @@ enum class yield_state : int {
void yield(yield_state);
// switches to @p what and returns to @p from after yield(...)
yield_state call(util::fiber* what, util::fiber* from);
yield_state call(detail::cs_thread* what, detail::cs_thread* from);
} } // namespace cppa::detail
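For orientation, a minimal usage sketch (not part of this commit) of how the renamed detail::cs_thread is driven through this yield interface; worker_fun and sketch() are hypothetical names, while cs_thread, call(), yield(), and the yield_state values blocked/done come from the headers shown in this diff.
#include "cppa/detail/cs_thread.hpp"
#include "cppa/detail/yield_interface.hpp"
namespace {
// hypothetical worker; a real one would resume an actor between yields
void worker_fun(void*) {
    cppa::detail::yield(cppa::detail::yield_state::blocked); // hand control back once
    cppa::detail::yield(cppa::detail::yield_state::done);    // signal completion
}
} // namespace
void sketch() {
    using namespace cppa::detail;
    cs_thread self;                        // stores the context of the calling thread
    cs_thread worker(worker_fun, nullptr); // executes worker_fun(nullptr) on its own stack
    // resume the worker until it reports completion
    while (call(&worker, &self) != yield_state::done) {
        // worker yielded 'blocked'; a scheduler would await new work here
    }
}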
......
......@@ -71,10 +71,6 @@ class scheduler;
class local_scheduler;
class sync_handle_helper;
namespace util {
struct fiber;
} // namespace util
/**
* @brief Base class for local running Actors.
* @extends abstract_actor
......
......@@ -31,11 +31,11 @@
#define CPPA_CONTEXT_SWITCHING_ACTOR_HPP
#include "cppa/config.hpp"
#include "cppa/resumable.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/mailbox_element.hpp"
#include "cppa/util/fiber.hpp"
#include "cppa/detail/cs_thread.hpp"
#include "cppa/detail/resumable.hpp"
#include "cppa/policy/resume_policy.hpp"
......@@ -56,25 +56,25 @@ class context_switching_resume {
public:
// required by util::fiber
// required by detail::cs_thread
static void trampoline(void* _this);
// Base must be a mailbox-based actor
template<class Base, class Derived>
struct mixin : Base, resumable {
struct mixin : Base, detail::resumable {
template<typename... Ts>
mixin(Ts&&... args)
: Base(std::forward<Ts>(args)...)
, m_fiber(context_switching_resume::trampoline,
, m_cs_thread(context_switching_resume::trampoline,
static_cast<blocking_actor*>(this)) { }
resumable::resume_result resume(util::fiber* from) override {
detail::resumable::resume_result resume(detail::cs_thread* from) override {
CPPA_REQUIRE(from != nullptr);
CPPA_PUSH_AID(this->id());
using namespace detail;
for (;;) {
switch (call(&m_fiber, from)) {
switch (call(&m_cs_thread, from)) {
case yield_state::done: {
return resumable::done;
}
......@@ -99,7 +99,7 @@ class context_switching_resume {
}
}
util::fiber m_fiber;
detail::cs_thread m_cs_thread;
};
......@@ -120,7 +120,7 @@ class context_switching_resume {
private:
// members
util::fiber m_fiber;
detail::cs_thread m_cs_thread;
};
......
......@@ -34,7 +34,6 @@
#include <atomic>
#include "cppa/any_tuple.hpp"
#include "cppa/resumable.hpp"
#include "cppa/actor_state.hpp"
#include "cppa/message_header.hpp"
......
......@@ -44,6 +44,8 @@
#include "cppa/policy/resume_policy.hpp"
#include "cppa/detail/cs_thread.hpp"
namespace cppa { namespace policy {
class event_based_resume {
......@@ -52,7 +54,7 @@ class event_based_resume {
// Base must be a mailbox-based actor
template<class Base, class Derived>
struct mixin : Base, resumable {
struct mixin : Base, detail::resumable {
template<typename... Ts>
mixin(Ts&&... args) : Base(std::forward<Ts>(args)...) { }
......@@ -61,7 +63,7 @@ class event_based_resume {
return static_cast<Derived*>(this);
}
resumable::resume_result resume(util::fiber*) override {
resumable::resume_result resume(detail::cs_thread*) override {
auto d = dptr();
CPPA_LOG_TRACE("id = " << d->id()
<< ", state = " << static_cast<int>(d->state()));
......@@ -169,7 +171,7 @@ class event_based_resume {
}
}
done_cb();
return resumable::done;
return detail::resumable::done;
}
};
......
......@@ -9,9 +9,9 @@
#include "cppa/policy/resume_policy.hpp"
namespace cppa {
namespace util {
struct fiber;
} // namespace util
namespace detail {
struct cs_thread;
} // namespace detail
} // namespace cppa
namespace cppa { namespace policy {
......@@ -29,7 +29,7 @@ class no_resume {
template<typename... Ts>
mixin(Ts&&... args) : Base(std::forward<Ts>(args)...) { }
inline resumable::resume_result resume(util::fiber*) {
inline detail::resumable::resume_result resume(detail::cs_thread*) {
auto done_cb = [=](std::uint32_t reason) {
this->planned_exit_reason(reason);
this->on_exit();
......@@ -45,7 +45,7 @@ class no_resume {
catch (...) {
done_cb(exit_reason::unhandled_exception);
}
return resumable::done;
return detail::resumable::done;
}
};
......
......@@ -41,7 +41,7 @@
#include "cppa/actor_state.hpp"
#include "cppa/exit_reason.hpp"
#include "cppa/util/fiber.hpp"
#include "cppa/detail/cs_thread.hpp"
#include "cppa/util/duration.hpp"
#include "cppa/util/scope_guard.hpp"
......@@ -116,10 +116,10 @@ class no_scheduling {
auto guard = util::make_scope_guard([is_hidden] {
if (!is_hidden) get_actor_registry()->dec_running();
});
util::fiber fself;
detail::cs_thread fself;
for (;;) {
mself->set_state(actor_state::ready);
if (mself->resume(&fself) == resumable::done) {
if (mself->resume(&fself) == detail::resumable::done) {
return;
}
// await new data before resuming actor
......
......@@ -31,12 +31,19 @@
#ifndef CPPA_RESUME_POLICY_HPP
#define CPPA_RESUME_POLICY_HPP
#include "cppa/resumable.hpp"
#include "cppa/detail/resumable.hpp"
// this header contains all type definitions needed to
// implement the resume_policy trait
namespace cppa { namespace util { class duration; struct fiber; } }
namespace cppa {
namespace util {
class duration;
} // namespace util
namespace detail {
struct cs_thread;
} // namespace detail
} // namespace cppa
namespace cppa {
namespace policy {
......@@ -57,7 +64,8 @@ class resume_policy {
* actor finishes execution.
*/
template<class Actor>
resumable::resume_result resume(Actor* self, util::fiber* from);
detail::resumable::resume_result resume(Actor* self,
detail::cs_thread* from);
/**
* @brief Waits unconditionally until the actor is ready to resume.
......
......@@ -42,7 +42,6 @@
#include "cppa/channel.hpp"
#include "cppa/any_tuple.hpp"
#include "cppa/cow_tuple.hpp"
#include "cppa/resumable.hpp"
#include "cppa/attachable.hpp"
#include "cppa/spawn_options.hpp"
#include "cppa/message_header.hpp"
......@@ -55,7 +54,11 @@ class event_based_actor;
class scheduled_actor;
class scheduler_helper;
typedef intrusive_ptr<scheduled_actor> scheduled_actor_ptr;
namespace detail { class singleton_manager; } // namespace detail
namespace detail {
class resumable;
class singleton_manager;
} // namespace detail
/**
* @brief This abstract class allows to create (spawn) new actors
......@@ -86,7 +89,7 @@ class scheduler {
actor printer() const;
virtual void enqueue(resumable*) = 0;
virtual void enqueue(detail::resumable*) = 0;
template<typename Duration, typename... Data>
void delayed_send(message_header hdr,
......
......@@ -41,7 +41,7 @@
#include "cppa/spawn_options.hpp"
#include "cppa/typed_event_based_actor.hpp"
#include "cppa/util/fiber.hpp"
#include "cppa/detail/cs_thread.hpp"
#include "cppa/util/type_traits.hpp"
#include "cppa/detail/proper_actor.hpp"
......@@ -65,10 +65,10 @@ intrusive_ptr<C> spawn_impl(BeforeLaunch before_launch_fun, Ts&&... args) {
CPPA_LOGF_TRACE("spawn " << detail::demangle<C>());
// runtime check whether context_switching_resume can be used,
// i.e., add the detached flag if libcppa was compiled
// without fiber support when using the blocking API
// without cs_thread support when using the blocking API
if (has_blocking_api_flag(Os)
&& !has_detach_flag(Os)
&& util::fiber::is_disabled_feature) {
&& detail::cs_thread::is_disabled_feature) {
return spawn_impl<C, Os + detached>(before_launch_fun,
std::forward<Ts>(args)...);
}
......
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011-2013 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation; either version 2.1 of the License, *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#include <cstdint>
#include <stdexcept>
#include "cppa/detail/cs_thread.hpp"
namespace {
typedef void* vptr;
typedef void (*cst_fun)(vptr);
} // namespace <anonymous>
#ifdef CPPA_DISABLE_CONTEXT_SWITCHING
namespace cppa { namespace detail {
cs_thread::cs_thread() : m_impl(nullptr) { }
cs_thread::cs_thread(cst_fun, vptr) : m_impl(nullptr) { }
cs_thread::~cs_thread() { }
void cs_thread::swap(cs_thread&, cs_thread&) {
throw std::logic_error("libcppa was compiled using "
"CPPA_DISABLE_CONTEXT_SWITCHING");
}
const bool cs_thread::is_disabled_feature = true;
} } // namespace cppa::detail
#else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING
// optional valgrind include
#ifdef CPPA_ANNOTATE_VALGRIND
# include <valgrind/valgrind.h>
#endif
// boost includes
#include <boost/version.hpp>
#include <boost/context/all.hpp>
#if BOOST_VERSION >= 105300
# include <boost/coroutine/all.hpp>
#endif
namespace cppa { namespace detail {
void cst_trampoline(intptr_t iptr);
namespace {
#if CPPA_ANNOTATE_VALGRIND
typedef int vg_member;
inline void vg_register(vg_member& stack_id, vptr ptr1, vptr ptr2) {
stack_id = VALGRIND_STACK_REGISTER(ptr1, ptr2);
}
inline void vg_deregister(vg_member stack_id) {
VALGRIND_STACK_DEREGISTER(stack_id);
}
#else
struct vg_member { };
inline void vg_register(vg_member&, vptr, vptr) {/*NOP*/}
inline void vg_deregister(const vg_member&) { }
#endif
/* Interface dependent on Boost version:
*
* === namespace aliases ===
*
* ctxn:
* namespace of context library; alias for boost::context or boost::ctx
*
* === types ===
*
* context:
* execution context; either fcontext_t or fcontext_t*
*
* converted_context:
* additional member for converted_cs_thread;
* needed if context is defined as fcontext_t*
*
* ctx_stack_info:
* result of new_stack(), needed to delete stack properly in some versions
*
* stack_allocator:
* a stack allocator for cs_thread instances
*
* === functions ===
*
* void init_converted_context(converted_context&, context&)
*
* void ctx_switch(context&, context&, cst_impl*):
* implements the context switching from one cs_thread to another
*
* ctx_stack_info new_stack(context&, stack_allocator&, vg_member&):
* allocates a stack, prepares execution of context
* and (optionally) registers the new stack to valgrind
*
* void del_stack(stack_allocator&, ctx_stack_info, vg_member&):
* destroys the stack and (optionally) deregisters it from valgrind
*/
#if BOOST_VERSION == 105100
// === namespace aliases ===
namespace ctxn = boost::ctx;
// === types ===
typedef ctxn::fcontext_t context;
struct converted_context { };
typedef int ctx_stack_info;
typedef ctxn::stack_allocator stack_allocator;
// === functions ===
inline void init_converted_context(converted_context&, context&) {/*NOP*/}
inline void ctx_switch(context& from, context& to, cst_impl* ptr) {
ctxn::jump_fcontext(&from, &to, (intptr_t) ptr);
}
ctx_stack_info new_stack(context& ctx,
stack_allocator& alloc,
vg_member& vgm) {
size_t mss = ctxn::minimum_stacksize();
ctx.fc_stack.base = alloc.allocate(mss);
ctx.fc_stack.limit = reinterpret_cast<vptr>(
reinterpret_cast<intptr_t>(ctx.fc_stack.base) - mss);
ctxn::make_fcontext(&ctx, cst_trampoline);
vg_register(vgm,
ctx.fc_stack.base,
reinterpret_cast<vptr>(
reinterpret_cast<intptr_t>(ctx.fc_stack.base) - mss));
return 0; // dummy value
}
inline void del_stack(stack_allocator&, ctx_stack_info, vg_member& vgm) {
vg_deregister(vgm);
}
#elif BOOST_VERSION < 105400
// === namespace aliases ===
namespace ctxn = boost::context;
// === types ===
typedef ctxn::fcontext_t* context;
typedef ctxn::fcontext_t converted_context;
typedef int ctx_stack_info;
# if BOOST_VERSION < 105300
typedef ctxn::guarded_stack_allocator stack_allocator;
# else
typedef boost::coroutines::stack_allocator stack_allocator;
# endif
// === functions ===
inline void init_converted_context(converted_context& cctx, context& ctx) {
ctx = &cctx;
}
inline void ctx_switch(context& from, context& to, cst_impl* ptr) {
ctxn::jump_fcontext(from, to, (intptr_t) ptr);
}
ctx_stack_info new_stack(context& ctx,
stack_allocator& alloc,
vg_member& vgm) {
size_t mss = stack_allocator::minimum_stacksize();
ctx = ctxn::make_fcontext(alloc.allocate(mss), mss, cst_trampoline);
vg_register(vgm,
ctx->fc_stack.sp,
reinterpret_cast<vptr>(
reinterpret_cast<intptr_t>(ctx->fc_stack.sp) - mss));
return 0; // dummy value
}
inline void del_stack(stack_allocator&, ctx_stack_info, vg_member& vgm) {
vg_deregister(vgm);
}
#else // BOOST_VERSION >= 105400
// === namespace aliases ===
namespace ctxn = boost::context;
// === types ===
typedef ctxn::fcontext_t* context;
typedef ctxn::fcontext_t converted_context;
typedef boost::coroutines::stack_context ctx_stack_info;
typedef boost::coroutines::stack_allocator stack_allocator;
// === functions ===
inline void init_converted_context(converted_context& cctx, context& ctx) {
ctx = &cctx;
}
inline void ctx_switch(context& from, context& to, cst_impl* ptr) {
ctxn::jump_fcontext(from, to, (intptr_t) ptr);
}
ctx_stack_info new_stack(context& ctx,
stack_allocator& alloc,
vg_member& vgm) {
size_t mss = stack_allocator::minimum_stacksize();
ctx_stack_info sinf;
alloc.allocate(sinf, mss);
ctx = ctxn::make_fcontext(sinf.sp, sinf.size, cst_trampoline);
vg_register(vgm,
ctx->fc_stack.sp,
reinterpret_cast<vptr>(
reinterpret_cast<intptr_t>(ctx->fc_stack.sp) - mss));
return sinf;
}
inline void del_stack(stack_allocator& alloc,
ctx_stack_info sctx,
vg_member& vgm) {
vg_deregister(vgm);
alloc.deallocate(sctx);
}
#endif
} // namespace <anonymous>
// base class for cs_thread pimpls
struct cst_impl {
cst_impl() : m_ctx() { }
virtual ~cst_impl() { }
virtual void run() = 0;
inline void swap(cst_impl* to) {
ctx_switch(m_ctx, to->m_ctx, to);
}
context m_ctx;
};
// a cs_thread representing a thread ('converts' the thread to a cs_thread)
struct converted_cs_thread : cst_impl {
converted_cs_thread() {
init_converted_context(m_converted, m_ctx);
}
void run() override {
throw std::logic_error("converted_cs_thread::run called");
}
converted_context m_converted;
};
// a cs_thread executing a function
struct fun_cs_thread : cst_impl {
fun_cs_thread(cst_fun fun, vptr arg) : m_fun(fun), m_arg(arg) {
m_stack_info = new_stack(m_ctx, m_alloc, m_vgm);
}
~fun_cs_thread() {
del_stack(m_alloc, m_stack_info, m_vgm);
}
void run() override {
m_fun(m_arg);
}
cst_fun m_fun; // thread function
vptr m_arg; // argument for thread function invocation
stack_allocator m_alloc; // allocates memory for our stack
vg_member m_vgm; // valgrind meta information (optional)
ctx_stack_info m_stack_info; // needed to delete stack in destructor
};
void cst_trampoline(intptr_t iptr) {
auto ptr = (cst_impl*) iptr;
ptr->run();
}
cs_thread::cs_thread() : m_impl(new converted_cs_thread) { }
cs_thread::cs_thread(cst_fun f, vptr x) : m_impl(new fun_cs_thread(f, x)) { }
void cs_thread::swap(cs_thread& from, cs_thread& to) {
from.m_impl->swap(to.m_impl);
}
cs_thread::~cs_thread() {
delete m_impl;
}
const bool cs_thread::is_disabled_feature = false;
} } // namespace cppa::detail
#endif // CPPA_DISABLE_CONTEXT_SWITCHING
/******************************************************************************\
* ___ __ *
* /\_ \ __/\ \ *
* \//\ \ /\_\ \ \____ ___ _____ _____ __ *
* \ \ \ \/\ \ \ '__`\ /'___\/\ '__`\/\ '__`\ /'__`\ *
* \_\ \_\ \ \ \ \L\ \/\ \__/\ \ \L\ \ \ \L\ \/\ \L\.\_ *
* /\____\\ \_\ \_,__/\ \____\\ \ ,__/\ \ ,__/\ \__/.\_\ *
* \/____/ \/_/\/___/ \/____/ \ \ \/ \ \ \/ \/__/\/_/ *
* \ \_\ \ \_\ *
* \/_/ \/_/ *
* *
* Copyright (C) 2011-2013 *
* Dominik Charousset <dominik.charousset@haw-hamburg.de> *
* *
* This file is part of libcppa. *
* libcppa is free software: you can redistribute it and/or modify it under *
* the terms of the GNU Lesser General Public License as published by the *
* Free Software Foundation; either version 2.1 of the License, *
* or (at your option) any later version. *
* *
* libcppa is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with libcppa. If not, see <http://www.gnu.org/licenses/>. *
\******************************************************************************/
#include <cstdint>
#include <stdexcept>
#include "cppa/util/fiber.hpp"
#ifdef CPPA_DISABLE_CONTEXT_SWITCHING
namespace cppa { namespace util {
fiber::fiber() : m_impl(nullptr) { }
fiber::fiber(void (*)(void*), void*) : m_impl(nullptr) { }
fiber::~fiber() { }
void fiber::swap(fiber&, fiber&) {
throw std::logic_error("libcppa was compiled using "
"CPPA_DISABLE_CONTEXT_SWITCHING");
}
const bool fiber::is_disabled_feature = true;
} } // namespace cppa::util
#else // ifdef CPPA_DISABLE_CONTEXT_SWITCHING
#ifdef CPPA_ANNOTATE_VALGRIND
#include <valgrind/valgrind.h>
#endif
#include <boost/version.hpp>
#include <boost/context/all.hpp>
#if BOOST_VERSION >= 105300
#include <boost/coroutine/all.hpp>
#endif
namespace cppa { namespace util {
struct fiber_impl;
void fiber_trampoline(intptr_t iptr);
namespace {
#if CPPA_ANNOTATE_VALGRIND
typedef int vg_member;
inline void vg_register(vg_member& stack_id, void* ptr1, void* ptr2) {
stack_id = VALGRIND_STACK_REGISTER(ptr1, ptr2);
}
inline void vg_deregister(vg_member stack_id) {
VALGRIND_STACK_DEREGISTER(stack_id);
}
#else
struct vg_member { };
template<typename... Ts> inline void vg_register(const Ts&...) { }
inline void vg_deregister(const vg_member&) { }
#endif
#if BOOST_VERSION == 105100
namespace ctx = boost::ctx;
typedef ctx::fcontext_t fc_member;
typedef ctx::stack_allocator fc_allocator;
inline void fc_jump(fc_member& from, fc_member& to, fiber_impl* ptr) {
ctx::jump_fcontext(&from, &to, (intptr_t) ptr);
}
inline void fc_make(fc_member& storage, fc_allocator& alloc, vg_member& vgm) {
size_t mss = ctx::minimum_stacksize();
storage.fc_stack.base = alloc.allocate(mss);
storage.fc_stack.limit = reinterpret_cast<void*>(
reinterpret_cast<intptr_t>(storage.fc_stack.base) - mss);
ctx::make_fcontext(&storage, fiber_trampoline);
vg_register(vgm,
storage.fc_stack.base,
reinterpret_cast<void*>(
reinterpret_cast<intptr_t>(storage.fc_stack.base) - mss));
}
#elif BOOST_VERSION < 105400
namespace ctx = boost::context;
typedef ctx::fcontext_t* fc_member;
# if BOOST_VERSION < 105300
typedef ctx::guarded_stack_allocator fc_allocator;
# else
typedef boost::coroutines::stack_allocator fc_allocator;
# endif
inline void fc_jump(fc_member& from, fc_member& to, fiber_impl* ptr) {
ctx::jump_fcontext(from, to, (intptr_t) ptr);
}
inline void fc_make(fc_member& storage, fc_allocator& alloc, vg_member& vgm) {
size_t mss = fc_allocator::minimum_stacksize();
storage = ctx::make_fcontext(alloc.allocate(mss), mss, fiber_trampoline);
vg_register(vgm,
storage->fc_stack.sp,
reinterpret_cast<void*>(
reinterpret_cast<intptr_t>(storage->fc_stack.sp) - mss));
}
#else // BOOST_VERSION >= 105400
namespace ctx = boost::context;
typedef ctx::fcontext_t* fc_member;
typedef boost::coroutines::stack_context fc_make_result;
typedef boost::coroutines::stack_allocator fc_allocator;
inline void fc_jump(fc_member& from, fc_member& to, fiber_impl* ptr) {
ctx::jump_fcontext(from, to, (intptr_t) ptr);
}
inline fc_make_result fc_make(fc_member& storage, fc_allocator& alloc, vg_member& vgm) {
size_t mss = fc_allocator::minimum_stacksize();
fc_make_result sctx;
alloc.allocate(sctx, mss);
storage = ctx::make_fcontext(sctx.sp, sctx.size, fiber_trampoline);
vg_register(vgm,
storage->fc_stack.sp,
reinterpret_cast<void*>(
reinterpret_cast<intptr_t>(storage->fc_stack.sp) - mss));
return sctx;
}
inline void fc_destroy(fc_allocator& alloc, fc_make_result sctx) {
alloc.deallocate(sctx);
}
#endif
} // namespace <anonymous>
struct fiber_impl {
fiber_impl() : m_ctx() { }
virtual ~fiber_impl() { }
virtual void run() { }
void swap(fiber_impl* to) {
fc_jump(m_ctx, to->m_ctx, to);
}
fc_member m_ctx;
};
// a fiber representing a thread ('converts' the thread to a fiber)
struct converted_fiber : fiber_impl {
converted_fiber() {
# if BOOST_VERSION > 105100
m_ctx = &m_ctx_obj;
# endif
}
# if BOOST_VERSION > 105100
ctx::fcontext_t m_ctx_obj;
# endif
};
// a fiber executing a function
struct fun_fiber : fiber_impl {
fun_fiber(void (*fun)(void*), void* arg) : m_arg(arg), m_fun(fun) {
m_make_res = fc_make(m_ctx, m_alloc, m_vgm);
}
~fun_fiber() {
vg_deregister(m_vgm);
fc_destroy(m_alloc, m_make_res);
}
virtual void run() {
m_fun(m_arg);
}
void* m_arg;
void (*m_fun)(void*);
fc_make_result m_make_res;
fc_allocator m_alloc;
vg_member m_vgm;
};
void fiber_trampoline(intptr_t iptr) {
auto ptr = (fiber_impl*) iptr;
ptr->run();
}
fiber::fiber() : m_impl(new converted_fiber) { }
fiber::fiber(void (*f)(void*), void* arg) : m_impl(new fun_fiber(f, arg)) { }
void fiber::swap(fiber& from, fiber& to) {
from.m_impl->swap(to.m_impl);
}
fiber::~fiber() {
delete m_impl;
}
const bool fiber::is_disabled_feature = false;
} } // namespace cppa::util
#endif // CPPA_DISABLE_CONTEXT_SWITCHING
......@@ -28,10 +28,12 @@
\******************************************************************************/
#include "cppa/resumable.hpp"
#include "cppa/detail/resumable.hpp"
namespace cppa {
namespace detail {
resumable::~resumable() { }
} // namespace detail
} // namespace cppa
......@@ -37,7 +37,7 @@
#include "cppa/on.hpp"
#include "cppa/logging.hpp"
#include "cppa/util/fiber.hpp"
#include "cppa/detail/cs_thread.hpp"
#include "cppa/detail/actor_registry.hpp"
#include "cppa/detail/thread_pool_scheduler.hpp"
......@@ -47,7 +47,7 @@ using std::endl;
namespace cppa { namespace detail {
resumable::resume_result thread_pool_scheduler::dummy::resume(util::fiber*) {
resumable::resume_result thread_pool_scheduler::dummy::resume(detail::cs_thread*) {
throw std::logic_error("thread_pool_scheduler::dummy::resume");
}
......@@ -97,7 +97,7 @@ struct thread_pool_scheduler::worker {
void operator()() {
CPPA_LOG_TRACE("");
util::fiber fself;
detail::cs_thread fself;
job_ptr job = nullptr;
for (;;) {
aggressive(job) || moderate(job) || relaxed(job);
......
......@@ -30,6 +30,7 @@
#include <memory>
#include "cppa/detail/cs_thread.hpp"
#include "cppa/detail/yield_interface.hpp"
namespace {
......@@ -37,8 +38,8 @@ namespace {
using namespace cppa;
__thread detail::yield_state* t_ystate = nullptr;
__thread util::fiber* t_caller = nullptr;
__thread util::fiber* t_callee = nullptr;
__thread detail::cs_thread* t_caller = nullptr;
__thread detail::cs_thread* t_callee = nullptr;
constexpr const char* names_table[] = {
"yield_state::invalid",
......@@ -53,15 +54,15 @@ namespace cppa { namespace detail {
void yield(yield_state ystate) {
*t_ystate = ystate;
util::fiber::swap(*t_callee, *t_caller);
detail::cs_thread::swap(*t_callee, *t_caller);
}
yield_state call(util::fiber* what, util::fiber* from) {
yield_state call(detail::cs_thread* what, detail::cs_thread* from) {
yield_state result;
t_ystate = &result;
t_caller = from;
t_callee = what;
util::fiber::swap(*from, *what);
detail::cs_thread::swap(*from, *what);
return result;
}
......
......@@ -5,7 +5,7 @@
#include <type_traits>
#include "test.hpp"
#include "cppa/util/fiber.hpp"
#include "cppa/detail/cs_thread.hpp"
#include "cppa/detail/yield_interface.hpp"
using namespace cppa;
......@@ -19,7 +19,7 @@ struct pseudo_worker {
pseudo_worker() : m_count(0), m_blocked(true) { }
void operator()() {
void run() {
for (;;) {
if (m_blocked) {
yield(yield_state::blocked);
......@@ -33,17 +33,19 @@ struct pseudo_worker {
};
void coroutine(void* worker) { (*reinterpret_cast<pseudo_worker*>(worker))(); }
void coroutine(void* worker) {
reinterpret_cast<pseudo_worker*>(worker)->run();
}
int main() {
CPPA_TEST(test_yield_interface);
# ifdef CPPA_DISABLE_CONTEXT_SWITCHING
CPPA_PRINT("WARNING: context switching was explicitly disabled "
"using CPPA_DISABLE_CONTEXT_SWITCHING");
CPPA_PRINT("WARNING: context switching was explicitly "
"disabled by defining CPPA_DISABLE_CONTEXT_SWITCHING");
# else
fiber fself;
cs_thread fself;
pseudo_worker worker;
fiber fcoroutine(coroutine, &worker);
cs_thread fcoroutine(coroutine, &worker);
yield_state ys;
int i = 0;
do {
......