Commit 8d450870 authored by Dominik Charousset's avatar Dominik Charousset

Fix ASIO multiplexer

parent 709b793f
...@@ -40,6 +40,9 @@ namespace caf { ...@@ -40,6 +40,9 @@ namespace caf {
namespace io { namespace io {
namespace network { namespace network {
/// Low-level error code.
using error_code = boost::system::error_code;
/// Low-level backend for IO multiplexing. /// Low-level backend for IO multiplexing.
using io_service = boost::asio::io_service; using io_service = boost::asio::io_service;
...@@ -134,10 +137,12 @@ public: ...@@ -134,10 +137,12 @@ public:
using buffer_type = std::vector<char>; using buffer_type = std::vector<char>;
asio_stream(asio_multiplexer& ref) asio_stream(asio_multiplexer& ref)
: writing_(false), : reading_(false),
writing_(false),
ack_writes_(false), ack_writes_(false),
fd_(ref.service()), fd_(ref.service()),
backend_(ref) { backend_(ref),
rd_buf_ready_(false) {
configure_read(receive_policy::at_most(1024)); configure_read(receive_policy::at_most(1024));
} }
...@@ -157,9 +162,9 @@ public: ...@@ -157,9 +162,9 @@ public:
} }
/// Starts reading data from the socket, forwarding incoming data to `mgr`. /// Starts reading data from the socket, forwarding incoming data to `mgr`.
void start(const manager_ptr& mgr) { void start(stream_manager* mgr) {
CAF_ASSERT(mgr != nullptr); CAF_ASSERT(mgr != nullptr);
read_loop(mgr); activate(mgr);
} }
/// Configures how much data will be provided for the next `consume` callback. /// Configures how much data will be provided for the next `consume` callback.
...@@ -215,7 +220,7 @@ public: ...@@ -215,7 +220,7 @@ public:
void stop_reading() { void stop_reading() {
CAF_LOG_TRACE(""); CAF_LOG_TRACE("");
boost::system::error_code ec; // ignored error_code ec; // ignored
fd_.shutdown(boost::asio::ip::tcp::socket::shutdown_receive, ec); fd_.shutdown(boost::asio::ip::tcp::socket::shutdown_receive, ec);
} }
...@@ -223,13 +228,45 @@ public: ...@@ -223,13 +228,45 @@ public:
return backend_; return backend_;
} }
/// Activates the stream.
void activate(stream_manager* mgr) {
read_loop(mgr);
}
/// Stops activity of the stream.
void passivate() {
reading_ = false;
}
private: private:
void read_loop(const manager_ptr& mgr) { bool read_one(stream_manager* ptr, size_t num_bytes) {
auto cb = [=](const boost::system::error_code& ec, size_t read_bytes) { if (!reading_) {
// broker was passivated while async read was on its way
rd_buf_ready_ = true;
// make sure buf size matches read_bytes in case of async_read
if (rd_buf_.size() != num_bytes)
rd_buf_.resize(num_bytes);
return false;
}
if (ptr->consume(&backend(), rd_buf_.data(), num_bytes))
return reading_;
return false;
}
void read_loop(manager_ptr mgr) {
reading_ = true;
if (rd_buf_ready_) {
rd_buf_ready_ = false;
if (read_one(mgr.get(), rd_buf_.size()))
read_loop(std::move(mgr));
return;
}
auto cb = [=](const error_code& ec, size_t read_bytes) mutable {
CAF_LOG_TRACE(""); CAF_LOG_TRACE("");
if (!ec) { if (!ec) {
mgr->consume(&backend(), rd_buf_.data(), read_bytes); // bail out early in case broker passivated stream in the meantime
read_loop(mgr); if (read_one(mgr.get(), read_bytes))
read_loop(std::move(mgr));
} else { } else {
mgr->io_failure(&backend(), operation::read); mgr->io_failure(&backend(), operation::read);
} }
...@@ -243,9 +280,8 @@ private: ...@@ -243,9 +280,8 @@ private:
cb); cb);
break; break;
case receive_policy_flag::at_most: case receive_policy_flag::at_most:
if (rd_buf_.size() < rd_size_) { if (rd_buf_.size() < rd_size_)
rd_buf_.resize(rd_size_); rd_buf_.resize(rd_size_);
}
fd_.async_read_some(boost::asio::buffer(rd_buf_, rd_size_), cb); fd_.async_read_some(boost::asio::buffer(rd_buf_, rd_size_), cb);
break; break;
case receive_policy_flag::at_least: { case receive_policy_flag::at_least: {
...@@ -269,7 +305,7 @@ private: ...@@ -269,7 +305,7 @@ private:
wr_buf_.swap(wr_offline_buf_); wr_buf_.swap(wr_offline_buf_);
boost::asio::async_write( boost::asio::async_write(
fd_, boost::asio::buffer(wr_buf_), fd_, boost::asio::buffer(wr_buf_),
[=](const boost::system::error_code& ec, size_t nb) { [=](const error_code& ec, size_t nb) {
CAF_LOG_TRACE(""); CAF_LOG_TRACE("");
if (ec) { if (ec) {
CAF_LOG_DEBUG(CAF_ARG(ec.message())); CAF_LOG_DEBUG(CAF_ARG(ec.message()));
...@@ -284,18 +320,18 @@ private: ...@@ -284,18 +320,18 @@ private:
}); });
} }
void collect_data(const manager_ptr& mgr, size_t collected_bytes) { void collect_data(manager_ptr mgr, size_t collected_bytes) {
fd_.async_read_some(boost::asio::buffer(rd_buf_.data() + collected_bytes, fd_.async_read_some(boost::asio::buffer(rd_buf_.data() + collected_bytes,
rd_buf_.size() - collected_bytes), rd_buf_.size() - collected_bytes),
[=](const boost::system::error_code& ec, size_t nb) { [=](const error_code& ec, size_t nb) mutable {
CAF_LOG_TRACE(CAF_ARG(nb)); CAF_LOG_TRACE(CAF_ARG(nb));
if (!ec) { if (!ec) {
auto sum = collected_bytes + nb; auto sum = collected_bytes + nb;
if (sum >= rd_size_) { if (sum >= rd_size_) {
mgr->consume(&backend(), rd_buf_.data(), sum); if (read_one(mgr.get(), sum))
read_loop(mgr); read_loop(std::move(mgr));
} else { } else {
collect_data(mgr, sum); collect_data(std::move(mgr), sum);
} }
} else { } else {
mgr->io_failure(&backend(), operation::write); mgr->io_failure(&backend(), operation::write);
...@@ -303,6 +339,7 @@ private: ...@@ -303,6 +339,7 @@ private:
}); });
} }
bool reading_;
bool writing_; bool writing_;
bool ack_writes_; bool ack_writes_;
Socket fd_; Socket fd_;
...@@ -312,6 +349,7 @@ private: ...@@ -312,6 +349,7 @@ private:
buffer_type wr_buf_; buffer_type wr_buf_;
buffer_type wr_offline_buf_; buffer_type wr_offline_buf_;
asio_multiplexer& backend_; asio_multiplexer& backend_;
bool rd_buf_ready_;
}; };
/// An acceptor is responsible for accepting incoming connections. /// An acceptor is responsible for accepting incoming connections.
...@@ -328,8 +366,10 @@ public: ...@@ -328,8 +366,10 @@ public:
using manager_ptr = intrusive_ptr<manager_type>; using manager_ptr = intrusive_ptr<manager_type>;
asio_acceptor(asio_multiplexer& am, io_service& io) asio_acceptor(asio_multiplexer& am, io_service& io)
: backend_(am), : accepting_(false),
backend_(am),
accept_fd_(io), accept_fd_(io),
fd_valid_(false),
fd_(io) { fd_(io) {
// nop // nop
} }
...@@ -361,32 +401,62 @@ public: ...@@ -361,32 +401,62 @@ public:
/// Starts this acceptor, forwarding all incoming connections to /// Starts this acceptor, forwarding all incoming connections to
/// `manager`. The intrusive pointer will be released after the /// `manager`. The intrusive pointer will be released after the
/// acceptor has been closed or an IO error occured. /// acceptor has been closed or an IO error occured.
void start(const manager_ptr& mgr) { void start(manager_type* mgr) {
activate(mgr);
}
/// Starts the accept loop.
void activate(manager_type* mgr) {
accept_loop(mgr); accept_loop(mgr);
} }
/// Starts the accept loop.
void passivate() {
accepting_ = false;
}
/// Closes the network connection, thus stopping this acceptor. /// Closes the network connection, thus stopping this acceptor.
void stop() { void stop() {
accept_fd_.close(); accept_fd_.close();
} }
private: private:
void accept_loop(const manager_ptr& mgr) { bool accept_one(manager_type* mgr) {
accept_fd_.async_accept(fd_, [=](const boost::system::error_code& ec) { auto res = mgr->new_connection(); // moves fd_
// reset fd_ for next accept operation
fd_ = socket_type{accept_fd_.get_io_service()};
return res && accepting_;
}
void accept_loop(manager_ptr mgr) {
accepting_ = true;
// accept "cached" connection first
if (fd_valid_) {
fd_valid_ = false;
if (accept_one(mgr.get()))
accept_loop(std::move(mgr));
return;
}
accept_fd_.async_accept(fd_, [=](const error_code& ec) mutable {
CAF_LOG_TRACE(""); CAF_LOG_TRACE("");
if (!ec) { if (!ec) {
mgr->new_connection(); // probably moves fd_ // if broker has passivated this in the meantime, cache fd_ for later
// reset fd_ for next accept operation if (!accepting_) {
fd_ = socket_type{accept_fd_.get_io_service()}; fd_valid_ = true;
accept_loop(mgr); return;
}
if (accept_one(mgr.get()))
accept_loop(std::move(mgr));
} else { } else {
mgr->io_failure(&backend(), operation::read); mgr->io_failure(&backend(), operation::read);
} }
}); });
} }
bool accepting_;
asio_multiplexer& backend_; asio_multiplexer& backend_;
SocketAcceptor accept_fd_; SocketAcceptor accept_fd_;
bool fd_valid_;
socket_type fd_; socket_type fd_;
}; };
......
...@@ -168,6 +168,12 @@ connection_handle asio_multiplexer::add_tcp_scribe(abstract_broker* self, ...@@ -168,6 +168,12 @@ connection_handle asio_multiplexer::add_tcp_scribe(abstract_broker* self,
launched_ = true; launched_ = true;
stream_.start(this); stream_.start(this);
} }
void add_to_loop() override {
stream_.activate(this);
}
void remove_from_loop() override {
stream_.passivate();
}
private: private:
bool launched_; bool launched_;
asio_stream<Socket> stream_; asio_stream<Socket> stream_;
...@@ -263,6 +269,12 @@ asio_multiplexer::add_tcp_doorman(abstract_broker* self, ...@@ -263,6 +269,12 @@ asio_multiplexer::add_tcp_doorman(abstract_broker* self,
uint16_t port() const override { uint16_t port() const override {
return acceptor_.socket_handle().local_endpoint().port(); return acceptor_.socket_handle().local_endpoint().port();
} }
void add_to_loop() override {
acceptor_.activate(this);
}
void remove_from_loop() override {
acceptor_.passivate();
}
private: private:
network::asio_acceptor<asio_tcp_socket_acceptor> acceptor_; network::asio_acceptor<asio_tcp_socket_acceptor> acceptor_;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment