Touch up comments on echo, add connect example

Alex Crichton 2016-12-20 17:59:46 -08:00
parent 50f007a49b
commit 99078c5cc1
2 changed files with 209 additions and 24 deletions

examples/connect.rs (new file, +119)

@@ -0,0 +1,119 @@
//! A simple example of hooking up stdin/stdout to a TCP stream.
//!
//! This example will connect to a server specified in the argument list and
//! then forward all data read on stdin to the server, printing out all data
//! received on stdout.
//!
//! Note that this is not currently optimized for performance, especially around
//! buffer management. Rather it's intended to show an example of working with a
//! client.
extern crate futures;
extern crate tokio_core;
use std::env;
use std::io::{self, Read, Write};
use std::net::SocketAddr;
use std::thread;
use futures::{Sink, Future, Stream};
use futures::sync::mpsc;
use tokio_core::reactor::Core;
use tokio_core::io::{Io, EasyBuf, Codec};
use tokio_core::net::TcpStream;
fn main() {
// Parse what address we're going to connect to
let addr = env::args().nth(1).unwrap_or_else(|| {
panic!("this program requires at least one argument")
});
let addr = addr.parse::<SocketAddr>().unwrap();
// Create the event loop and initiate the connection to the remote server
let mut core = Core::new().unwrap();
let handle = core.handle();
let tcp = TcpStream::connect(&addr, &handle);
// Right now Tokio doesn't support a handle to stdin running on the event
// loop, so we farm out that work to a separate thread. This thread will
// read data from stdin and then send it to the event loop over a standard
// futures channel.
let (stdin_tx, stdin_rx) = mpsc::channel(0);
thread::spawn(|| read_stdin(stdin_tx));
let stdin_rx = stdin_rx.map_err(|_| panic!()); // errors not possible on rx
// After the TCP connection has been established, we set up our client to
// start forwarding data.
//
// First we use the `Io::framed` method with a simple implementation of a
// `Codec` (listed below) that just ships bytes around. We then split that
// in two to work with the stream and sink separately.
//
// Half of the work we're going to do is to take all data we receive on
// stdin (`stdin_rx`) and send that along the TCP stream (`sink`). The
// second half is to take all the data we receive (`stream`) and then write
// that to stdout. Currently we just write to stdout in a synchronous
// fashion.
//
// Finally we set the client to terminate once either half of this work
// finishes. If we don't have any more data to read or we won't receive any
// more data from the remote then we can exit.
let mut stdout = io::stdout();
let client = tcp.and_then(|stream| {
let (sink, stream) = stream.framed(Bytes).split();
let send_stdin = stdin_rx.forward(sink);
let write_stdout = stream.for_each(move |buf| {
stdout.write_all(buf.as_slice())
});
send_stdin.map(|_| ())
.select(write_stdout.map(|_| ()))
.then(|_| Ok(()))
});
// And now that we've got our client, we execute it in the event loop!
core.run(client).unwrap();
}
/// A simple `Codec` implementation that just ships bytes around.
///
/// This type is used for "framing" a TCP stream of bytes but it's really just a
/// convenient method for us to work with streams/sinks for now. This'll just
/// take any data read and interpret it as a "frame" and conversely just shove
/// data into the output location without looking at it.
struct Bytes;
impl Codec for Bytes {
type In = EasyBuf;
type Out = Vec<u8>;
fn decode(&mut self, buf: &mut EasyBuf) -> io::Result<Option<EasyBuf>> {
if buf.len() > 0 {
let len = buf.len();
Ok(Some(buf.drain_to(len)))
} else {
Ok(None)
}
}
fn encode(&mut self, data: Vec<u8>, buf: &mut Vec<u8>) -> io::Result<()> {
buf.extend(data);
Ok(())
}
}
// Our helper method which will read data from stdin and send it along the
// sender provided.
fn read_stdin(mut rx: mpsc::Sender<Vec<u8>>) {
let mut stdin = io::stdin();
loop {
let mut buf = vec![0; 1024];
let n = match stdin.read(&mut buf) {
Err(_) |
Ok(0) => break,
Ok(n) => n,
};
buf.truncate(n);
rx = rx.send(buf).wait().unwrap();
}
}
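
A side note that is not part of this commit: the `Codec` trait used above can also carry real framing logic rather than just shipping raw bytes. Below is a minimal sketch of a line-delimited codec against the same `tokio_core::io` API, where `LineCodec` is a hypothetical name and each decoded frame is one `\n`-terminated line handed back as a `String`:

extern crate tokio_core;

use std::io;
use std::str;

use tokio_core::io::{Codec, EasyBuf};

/// Hypothetical codec: one frame per newline-terminated line.
struct LineCodec;

impl Codec for LineCodec {
    type In = String;
    type Out = String;

    fn decode(&mut self, buf: &mut EasyBuf) -> io::Result<Option<String>> {
        // No newline yet means no complete frame yet; returning `Ok(None)`
        // asks to be called again once more data has arrived.
        match buf.as_slice().iter().position(|&b| b == b'\n') {
            Some(i) => {
                // Pull the line (plus its newline) off the front of the buffer.
                let line = buf.drain_to(i + 1);
                str::from_utf8(&line.as_slice()[..i])
                    .map(|s| Some(s.to_string()))
                    .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid utf-8"))
            }
            None => Ok(None),
        }
    }

    fn encode(&mut self, line: String, buf: &mut Vec<u8>) -> io::Result<()> {
        buf.extend_from_slice(line.as_bytes());
        buf.push(b'\n');
        Ok(())
    }
}

fn main() {
    // In the example above this would be plugged in as `stream.framed(LineCodec)`
    // in place of `stream.framed(Bytes)`; the `split`/`forward` wiring stays
    // the same, but each item on the stream is then a whole line.
    let _codec = LineCodec;
}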

examples/echo.rs

@@ -1,16 +1,22 @@
-//! An echo server that just writes back everything that's written to it.
+//! A "hello world" echo server with tokio-core
//!
-//! If you're on unix you can test this out by in one terminal executing:
+//! This server will create a TCP listener, accept connections in a loop, and
+//! simply write back everything that's read off of each TCP connection. Each
+//! TCP connection is processed concurrently with all other TCP connections, and
+//! each connection will have its own buffer that it's reading in/out of.
+//!
+//! To see this server in action, you can run this in one terminal:
//!
//!     cargo run --example echo
//!
//! and in another terminal you can run:
//!
-//!     nc -4 localhost 8080
+//!     cargo run --example connect 127.0.0.1:8080
//!
-//! Each line you type in to the `nc` terminal should be echo'd back to you!
+//! Each line you type in to the `connect` terminal should be echo'd back to
+//! you! If you open up multiple terminals running the `connect` example you
+//! should be able to see them all make progress simultaneously.
extern crate env_logger;
extern crate futures;
extern crate tokio_core;
@@ -24,40 +30,100 @@ use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;
fn main() {
env_logger::init().unwrap();
// Allow passing an address to listen on as the first argument of this
// program, but otherwise we'll just set up our TCP listener on
// 127.0.0.1:8080 for connections.
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
-// Create the event loop that will drive this server
-let mut l = Core::new().unwrap();
-let handle = l.handle();
+// First up we'll create the event loop that's going to drive this server.
+// This is done by creating an instance of the `Core` type, tokio-core's
+// event loop. Most functions in tokio-core return an `io::Result`, and
+// `Core::new` is no exception. For this example, though, we're mostly just
+// ignoring errors, so we unwrap the return value.
+//
+// After the event loop is created we acquire a handle to it through the
+// `handle` method. With this handle we'll then later be able to create I/O
+// objects and spawn futures.
+let mut core = Core::new().unwrap();
+let handle = core.handle();
-// Create a TCP listener which will listen for incoming connections
+// Next up we create a TCP listener which will listen for incoming
+// connections. This TCP listener is bound to the address we determined
+// above and must be associated with an event loop, so we pass in a handle
+// to our event loop. After the socket's created we print a message to
+// note that we're ready to go and start accepting connections.
let socket = TcpListener::bind(&addr, &handle).unwrap();
-// Once we've got the TCP listener, inform that we have it
println!("Listening on: {}", addr);
-// Pull out the stream of incoming connections and then for each new
-// one spin up a new task copying data.
+// Here we convert the `TcpListener` to a stream of incoming connections
+// with the `incoming` method. We then define how to process each element in
+// the stream with the `for_each` method.
//
-// We use the `io::copy` future to copy all data from the
-// reading half onto the writing half.
+// This combinator, defined on the `Stream` trait, will allow us to define a
+// computation to happen for all items on the stream (in this case TCP
+// connections made to the server). The return value of the `for_each`
+// method is itself a future representing processing the entire stream of
+// connections, and ends up being our server.
let done = socket.incoming().for_each(move |(socket, addr)| {
// Once we're inside this closure this represents an accepted client
// from our server. The `socket` is the client connection and `addr` is
// the remote address of the client (similar to how the standard library
// operates).
//
// We just want to copy all data read from the socket back onto the
// socket itself (e.g. "echo"). We can use the standard `io::copy`
// combinator in the `tokio-core` crate to do precisely this!
//
// The `copy` function takes two arguments, where to read from and where
// to write to. We only have one argument, though, with `socket`.
// Luckily there's a method, `Io::split`, which will split a Read/Write
// stream into its two halves. This operation allows us to work with
// each stream independently, such as passing them as two arguments to the
// `copy` function.
//
// The `copy` function then returns a future, and this future will be
// resolved when the copying operation is complete, resolving to the
// amount of data that was copied.
let (reader, writer) = socket.split();
let amt = copy(reader, writer);
-// Once all that is done we print out how much we wrote, and then
-// critically we *spawn* this future which allows it to run
-// concurrently with other connections.
-let msg = amt.map(move |amt| {
-println!("wrote {} bytes to {}", amt, addr)
-}).map_err(|e| {
-panic!("error: {}", e);
+// After our copy operation is complete we just print out some helpful
+// information.
+let msg = amt.then(move |result| {
+match result {
+Ok(amt) => println!("wrote {} bytes to {}", amt, addr),
+Err(e) => println!("error on {}: {}", addr, e),
+}
+Ok(())
});
// And this is where much of the magic of this server happens. We
// crucially want all clients to make progress concurrently, rather than
// blocking one on completion of another. To achieve this we use the
// `spawn` function on `Handle` to essentially execute some work in the
// background.
//
// This function will transfer ownership of the future (`msg` in this
// case) to the event loop that `handle` points to. The event loop will
// then drive the future to completion.
//
// Essentially here we're spawning a new task to run concurrently, which
// will allow all of our clients to be processed concurrently.
handle.spawn(msg);
Ok(())
});
-l.run(done).unwrap();
+// And finally now that we've defined what our server is, we run it! We
+// didn't actually do much I/O up to this point and this `Core::run` method
+// is responsible for driving the entire server to completion.
+//
+// The `run` method will return the result of the future that it's running,
+// but in our case the `done` future won't ever finish because a TCP
+// listener is never done accepting clients. That basically just means that
+// we're going to be running the server until it's killed (e.g. ctrl-c).
+core.run(done).unwrap();
}
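
To make the comment about `handle.spawn` concrete, here is a rough sketch that is not part of this commit: the same echo loop, but with the per-connection future returned straight from the `for_each` closure instead of being spawned. The address and port here are arbitrary. Because `for_each` drives the returned future to completion before taking the next connection off the stream, this version should still type-check against the same tokio-core API shown above, but it only serves one client at a time, which is exactly the behavior `spawn` avoids:

extern crate futures;
extern crate tokio_core;

use std::net::SocketAddr;

use futures::{Future, Stream};
use tokio_core::io::{copy, Io};
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;

fn main() {
    // Arbitrary address for the sketch; any free port would do.
    let addr = "127.0.0.1:8081".parse::<SocketAddr>().unwrap();

    let mut core = Core::new().unwrap();
    let handle = core.handle();
    let socket = TcpListener::bind(&addr, &handle).unwrap();

    // Same echo logic as above, but the per-connection future is returned
    // from the closure rather than handed to `handle.spawn`. `for_each`
    // won't accept connection N+1 until connection N has finished echoing,
    // so clients are served sequentially instead of concurrently.
    let done = socket.incoming().for_each(|(socket, addr)| {
        let (reader, writer) = socket.split();
        copy(reader, writer).map(move |amt| {
            println!("wrote {} bytes to {}", amt, addr);
        })
    });
    core.run(done).unwrap();
}

As before, `core.run(done)` only returns if the listener itself hits an error, so in practice both versions run until the process is killed.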