Mirror of https://github.com/tokio-rs/tokio.git (synced 2025-10-01 12:20:39 +00:00)

Commit 7ae124077e (parent 307ba7a867)

Improve latency benchmarks.
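The main change in the diff below: the Arc<AtomicBool> stop flag (checked on every poll, and nudged awake with a short sleep plus an extra packet) is replaced by a futures oneshot that the benchmark completes and the server future polls at the top of its loop. What follows is a minimal, self-contained sketch of that shutdown pattern against the futures 0.1-era API the diff uses; the Server type and its empty work step are placeholders standing in for EchoServer/ChanServer, not code from the commit.

extern crate futures;
extern crate tokio_core;

use std::io;
use std::thread;

use futures::{oneshot, Async, Future, Oneshot, Poll};
use tokio_core::reactor::Core;

// Placeholder for EchoServer/ChanServer: all it does is watch the stop
// oneshot, the way the patched benchmark servers do in their poll() loop.
struct Server {
    stop: Oneshot<()>,
}

impl Future for Server {
    type Item = ();
    type Error = io::Error;

    fn poll(&mut self) -> Poll<(), io::Error> {
        // Resolve once the oneshot is completed (or its sender is dropped);
        // polling it while still pending also registers the wakeup.
        if self.stop.poll() != Ok(Async::NotReady) {
            return Ok(Async::Ready(()));
        }
        // ... a real server would do its echo work here ...
        Ok(Async::NotReady)
    }
}

fn main() {
    let (stop_c, stop_p) = oneshot::<()>();

    let child = thread::spawn(move || {
        let mut l = Core::new().unwrap();
        l.run(Server { stop: stop_p }).unwrap();
    });

    // Completing the oneshot is the whole shutdown protocol; no stop flag,
    // no sleep, no extra wake-up packet.
    stop_c.complete(());
    child.join().unwrap();
}

Note that the `!= Ok(Async::NotReady)` comparison taken from the diff treats both completion and cancellation (sender dropped) as a stop signal.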
@@ -6,8 +6,6 @@ extern crate futures;
 extern crate tokio_core;
 
 use std::io;
-use std::sync::Arc;
-use std::sync::atomic::{AtomicBool, Ordering};
 use std::net::SocketAddr;
 
 use futures::{Future, Poll};
@@ -16,20 +14,20 @@ use tokio_core::reactor::Core;
 
 use test::Bencher;
 use std::thread;
-use std::time::Duration;
 
 use futures::stream::Stream;
+use futures::{oneshot, Oneshot};
 
 /// UDP echo server
 struct EchoServer {
     socket : UdpSocket,
     buf : Vec<u8>,
     to_send : Option<(usize, SocketAddr)>,
-    stop : Arc<AtomicBool>,
+    stop : Oneshot<()>,
 }
 
 impl EchoServer {
-    fn new(s : UdpSocket, stop : Arc<AtomicBool>) -> Self {
+    fn new(s : UdpSocket, stop : Oneshot<()>) -> Self {
         EchoServer {
             socket: s,
             to_send: None,
@@ -45,19 +43,14 @@ impl Future for EchoServer {
 
     fn poll(&mut self) -> Poll<(), io::Error> {
         loop {
-            if self.stop.load(Ordering::SeqCst) {
+            if self.stop.poll() != Ok(futures::Async::NotReady) {
                 return Ok(futures::Async::Ready(()))
             }
 
-            if let Some((size, peer)) = self.to_send.take() {
-                match self.socket.send_to(&self.buf[..size], &peer) {
-                    Err(e) => {
-                        self.to_send = Some((size, peer));
-                        return try_nb!(Err(e));
-                    },
-                    Ok(_) => { }
-                }
-            }
+            if let Some(&(size, peer)) = self.to_send.as_ref() {
+                let _ = try_nb!(self.socket.send_to(&self.buf[..size], &peer));
+            }
+            self.to_send = None;
 
             self.to_send = Some(
                 try_nb!(self.socket.recv_from(&mut self.buf))
@@ -67,20 +60,20 @@ impl Future for EchoServer {
 }
 
 /// UDP echo server
+///
+/// TODO: This may be replace-able with the newly-minted Sink::send_all function in the futures crate
 struct ChanServer {
     rx : tokio_core::channel::Receiver<usize>,
     tx : tokio_core::channel::Sender<usize>,
     buf : Option<usize>,
-    stop : Arc<AtomicBool>,
 }
 
 impl ChanServer {
-    fn new(tx : tokio_core::channel::Sender<usize>, rx : tokio_core::channel::Receiver<usize>, stop : Arc<AtomicBool>) -> Self {
+    fn new(tx : tokio_core::channel::Sender<usize>, rx : tokio_core::channel::Receiver<usize>) -> Self {
         ChanServer {
             rx: rx,
             tx: tx,
             buf: None,
-            stop: stop,
         }
     }
 }
@@ -91,10 +84,6 @@ impl Future for ChanServer {
 
     fn poll(&mut self) -> Poll<(), io::Error> {
         loop {
-            if self.stop.load(Ordering::SeqCst) {
-                return Ok(futures::Async::Ready(()))
-            }
-
             if let Some(u) = self.buf.take() {
                 match self.tx.send(u) {
                     Err(e) => {
@@ -114,15 +103,17 @@ impl Future for ChanServer {
         }
     }
 }
 
 #[bench]
 fn udp_echo_latency(b: &mut Bencher) {
-    let server_addr= "127.0.0.1:7398".to_string();
+    let server_addr= "127.0.0.1:0".to_string();
     let server_addr = server_addr.parse::<SocketAddr>().unwrap();
-    let client_addr= "127.0.0.1:7399".to_string();
+    let client_addr= "127.0.0.1:0".to_string();
     let client_addr = client_addr.parse::<SocketAddr>().unwrap();
 
-    let stop = Arc::new(AtomicBool::new(false));
-    let stop2 = stop.clone();
+    let (stop_c, stop_p) = oneshot::<()>();
+
+    let (tx, rx) = std::sync::mpsc::channel();
+
     let child = thread::spawn(move || {
         let mut l = Core::new().unwrap();
@@ -130,24 +121,23 @@ fn udp_echo_latency(b: &mut Bencher) {
 
         let socket = tokio_core::net::UdpSocket::bind(&server_addr, &handle).unwrap();
 
-        let server = EchoServer::new(socket, stop);
+        tx.send(socket.local_addr().unwrap()).unwrap();
+        let server = EchoServer::new(socket, stop_p);
 
         l.run(server).unwrap();
     });
 
-    // TODO: More reliable way to bind server socket and start server
-    // first
-    thread::sleep(Duration::from_millis(100));
 
     let client = std::net::UdpSocket::bind(client_addr).unwrap();
 
+    let server_addr = rx.recv().unwrap();
     let mut buf = [0u8; 1000];
 
     // warmup phase; for some reason initial couple of
-    // rounds is much slower
+    // runs are much slower
     //
     // TODO: Describe the exact reasons; caching? branch predictor? lazy closures?
-    for _ in 0..1000 {
+    for _ in 0..8 {
         client.send_to(&buf, &server_addr).unwrap();
         let _ = client.recv_from(&mut buf).unwrap();
     }
@@ -157,21 +147,14 @@ fn udp_echo_latency(b: &mut Bencher) {
         let _ = client.recv_from(&mut buf).unwrap();
     });
-
-    // Stop the server; TODO: Use better method
-    stop2.store(true, Ordering::SeqCst);
-    thread::sleep(Duration::from_millis(1));
-    client.send_to(&buf, &server_addr).unwrap();
+    stop_c.complete(());
 
     child.join().unwrap();
 }
 
 #[bench]
 fn channel_latency(b: &mut Bencher) {
-    let stop = Arc::new(AtomicBool::new(false));
-    let stop2 = stop.clone();
-
     // TODO: Any way to start Loop on a separate thread and yet get
     // a tokio_core::channel to it?
     let (tx, rx) = std::sync::mpsc::channel();
 
     let child = thread::spawn(move || {
@@ -181,7 +164,7 @@ fn channel_latency(b: &mut Bencher) {
         let (in_tx, in_rx) = tokio_core::channel::channel(&handle).unwrap();
         let (out_tx, out_rx) = tokio_core::channel::channel(&handle).unwrap();
 
-        let server = ChanServer::new(out_tx, in_rx, stop);
+        let server = ChanServer::new(out_tx, in_rx);
 
         tx.send((in_tx, out_rx)).unwrap();
         l.run(server).unwrap();
@@ -189,17 +172,12 @@ fn channel_latency(b: &mut Bencher) {
 
     let (in_tx, out_rx) = rx.recv().unwrap();
 
-    // TODO: More reliable way to bind server socket and start server
-    // first
-    thread::sleep(Duration::from_millis(100));
-
     let mut rx_iter = out_rx.wait();
 
-    // warmup phase; for some reason initial couple of
-    // rounds is much slower
+    // warmup phase; for some reason initial couple of runs are much slower
     //
     // TODO: Describe the exact reasons; caching? branch predictor? lazy closures?
-    for _ in 0..1000 {
+    for _ in 0..8 {
         in_tx.send(1usize).unwrap();
         let _ = rx_iter.next();
     }
@@ -209,10 +187,7 @@ fn channel_latency(b: &mut Bencher) {
         let _ = rx_iter.next();
     });
 
-    // Stop the server; TODO: Use better method
-    stop2.store(true, Ordering::SeqCst);
-    thread::sleep(Duration::from_millis(1));
-    in_tx.send(1usize).unwrap();
+    drop(in_tx);
 
     child.join().unwrap();
 }
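Two smaller improvements in the same diff: the UDP benchmark now binds both sockets to port 0 and passes the server's real address back over a std::sync::mpsc channel (so recv() doubles as the "server is ready" signal), replacing the hard-coded ports 7398/7399 and the 100 ms startup sleep; and the warmup loops drop from 1000 to 8 iterations. Below is a small sketch of the address-handoff idea, written against std::net directly so it stands alone; the names are illustrative rather than taken from the commit.

use std::net::UdpSocket;
use std::sync::mpsc;
use std::thread;

fn main() {
    let (addr_tx, addr_rx) = mpsc::channel();

    let server = thread::spawn(move || {
        // Bind to port 0: the OS picks a free port, so nothing can collide
        // with a hard-coded 7398/7399.
        let socket = UdpSocket::bind("127.0.0.1:0").unwrap();
        // Publishing the real address doubles as the "server is bound and
        // ready" signal, replacing the former thread::sleep(100 ms).
        addr_tx.send(socket.local_addr().unwrap()).unwrap();

        let mut buf = [0u8; 1000];
        let (n, peer) = socket.recv_from(&mut buf).unwrap();
        socket.send_to(&buf[..n], &peer).unwrap(); // echo one datagram and exit
    });

    let server_addr = addr_rx.recv().unwrap(); // blocks until the server is bound
    let client = UdpSocket::bind("127.0.0.1:0").unwrap();

    let buf = [0u8; 1000];
    client.send_to(&buf, &server_addr).unwrap();
    let mut reply = [0u8; 1000];
    let _ = client.recv_from(&mut reply).unwrap();

    server.join().unwrap();
}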