Add stress test (#3222)

Created a simple echo TCP server, split across two different runtimes, that is
called from a GitHub Action using Valgrind to ensure that there are
no memory leaks.

Fixes: #3022
Blas Rodriguez Irizar 2020-12-08 06:12:22 +01:00 committed by GitHub
parent 57dffb9dfe
commit e01391351b
4 changed files with 102 additions and 0 deletions

.github/workflows/stress-test.yml (new file, 29 lines)

@@ -0,0 +1,29 @@
name: Stress Test
on:
  push:
    branches:
      - master

jobs:
  stress-test:
    name: Stress Test
    runs-on: ubuntu-latest
    strategy:
      matrix:
        stress-test:
          - simple_echo_tcp
    steps:
      - uses: actions/checkout@v2
      - name: Install Rust
        run: rustup update stable
      - name: Install Valgrind
        run: sudo apt-get install -y valgrind
      # Compiles each of the stress test examples.
      - name: Compile stress test examples
        run: cargo build -p stress-test --release --example ${{ matrix.stress-test }}
      # Runs each of the examples using Valgrind. Detects leaks and displays them.
      - name: Run valgrind
        run: valgrind --leak-check=full --show-leak-kinds=all ./target/release/examples/${{ matrix.stress-test }}
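
The same check can be reproduced locally with roughly the equivalent of the two
run: steps above (a sketch, assuming Valgrind is installed and using the
simple_echo_tcp example from the matrix):

    cargo build -p stress-test --release --example simple_echo_tcp
    valgrind --leak-check=full --show-leak-kinds=all ./target/release/examples/simple_echo_tcp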

Cargo.toml

@@ -9,6 +9,7 @@ members = [
    # Internal
    "benches",
    "examples",
    "stress-test",
    "tests-build",
    "tests-integration",
]

stress-test/Cargo.toml (new file, 14 lines)

@@ -0,0 +1,14 @@
[package]
name = "stress-test"
version = "0.1.0"
authors = ["Tokio Contributors <team@tokio.rs>"]
edition = "2018"
publish = false

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
tokio = { path = "../tokio/", features = ["full"] }

[dev-dependencies]
rand = "0.7.3"

View File

@@ -0,0 +1,58 @@
//! Simple TCP echo server to check memory leaks using Valgrind.
use std::{thread::sleep, time::Duration};
use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    net::{TcpListener, TcpSocket},
    runtime::Builder,
    sync::oneshot,
};

const TCP_ENDPOINT: &str = "127.0.0.1:8080";
const NUM_MSGS: usize = 10_000;
const MSG_SIZE: usize = 1024;

fn main() {
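    // Two separate multi-threaded runtimes: `rt` drives the echo server and
    // `rt2` drives the client, so the two sides do not share a scheduler.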
    let rt = Builder::new_multi_thread().enable_io().build().unwrap();
    let rt2 = Builder::new_multi_thread().enable_io().build().unwrap();

    rt.spawn(async {
        let listener = TcpListener::bind(TCP_ENDPOINT).await.unwrap();
        let (mut socket, _) = listener.accept().await.unwrap();

        let (mut rd, mut wr) = socket.split();
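        // Echo whatever the client sends back to it; the loop only stops on an
        // I/O error (otherwise the task is torn down when `rt` is dropped).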
        while tokio::io::copy(&mut rd, &mut wr).await.is_ok() {}
    });

    // wait a bit so that the listener binds.
    sleep(Duration::from_millis(100));

    // create a channel to let the main thread know that all the messages were sent and received.
    let (tx, mut rx) = oneshot::channel();

    rt2.spawn(async {
        let addr = TCP_ENDPOINT.parse().unwrap();
        let socket = TcpSocket::new_v4().unwrap();
        let mut stream = socket.connect(addr).await.unwrap();
        let mut buff = [0; MSG_SIZE];

        for _ in 0..NUM_MSGS {
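            // Write MSG_SIZE random bytes, then read whatever has been echoed
            // back into `buff`.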
            let one_mega_random_bytes: Vec<u8> =
                (0..MSG_SIZE).map(|_| rand::random::<u8>()).collect();
            stream
                .write_all(one_mega_random_bytes.as_slice())
                .await
                .unwrap();
            stream.read(&mut buff).await.unwrap();
        }

        tx.send(()).unwrap();
    });

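    // Spin on the oneshot receiver from the non-async main thread until the
    // client task signals that every message has been sent and echoed back.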
    loop {
        // check that we're done.
        match rx.try_recv() {
            Err(oneshot::error::TryRecvError::Empty) => (),
            Err(oneshot::error::TryRecvError::Closed) => panic!("channel got closed..."),
            Ok(()) => break,
        }
    }
}