
Some of the benchmarks were broken and/or using deprecated APIs. This patch updates the benches and requires them all to compile without warnings in order to pass CI.
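In the file below, that requirement is enforced at the crate level with #![deny(warnings)], which turns every warning into a hard error, while #![feature(test)] pulls in the unstable test crate needed for the #[bench] harness.
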
#![feature(test)]
#![deny(warnings)]

extern crate tokio_threadpool;
extern crate futures;
extern crate futures_cpupool;
extern crate num_cpus;
extern crate test;

const NUM_SPAWN: usize = 10_000;
const NUM_YIELD: usize = 1_000;
const TASKS_PER_CPU: usize = 50;

mod threadpool {
    use futures::{future, task, Async};
    use tokio_threadpool::*;
    use num_cpus;
    use test;
    use std::sync::{mpsc, Arc};
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering::SeqCst;

    #[bench]
    fn spawn_many(b: &mut test::Bencher) {
        let threadpool = ThreadPool::new();

        // Channel used to signal that the last spawned task has run.
        let (tx, rx) = mpsc::sync_channel(10);
        let rem = Arc::new(AtomicUsize::new(0));

        b.iter(move || {
            rem.store(super::NUM_SPAWN, SeqCst);

            for _ in 0..super::NUM_SPAWN {
                let tx = tx.clone();
                let rem = rem.clone();

                threadpool.spawn(future::lazy(move || {
                    // The task that takes the counter to zero signals completion.
                    if 1 == rem.fetch_sub(1, SeqCst) {
                        tx.send(()).unwrap();
                    }

                    Ok(())
                }));
            }

            let _ = rx.recv().unwrap();
        });
    }

    #[bench]
    fn yield_many(b: &mut test::Bencher) {
        let threadpool = ThreadPool::new();
        let tasks = super::TASKS_PER_CPU * num_cpus::get();

        let (tx, rx) = mpsc::sync_channel(tasks);

        b.iter(move || {
            for _ in 0..tasks {
                // Each task is polled NUM_YIELD times, yielding back to the
                // pool between polls.
                let mut rem = super::NUM_YIELD;
                let tx = tx.clone();

                threadpool.spawn(future::poll_fn(move || {
                    rem -= 1;

                    if rem == 0 {
                        tx.send(()).unwrap();
                        Ok(Async::Ready(()))
                    } else {
                        // Notify the current task
                        task::current().notify();

                        // Not ready
                        Ok(Async::NotReady)
                    }
                }));
            }

            for _ in 0..tasks {
                let _ = rx.recv().unwrap();
            }
        });
    }
}

// In this case, CPU pool completes the benchmark faster, but this is due to how
// CpuPool currently behaves, starving other futures. This completes the
// benchmark quickly but results in poor runtime characteristics for a thread
// pool.
//
// See alexcrichton/futures-rs#617
//
mod cpupool {
    // Same benchmarks as above, but driven through futures_cpupool's CpuPool
    // via the futures Executor trait.
    use futures::{task, Async};
    use futures::future::{self, Executor};
    use futures_cpupool::*;
    use num_cpus;
    use test;
    use std::sync::{mpsc, Arc};
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering::SeqCst;

    #[bench]
    fn spawn_many(b: &mut test::Bencher) {
        let pool = CpuPool::new(num_cpus::get());

        let (tx, rx) = mpsc::sync_channel(10);
        let rem = Arc::new(AtomicUsize::new(0));

        b.iter(move || {
            rem.store(super::NUM_SPAWN, SeqCst);

            for _ in 0..super::NUM_SPAWN {
                let tx = tx.clone();
                let rem = rem.clone();

                pool.execute(future::lazy(move || {
                    if 1 == rem.fetch_sub(1, SeqCst) {
                        tx.send(()).unwrap();
                    }

                    Ok(())
                })).ok().unwrap();
            }

            let _ = rx.recv().unwrap();
        });
    }

    #[bench]
    fn yield_many(b: &mut test::Bencher) {
        let pool = CpuPool::new(num_cpus::get());
        let tasks = super::TASKS_PER_CPU * num_cpus::get();

        let (tx, rx) = mpsc::sync_channel(tasks);

        b.iter(move || {
            for _ in 0..tasks {
                let mut rem = super::NUM_YIELD;
                let tx = tx.clone();

                pool.execute(future::poll_fn(move || {
                    rem -= 1;

                    if rem == 0 {
                        tx.send(()).unwrap();
                        Ok(Async::Ready(()))
                    } else {
                        // Notify the current task
                        task::current().notify();

                        // Not ready
                        Ok(Async::NotReady)
                    }
                })).ok().unwrap();
            }

            for _ in 0..tasks {
                let _ = rx.recv().unwrap();
            }
        });
    }
}
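
For a quick sanity check of the spawn_many pattern outside the #[bench] harness (and therefore on a stable toolchain), the same spawn-and-count-down body can be run as a standalone program. This is a minimal sketch assuming the futures 0.1 and tokio-threadpool APIs used above; the shutdown_on_idle call at the end is an assumption about how to drain the pool before exiting and is not part of the benchmark itself.

extern crate futures;
extern crate tokio_threadpool;

use futures::{future, Future};
use std::sync::{mpsc, Arc};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let pool = tokio_threadpool::ThreadPool::new();

    // Same completion signal as in spawn_many: the task that decrements the
    // counter to zero notifies the main thread.
    let (tx, rx) = mpsc::sync_channel(1);
    let rem = Arc::new(AtomicUsize::new(0));

    let num_spawn: usize = 10_000;
    rem.store(num_spawn, SeqCst);

    for _ in 0..num_spawn {
        let tx = tx.clone();
        let rem = rem.clone();

        pool.spawn(future::lazy(move || {
            if 1 == rem.fetch_sub(1, SeqCst) {
                tx.send(()).unwrap();
            }

            Ok(())
        }));
    }

    rx.recv().unwrap();

    // Assumed API: shutdown_on_idle consumes the pool and returns a future
    // that resolves once no more tasks are pending.
    pool.shutdown_on_idle().wait().unwrap();
}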