Merge branch 'master' and 'merge-1.8.3'

Alice Ryhl 2021-07-26 21:07:52 +02:00
commit 0de05422ce
68 changed files with 2430 additions and 1571 deletions


@ -9,7 +9,7 @@ name: CI
env:
RUSTFLAGS: -Dwarnings
RUST_BACKTRACE: 1
nightly: nightly-2021-04-25
nightly: nightly-2021-07-09
minrust: 1.45.2
jobs:


@ -56,7 +56,7 @@ Make sure you have activated the full features of the tokio crate in your Cargo.toml:
```toml
[dependencies]
tokio = { version = "1.8.0", features = ["full"] }
tokio = { version = "1.9.0", features = ["full"] }
```
Then, on your main.rs:
@ -140,8 +140,7 @@ several other libraries, including:
* [`tower`]: A library of modular and reusable components for building robust networking clients and servers.
* [`tracing`] (formerly `tokio-trace`): A framework for application-level
tracing and async-aware diagnostics.
* [`tracing`] (formerly `tokio-trace`): A framework for application-level tracing and async-aware diagnostics.
* [`rdbc`]: A Rust database connectivity library for MySQL, Postgres and SQLite.
@ -164,9 +163,35 @@ several other libraries, including:
## Supported Rust Versions
Tokio is built against the latest stable release. The minimum supported version is 1.45.
The current Tokio version is not guaranteed to build on Rust versions earlier than the
minimum supported version.
Tokio is built against the latest stable release. The minimum supported version
is 1.45. The current Tokio version is not guaranteed to build on Rust versions
earlier than the minimum supported version.
## Release schedule
Tokio doesn't follow a fixed release schedule, but we typically make one to two
new minor releases each month. We make patch releases for bugfixes as necessary.
## Bug patching policy
For the purposes of making patch releases with bugfixes, we have designated
certain minor releases as LTS (long term support) releases. Whenever a bug
warrants a patch release with a fix for the bug, it will be backported and
released as a new patch release for each LTS minor version. Our current LTS
releases are:
* `1.8.x` - LTS release until February 2022.
Each LTS release will continue to receive backported fixes for at least half a
year. If you wish to use a fixed minor release in your project, we recommend
that you use an LTS release.
To use a fixed minor version, you can specify the version with a tilde. For
example, to specify that you wish to use the newest `1.8.x` patch release, you
can use the following dependency specification:
```text
tokio = { version = "~1.8", features = [...] }
```
## License


@ -2,63 +2,83 @@
//! This essentially measures the time to enqueue a task in the local and
//! remote cases.
#[macro_use]
extern crate bencher;
use bencher::{black_box, Bencher};
async fn work() -> usize {
let val = 1 + 1;
tokio::task::yield_now().await;
black_box(val)
}
fn basic_scheduler_local_spawn(bench: &mut Bencher) {
fn basic_scheduler_spawn(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
runtime.block_on(async {
bench.iter(|| {
let h = tokio::spawn(work());
black_box(h);
})
});
}
fn threaded_scheduler_local_spawn(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
runtime.block_on(async {
bench.iter(|| {
let h = tokio::spawn(work());
black_box(h);
})
});
}
fn basic_scheduler_remote_spawn(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
bench.iter(|| {
let h = runtime.spawn(work());
black_box(h);
runtime.block_on(async {
let h = tokio::spawn(work());
assert_eq!(h.await.unwrap(), 2);
});
});
}
fn threaded_scheduler_remote_spawn(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap();
fn basic_scheduler_spawn10(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
bench.iter(|| {
let h = runtime.spawn(work());
black_box(h);
runtime.block_on(async {
let mut handles = Vec::with_capacity(10);
for _ in 0..10 {
handles.push(tokio::spawn(work()));
}
for handle in handles {
assert_eq!(handle.await.unwrap(), 2);
}
});
});
}
fn threaded_scheduler_spawn(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
bench.iter(|| {
runtime.block_on(async {
let h = tokio::spawn(work());
assert_eq!(h.await.unwrap(), 2);
});
});
}
fn threaded_scheduler_spawn10(bench: &mut Bencher) {
let runtime = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
bench.iter(|| {
runtime.block_on(async {
let mut handles = Vec::with_capacity(10);
for _ in 0..10 {
handles.push(tokio::spawn(work()));
}
for handle in handles {
assert_eq!(handle.await.unwrap(), 2);
}
});
});
}
bencher::benchmark_group!(
spawn,
basic_scheduler_local_spawn,
threaded_scheduler_local_spawn,
basic_scheduler_remote_spawn,
threaded_scheduler_remote_spawn
basic_scheduler_spawn,
basic_scheduler_spawn10,
threaded_scheduler_spawn,
threaded_scheduler_spawn10,
);
bencher::benchmark_main!(spawn);


@ -1,3 +1,9 @@
# 1.3.0 (July 7, 2021)
- macros: don't trigger `clippy::unwrap_used` ([#3926])
[#3926]: https://github.com/tokio-rs/tokio/pull/3926
# 1.2.0 (May 14, 2021)
- macros: forward input arguments in `#[tokio::test]` ([#3691])


@ -6,13 +6,13 @@ name = "tokio-macros"
# - Cargo.toml
# - Update CHANGELOG.md.
# - Create "tokio-macros-1.0.x" git tag.
version = "1.2.0"
version = "1.3.0"
edition = "2018"
authors = ["Tokio Contributors <team@tokio.rs>"]
license = "MIT"
repository = "https://github.com/tokio-rs/tokio"
homepage = "https://tokio.rs"
documentation = "https://docs.rs/tokio-macros/1.2.0/tokio_macros"
documentation = "https://docs.rs/tokio-macros/1.3.0/tokio_macros"
description = """
Tokio's proc macros.
"""


@ -201,12 +201,15 @@ fn parse_knobs(
for arg in args {
match arg {
syn::NestedMeta::Meta(syn::Meta::NameValue(namevalue)) => {
let ident = namevalue.path.get_ident();
if ident.is_none() {
let msg = "Must have specified ident";
return Err(syn::Error::new_spanned(namevalue, msg));
}
match ident.unwrap().to_string().to_lowercase().as_str() {
let ident = namevalue
.path
.get_ident()
.ok_or_else(|| {
syn::Error::new_spanned(&namevalue, "Must have specified ident")
})?
.to_string()
.to_lowercase();
match ident.as_str() {
"worker_threads" => {
config.set_worker_threads(
namevalue.lit.clone(),
@ -239,12 +242,11 @@ fn parse_knobs(
}
}
syn::NestedMeta::Meta(syn::Meta::Path(path)) => {
let ident = path.get_ident();
if ident.is_none() {
let msg = "Must have specified ident";
return Err(syn::Error::new_spanned(path, msg));
}
let name = ident.unwrap().to_string().to_lowercase();
let name = path
.get_ident()
.ok_or_else(|| syn::Error::new_spanned(&path, "Must have specified ident"))?
.to_string()
.to_lowercase();
let msg = match name.as_str() {
"threaded_scheduler" | "multi_thread" => {
format!(
@ -326,11 +328,11 @@ fn parse_knobs(
#rt
.enable_all()
.build()
.unwrap()
.expect("Failed building the Runtime")
.block_on(async #body)
}
})
.unwrap();
.expect("Parsing failure");
input.block.brace_token = brace_token;
let result = quote! {


@ -5,7 +5,7 @@
rust_2018_idioms,
unreachable_pub
)]
#![cfg_attr(docsrs, deny(broken_intra_doc_links))]
#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))


@ -1,3 +1,13 @@
# 0.1.7 (July 7, 2021)
### Fixed
- sync: fix watch wrapper ([#3914])
- time: fix `Timeout::size_hint` ([#3902])
[#3902]: https://github.com/tokio-rs/tokio/pull/3902
[#3914]: https://github.com/tokio-rs/tokio/pull/3914
# 0.1.6 (May 14, 2021)
### Added


@ -6,13 +6,13 @@ name = "tokio-stream"
# - Cargo.toml
# - Update CHANGELOG.md.
# - Create "tokio-stream-0.1.x" git tag.
version = "0.1.6"
version = "0.1.7"
edition = "2018"
authors = ["Tokio Contributors <team@tokio.rs>"]
license = "MIT"
repository = "https://github.com/tokio-rs/tokio"
homepage = "https://tokio.rs"
documentation = "https://docs.rs/tokio-stream/0.1.6/tokio_stream"
documentation = "https://docs.rs/tokio-stream/0.1.7/tokio_stream"
description = """
Utilities to work with `Stream` and `tokio`.
"""
@ -30,7 +30,7 @@ signal = ["tokio/signal"]
[dependencies]
futures-core = { version = "0.3.0" }
pin-project-lite = "0.2.0"
tokio = { version = "1.2.0", path = "../tokio", features = ["sync"] }
tokio = { version = "1.8.0", path = "../tokio", features = ["sync"] }
tokio-util = { version = "0.6.3", path = "../tokio-util", optional = true }
[dev-dependencies]


@ -10,7 +10,7 @@
unreachable_pub
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, deny(broken_intra_doc_links))]
#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))


@ -113,7 +113,7 @@ impl<T: AsRef<str>> sealed::FromStreamPriv<T> for String {
}
fn finalize(_: sealed::Internal, collection: &mut String) -> String {
mem::replace(collection, String::new())
mem::take(collection)
}
}
@ -132,7 +132,7 @@ impl<T> sealed::FromStreamPriv<T> for Vec<T> {
}
fn finalize(_: sealed::Internal, collection: &mut Vec<T>) -> Vec<T> {
mem::replace(collection, vec![])
mem::take(collection)
}
}
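For context on the two hunks above: `mem::take(x)` is equivalent to `mem::replace(x, Default::default())`, so this is a pure refactor. A quick illustration:

```rust
use std::mem;

fn main() {
    let mut s = String::from("hello");
    // Replaces the value with its Default and returns the old one,
    // exactly like `mem::replace(&mut s, String::new())`.
    let taken = mem::take(&mut s);
    assert_eq!(taken, "hello");
    assert!(s.is_empty());
}
```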


@ -364,11 +364,11 @@ impl<K, V> StreamMap<K, V> {
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use tokio_stream::{StreamMap, pending};
///
/// let mut a = HashMap::new();
/// let mut a = StreamMap::new();
/// assert!(a.is_empty());
/// a.insert(1, "a");
/// a.insert(1, pending::<i32>());
/// assert!(!a.is_empty());
/// ```
pub fn is_empty(&self) -> bool {


@ -72,10 +72,10 @@ impl<T: Clone + 'static + Send + Sync> Stream for WatchStream<T> {
type Item = T;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let (result, rx) = ready!(self.inner.poll(cx));
let (result, mut rx) = ready!(self.inner.poll(cx));
match result {
Ok(_) => {
let received = (*rx.borrow()).clone();
let received = (*rx.borrow_and_update()).clone();
self.inner.set(make_future(rx));
Poll::Ready(Some(received))
}


@ -0,0 +1,27 @@
use tokio::sync::watch;
use tokio_stream::wrappers::WatchStream;
use tokio_stream::StreamExt;
#[tokio::test]
async fn message_not_twice() {
let (tx, rx) = watch::channel("hello");
let mut counter = 0;
let mut stream = WatchStream::new(rx).map(move |payload| {
println!("{}", payload);
if payload == "goodbye" {
counter += 1;
}
if counter >= 2 {
panic!("too many goodbyes");
}
});
let task = tokio::spawn(async move { while stream.next().await.is_some() {} });
// Send goodbye just once
tx.send("goodbye").unwrap();
drop(tx);
task.await.unwrap();
}


@ -4,7 +4,7 @@
rust_2018_idioms,
unreachable_pub
)]
#![cfg_attr(docsrs, deny(broken_intra_doc_links))]
#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))


@ -5,7 +5,7 @@
rust_2018_idioms,
unreachable_pub
)]
#![cfg_attr(docsrs, deny(broken_intra_doc_links))]
#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))


@ -35,7 +35,6 @@ use std::{io, mem::MaybeUninit};
/// [`Sink`]: futures_sink::Sink
/// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split
#[must_use = "sinks do nothing unless polled"]
#[cfg_attr(docsrs, doc(cfg(all(feature = "codec", feature = "udp"))))]
#[derive(Debug)]
pub struct UdpFramed<C, T = UdpSocket> {
socket: T,


@ -1,3 +1,36 @@
# 1.9.0 (July 22, 2021)
### Added
- net: allow customized I/O operations for `TcpStream` ([#3888])
- sync: add getter for the mutex from a guard ([#3928])
- task: expose nameable future for `TaskLocal::scope` ([#3273])
### Fixed
- Fix leak if output of future panics on drop ([#3967])
- Fix leak in `LocalSet` ([#3978])
### Changes
- runtime: reorganize parts of the runtime ([#3909], [#3939], [#3950], [#3955], [#3980])
- sync: clean up `OnceCell` ([#3945])
- task: remove mutex in `JoinError` ([#3959])
[#3273]: https://github.com/tokio-rs/tokio/pull/3273
[#3888]: https://github.com/tokio-rs/tokio/pull/3888
[#3909]: https://github.com/tokio-rs/tokio/pull/3909
[#3928]: https://github.com/tokio-rs/tokio/pull/3928
[#3934]: https://github.com/tokio-rs/tokio/pull/3934
[#3939]: https://github.com/tokio-rs/tokio/pull/3939
[#3945]: https://github.com/tokio-rs/tokio/pull/3945
[#3950]: https://github.com/tokio-rs/tokio/pull/3950
[#3955]: https://github.com/tokio-rs/tokio/pull/3955
[#3959]: https://github.com/tokio-rs/tokio/pull/3959
[#3967]: https://github.com/tokio-rs/tokio/pull/3967
[#3978]: https://github.com/tokio-rs/tokio/pull/3978
[#3980]: https://github.com/tokio-rs/tokio/pull/3980
# 1.8.3 (July 26, 2021)
This release backports two fixes from 1.9.0.


@ -7,12 +7,12 @@ name = "tokio"
# - README.md
# - Update CHANGELOG.md.
# - Create "v1.0.x" git tag.
version = "1.8.3"
version = "1.9.0"
edition = "2018"
authors = ["Tokio Contributors <team@tokio.rs>"]
license = "MIT"
readme = "README.md"
documentation = "https://docs.rs/tokio/1.8.3/tokio/"
documentation = "https://docs.rs/tokio/1.9.0/tokio/"
repository = "https://github.com/tokio-rs/tokio"
homepage = "https://tokio.rs"
description = """
@ -109,7 +109,7 @@ signal-hook-registry = { version = "1.1.1", optional = true }
[target.'cfg(unix)'.dev-dependencies]
libc = { version = "0.2.42" }
nix = { version = "0.19.0" }
nix = { version = "0.22.0" }
[target.'cfg(windows)'.dependencies.winapi]
version = "0.3.8"


@ -50,7 +50,15 @@ an asynchronous application.
## Example
A basic TCP echo server with Tokio:
A basic TCP echo server with Tokio.
Make sure you have activated the full features of the tokio crate in your Cargo.toml:
```toml
[dependencies]
tokio = { version = "1.8.0", features = ["full"] }
```
Then, on your main.rs:
```rust,no_run
use tokio::net::TcpListener;
@ -58,7 +66,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
let listener = TcpListener::bind("127.0.0.1:8080").await?;
loop {
let (mut socket, _) = listener.accept().await?;
@ -132,7 +140,7 @@ several other libraries, including:
* [`tower`]: A library of modular and reusable components for building robust networking clients and servers.
* [`tracing`]: A framework for application-level tracing and async-aware diagnostics.
* [`tracing`] (formerly `tokio-trace`): A framework for application-level tracing and async-aware diagnostics.
* [`rdbc`]: A Rust database connectivity library for MySQL, Postgres and SQLite.
@ -155,9 +163,35 @@ several other libraries, including:
## Supported Rust Versions
Tokio is built against the latest stable release. The minimum supported version is 1.45.
The current Tokio version is not guaranteed to build on Rust versions earlier than the
minimum supported version.
Tokio is built against the latest stable release. The minimum supported version
is 1.45. The current Tokio version is not guaranteed to build on Rust versions
earlier than the minimum supported version.
## Release schedule
Tokio doesn't follow a fixed release schedule, but we typically make one to two
new minor releases each month. We make patch releases for bugfixes as necessary.
## Bug patching policy
For the purposes of making patch releases with bugfixes, we have designated
certain minor releases as LTS (long term support) releases. Whenever a bug
warrants a patch release with a fix for the bug, it will be backported and
released as a new patch release for each LTS minor version. Our current LTS
releases are:
* `1.8.x` - LTS release until February 2022.
Each LTS release will continue to receive backported fixes for at least half a
year. If you wish to use a fixed minor release in your project, we recommend
that you use an LTS release.
To use a fixed minor version, you can specify the version with a tilde. For
example, to specify that you wish to use the newest `1.8.x` patch release, you
can use the following dependency specification:
```text
tokio = { version = "~1.8", features = [...] }
```
## License


@ -13,8 +13,12 @@ use std::{io, path::Path};
/// buffer based on the file size when available, so it is generally faster than
/// reading into a vector created with `Vec::new()`.
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`File::open`]: super::File::open
/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end
/// [`spawn_blocking`]: crate::task::spawn_blocking
///
/// # Errors
///


@ -13,6 +13,11 @@ use std::task::Poll;
/// Returns a stream over the entries within a directory.
///
/// This is an async version of [`std::fs::read_dir`](std::fs::read_dir)
///
/// This operation is implemented by running the equivalent blocking
/// operation on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
pub async fn read_dir(path: impl AsRef<Path>) -> io::Result<ReadDir> {
let path = path.as_ref().to_owned();
let std = asyncify(|| std::fs::read_dir(path)).await?;
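The `asyncify` helper used here is presumably a thin wrapper over [`spawn_blocking`], as the new doc comments describe. A minimal sketch under that assumption (not the exact tokio-internal code):

```rust
use std::io;
use tokio::task;

// Run a blocking closure on the blocking thread pool, surfacing a
// panicked or cancelled background task as an io::Error.
async fn asyncify<F, T>(f: F) -> io::Result<T>
where
    F: FnOnce() -> io::Result<T> + Send + 'static,
    T: Send + 'static,
{
    match task::spawn_blocking(f).await {
        Ok(res) => res,
        Err(_) => Err(io::Error::new(
            io::ErrorKind::Other,
            "background task failed",
        )),
    }
}
```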


@ -7,6 +7,10 @@ use std::{io, path::Path};
///
/// This is the async equivalent of [`std::fs::read_to_string`][std].
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [std]: fn@std::fs::read_to_string
///
/// # Examples


@ -7,6 +7,10 @@ use std::{io, path::Path};
///
/// This is the async equivalent of [`std::fs::write`][std].
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [std]: fn@std::fs::write
///
/// # Examples


@ -40,9 +40,8 @@ cfg_io_driver! {
/// [`poll_read_ready`] again will also indicate read readiness.
///
/// When the operation is attempted and is unable to succeed due to the I/O
/// resource not being ready, the caller must call `clear_read_ready` or
/// `clear_write_ready`. This clears the readiness state until a new
/// readiness event is received.
/// resource not being ready, the caller must call `clear_readiness`.
/// This clears the readiness state until a new readiness event is received.
///
/// This allows the caller to implement additional functions. For example,
/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and


@ -10,7 +10,7 @@
unreachable_pub
)]
#![deny(unused_must_use)]
#![cfg_attr(docsrs, deny(broken_intra_doc_links))]
#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))


@ -936,6 +936,41 @@ impl TcpStream {
.try_io(Interest::WRITABLE, || (&*self.io).write_vectored(bufs))
}
/// Try to read or write from the socket using a user-provided IO operation.
///
/// If the socket is ready, the provided closure is called. The closure
/// should attempt to perform an IO operation on the socket by manually
/// calling the appropriate syscall. If the operation fails because the
/// socket is not actually ready, then the closure should return a
/// `WouldBlock` error and the readiness flag is cleared. The return value
/// of the closure is then returned by `try_io`.
///
/// If the socket is not ready, then the closure is not called
/// and a `WouldBlock` error is returned.
///
/// The closure should only return a `WouldBlock` error if it has performed
/// an IO operation on the socket that failed due to the socket not being
/// ready. Returning a `WouldBlock` error in any other situation will
/// incorrectly clear the readiness flag, which can cause the socket to
/// behave incorrectly.
///
/// The closure should not perform the IO operation using any of the methods
/// defined on the Tokio `TcpStream` type, as this will mess with the
/// readiness flag and can cause the socket to behave incorrectly.
///
/// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: TcpStream::readable()
/// [`writable()`]: TcpStream::writable()
/// [`ready()`]: TcpStream::ready()
pub fn try_io<R>(
&self,
interest: Interest,
f: impl FnOnce() -> io::Result<R>,
) -> io::Result<R> {
self.io.registration().try_io(interest, f)
}
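To make the contract above concrete, here is a usage sketch (not part of this diff): peeking at incoming data with a raw `libc::recv` driven through `try_io`, retrying on `WouldBlock`. It assumes a Unix target and the `libc` crate.

```rust
use std::io;
use std::os::unix::io::AsRawFd;
use tokio::io::Interest;
use tokio::net::TcpStream;

async fn peek(stream: &TcpStream, buf: &mut [u8]) -> io::Result<usize> {
    loop {
        // Wait until the socket reports read readiness.
        stream.readable().await?;
        let res = stream.try_io(Interest::READABLE, || {
            // Perform the syscall manually, as the docs require; do not
            // use Tokio's own read methods inside the closure.
            let n = unsafe {
                libc::recv(
                    stream.as_raw_fd(),
                    buf.as_mut_ptr() as *mut _,
                    buf.len(),
                    libc::MSG_PEEK,
                )
            };
            if n < 0 {
                // EWOULDBLOCK surfaces here and clears the readiness flag.
                Err(io::Error::last_os_error())
            } else {
                Ok(n as usize)
            }
        });
        match res {
            Ok(n) => return Ok(n),
            // Readiness was a false positive; wait again.
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
}
```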
/// Receives data on the socket from the remote address to which it is
/// connected, without removing that data from the queue. On success,
/// returns the number of bytes peeked.


@ -1170,6 +1170,41 @@ impl UdpSocket {
.try_io(Interest::READABLE, || self.io.recv_from(buf))
}
/// Try to read or write from the socket using a user-provided IO operation.
///
/// If the socket is ready, the provided closure is called. The closure
/// should attempt to perform an IO operation on the socket by manually
/// calling the appropriate syscall. If the operation fails because the
/// socket is not actually ready, then the closure should return a
/// `WouldBlock` error and the readiness flag is cleared. The return value
/// of the closure is then returned by `try_io`.
///
/// If the socket is not ready, then the closure is not called
/// and a `WouldBlock` error is returned.
///
/// The closure should only return a `WouldBlock` error if it has performed
/// an IO operation on the socket that failed due to the socket not being
/// ready. Returning a `WouldBlock` error in any other situation will
/// incorrectly clear the readiness flag, which can cause the socket to
/// behave incorrectly.
///
/// The closure should not perform the IO operation using any of the methods
/// defined on the Tokio `UdpSocket` type, as this will mess with the
/// readiness flag and can cause the socket to behave incorrectly.
///
/// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: UdpSocket::readable()
/// [`writable()`]: UdpSocket::writable()
/// [`ready()`]: UdpSocket::ready()
pub fn try_io<R>(
&self,
interest: Interest,
f: impl FnOnce() -> io::Result<R>,
) -> io::Result<R> {
self.io.registration().try_io(interest, f)
}
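The same pattern applies on the write side. A sketch for a connected `UdpSocket`, again assuming Unix and the `libc` crate:

```rust
use std::io;
use std::os::unix::io::AsRawFd;
use tokio::io::Interest;
use tokio::net::UdpSocket;

async fn send_raw(socket: &UdpSocket, data: &[u8]) -> io::Result<usize> {
    loop {
        // Wait until the socket reports write readiness.
        socket.writable().await?;
        let res = socket.try_io(Interest::WRITABLE, || {
            // Manual syscall; a WouldBlock result clears the readiness flag.
            let n = unsafe {
                libc::send(socket.as_raw_fd(), data.as_ptr() as *const _, data.len(), 0)
            };
            if n < 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(n as usize)
            }
        });
        match res {
            Ok(n) => return Ok(n),
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
}
```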
/// Receives data from the socket, without removing it from the input queue.
/// On success, returns the number of bytes read and the address from whence
/// the data came.


@ -1143,6 +1143,41 @@ impl UnixDatagram {
Ok((n, SocketAddr(addr)))
}
/// Try to read or write from the socket using a user-provided IO operation.
///
/// If the socket is ready, the provided closure is called. The closure
/// should attempt to perform an IO operation on the socket by manually
/// calling the appropriate syscall. If the operation fails because the
/// socket is not actually ready, then the closure should return a
/// `WouldBlock` error and the readiness flag is cleared. The return value
/// of the closure is then returned by `try_io`.
///
/// If the socket is not ready, then the closure is not called
/// and a `WouldBlock` error is returned.
///
/// The closure should only return a `WouldBlock` error if it has performed
/// an IO operation on the socket that failed due to the socket not being
/// ready. Returning a `WouldBlock` error in any other situation will
/// incorrectly clear the readiness flag, which can cause the socket to
/// behave incorrectly.
///
/// The closure should not perform the IO operation using any of the methods
/// defined on the Tokio `UnixDatagram` type, as this will mess with the
/// readiness flag and can cause the socket to behave incorrectly.
///
/// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: UnixDatagram::readable()
/// [`writable()`]: UnixDatagram::writable()
/// [`ready()`]: UnixDatagram::ready()
pub fn try_io<R>(
&self,
interest: Interest,
f: impl FnOnce() -> io::Result<R>,
) -> io::Result<R> {
self.io.registration().try_io(interest, f)
}
/// Returns the local address that this socket is bound to.
///
/// # Examples


@ -653,6 +653,41 @@ impl UnixStream {
.try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf))
}
/// Try to read or write from the socket using a user-provided IO operation.
///
/// If the socket is ready, the provided closure is called. The closure
/// should attempt to perform an IO operation on the socket by manually
/// calling the appropriate syscall. If the operation fails because the
/// socket is not actually ready, then the closure should return a
/// `WouldBlock` error and the readiness flag is cleared. The return value
/// of the closure is then returned by `try_io`.
///
/// If the socket is not ready, then the closure is not called
/// and a `WouldBlock` error is returned.
///
/// The closure should only return a `WouldBlock` error if it has performed
/// an IO operation on the socket that failed due to the socket not being
/// ready. Returning a `WouldBlock` error in any other situation will
/// incorrectly clear the readiness flag, which can cause the socket to
/// behave incorrectly.
///
/// The closure should not perform the IO operation using any of the methods
/// defined on the Tokio `UnixStream` type, as this will mess with the
/// readiness flag and can cause the socket to behave incorrectly.
///
/// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: UnixStream::readable()
/// [`writable()`]: UnixStream::writable()
/// [`ready()`]: UnixStream::ready()
pub fn try_io<R>(
&self,
interest: Interest,
f: impl FnOnce() -> io::Result<R>,
) -> io::Result<R> {
self.io.registration().try_io(interest, f)
}
/// Creates new `UnixStream` from a `std::os::unix::net::UnixStream`.
///
/// This function is intended to be used to wrap a UnixStream from the


@ -31,15 +31,13 @@ impl UCred {
}
}
#[cfg(any(target_os = "linux", target_os = "android"))]
#[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd"))]
pub(crate) use self::impl_linux::get_peer_cred;
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
#[cfg(any(target_os = "netbsd"))]
pub(crate) use self::impl_netbsd::get_peer_cred;
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
pub(crate) use self::impl_bsd::get_peer_cred;
#[cfg(any(target_os = "macos", target_os = "ios"))]
@ -48,13 +46,16 @@ pub(crate) use self::impl_macos::get_peer_cred;
#[cfg(any(target_os = "solaris", target_os = "illumos"))]
pub(crate) use self::impl_solaris::get_peer_cred;
#[cfg(any(target_os = "linux", target_os = "android"))]
#[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd"))]
pub(crate) mod impl_linux {
use crate::net::unix::UnixStream;
use libc::{c_void, getsockopt, socklen_t, SOL_SOCKET, SO_PEERCRED};
use std::{io, mem};
#[cfg(target_os = "openbsd")]
use libc::sockpeercred as ucred;
#[cfg(any(target_os = "linux", target_os = "android"))]
use libc::ucred;
pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> {
@ -97,12 +98,49 @@ pub(crate) mod impl_linux {
}
}
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
#[cfg(any(target_os = "netbsd"))]
pub(crate) mod impl_netbsd {
use crate::net::unix::UnixStream;
use libc::{c_void, getsockopt, socklen_t, unpcbid, LOCAL_PEEREID, SOL_SOCKET};
use std::io;
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> {
unsafe {
let raw_fd = sock.as_raw_fd();
let mut unpcbid = unpcbid {
unp_pid: 0,
unp_euid: 0,
unp_egid: 0,
};
let unpcbid_size = size_of::<unpcbid>();
let mut unpcbid_size = unpcbid_size as socklen_t;
let ret = getsockopt(
raw_fd,
SOL_SOCKET,
LOCAL_PEEREID,
&mut unpcbid as *mut unpcbid as *mut c_void,
&mut unpcbid_size,
);
if ret == 0 && unpcbid_size as usize == size_of::<unpcbid>() {
Ok(super::UCred {
uid: unpcbid.unp_euid,
gid: unpcbid.unp_egid,
pid: Some(unpcbid.unp_pid),
})
} else {
Err(io::Error::last_os_error())
}
}
}
}
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
pub(crate) mod impl_bsd {
use crate::net::unix::UnixStream;


@ -723,6 +723,41 @@ impl NamedPipeServer {
.registration()
.try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf))
}
/// Try to read or write from the socket using a user-provided IO operation.
///
/// If the socket is ready, the provided closure is called. The closure
/// should attempt to perform an IO operation on the socket by manually
/// calling the appropriate syscall. If the operation fails because the
/// socket is not actually ready, then the closure should return a
/// `WouldBlock` error and the readiness flag is cleared. The return value
/// of the closure is then returned by `try_io`.
///
/// If the socket is not ready, then the closure is not called
/// and a `WouldBlock` error is returned.
///
/// The closure should only return a `WouldBlock` error if it has performed
/// an IO operation on the socket that failed due to the socket not being
/// ready. Returning a `WouldBlock` error in any other situation will
/// incorrectly clear the readiness flag, which can cause the socket to
/// behave incorrectly.
///
/// The closure should not perform the IO operation using any of the
/// methods defined on the Tokio `NamedPipeServer` type, as this will mess with
/// the readiness flag and can cause the socket to behave incorrectly.
///
/// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: NamedPipeServer::readable()
/// [`writable()`]: NamedPipeServer::writable()
/// [`ready()`]: NamedPipeServer::ready()
pub fn try_io<R>(
&self,
interest: Interest,
f: impl FnOnce() -> io::Result<R>,
) -> io::Result<R> {
self.io.registration().try_io(interest, f)
}
}
impl AsyncRead for NamedPipeServer {
@ -1343,6 +1378,41 @@ impl NamedPipeClient {
.registration()
.try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf))
}
/// Try to read or write from the socket using a user-provided IO operation.
///
/// If the socket is ready, the provided closure is called. The closure
/// should attempt to perform an IO operation on the socket by manually
/// calling the appropriate syscall. If the operation fails because the
/// socket is not actually ready, then the closure should return a
/// `WouldBlock` error and the readiness flag is cleared. The return value
/// of the closure is then returned by `try_io`.
///
/// If the socket is not ready, then the closure is not called
/// and a `WouldBlock` error is returned.
///
/// The closure should only return a `WouldBlock` error if it has performed
/// an IO operation on the socket that failed due to the socket not being
/// ready. Returning a `WouldBlock` error in any other situation will
/// incorrectly clear the readiness flag, which can cause the socket to
/// behave incorrectly.
///
/// The closure should not perform the IO operation using any of the methods
/// defined on the Tokio `NamedPipeClient` type, as this will mess with the
/// readiness flag and can cause the socket to behave incorrectly.
///
/// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: NamedPipeClient::readable()
/// [`writable()`]: NamedPipeClient::writable()
/// [`ready()`]: NamedPipeClient::ready()
pub fn try_io<R>(
&self,
interest: Interest,
f: impl FnOnce() -> io::Result<R>,
) -> io::Result<R> {
self.io.registration().try_io(interest, f)
}
}
impl AsyncRead for NamedPipeClient {


@ -551,6 +551,7 @@ impl Command {
///
/// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
#[cfg(windows)]
#[cfg_attr(docsrs, doc(cfg(windows)))]
pub fn creation_flags(&mut self, flags: u32) -> &mut Command {
self.std.creation_flags(flags);
self
@ -560,6 +561,7 @@ impl Command {
/// `setuid` call in the child process. Failure in the `setuid`
/// call will cause the spawn to fail.
#[cfg(unix)]
#[cfg_attr(docsrs, doc(cfg(unix)))]
pub fn uid(&mut self, id: u32) -> &mut Command {
self.std.uid(id);
self
@ -568,11 +570,26 @@ impl Command {
/// Similar to `uid` but sets the group ID of the child process. This has
/// the same semantics as the `uid` field.
#[cfg(unix)]
#[cfg_attr(docsrs, doc(cfg(unix)))]
pub fn gid(&mut self, id: u32) -> &mut Command {
self.std.gid(id);
self
}
/// Set executable argument
///
/// Set the first process argument, `argv[0]`, to something other than the
/// default executable path.
#[cfg(unix)]
#[cfg_attr(docsrs, doc(cfg(unix)))]
pub fn arg0<S>(&mut self, arg: S) -> &mut Command
where
S: AsRef<OsStr>,
{
self.std.arg0(arg);
self
}
/// Schedules a closure to be run just before the `exec` function is
/// invoked.
///
@ -603,6 +620,7 @@ impl Command {
/// working directory have successfully been changed, so output to these
/// locations may not appear where intended.
#[cfg(unix)]
#[cfg_attr(docsrs, doc(cfg(unix)))]
pub unsafe fn pre_exec<F>(&mut self, f: F) -> &mut Command
where
F: FnMut() -> io::Result<()> + Send + Sync + 'static,


@ -2,16 +2,14 @@ use crate::future::poll_fn;
use crate::loom::sync::atomic::AtomicBool;
use crate::loom::sync::Mutex;
use crate::park::{Park, Unpark};
use crate::runtime::task::{self, JoinHandle, Schedule, Task};
use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task};
use crate::sync::notify::Notify;
use crate::util::linked_list::{Link, LinkedList};
use crate::util::{waker_ref, Wake, WakerRef};
use std::cell::RefCell;
use std::collections::VecDeque;
use std::fmt;
use std::future::Future;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
use std::sync::Arc;
use std::task::Poll::{Pending, Ready};
@ -57,9 +55,6 @@ pub(crate) struct Spawner {
}
struct Tasks {
/// Collection of all active tasks spawned onto this executor.
owned: LinkedList<Task<Arc<Shared>>, <Task<Arc<Shared>> as Link>::Target>,
/// Local run queue.
///
/// Tasks notified from the current thread are pushed into this queue.
@ -69,23 +64,23 @@ struct Tasks {
/// A remote scheduler entry.
///
/// These are filled in by remote threads sending instructions to the scheduler.
enum Entry {
enum RemoteMsg {
/// A remote thread wants to spawn a task.
Schedule(task::Notified<Arc<Shared>>),
/// A remote thread wants a task to be released by the scheduler. We only
/// have access to its header.
Release(NonNull<task::Header>),
}
// Safety: Used correctly, the task header is "thread safe". Ultimately the task
// is owned by the current thread executor, for which this instruction is being
// sent.
unsafe impl Send for Entry {}
unsafe impl Send for RemoteMsg {}
/// Scheduler state shared between threads.
struct Shared {
/// Remote run queue. None if the `Runtime` has been dropped.
queue: Mutex<Option<VecDeque<Entry>>>,
queue: Mutex<Option<VecDeque<RemoteMsg>>>,
/// Collection of all active tasks spawned onto this executor.
owned: OwnedTasks<Arc<Shared>>,
/// Unpark the blocked thread.
unpark: Box<dyn Unpark>,
@ -125,6 +120,7 @@ impl<P: Park> BasicScheduler<P> {
let spawner = Spawner {
shared: Arc::new(Shared {
queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))),
owned: OwnedTasks::new(),
unpark: unpark as Box<dyn Unpark>,
woken: AtomicBool::new(false),
}),
@ -132,7 +128,6 @@ impl<P: Park> BasicScheduler<P> {
let inner = Mutex::new(Some(Inner {
tasks: Some(Tasks {
owned: LinkedList::new(),
queue: VecDeque::with_capacity(INITIAL_CAPACITY),
}),
spawner: spawner.clone(),
@ -227,7 +222,7 @@ impl<P: Park> Inner<P> {
.borrow_mut()
.queue
.pop_front()
.map(Entry::Schedule)
.map(RemoteMsg::Schedule)
})
} else {
context
@ -235,7 +230,7 @@ impl<P: Park> Inner<P> {
.borrow_mut()
.queue
.pop_front()
.map(Entry::Schedule)
.map(RemoteMsg::Schedule)
.or_else(|| scheduler.spawner.pop())
};
@ -251,26 +246,7 @@ impl<P: Park> Inner<P> {
};
match entry {
Entry::Schedule(task) => crate::coop::budget(|| task.run()),
Entry::Release(ptr) => {
// Safety: the task header is only legally provided
// internally in the header, so we know that it is a
// valid (or in particular *allocated*) header that
// is part of the linked list.
unsafe {
let removed = context.tasks.borrow_mut().owned.remove(ptr);
// TODO: This seems like it should hold, because
// there doesn't seem to be an avenue for anyone
// else to fiddle with the owned tasks
// collection *after* a remote thread has marked
// it as released, and at that point, the only
// location at which it can be removed is here
// or in the Drop implementation of the
// scheduler.
debug_assert!(removed.is_some());
}
}
RemoteMsg::Schedule(task) => crate::coop::budget(|| task.run()),
}
}
@ -335,14 +311,10 @@ impl<P: Park> Drop for BasicScheduler<P> {
};
enter(&mut inner, |scheduler, context| {
// Loop required here to ensure borrow is dropped between iterations
#[allow(clippy::while_let_loop)]
loop {
let task = match context.tasks.borrow_mut().owned.pop_back() {
Some(task) => task,
None => break,
};
// By closing the OwnedTasks, no new tasks can be spawned on it.
context.shared.owned.close();
// Drain the OwnedTasks collection.
while let Some(task) = context.shared.owned.pop_back() {
task.shutdown();
}
@ -358,13 +330,9 @@ impl<P: Park> Drop for BasicScheduler<P> {
if let Some(remote_queue) = remote_queue.take() {
for entry in remote_queue {
match entry {
Entry::Schedule(task) => {
RemoteMsg::Schedule(task) => {
task.shutdown();
}
Entry::Release(..) => {
// Do nothing, each entry in the linked list was *just*
// dropped by the scheduler above.
}
}
}
}
@ -375,7 +343,7 @@ impl<P: Park> Drop for BasicScheduler<P> {
// The assert below is unrelated to this mutex.
drop(remote_queue);
assert!(context.tasks.borrow().owned.is_empty());
assert!(context.shared.owned.is_empty());
});
}
}
@ -389,18 +357,22 @@ impl<P: Park> fmt::Debug for BasicScheduler<P> {
// ===== impl Spawner =====
impl Spawner {
/// Spawns a future onto the thread pool
/// Spawns a future onto the basic scheduler
pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: crate::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let (task, handle) = task::joinable(future);
self.shared.schedule(task);
let (handle, notified) = self.shared.owned.bind(future, self.shared.clone());
if let Some(notified) = notified {
self.shared.schedule(notified);
}
handle
}
fn pop(&self) -> Option<Entry> {
fn pop(&self) -> Option<RemoteMsg> {
match self.shared.queue.lock().as_mut() {
Some(queue) => queue.pop_front(),
None => None,
@ -427,42 +399,9 @@ impl fmt::Debug for Spawner {
// ===== impl Shared =====
impl Schedule for Arc<Shared> {
fn bind(task: Task<Self>) -> Arc<Shared> {
CURRENT.with(|maybe_cx| {
let cx = maybe_cx.expect("scheduler context missing");
cx.tasks.borrow_mut().owned.push_front(task);
cx.shared.clone()
})
}
fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
CURRENT.with(|maybe_cx| {
let ptr = NonNull::from(task.header());
if let Some(cx) = maybe_cx {
// safety: the task is inserted in the list in `bind`.
unsafe { cx.tasks.borrow_mut().owned.remove(ptr) }
} else {
// By sending an `Entry::Release` to the runtime, we ask the
// runtime to remove this task from the linked list in
// `Tasks::owned`.
//
// If the queue is `None`, then the task was already removed
// from that list in the destructor of `BasicScheduler`. We do
// not do anything in this case for the same reason that
// `Entry::Release` messages are ignored in the remote queue
// drain loop of `BasicScheduler`'s destructor.
if let Some(queue) = self.queue.lock().as_mut() {
queue.push_back(Entry::Release(ptr));
}
self.unpark.unpark();
// Returning `None` here prevents the task plumbing from being
// freed. It is then up to the scheduler through the queue we
// just added to, or its Drop impl to free the task.
None
}
})
// SAFETY: Inserted into the list in bind above.
unsafe { self.owned.remove(task) }
}
fn schedule(&self, task: task::Notified<Self>) {
@ -471,16 +410,13 @@ impl Schedule for Arc<Shared> {
cx.tasks.borrow_mut().queue.push_back(task);
}
_ => {
// If the queue is None, then the runtime has shut down. We
// don't need to do anything with the notification in that case.
let mut guard = self.queue.lock();
if let Some(queue) = guard.as_mut() {
queue.push_back(Entry::Schedule(task));
queue.push_back(RemoteMsg::Schedule(task));
drop(guard);
self.unpark.unpark();
} else {
// The runtime has shut down. We drop the new task
// immediately.
drop(guard);
task.shutdown();
}
}
});


@ -8,7 +8,9 @@ pub(crate) use pool::{spawn_blocking, BlockingPool, Spawner};
mod schedule;
mod shutdown;
pub(crate) mod task;
mod task;
pub(crate) use schedule::NoopSchedule;
pub(crate) use task::BlockingTask;
use crate::runtime::Builder;


@ -9,11 +9,6 @@ use crate::runtime::task::{self, Task};
pub(crate) struct NoopSchedule;
impl task::Schedule for NoopSchedule {
fn bind(_task: Task<Self>) -> NoopSchedule {
// Do nothing w/ the task
NoopSchedule
}
fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
None
}


@ -1,4 +1,4 @@
use crate::runtime::blocking::task::BlockingTask;
use crate::runtime::blocking::{BlockingTask, NoopSchedule};
use crate::runtime::task::{self, JoinHandle};
use crate::runtime::{blocking, context, driver, Spawner};
use crate::util::error::CONTEXT_MISSING_ERROR;
@ -213,7 +213,7 @@ impl Handle {
#[cfg(not(all(tokio_unstable, feature = "tracing")))]
let _ = name;
let (task, handle) = task::joinable(fut);
let (task, handle) = task::unowned(fut, NoopSchedule);
let _ = self.blocking_spawner.spawn(task, &self);
handle
}


@ -1,13 +1,12 @@
//! Run-queue structures to support a work-stealing scheduler
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize};
use crate::loom::sync::{Arc, Mutex};
use crate::runtime::task;
use crate::loom::sync::atomic::{AtomicU16, AtomicU32};
use crate::loom::sync::Arc;
use crate::runtime::task::{self, Inject};
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ptr::{self, NonNull};
use std::ptr;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
/// Producer handle. May only be used from a single thread.
@ -18,19 +17,6 @@ pub(super) struct Local<T: 'static> {
/// Consumer handle. May be used from many threads.
pub(super) struct Steal<T: 'static>(Arc<Inner<T>>);
/// Growable, MPMC queue used to inject new tasks into the scheduler and as an
/// overflow queue when the local, fixed-size, array queue overflows.
pub(super) struct Inject<T: 'static> {
/// Pointers to the head and tail of the queue
pointers: Mutex<Pointers>,
/// Number of pending tasks in the queue. This helps prevent unnecessary
/// locking in the hot path.
len: AtomicUsize,
_p: PhantomData<T>,
}
pub(super) struct Inner<T: 'static> {
/// Concurrently updated by many threads.
///
@ -49,24 +35,11 @@ pub(super) struct Inner<T: 'static> {
tail: AtomicU16,
/// Elements
buffer: Box<[UnsafeCell<MaybeUninit<task::Notified<T>>>]>,
}
struct Pointers {
/// True if the queue is closed
is_closed: bool,
/// Linked-list head
head: Option<NonNull<task::Header>>,
/// Linked-list tail
tail: Option<NonNull<task::Header>>,
buffer: Box<[UnsafeCell<MaybeUninit<task::Notified<T>>>; LOCAL_QUEUE_CAPACITY]>,
}
unsafe impl<T> Send for Inner<T> {}
unsafe impl<T> Sync for Inner<T> {}
unsafe impl<T> Send for Inject<T> {}
unsafe impl<T> Sync for Inject<T> {}
#[cfg(not(loom))]
const LOCAL_QUEUE_CAPACITY: usize = 256;
@ -79,6 +52,17 @@ const LOCAL_QUEUE_CAPACITY: usize = 4;
const MASK: usize = LOCAL_QUEUE_CAPACITY - 1;
// Constructing the fixed size array directly is very awkward. The only way to
// do it is to repeat `UnsafeCell::new(MaybeUninit::uninit())` 256 times, as
// the contents are not Copy. The trick with defining a const doesn't work for
// generic types.
fn make_fixed_size<T>(buffer: Box<[T]>) -> Box<[T; LOCAL_QUEUE_CAPACITY]> {
assert_eq!(buffer.len(), LOCAL_QUEUE_CAPACITY);
// safety: We check that the length is correct.
unsafe { Box::from_raw(Box::into_raw(buffer).cast()) }
}
/// Create a new local run-queue
pub(super) fn local<T: 'static>() -> (Steal<T>, Local<T>) {
let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY);
@ -90,7 +74,7 @@ pub(super) fn local<T: 'static>() -> (Steal<T>, Local<T>) {
let inner = Arc::new(Inner {
head: AtomicU32::new(0),
tail: AtomicU16::new(0),
buffer: buffer.into(),
buffer: make_fixed_size(buffer.into_boxed_slice()),
});
let local = Local {
@ -109,10 +93,7 @@ impl<T> Local<T> {
}
/// Pushes a task to the back of the local queue, skipping the LIFO slot.
pub(super) fn push_back(&mut self, mut task: task::Notified<T>, inject: &Inject<T>)
where
T: crate::runtime::task::Schedule,
{
pub(super) fn push_back(&mut self, mut task: task::Notified<T>, inject: &Inject<T>) {
let tail = loop {
let head = self.inner.head.load(Acquire);
let (steal, real) = unpack(head);
@ -125,13 +106,8 @@ impl<T> Local<T> {
break tail;
} else if steal != real {
// Concurrently stealing, this will free up capacity, so only
// push the new task onto the inject queue
//
// If the task fails to be pushed on the injection queue, there
// is nothing to be done at this point as the task cannot be a
// newly spawned task. Shutting down this task is handled by the
// worker shutdown process.
let _ = inject.push(task);
// push the task onto the inject queue
inject.push(task);
return;
} else {
// Push the current task and half of the queue into the
@ -179,9 +155,12 @@ impl<T> Local<T> {
tail: u16,
inject: &Inject<T>,
) -> Result<(), task::Notified<T>> {
const BATCH_LEN: usize = LOCAL_QUEUE_CAPACITY / 2 + 1;
/// How many elements are we taking from the local queue.
///
/// This is one less than the number of tasks pushed to the inject
/// queue as we are also inserting the `task` argument.
const NUM_TASKS_TAKEN: u16 = (LOCAL_QUEUE_CAPACITY / 2) as u16;
let n = (LOCAL_QUEUE_CAPACITY / 2) as u16;
assert_eq!(
tail.wrapping_sub(head) as usize,
LOCAL_QUEUE_CAPACITY,
@ -207,7 +186,10 @@ impl<T> Local<T> {
.head
.compare_exchange(
prev,
pack(head.wrapping_add(n), head.wrapping_add(n)),
pack(
head.wrapping_add(NUM_TASKS_TAKEN),
head.wrapping_add(NUM_TASKS_TAKEN),
),
Release,
Relaxed,
)
@ -219,41 +201,41 @@ impl<T> Local<T> {
return Err(task);
}
// link the tasks
for i in 0..n {
let j = i + 1;
/// An iterator that takes elements out of the run queue.
struct BatchTaskIter<'a, T: 'static> {
buffer: &'a [UnsafeCell<MaybeUninit<task::Notified<T>>>; LOCAL_QUEUE_CAPACITY],
head: u32,
i: u32,
}
impl<'a, T: 'static> Iterator for BatchTaskIter<'a, T> {
type Item = task::Notified<T>;
let i_idx = i.wrapping_add(head) as usize & MASK;
let j_idx = j.wrapping_add(head) as usize & MASK;
#[inline]
fn next(&mut self) -> Option<task::Notified<T>> {
if self.i == u32::from(NUM_TASKS_TAKEN) {
None
} else {
let i_idx = self.i.wrapping_add(self.head) as usize & MASK;
let slot = &self.buffer[i_idx];
// Get the next pointer
let next = if j == n {
// The last task in the local queue being moved
task.header().into()
} else {
// safety: The above CAS prevents a stealer from accessing these
// tasks and we are the only producer.
self.inner.buffer[j_idx].with(|ptr| unsafe {
let value = (*ptr).as_ptr();
(*value).header().into()
})
};
// safety: Our CAS from before has assumed exclusive ownership
// of the task pointers in this range.
let task = slot.with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) });
// safety: the above CAS prevents a stealer from accessing these
// tasks and we are the only producer.
self.inner.buffer[i_idx].with_mut(|ptr| unsafe {
let ptr = (*ptr).as_ptr();
(*ptr).header().set_next(Some(next))
});
self.i += 1;
Some(task)
}
}
}
// safety: the above CAS prevents a stealer from accessing these tasks
// and we are the only producer.
let head = self.inner.buffer[head as usize & MASK]
.with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) });
// Push the tasks onto the inject queue
inject.push_batch(head, task, BATCH_LEN);
// safety: The CAS above ensures that no consumer will look at these
// values again, and we are the only producer.
let batch_iter = BatchTaskIter {
buffer: &*self.inner.buffer,
head: head as u32,
i: 0,
};
inject.push_batch(batch_iter.chain(std::iter::once(task)));
Ok(())
}
@ -473,159 +455,6 @@ impl<T> Inner<T> {
}
}
impl<T: 'static> Inject<T> {
pub(super) fn new() -> Inject<T> {
Inject {
pointers: Mutex::new(Pointers {
is_closed: false,
head: None,
tail: None,
}),
len: AtomicUsize::new(0),
_p: PhantomData,
}
}
pub(super) fn is_empty(&self) -> bool {
self.len() == 0
}
/// Close the injection queue, returns `true` if the queue is open when the
/// transition is made.
pub(super) fn close(&self) -> bool {
let mut p = self.pointers.lock();
if p.is_closed {
return false;
}
p.is_closed = true;
true
}
pub(super) fn is_closed(&self) -> bool {
self.pointers.lock().is_closed
}
pub(super) fn len(&self) -> usize {
self.len.load(Acquire)
}
/// Pushes a value into the queue.
///
/// Returns `Err(task)` if pushing fails due to the queue being shutdown.
/// The caller is expected to call `shutdown()` on the task **if and only
/// if** it is a newly spawned task.
pub(super) fn push(&self, task: task::Notified<T>) -> Result<(), task::Notified<T>>
where
T: crate::runtime::task::Schedule,
{
// Acquire queue lock
let mut p = self.pointers.lock();
if p.is_closed {
return Err(task);
}
// safety: only mutated with the lock held
let len = unsafe { self.len.unsync_load() };
let task = task.into_raw();
// The next pointer should already be null
debug_assert!(get_next(task).is_none());
if let Some(tail) = p.tail {
set_next(tail, Some(task));
} else {
p.head = Some(task);
}
p.tail = Some(task);
self.len.store(len + 1, Release);
Ok(())
}
pub(super) fn push_batch(
&self,
batch_head: task::Notified<T>,
batch_tail: task::Notified<T>,
num: usize,
) {
let batch_head = batch_head.into_raw();
let batch_tail = batch_tail.into_raw();
debug_assert!(get_next(batch_tail).is_none());
let mut p = self.pointers.lock();
if let Some(tail) = p.tail {
set_next(tail, Some(batch_head));
} else {
p.head = Some(batch_head);
}
p.tail = Some(batch_tail);
// Increment the count.
//
// safety: All updates to the len atomic are guarded by the mutex. As
// such, a non-atomic load followed by a store is safe.
let len = unsafe { self.len.unsync_load() };
self.len.store(len + num, Release);
}
pub(super) fn pop(&self) -> Option<task::Notified<T>> {
// Fast path, if len == 0, then there are no values
if self.is_empty() {
return None;
}
let mut p = self.pointers.lock();
// It is possible to hit null here if another thread popped the last
// task between us checking `len` and acquiring the lock.
let task = p.head?;
p.head = get_next(task);
if p.head.is_none() {
p.tail = None;
}
set_next(task, None);
// Decrement the count.
//
// safety: All updates to the len atomic are guarded by the mutex. As
// such, a non-atomic load followed by a store is safe.
self.len
.store(unsafe { self.len.unsync_load() } - 1, Release);
// safety: a `Notified` is pushed into the queue and now it is popped!
Some(unsafe { task::Notified::from_raw(task) })
}
}
impl<T: 'static> Drop for Inject<T> {
fn drop(&mut self) {
if !std::thread::panicking() {
assert!(self.pop().is_none(), "queue not empty");
}
}
}
fn get_next(header: NonNull<task::Header>) -> Option<NonNull<task::Header>> {
unsafe { header.as_ref().queue_next.with(|ptr| *ptr) }
}
fn set_next(header: NonNull<task::Header>, val: Option<NonNull<task::Header>>) {
unsafe {
header.as_ref().set_next(val);
}
}
/// Split the head value into the real head and the index a stealer is working
/// on.
fn unpack(n: u32) -> (u16, u16) {

View File

@ -13,7 +13,7 @@ use crate::future::Future;
use crate::loom::cell::UnsafeCell;
use crate::runtime::task::raw::{self, Vtable};
use crate::runtime::task::state::State;
use crate::runtime::task::{Notified, Schedule, Task};
use crate::runtime::task::Schedule;
use crate::util::linked_list;
use std::pin::Pin;
@ -36,10 +36,6 @@ pub(super) struct Cell<T: Future, S> {
pub(super) trailer: Trailer,
}
pub(super) struct Scheduler<S> {
scheduler: UnsafeCell<Option<S>>,
}
pub(super) struct CoreStage<T: Future> {
stage: UnsafeCell<Stage<T>>,
}
@ -49,7 +45,7 @@ pub(super) struct CoreStage<T: Future> {
/// Holds the future or output, depending on the stage of execution.
pub(super) struct Core<T: Future, S> {
/// Scheduler used to drive this future
pub(super) scheduler: Scheduler<S>,
pub(super) scheduler: S,
/// Either the future or the output
pub(super) stage: CoreStage<T>,
@ -66,9 +62,6 @@ pub(crate) struct Header {
/// Pointer to next task, used with the injection queue
pub(crate) queue_next: UnsafeCell<Option<NonNull<Header>>>,
/// Pointer to the next task in the transfer stack
pub(super) stack_next: UnsafeCell<Option<NonNull<Header>>>,
/// Table of function pointers for executing actions on the task.
pub(super) vtable: &'static Vtable,
@ -96,7 +89,7 @@ pub(super) enum Stage<T: Future> {
impl<T: Future, S: Schedule> Cell<T, S> {
/// Allocates a new task cell, containing the header, trailer, and core
/// structures.
pub(super) fn new(future: T, state: State) -> Box<Cell<T, S>> {
pub(super) fn new(future: T, scheduler: S, state: State) -> Box<Cell<T, S>> {
#[cfg(all(tokio_unstable, feature = "tracing"))]
let id = future.id();
Box::new(Cell {
@ -104,15 +97,12 @@ impl<T: Future, S: Schedule> Cell<T, S> {
state,
owned: UnsafeCell::new(linked_list::Pointers::new()),
queue_next: UnsafeCell::new(None),
stack_next: UnsafeCell::new(None),
vtable: raw::vtable::<T, S>(),
#[cfg(all(tokio_unstable, feature = "tracing"))]
id,
},
core: Core {
scheduler: Scheduler {
scheduler: UnsafeCell::new(None),
},
scheduler,
stage: CoreStage {
stage: UnsafeCell::new(Stage::Running(future)),
},
@ -124,92 +114,6 @@ impl<T: Future, S: Schedule> Cell<T, S> {
}
}
impl<S: Schedule> Scheduler<S> {
pub(super) fn with_mut<R>(&self, f: impl FnOnce(*mut Option<S>) -> R) -> R {
self.scheduler.with_mut(f)
}
/// Bind a scheduler to the task.
///
/// This only happens on the first poll and must be preceded by a call to
/// `is_bound` to determine if binding is appropriate or not.
///
/// # Safety
///
/// Binding must not be done concurrently since it will mutate the task
/// core through a shared reference.
pub(super) fn bind_scheduler(&self, task: Task<S>) {
// This function may be called concurrently, but the __first__ time it
// is called, the caller has unique access to this field. All subsequent
// concurrent calls will be via the `Waker`, which will "happens after"
// the first poll.
//
// In other words, it is always safe to read the field and it is safe to
// write to the field when it is `None`.
debug_assert!(!self.is_bound());
// Bind the task to the scheduler
let scheduler = S::bind(task);
// Safety: As `scheduler` is not set, this is the first poll
self.scheduler.with_mut(|ptr| unsafe {
*ptr = Some(scheduler);
});
}
/// Returns true if the task is bound to a scheduler.
pub(super) fn is_bound(&self) -> bool {
// Safety: never called concurrently w/ a mutation.
self.scheduler.with(|ptr| unsafe { (*ptr).is_some() })
}
/// Schedule the future for execution
pub(super) fn schedule(&self, task: Notified<S>) {
self.scheduler.with(|ptr| {
// Safety: Can only be called after initial `poll`, which is the
// only time the field is mutated.
match unsafe { &*ptr } {
Some(scheduler) => scheduler.schedule(task),
None => panic!("no scheduler set"),
}
});
}
/// Schedule the future for execution in the near future, yielding the
/// thread to other tasks.
pub(super) fn yield_now(&self, task: Notified<S>) {
self.scheduler.with(|ptr| {
// Safety: Can only be called after initial `poll`, which is the
// only time the field is mutated.
match unsafe { &*ptr } {
Some(scheduler) => scheduler.yield_now(task),
None => panic!("no scheduler set"),
}
});
}
/// Release the task
///
/// If the `Scheduler` implementation is able to, it returns the `Task`
/// handle immediately. The caller of this function will batch a ref-dec
/// with a state change.
pub(super) fn release(&self, task: Task<S>) -> Option<Task<S>> {
use std::mem::ManuallyDrop;
let task = ManuallyDrop::new(task);
self.scheduler.with(|ptr| {
// Safety: Can only be called after initial `poll`, which is the
// only time the field is mutated.
match unsafe { &*ptr } {
Some(scheduler) => scheduler.release(&*task),
// Task was never polled
None => None,
}
})
}
}
impl<T: Future> CoreStage<T> {
pub(super) fn with_mut<R>(&self, f: impl FnOnce(*mut Stage<T>) -> R) -> R {
self.stage.with_mut(f)
@ -299,13 +203,6 @@ impl<T: Future> CoreStage<T> {
cfg_rt_multi_thread! {
impl Header {
pub(crate) fn shutdown(&self) {
use crate::runtime::task::RawTask;
let task = unsafe { RawTask::from_raw(self.into()) };
task.shutdown();
}
pub(crate) unsafe fn set_next(&self, next: Option<NonNull<Header>>) {
self.queue_next.with_mut(|ptr| *ptr = next);
}

View File

@ -1,7 +1,8 @@
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Mutex;
use crate::util::SyncWrapper;
cfg_rt! {
/// Task failed to execute to completion.
@ -12,7 +13,7 @@ cfg_rt! {
enum Repr {
Cancelled,
Panic(Mutex<Box<dyn Any + Send + 'static>>),
Panic(SyncWrapper<Box<dyn Any + Send + 'static>>),
}
impl JoinError {
@ -24,7 +25,7 @@ impl JoinError {
pub(crate) fn panic(err: Box<dyn Any + Send + 'static>) -> JoinError {
JoinError {
repr: Repr::Panic(Mutex::new(err)),
repr: Repr::Panic(SyncWrapper::new(err)),
}
}
@ -106,7 +107,7 @@ impl JoinError {
/// ```
pub fn try_into_panic(self) -> Result<Box<dyn Any + Send + 'static>, JoinError> {
match self.repr {
Repr::Panic(p) => Ok(p.into_inner().expect("Extracting panic from mutex")),
Repr::Panic(p) => Ok(p.into_inner()),
_ => Err(self),
}
}
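
The swap from `Mutex` to `SyncWrapper` works because the panic payload is only ever moved out by value; nothing can reach it through a shared reference, so no lock is needed. A minimal sketch of the idea (a standalone stand-in, not Tokio's private `util::SyncWrapper`, whose exact definition may differ):

```rust
use std::any::Any;

// A wrapper that is Sync for any Send payload. This is sound because the
// API exposes no way to reach the payload through `&SyncWrapper<T>`, so a
// shared reference can never be used to touch `T` from another thread.
struct SyncWrapper<T> {
    value: T,
}

unsafe impl<T: Send> Send for SyncWrapper<T> {}
unsafe impl<T: Send> Sync for SyncWrapper<T> {}

impl<T> SyncWrapper<T> {
    fn new(value: T) -> Self {
        Self { value }
    }

    // Consumes the wrapper; unlike Mutex::into_inner this cannot fail,
    // which is why the `.expect("Extracting panic from mutex")` disappears.
    fn into_inner(self) -> T {
        self.value
    }
}

fn main() {
    let payload: Box<dyn Any + Send + 'static> = Box::new("boom");
    let wrapped = SyncWrapper::new(payload);
    let payload = wrapped.into_inner();
    assert_eq!(*payload.downcast::<&str>().unwrap(), "boom");
}
```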

View File

@ -1,5 +1,5 @@
use crate::future::Future;
use crate::runtime::task::core::{Cell, Core, CoreStage, Header, Scheduler, Trailer};
use crate::runtime::task::core::{Cell, Core, CoreStage, Header, Trailer};
use crate::runtime::task::state::Snapshot;
use crate::runtime::task::waker::waker_ref;
use crate::runtime::task::{JoinError, Notified, Schedule, Task};
@ -95,7 +95,6 @@ where
// Check causality
self.core().stage.with_mut(drop);
self.core().scheduler.with_mut(drop);
unsafe {
drop(Box::from_raw(self.cell.as_ptr()));
@ -238,7 +237,7 @@ enum TransitionToRunning {
struct SchedulerView<'a, S> {
header: &'a Header,
scheduler: &'a Scheduler<S>,
scheduler: &'a S,
}
impl<'a, S> SchedulerView<'a, S>
@ -252,17 +251,17 @@ where
/// Returns true if the task should be deallocated.
fn transition_to_terminal(&self, is_join_interested: bool) -> bool {
let ref_dec = if self.scheduler.is_bound() {
if let Some(task) = self.scheduler.release(self.to_task()) {
mem::forget(task);
true
} else {
false
}
let me = self.to_task();
let ref_dec = if let Some(task) = self.scheduler.release(&me) {
mem::forget(task);
true
} else {
false
};
mem::forget(me);
// This might deallocate
let snapshot = self
.header
@ -273,16 +272,11 @@ where
}
fn transition_to_running(&self) -> TransitionToRunning {
// If this is the first time the task is polled, the task will be bound
// to the scheduler, in which case the task ref count must be
// incremented.
let is_not_bound = !self.scheduler.is_bound();
// Transition the task to the running state.
//
// A failure to transition here indicates the task has been cancelled
// while in the run queue pending execution.
let snapshot = match self.header.state.transition_to_running(is_not_bound) {
let snapshot = match self.header.state.transition_to_running() {
Ok(snapshot) => snapshot,
Err(_) => {
// The task was shutdown while in the run queue. At this point,
@ -292,20 +286,6 @@ where
}
};
if is_not_bound {
// Ensure the task is bound to a scheduler instance. Since this is
// the first time polling the task, a scheduler instance is pulled
// from the local context and assigned to the task.
//
// The scheduler maintains ownership of the task and responds to
// `wake` calls.
//
// The task reference count has been incremented.
//
// Safety: Since we have unique access to the task so that we can
// safely call `bind_scheduler`.
self.scheduler.bind_scheduler(self.to_task());
}
TransitionToRunning::Ok(snapshot)
}
}

View File

@ -0,0 +1,218 @@
//! Inject queue used to send wakeups to a work-stealing scheduler
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::Mutex;
use crate::runtime::task;
use std::marker::PhantomData;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{Acquire, Release};
/// Growable, MPMC queue used to inject new tasks into the scheduler and as an
/// overflow queue when the local, fixed-size, array queue overflows.
pub(crate) struct Inject<T: 'static> {
/// Pointers to the head and tail of the queue
pointers: Mutex<Pointers>,
/// Number of pending tasks in the queue. This helps prevent unnecessary
/// locking in the hot path.
len: AtomicUsize,
_p: PhantomData<T>,
}
struct Pointers {
/// True if the queue is closed
is_closed: bool,
/// Linked-list head
head: Option<NonNull<task::Header>>,
/// Linked-list tail
tail: Option<NonNull<task::Header>>,
}
unsafe impl<T> Send for Inject<T> {}
unsafe impl<T> Sync for Inject<T> {}
impl<T: 'static> Inject<T> {
pub(crate) fn new() -> Inject<T> {
Inject {
pointers: Mutex::new(Pointers {
is_closed: false,
head: None,
tail: None,
}),
len: AtomicUsize::new(0),
_p: PhantomData,
}
}
pub(crate) fn is_empty(&self) -> bool {
self.len() == 0
}
/// Close the injection queue. Returns `true` if the queue was open when the
/// transition was made.
pub(crate) fn close(&self) -> bool {
let mut p = self.pointers.lock();
if p.is_closed {
return false;
}
p.is_closed = true;
true
}
pub(crate) fn is_closed(&self) -> bool {
self.pointers.lock().is_closed
}
pub(crate) fn len(&self) -> usize {
self.len.load(Acquire)
}
/// Pushes a value into the queue.
///
/// This does nothing if the queue is closed.
pub(crate) fn push(&self, task: task::Notified<T>) {
// Acquire queue lock
let mut p = self.pointers.lock();
if p.is_closed {
return;
}
// safety: only mutated with the lock held
let len = unsafe { self.len.unsync_load() };
let task = task.into_raw();
// The next pointer should already be null
debug_assert!(get_next(task).is_none());
if let Some(tail) = p.tail {
set_next(tail, Some(task));
} else {
p.head = Some(task);
}
p.tail = Some(task);
self.len.store(len + 1, Release);
}
/// Pushes several values into the queue.
///
/// SAFETY: The caller should ensure that we have exclusive access to the
/// `queue_next` field in the provided tasks.
#[inline]
pub(crate) fn push_batch<I>(&self, mut iter: I)
where
I: Iterator<Item = task::Notified<T>>,
{
let first = match iter.next() {
Some(first) => first.into_raw(),
None => return,
};
// Link up all the tasks.
let mut prev = first;
let mut counter = 1;
// We are going to be called with an `std::iter::Chain`, and that
// iterator overrides `for_each` to something that is easier for the
// compiler to optimize than a loop.
iter.map(|next| next.into_raw()).for_each(|next| {
// safety: The caller guarantees exclusive access to this field.
set_next(prev, Some(next));
prev = next;
counter += 1;
});
// Now that the tasks are linked together, insert them into the
// linked list.
self.push_batch_inner(first, prev, counter);
}
/// Insert several tasks that have been linked together into the queue.
///
/// The provided head and tail may be the same task. In this case, a
/// single task is inserted.
#[inline]
fn push_batch_inner(
&self,
batch_head: NonNull<task::Header>,
batch_tail: NonNull<task::Header>,
num: usize,
) {
debug_assert!(get_next(batch_tail).is_none());
let mut p = self.pointers.lock();
if let Some(tail) = p.tail {
set_next(tail, Some(batch_head));
} else {
p.head = Some(batch_head);
}
p.tail = Some(batch_tail);
// Increment the count.
//
// safety: All updates to the len atomic are guarded by the mutex. As
// such, a non-atomic load followed by a store is safe.
let len = unsafe { self.len.unsync_load() };
self.len.store(len + num, Release);
}
pub(crate) fn pop(&self) -> Option<task::Notified<T>> {
// Fast path, if len == 0, then there are no values
if self.is_empty() {
return None;
}
let mut p = self.pointers.lock();
// It is possible to hit null here if another thread popped the last
// task between us checking `len` and acquiring the lock.
let task = p.head?;
p.head = get_next(task);
if p.head.is_none() {
p.tail = None;
}
set_next(task, None);
// Decrement the count.
//
// safety: All updates to the len atomic are guarded by the mutex. As
// such, a non-atomic load followed by a store is safe.
self.len
.store(unsafe { self.len.unsync_load() } - 1, Release);
// safety: a `Notified` is pushed into the queue and now it is popped!
Some(unsafe { task::Notified::from_raw(task) })
}
}
impl<T: 'static> Drop for Inject<T> {
fn drop(&mut self) {
if !std::thread::panicking() {
assert!(self.pop().is_none(), "queue not empty");
}
}
}
fn get_next(header: NonNull<task::Header>) -> Option<NonNull<task::Header>> {
unsafe { header.as_ref().queue_next.with(|ptr| *ptr) }
}
fn set_next(header: NonNull<task::Header>, val: Option<NonNull<task::Header>>) {
unsafe {
header.as_ref().set_next(val);
}
}
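
The design choice worth noting in `pop` is the lock-free emptiness check: `len` is an atomic that is only ever written while the mutex is held, so readers can bail out of an empty queue without locking. A condensed sketch of that fast path, with a `VecDeque` standing in for the intrusive linked list (names are illustrative, not Tokio's API):

```rust
use std::collections::VecDeque;
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Release}};
use std::sync::Mutex;

struct Inject<T> {
    pointers: Mutex<VecDeque<T>>, // stands in for the intrusive list
    len: AtomicUsize,             // mirrors the queue length
}

impl<T> Inject<T> {
    fn new() -> Self {
        Self { pointers: Mutex::new(VecDeque::new()), len: AtomicUsize::new(0) }
    }

    fn push(&self, value: T) {
        let mut q = self.pointers.lock().unwrap();
        q.push_back(value);
        // All writes to `len` happen under the lock, so this cannot race
        // with another writer; Release pairs with the Acquire load in pop.
        self.len.store(q.len(), Release);
    }

    fn pop(&self) -> Option<T> {
        // Fast path: skip the lock entirely when the queue looks empty.
        if self.len.load(Acquire) == 0 {
            return None;
        }
        let mut q = self.pointers.lock().unwrap();
        let value = q.pop_front();
        self.len.store(q.len(), Release);
        value
    }
}

fn main() {
    let q = Inject::new();
    assert!(q.pop().is_none()); // no lock taken here
    q.push(1);
    assert_eq!(q.pop(), Some(1));
}
```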

View File

@ -0,0 +1,141 @@
//! This module has containers for storing the tasks spawned on a scheduler. The
//! `OwnedTasks` container is thread-safe but can only store tasks that
//! implement Send. The `LocalOwnedTasks` container is not thread-safe, but can
//! store non-Send tasks.
//!
//! The collections can be closed to prevent adding new tasks during shutdown of
//! the scheduler with the collection.
use crate::future::Future;
use crate::loom::sync::Mutex;
use crate::runtime::task::{JoinHandle, Notified, Schedule, Task};
use crate::util::linked_list::{Link, LinkedList};
use std::marker::PhantomData;
pub(crate) struct OwnedTasks<S: 'static> {
inner: Mutex<OwnedTasksInner<S>>,
}
struct OwnedTasksInner<S: 'static> {
list: LinkedList<Task<S>, <Task<S> as Link>::Target>,
closed: bool,
}
pub(crate) struct LocalOwnedTasks<S: 'static> {
list: LinkedList<Task<S>, <Task<S> as Link>::Target>,
closed: bool,
_not_send: PhantomData<*const ()>,
}
impl<S: 'static> OwnedTasks<S> {
pub(crate) fn new() -> Self {
Self {
inner: Mutex::new(OwnedTasksInner {
list: LinkedList::new(),
closed: false,
}),
}
}
/// Bind the provided task to this OwnedTasks instance. This fails if the
/// OwnedTasks has been closed.
pub(crate) fn bind<T>(
&self,
task: T,
scheduler: S,
) -> (JoinHandle<T::Output>, Option<Notified<S>>)
where
S: Schedule,
T: Future + Send + 'static,
T::Output: Send + 'static,
{
let (task, notified, join) = super::new_task(task, scheduler);
let mut lock = self.inner.lock();
if lock.closed {
drop(lock);
drop(task);
notified.shutdown();
(join, None)
} else {
lock.list.push_front(task);
(join, Some(notified))
}
}
pub(crate) fn pop_back(&self) -> Option<Task<S>> {
self.inner.lock().list.pop_back()
}
/// The caller must ensure that if the provided task is stored in a
/// linked list, then it is in this linked list.
pub(crate) unsafe fn remove(&self, task: &Task<S>) -> Option<Task<S>> {
self.inner.lock().list.remove(task.header().into())
}
pub(crate) fn is_empty(&self) -> bool {
self.inner.lock().list.is_empty()
}
#[cfg(feature = "rt-multi-thread")]
pub(crate) fn is_closed(&self) -> bool {
self.inner.lock().closed
}
/// Close the OwnedTasks. This prevents adding new tasks to the collection.
pub(crate) fn close(&self) {
self.inner.lock().closed = true;
}
}
impl<S: 'static> LocalOwnedTasks<S> {
pub(crate) fn new() -> Self {
Self {
list: LinkedList::new(),
closed: false,
_not_send: PhantomData,
}
}
pub(crate) fn bind<T>(
&mut self,
task: T,
scheduler: S,
) -> (JoinHandle<T::Output>, Option<Notified<S>>)
where
S: Schedule,
T: Future + 'static,
T::Output: 'static,
{
let (task, notified, join) = super::new_task(task, scheduler);
if self.closed {
drop(task);
notified.shutdown();
(join, None)
} else {
self.list.push_front(task);
(join, Some(notified))
}
}
pub(crate) fn pop_back(&mut self) -> Option<Task<S>> {
self.list.pop_back()
}
/// The caller must ensure that if the provided task is stored in a
/// linked list, then it is in this linked list.
pub(crate) unsafe fn remove(&mut self, task: &Task<S>) -> Option<Task<S>> {
self.list.remove(task.header().into())
}
pub(crate) fn is_empty(&self) -> bool {
self.list.is_empty()
}
/// Close the LocalOwnedTasks. This prevents adding new tasks to the
/// collection.
pub(crate) fn close(&mut self) {
self.closed = true;
}
}
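
What both collections guarantee is that `bind` and `close` are serialized: once `closed` has been observed, no new task can enter the list, and the caller gets the notification back so it can cancel the task itself. A reduced sketch of that handshake, with strings standing in for tasks (illustrative only):

```rust
use std::sync::Mutex;

struct OwnedTasks<T> {
    inner: Mutex<Inner<T>>,
}

struct Inner<T> {
    list: Vec<T>,
    closed: bool,
}

impl<T> OwnedTasks<T> {
    fn new() -> Self {
        Self { inner: Mutex::new(Inner { list: Vec::new(), closed: false }) }
    }

    // Hands the task back to the caller if the collection is closed,
    // mirroring how the real bind returns the Notified for shutdown.
    fn bind(&self, task: T) -> Result<(), T> {
        let mut lock = self.inner.lock().unwrap();
        if lock.closed {
            Err(task) // caller must cancel the task itself
        } else {
            lock.list.push(task);
            Ok(())
        }
    }

    fn close(&self) -> Vec<T> {
        let mut lock = self.inner.lock().unwrap();
        lock.closed = true;
        std::mem::take(&mut lock.list) // drained for shutdown
    }
}

fn main() {
    let owned = OwnedTasks::new();
    assert!(owned.bind("task a").is_ok());
    let to_shutdown = owned.close();
    assert_eq!(to_shutdown, vec!["task a"]);
    // After close, binding fails and the task comes back for cancellation.
    assert_eq!(owned.bind("task b"), Err("task b"));
}
```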

View File

@ -9,10 +9,18 @@ pub use self::error::JoinError;
mod harness;
use self::harness::Harness;
cfg_rt_multi_thread! {
mod inject;
pub(super) use self::inject::Inject;
}
mod join;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::join::JoinHandle;
mod list;
pub(crate) use self::list::{LocalOwnedTasks, OwnedTasks};
mod raw;
use self::raw::RawTask;
@ -21,11 +29,6 @@ use self::state::State;
mod waker;
cfg_rt_multi_thread! {
mod stack;
pub(crate) use self::stack::TransferStack;
}
use crate::future::Future;
use crate::util::linked_list;
@ -54,19 +57,11 @@ unsafe impl<S: Schedule> Sync for Notified<S> {}
pub(crate) type Result<T> = std::result::Result<T, JoinError>;
pub(crate) trait Schedule: Sync + Sized + 'static {
/// Bind a task to the executor.
///
/// Guaranteed to be called from the thread that called `poll` on the task.
/// The returned `Schedule` instance is associated with the task and is used
/// as `&self` in the other methods on this trait.
fn bind(task: Task<Self>) -> Self;
/// The task has completed work and is ready to be released. The scheduler
/// is free to drop it whenever.
/// should release it immediately and return it. The task module will batch
/// the ref-dec with other state changes.
///
/// If the scheduler can immediately release the task, it should return
/// it as part of the function. This enables the task module to batch
/// the ref-dec with other options.
/// If the scheduler has already released the task, then None is returned.
fn release(&self, task: &Task<Self>) -> Option<Task<Self>>;
/// Schedule the task
@ -80,42 +75,46 @@ pub(crate) trait Schedule: Sync + Sized + 'static {
}
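
With `bind` removed from the trait, a scheduler now receives its `&self` at construction time and only has to answer `release` and `schedule` (plus the defaulted `yield_now`). A stand-alone sketch of the new trait shape, using placeholder task types rather than Tokio's internal `Task`/`Notified` handles:

```rust
// Stand-ins for the internal task handles (illustrative only).
struct Task(&'static str);
struct Notified(Task);

trait Schedule: Sized + 'static {
    /// Return the task if it can be released immediately; the caller
    /// batches the ref-dec with other state changes.
    fn release(&self, task: &Task) -> Option<Task>;

    /// Put a notified task back on a run queue.
    fn schedule(&self, task: Notified);

    /// Yielding defaults to an ordinary reschedule.
    fn yield_now(&self, task: Notified) {
        self.schedule(task);
    }
}

// The shape of NoopSchedule after this change: no `bind`, and `release`
// simply reports that it holds no reference to hand back.
struct NoopSchedule;

impl Schedule for NoopSchedule {
    fn release(&self, _task: &Task) -> Option<Task> {
        None
    }
    fn schedule(&self, _task: Notified) {
        unreachable!("a noop scheduler never reschedules tasks");
    }
}

fn main() {
    let s = NoopSchedule;
    assert!(s.release(&Task("demo")).is_none());
}
```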
cfg_rt! {
/// Create a new task with an associated join handle
pub(crate) fn joinable<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
/// This is the constructor for a new task. Three references to the task are
/// created. The first task reference is usually put into an OwnedTasks
/// immediately. The Notified is sent to the scheduler as an ordinary
/// notification.
fn new_task<T, S>(
task: T,
scheduler: S
) -> (Task<S>, Notified<S>, JoinHandle<T::Output>)
where
T: Future + Send + 'static,
S: Schedule,
{
let raw = RawTask::new::<_, S>(task);
let task = Task {
raw,
_p: PhantomData,
};
let join = JoinHandle::new(raw);
(Notified(task), join)
}
}
cfg_rt! {
/// Create a new `!Send` task with an associated join handle
pub(crate) unsafe fn joinable_local<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
where
T: Future + 'static,
S: Schedule,
T::Output: 'static,
{
let raw = RawTask::new::<_, S>(task);
let raw = RawTask::new::<T, S>(task, scheduler);
let task = Task {
raw,
_p: PhantomData,
};
let notified = Notified(Task {
raw,
_p: PhantomData,
});
let join = JoinHandle::new(raw);
(Notified(task), join)
(task, notified, join)
}
/// Create a new task with an associated join handle. This method is used
/// only when the task is not going to be stored in an `OwnedTasks` list.
///
/// Currently only blocking tasks and tests use this method.
pub(crate) fn unowned<T, S>(task: T, scheduler: S) -> (Notified<S>, JoinHandle<T::Output>)
where
S: Schedule,
T: Send + Future + 'static,
T::Output: Send + 'static,
{
let (task, notified, join) = new_task(task, scheduler);
drop(task);
(notified, join)
}
}
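
The three references minted by `new_task` can be pictured with `Arc`: one clone for the owned collection, one for the scheduler's notification, one for the `JoinHandle`; `unowned` is the same constructor with the first clone dropped on the spot. A loose analogy (the real task uses a hand-rolled ref count packed into its state word, not `Arc`):

```rust
use std::sync::Arc;

struct RawTask; // stands in for the heap-allocated task cell

fn new_task() -> (Arc<RawTask>, Arc<RawTask>, Arc<RawTask>) {
    let raw = Arc::new(RawTask);
    let owned = raw.clone();    // stored in OwnedTasks / LocalOwnedTasks
    let notified = raw.clone(); // sent to the scheduler as a notification
    (owned, notified, raw)      // the last handle backs the JoinHandle
}

fn main() {
    let (owned, notified, join) = new_task();
    assert_eq!(Arc::strong_count(&join), 3);

    // `unowned` is this same constructor with the owned reference dropped
    // immediately, leaving just the Notified and the JoinHandle.
    drop(owned);
    assert_eq!(Arc::strong_count(&join), 2);
    drop(notified);
    assert_eq!(Arc::strong_count(&join), 1);
}
```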
@ -137,10 +136,6 @@ cfg_rt_multi_thread! {
pub(crate) unsafe fn from_raw(ptr: NonNull<Header>) -> Notified<S> {
Notified(Task::from_raw(ptr))
}
pub(crate) fn header(&self) -> &Header {
self.0.header()
}
}
impl<S: 'static> Task<S> {

View File

@ -42,12 +42,12 @@ pub(super) fn vtable<T: Future, S: Schedule>() -> &'static Vtable {
}
impl RawTask {
pub(super) fn new<T, S>(task: T) -> RawTask
pub(super) fn new<T, S>(task: T, scheduler: S) -> RawTask
where
T: Future,
S: Schedule,
{
let ptr = Box::into_raw(Cell::<_, S>::new(task, State::new()));
let ptr = Box::into_raw(Cell::<_, S>::new(task, scheduler, State::new()));
let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) };
RawTask { ptr }

View File

@ -1,83 +0,0 @@
use crate::loom::sync::atomic::AtomicPtr;
use crate::runtime::task::{Header, Task};
use std::marker::PhantomData;
use std::ptr::{self, NonNull};
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
/// Concurrent stack of tasks, used to pass ownership of a task from one worker
/// to another.
pub(crate) struct TransferStack<T: 'static> {
head: AtomicPtr<Header>,
_p: PhantomData<T>,
}
impl<T: 'static> TransferStack<T> {
pub(crate) fn new() -> TransferStack<T> {
TransferStack {
head: AtomicPtr::new(ptr::null_mut()),
_p: PhantomData,
}
}
pub(crate) fn push(&self, task: Task<T>) {
let task = task.into_raw();
// We don't care about any memory associated w/ setting the `head`
// field, just the current value.
//
// The compare-exchange creates a release sequence.
let mut curr = self.head.load(Relaxed);
loop {
unsafe {
task.as_ref()
.stack_next
.with_mut(|ptr| *ptr = NonNull::new(curr))
};
let res = self
.head
.compare_exchange(curr, task.as_ptr() as *mut _, Release, Relaxed);
match res {
Ok(_) => return,
Err(actual) => {
curr = actual;
}
}
}
}
pub(crate) fn drain(&self) -> impl Iterator<Item = Task<T>> {
struct Iter<T: 'static>(Option<NonNull<Header>>, PhantomData<T>);
impl<T: 'static> Iterator for Iter<T> {
type Item = Task<T>;
fn next(&mut self) -> Option<Task<T>> {
let task = self.0?;
// Move the cursor forward
self.0 = unsafe { task.as_ref().stack_next.with(|ptr| *ptr) };
// Return the task
unsafe { Some(Task::from_raw(task)) }
}
}
impl<T: 'static> Drop for Iter<T> {
fn drop(&mut self) {
use std::process;
if self.0.is_some() {
// we have bugs
process::abort();
}
}
}
let ptr = self.head.swap(ptr::null_mut(), Acquire);
Iter(NonNull::new(ptr), PhantomData)
}
}

View File

@ -54,22 +54,23 @@ const REF_ONE: usize = 1 << REF_COUNT_SHIFT;
/// State a task is initialized with
///
/// A task is initialized with two references: one for the scheduler and one for
/// the `JoinHandle`. As the task starts with a `JoinHandle`, `JOIN_INTEREST` is
/// set. A new task is immediately pushed into the run queue for execution and
/// starts with the `NOTIFIED` flag set.
const INITIAL_STATE: usize = (REF_ONE * 2) | JOIN_INTEREST | NOTIFIED;
/// A task is initialized with three references:
///
/// * A reference that will be stored in an OwnedTasks or LocalOwnedTasks.
/// * A reference that will be sent to the scheduler as an ordinary notification.
/// * A reference for the JoinHandle.
///
/// As the task starts with a `JoinHandle`, `JOIN_INTEREST` is set.
/// As the task starts with a `Notified`, `NOTIFIED` is set.
const INITIAL_STATE: usize = (REF_ONE * 3) | JOIN_INTEREST | NOTIFIED;
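
Since the flag bits occupy the low end of the state word and the reference count sits above them, `REF_ONE * 3` is simply a count of three shifted past the flags. A sketch with illustrative shift and flag values (the real constants are defined earlier in this file and may differ):

```rust
// Illustrative layout: a few lifecycle flags in the low bits, the
// reference count in the remaining high bits.
const JOIN_INTEREST: usize = 1 << 2;
const NOTIFIED: usize = 1 << 3;
const REF_COUNT_SHIFT: usize = 4;
const REF_ONE: usize = 1 << REF_COUNT_SHIFT;

const INITIAL_STATE: usize = (REF_ONE * 3) | JOIN_INTEREST | NOTIFIED;

fn ref_count(state: usize) -> usize {
    state >> REF_COUNT_SHIFT
}

fn main() {
    // Three references: owned list, scheduler notification, JoinHandle.
    assert_eq!(ref_count(INITIAL_STATE), 3);
    assert_ne!(INITIAL_STATE & JOIN_INTEREST, 0);
    assert_ne!(INITIAL_STATE & NOTIFIED, 0);
}
```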
/// All transitions are performed via RMW operations. This establishes an
/// unambiguous modification order.
impl State {
/// Return a task's initial state
pub(super) fn new() -> State {
// A task is initialized with three references: one for the scheduler,
// one for the `JoinHandle`, one for the task handle made available in
// release. As the task starts with a `JoinHandle`, `JOIN_INTEREST` is
// set. A new task is immediately pushed into the run queue for
// execution and starts with the `NOTIFIED` flag set.
// The raw task returned by this method has a ref-count of three. See
// the comment on INITIAL_STATE for more.
State {
val: AtomicUsize::new(INITIAL_STATE),
}
@ -82,10 +83,8 @@ impl State {
/// Attempt to transition the lifecycle to `Running`.
///
/// If `ref_inc` is set, the reference count is also incremented.
///
/// The `NOTIFIED` bit is always unset.
pub(super) fn transition_to_running(&self, ref_inc: bool) -> UpdateResult {
pub(super) fn transition_to_running(&self) -> UpdateResult {
self.fetch_update(|curr| {
assert!(curr.is_notified());
@ -95,10 +94,6 @@ impl State {
return None;
}
if ref_inc {
next.ref_inc();
}
next.set_running();
next.unset_notified();
Some(next)

View File

@ -1,5 +1,6 @@
use crate::runtime::blocking::NoopSchedule;
use crate::runtime::queue;
use crate::runtime::task::{self, Schedule, Task};
use crate::runtime::task::Inject;
use loom::thread;
@ -7,7 +8,7 @@ use loom::thread;
fn basic() {
loom::model(|| {
let (steal, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
let th = thread::spawn(move || {
let (_, mut local) = queue::local();
@ -30,7 +31,7 @@ fn basic() {
for _ in 0..2 {
for _ in 0..2 {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
}
@ -39,7 +40,7 @@ fn basic() {
}
// Push another task
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
while local.pop().is_some() {
@ -61,7 +62,7 @@ fn basic() {
fn steal_overflow() {
loom::model(|| {
let (steal, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
let th = thread::spawn(move || {
let (_, mut local) = queue::local();
@ -81,7 +82,7 @@ fn steal_overflow() {
let mut n = 0;
// push a task, pop a task
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
if local.pop().is_some() {
@ -89,7 +90,7 @@ fn steal_overflow() {
}
for _ in 0..6 {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
}
@ -111,7 +112,7 @@ fn steal_overflow() {
fn multi_stealer() {
const NUM_TASKS: usize = 5;
fn steal_tasks(steal: queue::Steal<Runtime>) -> usize {
fn steal_tasks(steal: queue::Steal<NoopSchedule>) -> usize {
let (_, mut local) = queue::local();
if steal.steal_into(&mut local).is_none() {
@ -129,11 +130,11 @@ fn multi_stealer() {
loom::model(|| {
let (steal, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
// Push work
for _ in 0..NUM_TASKS {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
}
@ -166,14 +167,14 @@ fn chained_steal() {
loom::model(|| {
let (s1, mut l1) = queue::local();
let (s2, mut l2) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
// Load up some tasks
for _ in 0..4 {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
l1.push_back(task, &inject);
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
l2.push_back(task, &inject);
}
@ -197,20 +198,3 @@ fn chained_steal() {
while inject.pop().is_some() {}
});
}
struct Runtime;
impl Schedule for Runtime {
fn bind(task: Task<Self>) -> Runtime {
std::mem::forget(task);
Runtime
}
fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
None
}
fn schedule(&self, _task: task::Notified<Self>) {
unreachable!();
}
}

View File

@ -1,21 +1,30 @@
#[cfg(not(all(tokio_unstable, feature = "tracing")))]
use crate::runtime::task::joinable;
use self::unowned_wrapper::unowned;
#[cfg(all(tokio_unstable, feature = "tracing"))]
use self::joinable_wrapper::joinable;
mod unowned_wrapper {
use crate::runtime::blocking::NoopSchedule;
use crate::runtime::task::{JoinHandle, Notified};
#[cfg(all(tokio_unstable, feature = "tracing"))]
mod joinable_wrapper {
use crate::runtime::task::{JoinHandle, Notified, Schedule};
use tracing::Instrument;
pub(crate) fn joinable<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
#[cfg(all(tokio_unstable, feature = "tracing"))]
pub(crate) fn unowned<T>(task: T) -> (Notified<NoopSchedule>, JoinHandle<T::Output>)
where
T: std::future::Future + Send + 'static,
S: Schedule,
T::Output: Send + 'static,
{
use tracing::Instrument;
let span = tracing::trace_span!("test_span");
crate::runtime::task::joinable(task.instrument(span))
let task = task.instrument(span);
let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule);
(task, handle)
}
#[cfg(not(all(tokio_unstable, feature = "tracing")))]
pub(crate) fn unowned<T>(task: T) -> (Notified<NoopSchedule>, JoinHandle<T::Output>)
where
T: std::future::Future + Send + 'static,
T::Output: Send + 'static,
{
let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule);
(task, handle)
}
}

View File

@ -1,5 +1,5 @@
use crate::runtime::queue;
use crate::runtime::task::{self, Schedule, Task};
use crate::runtime::task::{self, Inject, Schedule, Task};
use std::thread;
use std::time::Duration;
@ -7,10 +7,10 @@ use std::time::Duration;
#[test]
fn fits_256() {
let (_, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
for _ in 0..256 {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
}
@ -22,10 +22,10 @@ fn fits_256() {
#[test]
fn overflow() {
let (_, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
for _ in 0..257 {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
}
@ -46,10 +46,10 @@ fn overflow() {
fn steal_batch() {
let (steal1, mut local1) = queue::local();
let (_, mut local2) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
for _ in 0..4 {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local1.push_back(task, &inject);
}
@ -78,7 +78,7 @@ fn stress1() {
for _ in 0..NUM_ITER {
let (steal, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
let th = thread::spawn(move || {
let (_, mut local) = queue::local();
@ -103,7 +103,7 @@ fn stress1() {
for _ in 0..NUM_LOCAL {
for _ in 0..NUM_PUSH {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
}
@ -134,7 +134,7 @@ fn stress2() {
for _ in 0..NUM_ITER {
let (steal, mut local) = queue::local();
let inject = queue::Inject::new();
let inject = Inject::new();
let th = thread::spawn(move || {
let (_, mut local) = queue::local();
@ -158,7 +158,7 @@ fn stress2() {
let mut num_pop = 0;
for i in 0..NUM_TASKS {
let (task, _) = super::joinable::<_, Runtime>(async {});
let (task, _) = super::unowned(async {});
local.push_back(task, &inject);
if i % 128 == 0 && local.pop().is_some() {
@ -187,11 +187,6 @@ fn stress2() {
struct Runtime;
impl Schedule for Runtime {
fn bind(task: Task<Self>) -> Runtime {
std::mem::forget(task);
Runtime
}
fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
None
}

View File

@ -1,44 +1,185 @@
use crate::runtime::task::{self, Schedule, Task};
use crate::util::linked_list::{Link, LinkedList};
use crate::runtime::blocking::NoopSchedule;
use crate::runtime::task::{self, unowned, JoinHandle, OwnedTasks, Schedule, Task};
use crate::util::TryLock;
use std::collections::VecDeque;
use std::future::Future;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
struct AssertDropHandle {
is_dropped: Arc<AtomicBool>,
}
impl AssertDropHandle {
#[track_caller]
fn assert_dropped(&self) {
assert!(self.is_dropped.load(Ordering::SeqCst));
}
#[track_caller]
fn assert_not_dropped(&self) {
assert!(!self.is_dropped.load(Ordering::SeqCst));
}
}
struct AssertDrop {
is_dropped: Arc<AtomicBool>,
}
impl AssertDrop {
fn new() -> (Self, AssertDropHandle) {
let shared = Arc::new(AtomicBool::new(false));
(
AssertDrop {
is_dropped: shared.clone(),
},
AssertDropHandle {
is_dropped: shared.clone(),
},
)
}
}
impl Drop for AssertDrop {
fn drop(&mut self) {
self.is_dropped.store(true, Ordering::SeqCst);
}
}
// A Notified does not shut down on drop, but it is dropped once the ref-count
// hits zero.
#[test]
fn create_drop() {
let _ = super::joinable::<_, Runtime>(async { unreachable!() });
fn create_drop1() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
);
drop(notified);
handle.assert_not_dropped();
drop(join);
handle.assert_dropped();
}
#[test]
fn create_drop2() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
);
drop(join);
handle.assert_not_dropped();
drop(notified);
handle.assert_dropped();
}
// Shutting down through Notified works
#[test]
fn create_shutdown1() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
);
drop(join);
handle.assert_not_dropped();
notified.shutdown();
handle.assert_dropped();
}
#[test]
fn create_shutdown2() {
let (ad, handle) = AssertDrop::new();
let (notified, join) = unowned(
async {
drop(ad);
unreachable!()
},
NoopSchedule,
);
handle.assert_not_dropped();
notified.shutdown();
handle.assert_dropped();
drop(join);
}
#[test]
fn schedule() {
with(|rt| {
let (task, _) = super::joinable(async {
rt.spawn(async {
crate::task::yield_now().await;
});
rt.schedule(task);
assert_eq!(2, rt.tick());
rt.shutdown();
})
}
#[test]
fn shutdown() {
with(|rt| {
let (task, _) = super::joinable(async {
rt.spawn(async {
loop {
crate::task::yield_now().await;
}
});
rt.schedule(task);
rt.tick_max(1);
rt.shutdown();
})
}
#[test]
fn shutdown_immediately() {
with(|rt| {
rt.spawn(async {
loop {
crate::task::yield_now().await;
}
});
rt.shutdown();
})
}
#[test]
fn spawn_during_shutdown() {
static DID_SPAWN: AtomicBool = AtomicBool::new(false);
struct SpawnOnDrop(Runtime);
impl Drop for SpawnOnDrop {
fn drop(&mut self) {
DID_SPAWN.store(true, Ordering::SeqCst);
self.0.spawn(async {});
}
}
with(|rt| {
let rt2 = rt.clone();
rt.spawn(async move {
let _spawn_on_drop = SpawnOnDrop(rt2);
loop {
crate::task::yield_now().await;
}
});
rt.tick_max(1);
rt.shutdown();
});
assert!(DID_SPAWN.load(Ordering::SeqCst));
}
fn with(f: impl FnOnce(Runtime)) {
struct Reset;
@ -51,10 +192,9 @@ fn with(f: impl FnOnce(Runtime)) {
let _reset = Reset;
let rt = Runtime(Arc::new(Inner {
released: task::TransferStack::new(),
owned: OwnedTasks::new(),
core: TryLock::new(Core {
queue: VecDeque::new(),
tasks: LinkedList::new(),
}),
}));
@ -66,18 +206,31 @@ fn with(f: impl FnOnce(Runtime)) {
struct Runtime(Arc<Inner>);
struct Inner {
released: task::TransferStack<Runtime>,
core: TryLock<Core>,
owned: OwnedTasks<Runtime>,
}
struct Core {
queue: VecDeque<task::Notified<Runtime>>,
tasks: LinkedList<Task<Runtime>, <Task<Runtime> as Link>::Target>,
}
static CURRENT: TryLock<Option<Runtime>> = TryLock::new(None);
impl Runtime {
fn spawn<T>(&self, future: T) -> JoinHandle<T::Output>
where
T: 'static + Send + Future,
T::Output: 'static + Send,
{
let (handle, notified) = self.0.owned.bind(future, self.clone());
if let Some(notified) = notified {
self.schedule(notified);
}
handle
}
fn tick(&self) -> usize {
self.tick_max(usize::MAX)
}
@ -91,8 +244,6 @@ impl Runtime {
task.run();
}
self.0.maintenance();
n
}
@ -107,7 +258,8 @@ impl Runtime {
fn shutdown(&self) {
let mut core = self.0.core.try_lock().unwrap();
for task in core.tasks.iter() {
self.0.owned.close();
while let Some(task) = self.0.owned.pop_back() {
task.shutdown();
}
@ -117,40 +269,14 @@ impl Runtime {
drop(core);
while !self.0.core.try_lock().unwrap().tasks.is_empty() {
self.0.maintenance();
}
}
}
impl Inner {
fn maintenance(&self) {
use std::mem::ManuallyDrop;
for task in self.released.drain() {
let task = ManuallyDrop::new(task);
// safety: see worker.rs
unsafe {
let ptr = task.header().into();
self.core.try_lock().unwrap().tasks.remove(ptr);
}
}
assert!(self.0.owned.is_empty());
}
}
impl Schedule for Runtime {
fn bind(task: Task<Self>) -> Runtime {
let rt = CURRENT.try_lock().unwrap().as_ref().unwrap().clone();
rt.0.core.try_lock().unwrap().tasks.push_front(task);
rt
}
fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
// safety: copying worker.rs
let task = unsafe { Task::from_raw(task.header().into()) };
self.0.released.push(task);
None
unsafe { self.0.owned.remove(task) }
}
fn schedule(&self, task: task::Notified<Self>) {

View File

@ -12,7 +12,7 @@ pub(crate) use worker::Launch;
pub(crate) use worker::block_in_place;
use crate::loom::sync::Arc;
use crate::runtime::task::{self, JoinHandle};
use crate::runtime::task::JoinHandle;
use crate::runtime::Parker;
use std::fmt;
@ -30,7 +30,7 @@ pub(crate) struct ThreadPool {
///
/// The `Spawner` handle is *only* used for spawning new futures. It does not
/// impact the lifecycle of the thread pool in any way. The thread pool may
/// shutdown while there are outstanding `Spawner` instances.
/// shut down while there are outstanding `Spawner` instances.
///
/// `Spawner` instances are obtained by calling [`ThreadPool::spawner`].
///
@ -93,15 +93,7 @@ impl Spawner {
F: crate::future::Future + Send + 'static,
F::Output: Send + 'static,
{
let (task, handle) = task::joinable(future);
if let Err(task) = self.shared.schedule(task, false) {
// The newly spawned task could not be scheduled because the runtime
// is shutting down. The task must be explicitly shutdown at this point.
task.shutdown();
}
handle
worker::Shared::bind_new_task(&self.shared, future)
}
pub(crate) fn shutdown(&mut self) {

View File

@ -3,17 +3,70 @@
//! run queue and other state. When `block_in_place` is called, the worker's
//! "core" is handed off to a new thread allowing the scheduler to continue to
//! make progress while the originating thread blocks.
//!
//! # Shutdown
//!
//! Shutting down the runtime involves the following steps:
//!
//! 1. The Shared::close method is called. This closes the inject queue and
//! OwnedTasks instance and wakes up all worker threads.
//!
//! 2. Each worker thread observes the close signal next time it runs
//! Core::maintenance by checking whether the inject queue is closed.
//! The Core::is_shutdown flag is set to true.
//!
//! 3. Each worker thread calls `pre_shutdown` in parallel. Here, the worker
//! will keep removing tasks from OwnedTasks until it is empty. No new
//! tasks can be pushed to the OwnedTasks during or after this step as it
//! was closed in step 1.
//!
//! 4. The workers call Shared::shutdown to enter the single-threaded phase of
//! shutdown. These calls will push their core to Shared::shutdown_cores,
//! and the last thread to push its core will finish the shutdown procedure.
//!
//! 5. The local run queue of each core is emptied, then the inject queue is
//! emptied.
//!
//! At this point, shutdown has completed. It is not possible for any of the
//! collections to contain any tasks at this point, as each collection was
//! closed first, then emptied afterwards.
//!
//! ## Spawns during shutdown
//!
//! When spawning tasks during shutdown, there are two cases:
//!
//! * The spawner observes the OwnedTasks being open, and the inject queue is
//! closed.
//! * The spawner observes the OwnedTasks being closed and doesn't check the
//! inject queue.
//!
//! The first case can only happen if the OwnedTasks::bind call happens before
//! or during step 1 of shutdown. In this case, the runtime will clean up the
//! task in step 3 of shutdown.
//!
//! In the latter case, the task was not spawned and the task is immediately
//! cancelled by the spawner.
//!
//! The correctness of shutdown requires both the inject queue and OwnedTasks
//! collection to have a closed bit. With a close bit on only the inject queue,
//! spawning could run into a situation where a task is successfully bound long
//! after the runtime has shut down. With a close bit on only the OwnedTasks,
//! the first spawning situation could result in the notification being pushed
//! to the inject queue after step 6 of shutdown, which would leave a task in
//! the inject queue indefinitely. This would be a ref-count cycle and a memory
//! leak.
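
A toy model of the two spawn-during-shutdown cases makes the ordering concrete: the spawner consults the OwnedTasks close bit first and the inject close bit second, and each combination has a well-defined cleanup path. All names here are illustrative:

```rust
use std::sync::Mutex;

struct Flag(Mutex<bool>);

impl Flag {
    fn new() -> Self { Flag(Mutex::new(false)) }
    fn close(&self) { *self.0.lock().unwrap() = true; }
    fn is_closed(&self) -> bool { *self.0.lock().unwrap() }
}

// Spawning binds into the owned collection first, then notifies via inject.
fn spawn(owned: &Flag, inject: &Flag) -> &'static str {
    if owned.is_closed() {
        // Case 2: never bound; the spawner cancels the task itself.
        return "cancelled by spawner";
    }
    if inject.is_closed() {
        // Case 1: bound but not notified; shutdown's sweep of OwnedTasks
        // (step 3 above) will find and cancel it.
        return "bound, cleaned up by shutdown";
    }
    "spawned normally"
}

fn main() {
    let (owned, inject) = (Flag::new(), Flag::new());
    assert_eq!(spawn(&owned, &inject), "spawned normally");

    // Shared::close closes the inject queue first, so a racing spawner can
    // observe exactly this intermediate state: case 1.
    inject.close();
    assert_eq!(spawn(&owned, &inject), "bound, cleaned up by shutdown");

    owned.close();
    assert_eq!(spawn(&owned, &inject), "cancelled by spawner");
}
```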
use crate::coop;
use crate::future::Future;
use crate::loom::rand::seed;
use crate::loom::sync::{Arc, Mutex};
use crate::park::{Park, Unpark};
use crate::runtime;
use crate::runtime::enter::EnterContext;
use crate::runtime::park::{Parker, Unparker};
use crate::runtime::task::{Inject, JoinHandle, OwnedTasks};
use crate::runtime::thread_pool::{AtomicCell, Idle};
use crate::runtime::{queue, task};
use crate::util::linked_list::{Link, LinkedList};
use crate::util::FastRand;
use std::cell::RefCell;
@ -44,7 +97,7 @@ struct Core {
lifo_slot: Option<Notified>,
/// The worker-local run queue.
run_queue: queue::Local<Arc<Worker>>,
run_queue: queue::Local<Arc<Shared>>,
/// True if the worker is currently searching for more work. Searching
/// involves attempting to steal from other workers.
@ -53,9 +106,6 @@ struct Core {
/// True if the scheduler is being shutdown
is_shutdown: bool,
/// Tasks owned by the core
tasks: LinkedList<Task, <Task as Link>::Target>,
/// Parker
///
/// Stored in an `Option` as the parker is added / removed to make the
@ -73,11 +123,14 @@ pub(super) struct Shared {
remotes: Box<[Remote]>,
/// Submit work to the scheduler while **not** currently on a worker thread.
inject: queue::Inject<Arc<Worker>>,
inject: Inject<Arc<Shared>>,
/// Coordinates idle workers
idle: Idle,
/// Collection of all active tasks spawned onto this executor.
owned: OwnedTasks<Arc<Shared>>,
/// Cores that have observed the shutdown signal
///
/// The core is **not** placed back in the worker to prevent it from being
@ -89,11 +142,7 @@ pub(super) struct Shared {
/// Used to communicate with a worker from other threads.
struct Remote {
/// Steal tasks from this worker.
steal: queue::Steal<Arc<Worker>>,
/// Transfers tasks to be released. Any worker pushes tasks, only the owning
/// worker pops.
pending_drop: task::TransferStack<Arc<Worker>>,
steal: queue::Steal<Arc<Shared>>,
/// Unparks the associated worker thread
unpark: Unparker,
@ -117,10 +166,10 @@ pub(crate) struct Launch(Vec<Arc<Worker>>);
type RunResult = Result<Box<Core>, ()>;
/// A task handle
type Task = task::Task<Arc<Worker>>;
type Task = task::Task<Arc<Shared>>;
/// A notified task handle
type Notified = task::Notified<Arc<Worker>>;
type Notified = task::Notified<Arc<Shared>>;
// Tracks thread-local state
scoped_thread_local!(static CURRENT: Context);
@ -142,22 +191,18 @@ pub(super) fn create(size: usize, park: Parker) -> (Arc<Shared>, Launch) {
run_queue,
is_searching: false,
is_shutdown: false,
tasks: LinkedList::new(),
park: Some(park),
rand: FastRand::new(seed()),
}));
remotes.push(Remote {
steal,
pending_drop: task::TransferStack::new(),
unpark,
});
remotes.push(Remote { steal, unpark });
}
let shared = Arc::new(Shared {
remotes: remotes.into_boxed_slice(),
inject: queue::Inject::new(),
inject: Inject::new(),
idle: Idle::new(size),
owned: OwnedTasks::new(),
shutdown_cores: Mutex::new(vec![]),
});
@ -203,18 +248,20 @@ where
CURRENT.with(|maybe_cx| {
match (crate::runtime::enter::context(), maybe_cx.is_some()) {
(EnterContext::Entered { .. }, true) => {
// We are on a thread pool runtime thread, so we just need to set up blocking.
// We are on a thread pool runtime thread, so we just need to
// set up blocking.
had_entered = true;
}
(EnterContext::Entered { allow_blocking }, false) => {
// We are on an executor, but _not_ on the thread pool.
// That is _only_ okay if we are in a thread pool runtime's block_on method:
// We are on an executor, but _not_ on the thread pool. That is
// _only_ okay if we are in a thread pool runtime's block_on
// method:
if allow_blocking {
had_entered = true;
return;
} else {
// This probably means we are on the basic_scheduler or in a LocalSet,
// where it is _not_ okay to block.
// This probably means we are on the basic_scheduler or in a
// LocalSet, where it is _not_ okay to block.
panic!("can call blocking only when running on the multi-threaded runtime");
}
}
@ -538,42 +585,28 @@ impl Core {
true
}
/// Runs maintenance work such as free pending tasks and check the pool's
/// state.
/// Runs maintenance work such as checking the pool's state.
fn maintenance(&mut self, worker: &Worker) {
self.drain_pending_drop(worker);
if !self.is_shutdown {
// Check if the scheduler has been shutdown
self.is_shutdown = worker.inject().is_closed();
}
}
// Signals all tasks to shut down, and waits for them to complete. Must run
// before we enter the single-threaded phase of shutdown processing.
/// Signals all tasks to shut down, and waits for them to complete. Must run
/// before we enter the single-threaded phase of shutdown processing.
fn pre_shutdown(&mut self, worker: &Worker) {
// The OwnedTasks was closed in Shared::close.
debug_assert!(worker.shared.owned.is_closed());
// Signal to all tasks to shut down.
for header in self.tasks.iter() {
while let Some(header) = worker.shared.owned.pop_back() {
header.shutdown();
}
loop {
self.drain_pending_drop(worker);
if self.tasks.is_empty() {
break;
}
// Wait until signalled
let park = self.park.as_mut().expect("park missing");
park.park().expect("park failed");
}
}
// Shutdown the core
/// Shutdown the core
fn shutdown(&mut self) {
assert!(self.tasks.is_empty());
// Take the core
let mut park = self.park.take().expect("park missing");
@ -582,149 +615,46 @@ impl Core {
park.shutdown();
}
fn drain_pending_drop(&mut self, worker: &Worker) {
use std::mem::ManuallyDrop;
for task in worker.remote().pending_drop.drain() {
let task = ManuallyDrop::new(task);
// safety: tasks are only pushed into the `pending_drop` stacks that
// are associated with the list they are inserted into. When a task
// is pushed into `pending_drop`, the ref-inc is skipped, so we must
// not ref-dec here.
//
// See `bind` and `release` implementations.
unsafe {
self.tasks.remove(task.header().into());
}
}
}
}
impl Worker {
/// Returns a reference to the scheduler's injection queue
fn inject(&self) -> &queue::Inject<Arc<Worker>> {
fn inject(&self) -> &Inject<Arc<Shared>> {
&self.shared.inject
}
/// Return a reference to this worker's remote data
fn remote(&self) -> &Remote {
&self.shared.remotes[self.index]
}
fn eq(&self, other: &Worker) -> bool {
self.shared.ptr_eq(&other.shared) && self.index == other.index
}
}
impl task::Schedule for Arc<Worker> {
fn bind(task: Task) -> Arc<Worker> {
CURRENT.with(|maybe_cx| {
let cx = maybe_cx.expect("scheduler context missing");
// Track the task
cx.core
.borrow_mut()
.as_mut()
.expect("scheduler core missing")
.tasks
.push_front(task);
// Return a clone of the worker
cx.worker.clone()
})
}
impl task::Schedule for Arc<Shared> {
fn release(&self, task: &Task) -> Option<Task> {
use std::ptr::NonNull;
enum Immediate {
// Task has been synchronously removed from the Core owned by the
// current thread
Removed(Option<Task>),
// Task is owned by another thread, so we need to notify it to clean
// up the task later.
MaybeRemote,
}
let immediate = CURRENT.with(|maybe_cx| {
let cx = match maybe_cx {
Some(cx) => cx,
None => return Immediate::MaybeRemote,
};
if !self.eq(&cx.worker) {
// Task owned by another core, so we need to notify it.
return Immediate::MaybeRemote;
}
let mut maybe_core = cx.core.borrow_mut();
if let Some(core) = &mut *maybe_core {
// Directly remove the task
//
// safety: the task is inserted in the list in `bind`.
unsafe {
let ptr = NonNull::from(task.header());
return Immediate::Removed(core.tasks.remove(ptr));
}
}
Immediate::MaybeRemote
});
// Checks if we were called from within a worker, allowing for immediate
// removal of a scheduled task. Else we have to go through the slower
// process below where we remotely mark a task as dropped.
match immediate {
Immediate::Removed(task) => return task,
Immediate::MaybeRemote => (),
};
// Track the task to be released by the worker that owns it
//
// Safety: We get a new handle without incrementing the ref-count.
// A ref-count is held by the "owned" linked list and it is only
// ever removed from that list as part of the release process: this
// method or popping the task from `pending_drop`. Thus, we can rely
// on the ref-count held by the linked-list to keep the memory
// alive.
//
// When the task is removed from the stack, it is forgotten instead
// of dropped.
let task = unsafe { Task::from_raw(task.header().into()) };
self.remote().pending_drop.push(task);
// The worker core has been handed off to another thread. In the
// event that the scheduler is currently shutting down, the thread
// that owns the task may be waiting on the release to complete
// shutdown.
if self.inject().is_closed() {
self.remote().unpark.unpark();
}
None
// SAFETY: Inserted into owned in bind.
unsafe { self.owned.remove(task) }
}
fn schedule(&self, task: Notified) {
// Because this is not a newly spawned task, if scheduling fails due to
// the runtime shutting down, there is no special work that must happen
// here.
let _ = self.shared.schedule(task, false);
(**self).schedule(task, false);
}
fn yield_now(&self, task: Notified) {
// Because this is not a newly spawned task, if scheduling fails due to
// the runtime shutting down, there is no special work that must happen
// here.
let _ = self.shared.schedule(task, true);
(**self).schedule(task, true);
}
}
impl Shared {
pub(super) fn schedule(&self, task: Notified, is_yield: bool) -> Result<(), Notified> {
pub(super) fn bind_new_task<T>(me: &Arc<Self>, future: T) -> JoinHandle<T::Output>
where
T: Future + Send + 'static,
T::Output: Send + 'static,
{
let (handle, notified) = me.owned.bind(future, me.clone());
if let Some(notified) = notified {
me.schedule(notified, false);
}
handle
}
pub(super) fn schedule(&self, task: Notified, is_yield: bool) {
CURRENT.with(|maybe_cx| {
if let Some(cx) = maybe_cx {
// Make sure the task is part of the **current** scheduler.
@ -732,15 +662,14 @@ impl Shared {
// And the current thread still holds a core
if let Some(core) = cx.core.borrow_mut().as_mut() {
self.schedule_local(core, task, is_yield);
return Ok(());
return;
}
}
}
// Otherwise, use the inject queue
self.inject.push(task)?;
// Otherwise, use the inject queue.
self.inject.push(task);
self.notify_parked();
Ok(())
})
}
@ -776,6 +705,7 @@ impl Shared {
pub(super) fn close(&self) {
if self.inject.close() {
self.owned.close();
self.notify_all();
}
}
@ -825,6 +755,8 @@ impl Shared {
return;
}
debug_assert!(self.owned.is_empty());
for mut core in cores.drain(..) {
core.shutdown();
}

View File

@ -573,6 +573,32 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> {
marker: marker::PhantomData,
})
}
/// Returns a reference to the original `Mutex`.
///
/// ```
/// use tokio::sync::{Mutex, MutexGuard};
///
/// async fn unlock_and_relock<'l>(guard: MutexGuard<'l, u32>) -> MutexGuard<'l, u32> {
/// println!("1. contains: {:?}", *guard);
/// let mutex = MutexGuard::mutex(&guard);
/// drop(guard);
/// let guard = mutex.lock().await;
/// println!("2. contains: {:?}", *guard);
/// guard
/// }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// # let mutex = Mutex::new(0u32);
/// # let guard = mutex.lock().await;
/// # unlock_and_relock(guard).await;
/// # }
/// ```
#[inline]
pub fn mutex(this: &Self) -> &'a Mutex<T> {
this.lock
}
}
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
@ -608,6 +634,35 @@ impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
// === impl OwnedMutexGuard ===
impl<T: ?Sized> OwnedMutexGuard<T> {
/// Returns a reference to the original `Arc<Mutex>`.
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::{Mutex, OwnedMutexGuard};
///
/// async fn unlock_and_relock(guard: OwnedMutexGuard<u32>) -> OwnedMutexGuard<u32> {
/// println!("1. contains: {:?}", *guard);
/// let mutex: Arc<Mutex<u32>> = OwnedMutexGuard::mutex(&guard).clone();
/// drop(guard);
/// let guard = mutex.lock_owned().await;
/// println!("2. contains: {:?}", *guard);
/// guard
/// }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// # let mutex = Arc::new(Mutex::new(0u32));
/// # let guard = mutex.lock_owned().await;
/// # unlock_and_relock(guard).await;
/// # }
/// ```
#[inline]
pub fn mutex(this: &Self) -> &Arc<Mutex<T>> {
&this.lock
}
}
impl<T: ?Sized> Drop for OwnedMutexGuard<T> {
fn drop(&mut self) {
self.lock.s.release(1)

View File

@ -1,4 +1,4 @@
use super::Semaphore;
use super::{Semaphore, SemaphorePermit, TryAcquireError};
use crate::loom::cell::UnsafeCell;
use std::error::Error;
use std::fmt;
@ -8,15 +8,30 @@ use std::ops::Drop;
use std::ptr;
use std::sync::atomic::{AtomicBool, Ordering};
/// A thread-safe cell which can be written to only once.
// This file contains an implementation of an OnceCell. The principle
// behind the safety of the cell is that any thread with an `&OnceCell` may
// access the `value` field according to the following rules:
//
// 1. When `value_set` is false, the `value` field may be modified by the
// thread holding the permit on the semaphore.
// 2. When `value_set` is true, the `value` field may be accessed immutably by
// any thread.
//
// It is an invariant that if the semaphore is closed, then `value_set` is true.
// The reverse does not necessarily hold: `value_set` may briefly be true while
// the semaphore is still open, but in that case the semaphore has no available
// permits.
//
// A thread with a `&mut OnceCell` may modify the value in any way it wants as
// long as the invariants are upheld.
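
The same permit-then-close protocol can be reproduced with Tokio's public `Semaphore`: a single permit serializes the writer (rule 1), and closing the semaphore is the durable signal that the value is readable by everyone (rule 2). A hedged sketch that mirrors, but is not, the private implementation:

```rust
use tokio::sync::{Semaphore, TryAcquireError};

#[tokio::main]
async fn main() {
    let init_guard = Semaphore::new(1);

    // Rule 1: only the holder of the single permit may write the value.
    let permit = init_guard.try_acquire().expect("no writer yet");
    // ... write the value here, then publish it:
    init_guard.close();
    permit.forget();

    // Rule 2: once the semaphore is closed, everyone may read.
    match init_guard.try_acquire() {
        Err(TryAcquireError::Closed) => { /* value is initialized; read it */ }
        _ => unreachable!("the cell was already initialized"),
    }
}
```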
/// A thread-safe cell that can be written to only once.
///
/// Provides the functionality to either set the value, in case `OnceCell`
/// is uninitialized, or get the already initialized value by using an async
/// function via [`OnceCell::get_or_init`].
///
/// [`OnceCell::get_or_init`]: crate::sync::OnceCell::get_or_init
/// A `OnceCell` is typically used for global variables that need to be
/// initialized once on first use, but need no further changes. The `OnceCell`
/// in Tokio allows the initialization procedure to be asynchronous.
///
/// # Examples
///
/// ```
/// use tokio::sync::OnceCell;
///
@ -28,8 +43,28 @@ use std::sync::atomic::{AtomicBool, Ordering};
///
/// #[tokio::main]
/// async fn main() {
/// let result1 = ONCE.get_or_init(some_computation).await;
/// assert_eq!(*result1, 2);
/// let result = ONCE.get_or_init(some_computation).await;
/// assert_eq!(*result, 2);
/// }
/// ```
///
/// It is often useful to write a wrapper method for accessing the value.
///
/// ```
/// use tokio::sync::OnceCell;
///
/// static ONCE: OnceCell<u32> = OnceCell::const_new();
///
/// async fn get_global_integer() -> &'static u32 {
/// ONCE.get_or_init(|| async {
/// 1 + 1
/// }).await
/// }
///
/// #[tokio::main]
/// async fn main() {
/// let result = get_global_integer().await;
/// assert_eq!(*result, 2);
/// }
/// ```
pub struct OnceCell<T> {
@ -68,7 +103,7 @@ impl<T: Eq> Eq for OnceCell<T> {}
impl<T> Drop for OnceCell<T> {
fn drop(&mut self) {
if self.initialized() {
if self.initialized_mut() {
unsafe {
self.value
.with_mut(|ptr| ptr::drop_in_place((&mut *ptr).as_mut_ptr()));
@ -90,7 +125,7 @@ impl<T> From<T> for OnceCell<T> {
}
impl<T> OnceCell<T> {
/// Creates a new uninitialized OnceCell instance.
/// Creates a new empty `OnceCell` instance.
pub fn new() -> Self {
OnceCell {
value_set: AtomicBool::new(false),
@ -99,8 +134,9 @@ impl<T> OnceCell<T> {
}
}
/// Creates a new initialized OnceCell instance if `value` is `Some`, otherwise
/// has the same functionality as [`OnceCell::new`].
/// Creates a new `OnceCell` that contains the provided value, if any.
///
/// If the `Option` is `None`, this is equivalent to `OnceCell::new`.
///
/// [`OnceCell::new`]: crate::sync::OnceCell::new
pub fn new_with(value: Option<T>) -> Self {
@ -111,8 +147,31 @@ impl<T> OnceCell<T> {
}
}
/// Creates a new uninitialized OnceCell instance.
#[cfg(all(feature = "parking_lot", not(all(loom, test)),))]
/// Creates a new empty `OnceCell` instance.
///
/// Equivalent to `OnceCell::new`, except that it can be used in static
/// variables.
///
/// # Example
///
/// ```
/// use tokio::sync::OnceCell;
///
/// static ONCE: OnceCell<u32> = OnceCell::const_new();
///
/// async fn get_global_integer() -> &'static u32 {
/// ONCE.get_or_init(|| async {
/// 1 + 1
/// }).await
/// }
///
/// #[tokio::main]
/// async fn main() {
/// let result = get_global_integer().await;
/// assert_eq!(*result, 2);
/// }
/// ```
#[cfg(all(feature = "parking_lot", not(all(loom, test))))]
#[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))]
pub const fn const_new() -> Self {
OnceCell {
@ -122,33 +181,48 @@ impl<T> OnceCell<T> {
}
}
/// Whether the value of the OnceCell is set or not.
/// Returns `true` if the `OnceCell` currently contains a value, and `false`
/// otherwise.
pub fn initialized(&self) -> bool {
// Using acquire ordering so any thread that reads a true from this
// atomic is able to read the value.
self.value_set.load(Ordering::Acquire)
}
// SAFETY: safe to call only once self.initialized() is true
/// Returns `true` if the `OnceCell` currently contains a value, and `false`
/// otherwise.
fn initialized_mut(&mut self) -> bool {
*self.value_set.get_mut()
}
// SAFETY: The OnceCell must not be empty.
unsafe fn get_unchecked(&self) -> &T {
&*self.value.with(|ptr| (*ptr).as_ptr())
}
// SAFETY: safe to call only once self.initialized() is true. Safe because
// of the mutable reference.
// SAFETY: The OnceCell must not be empty.
unsafe fn get_unchecked_mut(&mut self) -> &mut T {
&mut *self.value.with_mut(|ptr| (*ptr).as_mut_ptr())
}
// SAFETY: safe to call only once a permit on the semaphore has been
// acquired
unsafe fn set_value(&self, value: T) {
self.value.with_mut(|ptr| (*ptr).as_mut_ptr().write(value));
fn set_value(&self, value: T, permit: SemaphorePermit<'_>) -> &T {
// SAFETY: We are holding the only permit on the semaphore.
unsafe {
self.value.with_mut(|ptr| (*ptr).as_mut_ptr().write(value));
}
// Using release ordering so any thread that reads a true from this
// atomic is able to read the value we just stored.
self.value_set.store(true, Ordering::Release);
self.semaphore.close();
permit.forget();
// SAFETY: We just initialized the cell.
unsafe { self.get_unchecked() }
}
/// Tries to get a reference to the value of the OnceCell.
///
/// Returns None if the value of the OnceCell hasn't previously been initialized.
/// Returns a reference to the value currently stored in the `OnceCell`, or
/// `None` if the `OnceCell` is empty.
pub fn get(&self) -> Option<&T> {
if self.initialized() {
Some(unsafe { self.get_unchecked() })
@ -157,179 +231,161 @@ impl<T> OnceCell<T> {
}
}
/// Tries to return a mutable reference to the value of the cell.
/// Returns a mutable reference to the value currently stored in the
/// `OnceCell`, or `None` if the `OnceCell` is empty.
///
/// Returns None if the cell hasn't previously been initialized.
/// Since this call borrows the `OnceCell` mutably, it is safe to mutate the
/// value inside the `OnceCell` — the mutable borrow statically guarantees
/// no other references exist.
pub fn get_mut(&mut self) -> Option<&mut T> {
if self.initialized() {
if self.initialized_mut() {
Some(unsafe { self.get_unchecked_mut() })
} else {
None
}
}
/// Sets the value of the OnceCell to the argument value.
/// Set the value of the `OnceCell` to the given value if the `OnceCell` is
/// empty.
///
/// If the value of the OnceCell was already set prior to this call
/// then [`SetError::AlreadyInitializedError`] is returned. If another thread
/// is initializing the cell while this method is called,
/// [`SetError::InitializingError`] is returned. In order to wait
/// for an ongoing initialization to finish, call
/// [`OnceCell::get_or_init`] instead.
/// If the `OnceCell` already has a value, this call will fail with an
/// [`SetError::AlreadyInitializedError`].
///
/// If the `OnceCell` is empty, but some other task is currently trying to
/// set the value, this call will fail with [`SetError::InitializingError`].
///
/// [`SetError::AlreadyInitializedError`]: crate::sync::SetError::AlreadyInitializedError
/// [`SetError::InitializingError`]: crate::sync::SetError::InitializingError
/// [`OnceCell::get_or_init`]: crate::sync::OnceCell::get_or_init
pub fn set(&self, value: T) -> Result<(), SetError<T>> {
if !self.initialized() {
// Another thread might be initializing the cell, in which case `try_acquire` will
// return an error
match self.semaphore.try_acquire() {
Ok(_permit) => {
if !self.initialized() {
// SAFETY: There is only one permit on the semaphore, hence only one
// mutable reference is created
unsafe { self.set_value(value) };
return Ok(());
} else {
unreachable!(
"acquired the permit after OnceCell value was already initialized."
);
}
}
_ => {
// Couldn't acquire the permit, look if initializing process is already completed
if !self.initialized() {
return Err(SetError::InitializingError(value));
}
}
}
if self.initialized() {
return Err(SetError::AlreadyInitializedError(value));
}
Err(SetError::AlreadyInitializedError(value))
// Another task might be initializing the cell, in which case
// `try_acquire` will return an error. If we succeed in acquiring the
// permit, then we can set the value.
match self.semaphore.try_acquire() {
Ok(permit) => {
debug_assert!(!self.initialized());
self.set_value(value, permit);
Ok(())
}
Err(TryAcquireError::NoPermits) => {
// Some other task is holding the permit. That task is
// currently trying to initialize the value.
Err(SetError::InitializingError(value))
}
Err(TryAcquireError::Closed) => {
// The semaphore was closed. Some other task has initialized
// the value.
Err(SetError::AlreadyInitializedError(value))
}
}
}
/// Gets the value currently in the `OnceCell`, or initializes it with the
/// given asynchronous operation.
///
/// If some other task is currently working on initializing the `OnceCell`,
/// this call will wait for that other task to finish, then return the value
/// that the other task produced.
///
/// If the provided operation is cancelled or panics, the initialization
/// attempt is cancelled. If there are other tasks waiting for the value to
/// be initialized, one of them will start another attempt at initializing
/// the value.
///
/// This will deadlock if `f` tries to initialize the cell recursively.
pub async fn get_or_init<F, Fut>(&self, f: F) -> &T
where
F: FnOnce() -> Fut,
Fut: Future<Output = T>,
{
if self.initialized() {
// SAFETY: The OnceCell has been fully initialized.
unsafe { self.get_unchecked() }
} else {
// Here we try to acquire the semaphore permit. Holding the permit
// allows us to set the value of the OnceCell, and prevents
// other tasks from initializing the OnceCell while we are holding
// it.
match self.semaphore.acquire().await {
Ok(permit) => {
debug_assert!(!self.initialized());
// If `f()` panics or `select!` is called, this
// `get_or_init` call is aborted and the semaphore permit is
// dropped.
let value = f().await;
self.set_value(value, permit)
}
Err(_) => {
debug_assert!(self.initialized());
// SAFETY: The semaphore has been closed. This only happens
// when the OnceCell is fully initialized.
unsafe { self.get_unchecked() }
}
}
}
}
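A small usage sketch of `get_or_init` (assuming the `full` feature set, as in the README); only the first call runs its initializer:

```rust
use tokio::sync::OnceCell;

async fn expensive_init() -> u32 {
    1 + 1
}

#[tokio::main]
async fn main() {
    let cell = OnceCell::new();

    // The first call runs `expensive_init`; the second returns the value
    // that is already stored.
    assert_eq!(cell.get_or_init(expensive_init).await, &2);
    assert_eq!(cell.get_or_init(expensive_init).await, &2);
}
```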
/// Gets the value currently in the `OnceCell`, or initializes it with the
/// given asynchronous operation.
///
/// If some other task is currently working on initializing the `OnceCell`,
/// this call will wait for that other task to finish, then return the value
/// that the other task produced.
///
/// If the provided operation returns an error, is cancelled or panics, the
/// initialization attempt is cancelled. If there are other tasks waiting
/// for the value to be initialized, one of them will start another attempt
/// at initializing the value.
///
/// This will deadlock if `f` tries to initialize the cell recursively.
pub async fn get_or_try_init<E, F, Fut>(&self, f: F) -> Result<&T, E>
where
F: FnOnce() -> Fut,
Fut: Future<Output = Result<T, E>>,
{
if self.initialized() {
// SAFETY: The OnceCell has been fully initialized.
unsafe { Ok(self.get_unchecked()) }
} else {
// Here we try to acquire the semaphore permit. Holding the permit
// allows us to set the value of the OnceCell, and prevents
// other tasks from initializing the OnceCell while we are holding
// it.
match self.semaphore.acquire().await {
Ok(permit) => {
debug_assert!(!self.initialized());
// If `f()` panics or `select!` is called, this
// `get_or_try_init` call is aborted and the semaphore
// permit is dropped.
let value = f().await;
match value {
Ok(value) => Ok(self.set_value(value, permit)),
Err(e) => Err(e),
}
}
Err(_) => {
debug_assert!(self.initialized());
// SAFETY: The semaphore has been closed. This only happens
// when the OnceCell is fully initialized.
unsafe { Ok(self.get_unchecked()) }
}
}
}
}
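And the fallible counterpart; on `Err` the cell stays empty so a later call can retry, a sketch:

```rust
use tokio::sync::OnceCell;

async fn try_init() -> Result<u32, &'static str> {
    Ok(5)
}

#[tokio::main]
async fn main() {
    let cell: OnceCell<u32> = OnceCell::new();

    // On `Ok` the produced value is stored in the cell.
    assert_eq!(cell.get_or_try_init(try_init).await, Ok(&5));
    assert_eq!(cell.get(), Some(&5));
}
```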
/// Takes the value from the cell, destroying the cell in the process.
/// Returns `None` if the cell is empty.
pub fn into_inner(mut self) -> Option<T> {
if self.initialized_mut() {
// Set to uninitialized for the destructor of `OnceCell` to work properly
*self.value_set.get_mut() = false;
Some(unsafe { self.value.with(|ptr| ptr::read(ptr).assume_init()) })
@ -338,20 +394,18 @@ impl<T> OnceCell<T> {
}
}
/// Takes ownership of the current value, leaving the cell empty. Returns
/// `None` if the cell is empty.
pub fn take(&mut self) -> Option<T> {
std::mem::take(self).into_inner()
}
}
// Since `get` gives us access to immutable references of the OnceCell, OnceCell
// can only be Sync if T is Sync, otherwise OnceCell would allow sharing
// references of !Sync values across threads. We need T to be Send in order for
// OnceCell to be Sync because we can use `set` on `&OnceCell<T>` to send values
// (of type T) across threads.
unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
// Access to OnceCell's value is guarded by the semaphore permit
@ -359,20 +413,17 @@ unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
// it's safe to send it to another thread
unsafe impl<T: Send> Send for OnceCell<T> {}
/// Errors that can be returned from [`OnceCell::set`].
///
/// [`OnceCell::set`]: crate::sync::OnceCell::set
#[derive(Debug, PartialEq)]
pub enum SetError<T> {
/// The cell was already initialized when [`OnceCell::set`] was called.
///
/// [`OnceCell::set`]: crate::sync::OnceCell::set
AlreadyInitializedError(T),
/// The cell is currently being initialized.
InitializingError(T),
}

View File

@ -56,7 +56,7 @@
use crate::sync::notify::Notify;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::atomic::Ordering::Relaxed;
use crate::loom::sync::{Arc, RwLock, RwLockReadGuard};
use std::ops;
@ -74,7 +74,7 @@ pub struct Receiver<T> {
shared: Arc<Shared<T>>,
/// Last observed version
version: Version,
}
/// Sends values to the associated [`Receiver`](struct@Receiver).
@ -104,7 +104,7 @@ struct Shared<T> {
///
/// The lowest bit represents a "closed" state. The rest of the bits
/// represent the current version.
state: AtomicState,
/// Tracks the number of `Receiver` instances
ref_count_rx: AtomicUsize,
@ -152,7 +152,69 @@ pub mod error {
impl std::error::Error for RecvError {}
}
use self::state::{AtomicState, Version};
mod state {
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::atomic::Ordering::SeqCst;
const CLOSED: usize = 1;
/// The version part of the state. The lowest bit is always zero.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(super) struct Version(usize);
/// Snapshot of the state. The first bit is used as the CLOSED bit.
/// The remaining bits are used as the version.
#[derive(Copy, Clone, Debug)]
pub(super) struct StateSnapshot(usize);
/// The state stored in an atomic integer.
#[derive(Debug)]
pub(super) struct AtomicState(AtomicUsize);
impl Version {
/// Get the initial version when creating the channel.
pub(super) fn initial() -> Self {
Version(0)
}
}
impl StateSnapshot {
/// Extract the version from the state.
pub(super) fn version(self) -> Version {
Version(self.0 & !CLOSED)
}
/// Is the closed bit set?
pub(super) fn is_closed(self) -> bool {
(self.0 & CLOSED) == CLOSED
}
}
impl AtomicState {
/// Create a new `AtomicState` that is not closed and which has the
/// version set to `Version::initial()`.
pub(super) fn new() -> Self {
AtomicState(AtomicUsize::new(0))
}
/// Load the current value of the state.
pub(super) fn load(&self) -> StateSnapshot {
StateSnapshot(self.0.load(SeqCst))
}
/// Increment the version counter.
pub(super) fn increment_version(&self) {
// Increment by two to avoid touching the CLOSED bit.
self.0.fetch_add(2, SeqCst);
}
/// Set the closed bit in the state.
pub(super) fn set_closed(&self) {
self.0.fetch_or(CLOSED, SeqCst);
}
}
}
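The packing scheme can be checked with plain integer arithmetic; a standalone sketch (not tokio code) mirroring the operations above:

```rust
const CLOSED: usize = 1;

fn main() {
    let mut state: usize = 0;

    state += 2; // increment_version: adding 2 never touches the CLOSED bit
    state += 2;
    assert_eq!(state & !CLOSED, 4); // version()
    assert_eq!(state & CLOSED, 0); // is_closed() is false

    state |= CLOSED; // set_closed
    assert_eq!(state & !CLOSED, 4); // the version survives closing
    assert_eq!(state & CLOSED, CLOSED); // is_closed() is true
}
```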
/// Creates a new watch channel, returning the "send" and "receive" handles.
///
@ -184,7 +246,7 @@ const CLOSED: usize = 1;
pub fn channel<T>(init: T) -> (Sender<T>, Receiver<T>) {
let shared = Arc::new(Shared {
value: RwLock::new(init),
state: AtomicState::new(),
ref_count_rx: AtomicUsize::new(1),
notify_rx: Notify::new(),
notify_tx: Notify::new(),
@ -194,13 +256,16 @@ pub fn channel<T>(init: T) -> (Sender<T>, Receiver<T>) {
shared: shared.clone(),
};
let rx = Receiver {
shared,
version: Version::initial(),
};
(tx, rx)
}
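The public behavior is unchanged by this refactor; a minimal end-to-end sketch of the channel:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel("hello");

    tokio::spawn(async move {
        // Each send bumps the shared version, waking waiting receivers.
        tx.send("world").unwrap();
    });

    // `changed` resolves once the receiver's observed version is behind
    // the shared one.
    rx.changed().await.unwrap();
    assert_eq!(*rx.borrow(), "world");
}
```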
impl<T> Receiver<T> {
fn from_shared(version: Version, shared: Arc<Shared<T>>) -> Self {
// No synchronization necessary as this is only used as a counter and
// not memory access.
shared.ref_count_rx.fetch_add(1, Relaxed);
@ -247,7 +312,7 @@ impl<T> Receiver<T> {
/// [`changed`]: Receiver::changed
pub fn borrow_and_update(&mut self) -> Ref<'_, T> {
let inner = self.shared.value.read().unwrap();
self.version = self.shared.state.load().version();
Ref { inner }
}
@ -315,11 +380,11 @@ impl<T> Receiver<T> {
fn maybe_changed<T>(
shared: &Shared<T>,
version: &mut Version,
) -> Option<Result<(), error::RecvError>> {
// Load the version from the state
let state = shared.state.load();
let new_version = state.version();
if *version != new_version {
// Observe the new version and return
@ -327,7 +392,7 @@ fn maybe_changed<T>(
return Some(Ok(()));
}
if state.is_closed() {
// All receivers have dropped.
return Some(Err(error::RecvError(())));
}
@ -368,8 +433,7 @@ impl<T> Sender<T> {
let mut lock = self.shared.value.write().unwrap();
*lock = value;
self.shared.state.increment_version();
// Release the write lock.
//
@ -463,7 +527,7 @@ impl<T> Sender<T> {
cfg_signal_internal! {
pub(crate) fn subscribe(&self) -> Receiver<T> {
let shared = self.shared.clone();
let version = shared.state.load().version();
Receiver::from_shared(version, shared)
}
@ -494,7 +558,7 @@ impl<T> Sender<T> {
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
self.shared.state.set_closed();
self.shared.notify_rx.notify_waiters();
}
}

View File

@ -89,13 +89,14 @@ cfg_rt! {
///
/// Tokio will spawn more blocking threads when they are requested through this
/// function until the upper limit configured on the [`Builder`] is reached.
/// After reaching the upper limit, the tasks are put in a queue.
/// The thread limit is very large by default, because `spawn_blocking` is often
/// used for various kinds of IO operations that cannot be performed
/// asynchronously. When you run CPU-bound code using `spawn_blocking`, you
/// should keep this large upper limit in mind. When running many CPU-bound
/// computations, a semaphore or some other synchronization primitive should be
/// used to limit the number of computations executed in parallel. Specialized
/// CPU-bound executors, such as [rayon], may also be a good fit.
///
/// This function is intended for non-async operations that eventually finish on
/// their own. If you want to spawn an ordinary thread, you should use
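A hedged sketch of the semaphore pattern recommended above; the limit of 4 and the busy-loop workload are illustrative only:

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Cap CPU-bound work at 4 concurrent blocking tasks, even though the
    // blocking pool itself would accept far more.
    let limit = Arc::new(Semaphore::new(4));
    let mut handles = Vec::new();

    for i in 0..16u64 {
        let permit = limit.clone().acquire_owned().await.unwrap();
        handles.push(tokio::task::spawn_blocking(move || {
            let _permit = permit; // released when the closure returns
            (0..1_000_000u64).fold(i, |acc, x| acc.wrapping_add(x))
        }));
    }

    for handle in handles {
        handle.await.unwrap();
    }
}
```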

View File

@ -1,8 +1,7 @@
//! Runs `!Send` futures on the current thread.
use crate::loom::sync::{Arc, Mutex};
use crate::runtime::task::{self, JoinHandle, LocalOwnedTasks, Task};
use crate::sync::AtomicWaker;
use std::cell::{Cell, RefCell};
use std::collections::VecDeque;
@ -233,7 +232,7 @@ struct Context {
struct Tasks {
/// Collection of all active tasks spawned onto this executor.
owned: LinkedList<Task<Arc<Shared>>, <Task<Arc<Shared>> as Link>::Target>,
owned: LocalOwnedTasks<Arc<Shared>>,
/// Local run queue sender and receiver.
queue: VecDeque<task::Notified<Arc<Shared>>>,
@ -309,10 +308,12 @@ cfg_rt! {
let cx = maybe_cx
.expect("`spawn_local` called from outside of a `task::LocalSet`");
let (handle, notified) = cx.tasks.borrow_mut().owned.bind(future, cx.shared.clone());
if let Some(notified) = notified {
cx.shared.schedule(notified);
}
handle
})
}
@ -334,7 +335,7 @@ impl LocalSet {
tick: Cell::new(0),
context: Context {
tasks: RefCell::new(Tasks {
owned: LocalOwnedTasks::new(),
queue: VecDeque::with_capacity(INITIAL_CAPACITY),
}),
shared: Arc::new(Shared {
@ -389,8 +390,18 @@ impl LocalSet {
F::Output: 'static,
{
let future = crate::util::trace::task(future, "local", None);
let (handle, notified) = self
.context
.tasks
.borrow_mut()
.owned
.bind(future, self.context.shared.clone());
if let Some(notified) = notified {
self.context.shared.schedule(notified);
}
self.context.shared.waker.wake();
handle
}
@ -602,6 +613,12 @@ impl Default for LocalSet {
impl Drop for LocalSet {
fn drop(&mut self) {
self.with(|| {
// Close the LocalOwnedTasks. This ensures that any calls to
// spawn_local in the destructor of a future on this LocalSet will
// immediately cancel the task, and prevents the task from being
// added to `owned`.
self.context.tasks.borrow_mut().owned.close();
// Loop required here to ensure borrow is dropped between iterations
#[allow(clippy::while_let_loop)]
loop {
@ -691,26 +708,15 @@ impl Shared {
}
impl task::Schedule for Arc<Shared> {
fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
CURRENT.with(|maybe_cx| {
let cx = maybe_cx.expect("scheduler context missing");
assert!(cx.shared.ptr_eq(self));
// safety: task must be contained by list. It is inserted into the
// list when spawning.
unsafe { cx.tasks.borrow_mut().owned.remove(&task) }
})
}

View File

@ -304,4 +304,9 @@ cfg_rt! {
mod builder;
pub use builder::Builder;
}
/// Task-related futures.
pub mod futures {
pub use super::task_local::TaskLocalFuture;
}
}
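Exposing `TaskLocalFuture` makes the future returned by `LocalKey::scope` (changed below) nameable; a sketch, where the `scoped` helper is hypothetical:

```rust
use std::future::Future;
use tokio::task::futures::TaskLocalFuture;

tokio::task_local! {
    static ID: u32;
}

// `scope` now returns a concrete future type instead of being an `async fn`,
// so the future can appear in signatures or be stored in a struct.
fn scoped() -> TaskLocalFuture<u32, impl Future<Output = u32>> {
    ID.scope(7, async { ID.get() + 1 })
}

#[tokio::main]
async fn main() {
    assert_eq!(scoped().await, 8);
}
```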

View File

@ -2,6 +2,7 @@ use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::error::Error;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, thread};
@ -115,7 +116,7 @@ impl<T: 'static> LocalKey<T> {
/// }).await;
/// # }
/// ```
pub fn scope<F>(&'static self, value: T, f: F) -> TaskLocalFuture<T, F>
where
F: Future,
{
@ -123,8 +124,8 @@ impl<T: 'static> LocalKey<T> {
local: &self,
slot: Some(value),
future: f,
_pinned: PhantomPinned,
}
}
/// Sets a value `T` as the task-local value for the closure `F`.
@ -148,12 +149,14 @@ impl<T: 'static> LocalKey<T> {
where
F: FnOnce() -> R,
{
let scope = TaskLocalFuture {
local: &self,
slot: Some(value),
future: (),
_pinned: PhantomPinned,
};
crate::pin!(scope);
scope.with_task(|_| f())
}
/// Accesses the current task-local and runs the provided closure.
@ -206,11 +209,37 @@ impl<T: 'static> fmt::Debug for LocalKey<T> {
}
pin_project! {
/// A future that sets a value `T` of a task local for the future `F` during
/// its execution.
///
/// The value of the task-local must be `'static` and will be dropped on the
/// completion of the future.
///
/// Created by the function [`LocalKey::scope`](self::LocalKey::scope).
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub struct TaskLocalFuture<T, F>
where
T: 'static
{
local: &'static LocalKey<T>,
slot: Option<T>,
#[pin]
future: F,
#[pin]
_pinned: PhantomPinned,
}
}
@ -252,10 +281,6 @@ impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> {
}
}
/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with).
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {

View File

@ -236,37 +236,6 @@ impl<L: Link> Default for LinkedList<L, L::Target> {
}
}
// ===== impl DrainFilter =====
cfg_io_readiness! {
@ -645,24 +614,6 @@ mod tests {
}
}
proptest::proptest! {
#[test]
fn fuzz_linked_list(ops: Vec<usize>) {

View File

@ -21,6 +21,9 @@ cfg_rt! {
mod wake;
pub(crate) use wake::WakerRef;
pub(crate) use wake::{waker_ref, Wake};
mod sync_wrapper;
pub(crate) use sync_wrapper::SyncWrapper;
}
cfg_rt_multi_thread! {

View File

@ -0,0 +1,26 @@
//! This module contains a type that can make `Send + !Sync` types `Sync` by
//! disallowing all immutable access to the value.
//!
//! A similar primitive is provided in the `sync_wrapper` crate.
pub(crate) struct SyncWrapper<T> {
value: T,
}
// safety: The SyncWrapper being `Send` allows you to send the inner value across
// thread boundaries.
unsafe impl<T: Send> Send for SyncWrapper<T> {}
// safety: An immutable reference to a SyncWrapper is useless, so moving such an
// immutable reference across threads is safe.
unsafe impl<T> Sync for SyncWrapper<T> {}
impl<T> SyncWrapper<T> {
pub(crate) fn new(value: T) -> Self {
Self { value }
}
pub(crate) fn into_inner(self) -> T {
self.value
}
}
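Since the type is crate-private, here is a standalone re-statement of the idea for illustration (none of these names are tokio API):

```rust
use std::cell::Cell;

struct SyncWrapper<T> {
    value: T,
}

// Safety: no method hands out `&T`, so a shared `&SyncWrapper<T>` cannot be
// used to reach the inner value from another thread.
unsafe impl<T> Sync for SyncWrapper<T> {}

impl<T> SyncWrapper<T> {
    fn new(value: T) -> Self {
        Self { value }
    }
    fn into_inner(self) -> T {
        self.value
    }
}

fn require_send_sync<T: Send + Sync>(_: &T) {}

fn main() {
    // `Cell<u8>` is `Send` but not `Sync`; wrapped, it satisfies both bounds.
    let wrapped = SyncWrapper::new(Cell::new(7u8));
    require_send_sync(&wrapped);
    assert_eq!(wrapped.into_inner().get(), 7);
}
```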

View File

@ -4,13 +4,30 @@
use std::cell::Cell;
use std::future::Future;
use std::io::SeekFrom;
use std::net::SocketAddr;
use std::pin::Pin;
use std::rc::Rc;
use tokio::net::TcpStream;
use tokio::time::{Duration, Instant};
// The names of these structs behave better when sorted.
// Send: Yes, Sync: Yes
#[derive(Clone)]
struct YY {}
// Send: Yes, Sync: No
#[derive(Clone)]
struct YN {
_value: Cell<u8>,
}
// Send: No, Sync: No
#[derive(Clone)]
struct NN {
_value: Rc<u8>,
}
#[allow(dead_code)]
type BoxFutureSync<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send + Sync>>;
#[allow(dead_code)]
@ -19,11 +36,11 @@ type BoxFutureSend<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> +
type BoxFuture<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T>>>;
#[allow(dead_code)]
type BoxAsyncRead = std::pin::Pin<Box<dyn tokio::io::AsyncBufRead + Send + Sync>>;
#[allow(dead_code)]
type BoxAsyncSeek = std::pin::Pin<Box<dyn tokio::io::AsyncSeek + Send + Sync>>;
#[allow(dead_code)]
type BoxAsyncWrite = std::pin::Pin<Box<dyn tokio::io::AsyncWrite + Send + Sync>>;
#[allow(dead_code)]
fn require_send<T: Send>(_t: &T) {}
@ -59,310 +76,577 @@ macro_rules! into_todo {
x
}};
}
macro_rules! async_assert_fn_send {
(Send & $(!)?Sync & $(!)?Unpin, $value:expr) => {
require_send(&$value);
};
(!Send & $(!)?Sync & $(!)?Unpin, $value:expr) => {
AmbiguousIfSend::some_item(&$value);
};
}
macro_rules! async_assert_fn_sync {
($(!)?Send & Sync & $(!)?Unpin, $value:expr) => {
require_sync(&$value);
};
($(!)?Send & !Sync & $(!)?Unpin, $value:expr) => {
AmbiguousIfSync::some_item(&$value);
};
}
macro_rules! async_assert_fn_unpin {
($(!)?Send & $(!)?Sync & Unpin, $value:expr) => {
require_unpin(&$value);
};
($(!)?Send & $(!)?Sync & !Unpin, $value:expr) => {
AmbiguousIfUnpin::some_item(&$value);
};
}
macro_rules! async_assert_fn {
($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): $($tok:tt)*) => {
#[allow(unreachable_code)]
#[allow(unused_variables)]
const _: fn() = || {
let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
async_assert_fn_send!($($tok)*, f);
async_assert_fn_sync!($($tok)*, f);
async_assert_fn_unpin!($($tok)*, f);
};
};
}
macro_rules! assert_value {
($type:ty: $($tok:tt)*) => {
#[allow(unreachable_code)]
#[allow(unused_variables)]
const _: fn() = || {
let f: $type = todo!();
async_assert_fn_send!($($tok)*, f);
async_assert_fn_sync!($($tok)*, f);
async_assert_fn_unpin!($($tok)*, f);
};
};
}
assert_value!(tokio::fs::DirBuilder: Send & Sync & Unpin);
assert_value!(tokio::fs::DirEntry: Send & Sync & Unpin);
assert_value!(tokio::fs::File: Send & Sync & Unpin);
assert_value!(tokio::fs::OpenOptions: Send & Sync & Unpin);
assert_value!(tokio::fs::ReadDir: Send & Sync & Unpin);
async_assert_fn!(tokio::fs::canonicalize(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::copy(&str, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::create_dir(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::create_dir_all(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::hard_link(&str, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::metadata(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read_dir(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read_link(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read_to_string(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::remove_dir(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::remove_dir_all(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::remove_file(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::rename(&str, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::set_permissions(&str, std::fs::Permissions): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::symlink_metadata(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::write(&str, Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::ReadDir::next_entry(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::OpenOptions::open(_, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::DirBuilder::create(_, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::DirEntry::metadata(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::DirEntry::file_type(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::open(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::create(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::sync_all(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::sync_data(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::set_len(_, u64): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::metadata(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::try_clone(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::into_std(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::set_permissions(_, std::fs::Permissions): Send & Sync & !Unpin);
assert_value!(tokio::net::TcpListener: Send & Sync & Unpin);
assert_value!(tokio::net::TcpSocket: Send & Sync & Unpin);
assert_value!(tokio::net::TcpStream: Send & Sync & Unpin);
assert_value!(tokio::net::UdpSocket: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::OwnedReadHalf: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::OwnedWriteHalf: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::ReadHalf<'_>: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::ReuniteError: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::WriteHalf<'_>: Send & Sync & Unpin);
async_assert_fn!(tokio::net::TcpListener::accept(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpListener::bind(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::connect(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::peek(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::readable(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::writable(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::bind(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::connect(_, SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::peek_from(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::readable(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::recv(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::recv_from(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::send(_, &[u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::send_to(_, &[u8], SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::writable(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::lookup_host(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::tcp::ReadHalf::peek(_, &mut [u8]): Send & Sync & !Unpin);
#[cfg(unix)]
mod unix_datagram {
use super::*;
use tokio::net::*;
assert_value!(UnixDatagram: Send & Sync & Unpin);
assert_value!(UnixListener: Send & Sync & Unpin);
assert_value!(UnixStream: Send & Sync & Unpin);
assert_value!(unix::OwnedReadHalf: Send & Sync & Unpin);
assert_value!(unix::OwnedWriteHalf: Send & Sync & Unpin);
assert_value!(unix::ReadHalf<'_>: Send & Sync & Unpin);
assert_value!(unix::ReuniteError: Send & Sync & Unpin);
assert_value!(unix::SocketAddr: Send & Sync & Unpin);
assert_value!(unix::UCred: Send & Sync & Unpin);
assert_value!(unix::WriteHalf<'_>: Send & Sync & Unpin);
async_assert_fn!(UnixDatagram::readable(_): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::recv(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::recv_from(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::send(_, &[u8]): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::send_to(_, &[u8], &str): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::writable(_): Send & Sync & !Unpin);
async_assert_fn!(UnixListener::accept(_): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::connect(&str): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::readable(_): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::writable(_): Send & Sync & !Unpin);
}
#[cfg(windows)]
mod windows_named_pipe {
use super::*;
use tokio::net::windows::named_pipe::*;
assert_value!(ClientOptions: Send & Sync & Unpin);
assert_value!(NamedPipeClient: Send & Sync & Unpin);
assert_value!(NamedPipeServer: Send & Sync & Unpin);
assert_value!(PipeEnd: Send & Sync & Unpin);
assert_value!(PipeInfo: Send & Sync & Unpin);
assert_value!(PipeMode: Send & Sync & Unpin);
assert_value!(ServerOptions: Send & Sync & Unpin);
async_assert_fn!(NamedPipeClient::readable(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeClient::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeClient::writable(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::connect(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::readable(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::writable(_): Send & Sync & !Unpin);
}
assert_value!(tokio::process::Child: Send & Sync & Unpin);
assert_value!(tokio::process::ChildStderr: Send & Sync & Unpin);
assert_value!(tokio::process::ChildStdin: Send & Sync & Unpin);
assert_value!(tokio::process::ChildStdout: Send & Sync & Unpin);
assert_value!(tokio::process::Command: Send & Sync & Unpin);
async_assert_fn!(tokio::process::Child::kill(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::process::Child::wait(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::process::Child::wait_with_output(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::signal::ctrl_c(): Send & Sync & !Unpin);
#[cfg(unix)]
mod unix_signal {
use super::*;
assert_value!(tokio::signal::unix::Signal: Send & Sync & Unpin);
assert_value!(tokio::signal::unix::SignalKind: Send & Sync & Unpin);
async_assert_fn!(tokio::signal::unix::Signal::recv(_): Send & Sync & !Unpin);
}
#[cfg(windows)]
mod windows_signal {
use super::*;
assert_value!(tokio::signal::windows::CtrlC: Send & Sync & Unpin);
assert_value!(tokio::signal::windows::CtrlBreak: Send & Sync & Unpin);
async_assert_fn!(tokio::signal::windows::CtrlC::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::signal::windows::CtrlBreak::recv(_): Send & Sync & !Unpin);
}
assert_value!(tokio::sync::AcquireError: Send & Sync & Unpin);
assert_value!(tokio::sync::Barrier: Send & Sync & Unpin);
assert_value!(tokio::sync::BarrierWaitResult: Send & Sync & Unpin);
assert_value!(tokio::sync::MappedMutexGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::MappedMutexGuard<'_, YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::MappedMutexGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::Mutex<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::Mutex<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::Mutex<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::MutexGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::MutexGuard<'_, YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::MutexGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::Notify: Send & Sync & Unpin);
assert_value!(tokio::sync::OnceCell<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OnceCell<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::OnceCell<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedMutexGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedMutexGuard<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedMutexGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockReadGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockReadGuard<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockReadGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockWriteGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockWriteGuard<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockWriteGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedSemaphorePermit: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLock<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLock<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLock<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLockReadGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockReadGuard<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockReadGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLockWriteGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockWriteGuard<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockWriteGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::Semaphore: Send & Sync & Unpin);
assert_value!(tokio::sync::SemaphorePermit<'_>: Send & Sync & Unpin);
assert_value!(tokio::sync::TryAcquireError: Send & Sync & Unpin);
assert_value!(tokio::sync::TryLockError: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::broadcast::Receiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::broadcast::Sender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Sender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::futures::Notified<'_>: Send & Sync & !Unpin);
assert_value!(tokio::sync::mpsc::OwnedPermit<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::OwnedPermit<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::OwnedPermit<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Permit<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::Permit<'_, YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Permit<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::Receiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::Sender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Sender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedReceiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedReceiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedReceiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedSender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedSender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedSender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendError<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendError<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendError<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendTimeoutError<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::TrySendError<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::TrySendError<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::TrySendError<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::oneshot::Receiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::oneshot::Sender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Sender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::watch::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Receiver<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::watch::Ref<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Ref<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Ref<'_, YY>: !Send & Sync & Unpin);
assert_value!(tokio::sync::watch::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Sender<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Sender<YY>: Send & Sync & Unpin);
async_assert_fn!(tokio::sync::Barrier::wait(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<NN>::lock(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<NN>::lock_owned(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YN>::lock(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YN>::lock_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YY>::lock(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YY>::lock_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Notify::notified(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send + Sync>>): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send>>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send + Sync>>): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send>>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<NN>::read(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<NN>::write(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YN>::read(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YN>::write(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YY>::read(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YY>::write(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire_many(_, u32): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire_many_owned(_, u32): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::broadcast::Receiver<NN>::recv(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::broadcast::Receiver<YN>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::broadcast::Receiver<YY>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Receiver<NN>::recv(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Receiver<YN>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Receiver<YY>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve_owned(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send(_, NN): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send_timeout(_, NN, Duration): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send(_, YN): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send_timeout(_, YN, Duration): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send(_, YY): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send_timeout(_, YY, Duration): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<NN>::recv(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YN>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YY>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedSender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YN>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::oneshot::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::oneshot::Sender<YN>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::oneshot::Sender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Receiver<NN>::changed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Receiver<YN>::changed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Receiver<YY>::changed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Sender<YN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Sender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSync<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSync<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSend<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::unconstrained(BoxFuture<()>): !Send & !Sync & Unpin);
async_assert_fn!(tokio::task::unconstrained(BoxFutureSend<()>): Send & !Sync & Unpin);
async_assert_fn!(tokio::task::unconstrained(BoxFutureSync<()>): Send & Sync & Unpin);
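// `unconstrained` merely wraps the future it is given, so the rows above show
// it inheriting `Send`/`Sync` from the inner future while staying `Unpin`
// (the boxed future is `Unpin`). A sketch of its intended use, opting a task
// out of tokio's cooperative budget; note that this can starve other tasks:
async fn _drain_unconstrained(mut rx: tokio::sync::mpsc::UnboundedReceiver<u32>) {
    tokio::task::unconstrained(async {
        while rx.recv().await.is_some() {}
    })
    .await;
}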
assert_value!(tokio::task::LocalSet: !Send & !Sync & Unpin);
assert_value!(tokio::task::JoinHandle<YY>: Send & Sync & Unpin);
assert_value!(tokio::task::JoinHandle<YN>: Send & Sync & Unpin);
assert_value!(tokio::task::JoinHandle<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::task::JoinError: Send & Sync & Unpin);
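// For reference: the probe types used throughout are assumed to follow the
// naming `YY` = `Send + Sync`, `YN` = `Send + !Sync`, `NN` = `!Send + !Sync`
// (defined earlier in this file). The positive half of these assertions can
// be as simple as trait-bound helper functions; a minimal sketch (the real
// macros are assumed to also cover the negative `!Send`/`!Sync`/`!Unpin`
// cases by other means):
fn _require_send<T: Send>() {}
fn _require_sync<T: Sync>() {}
fn _require_unpin<T: Unpin>() {}
fn _joinhandle_is_send_sync_unpin() {
    // Equivalent to `assert_value!(tokio::task::JoinHandle<u32>: Send & Sync & Unpin)`.
    _require_send::<tokio::task::JoinHandle<u32>>();
    _require_sync::<tokio::task::JoinHandle<u32>>();
    _require_unpin::<tokio::task::JoinHandle<u32>>();
}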
async_assert_fn!(tokio::sync::mpsc::Receiver<u8>::recv(_): Send & Sync);
async_assert_fn!(tokio::sync::mpsc::Receiver<Cell<u8>>::recv(_): Send & Sync);
async_assert_fn!(tokio::sync::mpsc::Receiver<Rc<u8>>::recv(_): !Send & !Sync);
async_assert_fn!(tokio::sync::mpsc::Sender<u8>::send(_, u8): Send & Sync);
async_assert_fn!(tokio::sync::mpsc::Sender<Cell<u8>>::send(_, Cell<u8>): Send & !Sync);
async_assert_fn!(tokio::sync::mpsc::Sender<Rc<u8>>::send(_, Rc<u8>): !Send & !Sync);
assert_value!(tokio::runtime::Builder: Send & Sync & Unpin);
assert_value!(tokio::runtime::EnterGuard<'_>: Send & Sync & Unpin);
assert_value!(tokio::runtime::Handle: Send & Sync & Unpin);
assert_value!(tokio::runtime::Runtime: Send & Sync & Unpin);
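// `Handle: Send + Sync + Unpin` is what allows a runtime handle to be handed
// to ordinary threads. A sketch:
fn _handle_across_threads() {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let handle = rt.handle().clone();
    std::thread::spawn(move || {
        // Spawn onto the runtime from a non-async thread.
        let _ = handle.spawn(async { 1 + 1 });
    })
    .join()
    .unwrap();
}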
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<u8>::recv(_): Send & Sync);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<Cell<u8>>::recv(_): Send & Sync);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<Rc<u8>>::recv(_): !Send & !Sync);
assert_value!(tokio::time::Interval: Send & Sync & Unpin);
assert_value!(tokio::time::Instant: Send & Sync & Unpin);
assert_value!(tokio::time::Sleep: Send & Sync & !Unpin);
assert_value!(tokio::time::Timeout<BoxFutureSync<()>>: Send & Sync & !Unpin);
assert_value!(tokio::time::Timeout<BoxFutureSend<()>>: Send & !Sync & !Unpin);
assert_value!(tokio::time::Timeout<BoxFuture<()>>: !Send & !Sync & !Unpin);
assert_value!(tokio::time::error::Elapsed: Send & Sync & Unpin);
assert_value!(tokio::time::error::Error: Send & Sync & Unpin);
async_assert_fn!(tokio::time::advance(Duration): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::sleep(Duration): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::sleep_until(Instant): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSync<()>): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSync<()>): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::Interval::tick(_): Send & Sync & !Unpin);
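// `Sleep` being `!Unpin` (asserted above) means it must be pinned before it
// can be polled repeatedly, e.g. inside `select!`. A sketch:
async fn _poll_sleep_in_loop() {
    let sleep = tokio::time::sleep(std::time::Duration::from_millis(10));
    tokio::pin!(sleep); // stack-pins; `Box::pin(sleep)` would also work
    loop {
        tokio::select! {
            _ = &mut sleep => break,
        }
    }
}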
async_assert_fn!(tokio::sync::watch::Receiver<u8>::changed(_): Send & Sync);
async_assert_fn!(tokio::sync::watch::Sender<u8>::closed(_): Send & Sync);
async_assert_fn!(tokio::sync::watch::Sender<Cell<u8>>::closed(_): !Send & !Sync);
async_assert_fn!(tokio::sync::watch::Sender<Rc<u8>>::closed(_): !Send & !Sync);
assert_value!(tokio::io::BufReader<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::BufStream<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::BufWriter<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::DuplexStream: Send & Sync & Unpin);
assert_value!(tokio::io::Empty: Send & Sync & Unpin);
assert_value!(tokio::io::Interest: Send & Sync & Unpin);
assert_value!(tokio::io::Lines<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::ReadBuf<'_>: Send & Sync & Unpin);
assert_value!(tokio::io::ReadHalf<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::Ready: Send & Sync & Unpin);
assert_value!(tokio::io::Repeat: Send & Sync & Unpin);
assert_value!(tokio::io::Sink: Send & Sync & Unpin);
assert_value!(tokio::io::Split<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::Stderr: Send & Sync & Unpin);
assert_value!(tokio::io::Stdin: Send & Sync & Unpin);
assert_value!(tokio::io::Stdout: Send & Sync & Unpin);
assert_value!(tokio::io::Take<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::WriteHalf<TcpStream>: Send & Sync & Unpin);
async_assert_fn!(tokio::io::copy(&mut TcpStream, &mut TcpStream): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::copy_bidirectional(&mut TcpStream, &mut TcpStream): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::copy_buf(&mut tokio::io::BufReader<TcpStream>, &mut TcpStream): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::empty(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::repeat(u8): Send & Sync & Unpin);
async_assert_fn!(tokio::io::sink(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::split(TcpStream): Send & Sync & Unpin);
async_assert_fn!(tokio::io::stderr(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::stdin(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::stdout(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::Split<tokio::io::BufReader<TcpStream>>::next_segment(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::Lines<tokio::io::BufReader<TcpStream>>::next_line(_): Send & Sync & !Unpin);
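// The `ReadHalf`/`WriteHalf` rows above matter because the two halves are
// usually moved onto separate tasks, which requires `Send`. A usage sketch:
async fn _split_sketch(stream: tokio::net::TcpStream) {
    use tokio::io::{AsyncReadExt, AsyncWriteExt};
    let (mut rd, mut wr) = tokio::io::split(stream);
    let reader = tokio::spawn(async move {
        let mut buf = [0u8; 1024];
        let _ = rd.read(&mut buf).await;
    });
    let writer = tokio::spawn(async move {
        let _ = wr.write_all(b"ping").await;
    });
    let _ = tokio::join!(reader, writer);
}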
async_assert_fn!(tokio::io::AsyncBufReadExt::read_until(&mut BoxAsyncRead, u8, &mut Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncBufReadExt::read_line(&mut BoxAsyncRead, &mut String): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncReadExt::read(&mut BoxAsyncRead, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_buf(&mut BoxAsyncRead, &mut Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncReadExt::read_exact(&mut BoxAsyncRead, &mut [u8]): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncReadExt::read_u8(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i8(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u16(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i16(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u32(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i32(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u64(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i64(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u128(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i128(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u16_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i16_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u128_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i128_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_to_end(&mut BoxAsyncRead, &mut Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncReadExt::read_to_string(&mut BoxAsyncRead, &mut String): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncSeekExt::seek(&mut BoxAsyncSeek, SeekFrom): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncSeekExt::stream_position(&mut BoxAsyncSeek): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write(&mut BoxAsyncWrite, &[u8]): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_vectored(&mut BoxAsyncWrite, _): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_buf(&mut BoxAsyncWrite, &mut bytes::Bytes): Send
& Sync
& !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_all_buf(&mut BoxAsyncWrite, &mut bytes::Bytes): Send
& Sync
& !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_all(&mut BoxAsyncWrite, &[u8]): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u8(&mut BoxAsyncWrite, u8): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i8(&mut BoxAsyncWrite, i8): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u16(&mut BoxAsyncWrite, u16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i16(&mut BoxAsyncWrite, i16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u32(&mut BoxAsyncWrite, u32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i32(&mut BoxAsyncWrite, i32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u64(&mut BoxAsyncWrite, u64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i64(&mut BoxAsyncWrite, i64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u128(&mut BoxAsyncWrite, u128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i128(&mut BoxAsyncWrite, i128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u16_le(&mut BoxAsyncWrite, u16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i16_le(&mut BoxAsyncWrite, i16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u32_le(&mut BoxAsyncWrite, u32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i32_le(&mut BoxAsyncWrite, i32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u64_le(&mut BoxAsyncWrite, u64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i64_le(&mut BoxAsyncWrite, i64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u128_le(&mut BoxAsyncWrite, u128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i128_le(&mut BoxAsyncWrite, i128): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncWriteExt::flush(&mut BoxAsyncWrite): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::shutdown(&mut BoxAsyncWrite): Send & Sync & !Unpin);
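// All of the extension-trait rows above follow one rule: the future returned
// by a method on `&mut R` is `Send`/`Sync` exactly when `R` itself is, and it
// is never `Unpin`. That is what makes bounds like the following compile; a
// sketch:
fn _spawn_writer<W>(mut w: W) -> tokio::task::JoinHandle<std::io::Result<()>>
where
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    use tokio::io::AsyncWriteExt;
    // `write_all` yields a `Send` future here only because `W: Send`.
    tokio::spawn(async move { w.write_all(b"hello").await })
}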
async_assert_fn!(tokio::sync::OnceCell<u8>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = u8> + Send + Sync>>): Send & Sync);
async_assert_fn!(tokio::sync::OnceCell<u8>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = u8> + Send>>): Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<u8>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = u8>>>): !Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<Cell<u8>>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = Cell<u8>> + Send + Sync>>): !Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<Cell<u8>>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = Cell<u8>> + Send>>): !Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<Cell<u8>>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = Cell<u8>>>>): !Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<Rc<u8>>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = Rc<u8>> + Send + Sync>>): !Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<Rc<u8>>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = Rc<u8>> + Send>>): !Send & !Sync);
async_assert_fn!(tokio::sync::OnceCell<Rc<u8>>::get_or_init(
_, fn() -> Pin<Box<dyn Future<Output = Rc<u8>>>>): !Send & !Sync);
assert_value!(tokio::sync::OnceCell<u8>: Send & Sync);
assert_value!(tokio::sync::OnceCell<Cell<u8>>: Send & !Sync);
assert_value!(tokio::sync::OnceCell<Rc<u8>>: !Send & !Sync);
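// The `OnceCell` rows encode that a shared cell may be initialized on one
// thread and read from another, hence it is `Sync` only for `T: Send + Sync`.
// A usage sketch (taking the cell by reference to stay self-contained):
async fn _config(cell: &tokio::sync::OnceCell<String>) -> &str {
    cell.get_or_init(|| async { "loaded".to_string() })
        .await
        .as_str()
}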
#[cfg(unix)]
mod unix_asyncfd {
use super::*;
use tokio::io::unix::*;
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSync<()>): Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSend<()>): Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFuture<()>): !Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSync<()>): !Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSend<()>): !Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFuture<()>): !Send & !Sync);
async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync);
assert_value!(tokio::task::LocalSet: !Send & !Sync);
struct ImplsFd<T> {
_t: T,
}
impl<T> std::os::unix::io::AsRawFd for ImplsFd<T> {
fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
unreachable!()
}
}
async_assert_fn!(tokio::time::advance(Duration): Send & Sync);
async_assert_fn!(tokio::time::sleep(Duration): Send & Sync);
async_assert_fn!(tokio::time::sleep_until(Instant): Send & Sync);
async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSync<()>): Send & Sync);
async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSend<()>): Send & !Sync);
async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Send & !Sync);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSync<()>): Send & Sync);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSend<()>): Send & !Sync);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Send & !Sync);
async_assert_fn!(tokio::time::Interval::tick(_): Send & Sync);
assert_value!(tokio::time::Interval: Unpin);
async_assert_fn!(tokio::time::sleep(Duration): !Unpin);
async_assert_fn!(tokio::time::sleep_until(Instant): !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Unpin);
async_assert_fn!(tokio::time::Interval::tick(_): !Unpin);
async_assert_fn!(tokio::io::AsyncBufReadExt::read_until(&mut BoxAsyncRead, u8, &mut Vec<u8>): !Unpin);
async_assert_fn!(tokio::io::AsyncBufReadExt::read_line(&mut BoxAsyncRead, &mut String): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read(&mut BoxAsyncRead, &mut [u8]): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_exact(&mut BoxAsyncRead, &mut [u8]): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u8(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i8(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u16(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i16(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u32(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i32(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u64(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i64(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u128(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i128(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u16_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i16_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u32_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i32_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u64_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i64_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u128_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i128_le(&mut BoxAsyncRead): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_to_end(&mut BoxAsyncRead, &mut Vec<u8>): !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_to_string(&mut BoxAsyncRead, &mut String): !Unpin);
async_assert_fn!(tokio::io::AsyncSeekExt::seek(&mut BoxAsyncSeek, SeekFrom): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write(&mut BoxAsyncWrite, &[u8]): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_all(&mut BoxAsyncWrite, &[u8]): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u8(&mut BoxAsyncWrite, u8): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i8(&mut BoxAsyncWrite, i8): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u16(&mut BoxAsyncWrite, u16): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i16(&mut BoxAsyncWrite, i16): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u32(&mut BoxAsyncWrite, u32): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i32(&mut BoxAsyncWrite, i32): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u64(&mut BoxAsyncWrite, u64): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i64(&mut BoxAsyncWrite, i64): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u128(&mut BoxAsyncWrite, u128): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i128(&mut BoxAsyncWrite, i128): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u16_le(&mut BoxAsyncWrite, u16): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i16_le(&mut BoxAsyncWrite, i16): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u32_le(&mut BoxAsyncWrite, u32): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i32_le(&mut BoxAsyncWrite, i32): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u64_le(&mut BoxAsyncWrite, u64): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i64_le(&mut BoxAsyncWrite, i64): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u128_le(&mut BoxAsyncWrite, u128): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i128_le(&mut BoxAsyncWrite, i128): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::flush(&mut BoxAsyncWrite): !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::shutdown(&mut BoxAsyncWrite): !Unpin);
assert_value!(AsyncFd<ImplsFd<YY>>: Send & Sync & Unpin);
assert_value!(AsyncFd<ImplsFd<YN>>: Send & !Sync & Unpin);
assert_value!(AsyncFd<ImplsFd<NN>>: !Send & !Sync & Unpin);
assert_value!(AsyncFdReadyGuard<'_, ImplsFd<YY>>: Send & Sync & Unpin);
assert_value!(AsyncFdReadyGuard<'_, ImplsFd<YN>>: !Send & !Sync & Unpin);
assert_value!(AsyncFdReadyGuard<'_, ImplsFd<NN>>: !Send & !Sync & Unpin);
assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<YY>>: Send & Sync & Unpin);
assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<YN>>: Send & !Sync & Unpin);
assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<NN>>: !Send & !Sync & Unpin);
assert_value!(TryIoError: Send & Sync & Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YY>>::readable(_): Send & Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YY>>::readable_mut(_): Send & Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YY>>::writable(_): Send & Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YY>>::writable_mut(_): Send & Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YN>>::readable(_): !Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YN>>::readable_mut(_): Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YN>>::writable(_): !Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<YN>>::writable_mut(_): Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<NN>>::readable(_): !Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<NN>>::readable_mut(_): !Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<NN>>::writable(_): !Send & !Sync & !Unpin);
async_assert_fn!(AsyncFd<ImplsFd<NN>>::writable_mut(_): !Send & !Sync & !Unpin);
}
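// A usage sketch for `AsyncFd`, assuming the wrapped type holds a
// non-blocking descriptor (as the `ImplsFd` probe type above stands in for):
#[cfg(unix)]
async fn _read_ready<T: std::os::unix::io::AsRawFd>(
    fd: &tokio::io::unix::AsyncFd<T>,
) -> std::io::Result<()> {
    let mut guard = fd.readable().await?;
    // `try_io` clears the readiness state when the closure reports `WouldBlock`.
    match guard.try_io(|_inner| Ok(())) {
        Ok(result) => result,
        Err(_would_block) => Ok(()), // spurious readiness; a real caller would retry
    }
}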

View File

@ -13,7 +13,6 @@ use std::{
task::{Context, Waker},
};
use nix::errno::Errno;
use nix::unistd::{close, read, write};
use futures::{poll, FutureExt};
@ -56,10 +55,6 @@ impl TestWaker {
}
}
fn is_blocking(e: &nix::Error) -> bool {
Some(Errno::EAGAIN) == e.as_errno()
}
#[derive(Debug)]
struct FileDescriptor {
fd: RawFd,
@ -73,11 +68,7 @@ impl AsRawFd for FileDescriptor {
impl Read for &FileDescriptor {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match read(self.fd, buf) {
Ok(n) => Ok(n),
Err(e) if is_blocking(&e) => Err(ErrorKind::WouldBlock.into()),
Err(e) => Err(io::Error::new(ErrorKind::Other, e)),
}
read(self.fd, buf).map_err(io::Error::from)
}
}
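// The simplification above relies on the error conversion mapping EAGAIN to
// `io::ErrorKind::WouldBlock`, the same classification the removed
// `is_blocking` helper performed by hand. The underlying property, in std
// terms:
fn _eagain_is_would_block() {
    let e = std::io::Error::from_raw_os_error(libc::EAGAIN);
    assert_eq!(e.kind(), std::io::ErrorKind::WouldBlock);
}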
@ -89,11 +80,7 @@ impl Read for FileDescriptor {
impl Write for &FileDescriptor {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match write(self.fd, buf) {
Ok(n) => Ok(n),
Err(e) if is_blocking(&e) => Err(ErrorKind::WouldBlock.into()),
Err(e) => Err(io::Error::new(ErrorKind::Other, e)),
}
write(self.fd, buf).map_err(io::Error::from)
}
fn flush(&mut self) -> io::Result<()> {

View File

@ -0,0 +1,13 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", unix))]
use tokio::process::Command;
#[tokio::test]
async fn arg0() {
let mut cmd = Command::new("sh");
cmd.arg0("test_string").arg("-c").arg("echo $0");
let output = cmd.output().await.unwrap();
assert_eq!(output.stdout, b"test_string\n");
}
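// `arg0` overrides argv[0], which `sh` then reports as `$0`. Tokio's builder
// mirrors the std API; the blocking equivalent of the test above would be:
#[cfg(unix)]
fn _arg0_std_equivalent() -> std::io::Result<()> {
    use std::os::unix::process::CommandExt;
    let output = std::process::Command::new("sh")
        .arg0("test_string")
        .arg("-c")
        .arg("echo $0")
        .output()?;
    assert_eq!(output.stdout, b"test_string\n");
    Ok(())
}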

View File

@ -211,7 +211,7 @@ impl Read for &'_ File {
assert!(dst.len() >= data.len());
assert!(dst.len() <= 16 * 1024, "actual = {}", dst.len()); // max buffer
&mut dst[..data.len()].copy_from_slice(&data);
dst[..data.len()].copy_from_slice(&data);
Ok(data.len())
}
Some(Read(Err(e))) => Err(e),

View File

@ -87,9 +87,12 @@ async fn try_send_recv_never_block() -> io::Result<()> {
dgram1.writable().await.unwrap();
match dgram1.try_send(payload) {
Err(err) => match err.kind() {
io::ErrorKind::WouldBlock | io::ErrorKind::Other => break,
_ => unreachable!("unexpected error {:?}", err),
Err(err) => match (err.kind(), err.raw_os_error()) {
(io::ErrorKind::WouldBlock, _) => break,
(_, Some(libc::ENOBUFS)) => break,
_ => {
panic!("unexpected error {:?}", err);
}
},
Ok(len) => {
assert_eq!(len, payload.len());
@ -291,9 +294,12 @@ async fn try_recv_buf_never_block() -> io::Result<()> {
dgram1.writable().await.unwrap();
match dgram1.try_send(payload) {
Err(err) => match err.kind() {
io::ErrorKind::WouldBlock | io::ErrorKind::Other => break,
_ => unreachable!("unexpected error {:?}", err),
Err(err) => match (err.kind(), err.raw_os_error()) {
(io::ErrorKind::WouldBlock, _) => break,
(_, Some(libc::ENOBUFS)) => break,
_ => {
panic!("unexpected error {:?}", err);
}
},
Ok(len) => {
assert_eq!(len, payload.len());
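// The rewritten match inspects `raw_os_error` because `ENOBUFS` surfaces as
// the catch-all `io::ErrorKind::Other`, which the old `kind()`-only check
// could not distinguish from genuinely unexpected errors. A hypothetical
// helper capturing the new logic:
fn _is_transient_send_error(err: &io::Error) -> bool {
    err.kind() == io::ErrorKind::WouldBlock || err.raw_os_error() == Some(libc::ENOBUFS)
}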