chore: update CI to clippy 1.88 (#7452)

Aaron Chen authored 2025-07-09 14:34:24 +08:00 · committed by GitHub
parent aff24dfbeb · commit a7896d07f1
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
28 changed files with 63 additions and 84 deletions

View File

@ -19,7 +19,7 @@ env:
rust_nightly: nightly-2025-01-25
# Pin a specific miri version
rust_miri_nightly: nightly-2025-06-02
rust_clippy: '1.77'
rust_clippy: '1.88'
# When updating this, also update:
# - README.md
# - tokio/README.md

View File

@ -149,7 +149,7 @@ When updating this, also update:
-->
```
cargo +1.77 clippy --all --tests --all-features
cargo +1.88 clippy --all --tests --all-features
```
When building documentation, a simple `cargo doc` is not sufficient. To produce

View File

@ -145,7 +145,7 @@ impl ChunkReader {
fn new(chunk_size: usize, service_interval: Duration) -> Self {
let mut service_intervals = interval(service_interval);
service_intervals.set_missed_tick_behavior(MissedTickBehavior::Burst);
let data: Vec<u8> = std::iter::repeat(0).take(chunk_size).collect();
let data: Vec<u8> = std::iter::repeat_n(0, chunk_size).collect();
Self {
data,
service_intervals,
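
This hunk replaces `std::iter::repeat(0).take(chunk_size)` with `std::iter::repeat_n(0, chunk_size)`, which newer clippy suggests now that `repeat_n` is stable (the `manual_repeat_n` lint, if I recall correctly). A minimal sketch of the equivalence, outside the benchmark code:

```rust
fn main() {
    let chunk_size = 4;

    // Older form: an infinite iterator cut down with `take`.
    let a: Vec<u8> = std::iter::repeat(0).take(chunk_size).collect();

    // Newer form: the length is part of the iterator itself, so it can
    // also report an exact `size_hint`.
    let b: Vec<u8> = std::iter::repeat_n(0u8, chunk_size).collect();

    assert_eq!(a, b);
    assert_eq!(b, vec![0, 0, 0, 0]);
}
```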

View File

@ -82,9 +82,7 @@ async fn respond(req: Request<()>) -> Result<Response<String>, Box<dyn Error>> {
String::new()
}
};
let response = response
.body(body)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
let response = response.body(body).map_err(io::Error::other)?;
Ok(response)
}
@ -160,7 +158,7 @@ impl Decoder for Http {
let mut r = httparse::Request::new(&mut parsed_headers);
let status = r.parse(src).map_err(|e| {
let msg = format!("failed to parse http request: {e:?}");
io::Error::new(io::ErrorKind::Other, msg)
io::Error::other(msg)
})?;
let amt = match status {
@ -180,8 +178,7 @@ impl Decoder for Http {
headers[i] = Some((k, v));
}
let method = http::Method::try_from(r.method.unwrap())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let method = http::Method::try_from(r.method.unwrap()).map_err(io::Error::other)?;
(
method,
@ -191,10 +188,7 @@ impl Decoder for Http {
)
};
if version != 1 {
return Err(io::Error::new(
io::ErrorKind::Other,
"only HTTP/1.1 accepted",
));
return Err(io::Error::other("only HTTP/1.1 accepted"));
}
let data = src.split_to(amt).freeze();
let mut ret = Request::builder();
@ -209,13 +203,11 @@ impl Decoder for Http {
None => break,
};
let value = HeaderValue::from_bytes(data.slice(v.0..v.1).as_ref())
.map_err(|_| io::Error::new(io::ErrorKind::Other, "header decode error"))?;
.map_err(|_| io::Error::other("header decode error"))?;
ret = ret.header(&data[k.0..k.1], value);
}
let req = ret
.body(())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let req = ret.body(()).map_err(io::Error::other)?;
Ok(Some(req))
}
}
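
Every change in this file is the same mechanical rewrite: `io::Error::new(io::ErrorKind::Other, e)` becomes `io::Error::other(e)`. `io::Error::other` is a stable constructor that sets `ErrorKind::Other` for you, and newer clippy flags the long form (the `io_other_error` lint, as far as I can tell). A standalone sketch of the two spellings, using a hypothetical `parse_version` helper:

```rust
use std::io;

fn parse_version(input: &str) -> io::Result<u8> {
    // Old spelling: spell out the kind and wrap the error by hand.
    // input.parse().map_err(|e| io::Error::new(io::ErrorKind::Other, e))

    // New spelling: `io::Error::other` accepts anything that converts
    // into `Box<dyn Error + Send + Sync>`, so the closure disappears.
    input.parse().map_err(io::Error::other)
}

fn main() {
    assert_eq!(parse_version("1").unwrap(), 1);
    let err = parse_version("not a number").unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::Other);
}
```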

View File

@ -25,7 +25,7 @@ async fn feed_cat(mut cat: Child, n: usize) -> io::Result<ExitStatus> {
// Produce n lines on the child's stdout.
let write = async {
for i in 0..n {
let bytes = format!("line {}\n", i).into_bytes();
let bytes = format!("line {i}\n").into_bytes();
stdin.write_all(&bytes).await.unwrap();
}
@ -52,7 +52,7 @@ async fn feed_cat(mut cat: Child, n: usize) -> io::Result<ExitStatus> {
(false, 0) => panic!("broken pipe"),
(true, n) if n != 0 => panic!("extraneous data"),
_ => {
let expected = format!("line {}", num_lines);
let expected = format!("line {num_lines}");
assert_eq!(expected, data);
}
};
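
This test only changes how format strings are written: `format!("line {}\n", i)` becomes `format!("line {i}\n")`. Captured identifiers in format strings have been available since Rust 1.58, and this sweep appears to come from `uninlined_format_args` being promoted to a warn-by-default style lint around this clippy release. The same rewrite shows up in several files below (`write!`, `println!`, `panic!`). A quick illustration:

```rust
fn main() {
    let i = 7;
    let num_lines = 7;

    // Old form: positional argument passed separately.
    let old = format!("line {}", i);

    // New form: the variable is captured directly in the format string.
    let new = format!("line {i}");

    assert_eq!(old, new);
    assert_eq!(format!("line {num_lines}"), "line 7");
}
```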

View File

@ -53,7 +53,7 @@ pub enum BroadcastStreamRecvError {
impl fmt::Display for BroadcastStreamRecvError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
BroadcastStreamRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
BroadcastStreamRecvError::Lagged(amt) => write!(f, "channel lagged by {amt}"),
}
}
}

View File

@ -12,7 +12,7 @@ async fn watch_stream_message_not_twice() {
let mut counter = 0;
let mut stream = WatchStream::new(rx).map(move |payload| {
println!("{}", payload);
println!("{payload}");
if payload == "goodbye" {
counter += 1;
}

View File

@ -141,11 +141,9 @@ impl Decoder for AnyDelimiterCodec {
// there's no max_length set, we'll read to the end of the buffer.
let read_to = cmp::min(self.max_length.saturating_add(1), buf.len());
let new_chunk_offset = buf[self.next_index..read_to].iter().position(|b| {
self.seek_delimiters
.iter()
.any(|delimiter| *b == *delimiter)
});
let new_chunk_offset = buf[self.next_index..read_to]
.iter()
.position(|b| self.seek_delimiters.contains(b));
match (self.is_discarding, new_chunk_offset) {
(true, Some(offset)) => {
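
Here the closure `|delimiter| *b == *delimiter` passed to `Iterator::any` is folded into `contains`, which reads better and is what recent clippy suggests (I believe via the `manual_contains` lint). A reduced sketch of the same search, with `seek_delimiters` as a plain `Vec<u8>` purely for illustration:

```rust
fn main() {
    let seek_delimiters: Vec<u8> = vec![b',', b';', b'\n'];
    let buf = b"abc;def";

    // Old form: compare against every delimiter by hand.
    let old = buf
        .iter()
        .position(|b| seek_delimiters.iter().any(|delimiter| *b == *delimiter));

    // New form: `contains` performs the same membership test.
    let new = buf.iter().position(|b| seek_delimiters.contains(b));

    assert_eq!(old, new);
    assert_eq!(new, Some(3));
}
```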

View File

@ -40,7 +40,7 @@ impl Listener for tokio::net::TcpListener {
}
fn local_addr(&self) -> Result<Self::Addr> {
self.local_addr().map(Into::into)
self.local_addr()
}
}

View File

@ -13,6 +13,6 @@ impl Listener for tokio::net::UnixListener {
}
fn local_addr(&self) -> Result<Self::Addr> {
self.local_addr().map(Into::into)
self.local_addr()
}
}
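
In both `Listener` impls above, `Self::Addr` is already the same `SocketAddr` type that the inner `local_addr()` call returns, so the `.map(Into::into)` was an identity conversion; dropping it is likely what clippy's `useless_conversion` lint asked for. A toy illustration outside the trait, with a hypothetical `local_addr` function:

```rust
use std::net::SocketAddr;

fn local_addr() -> std::io::Result<SocketAddr> {
    Ok("127.0.0.1:8080".parse().unwrap())
}

fn main() -> std::io::Result<()> {
    // Old form: convert `SocketAddr` into `SocketAddr` -- an identity
    // conversion that clippy flags.
    let old: SocketAddr = local_addr().map(Into::into)?;

    // New form: return the result as-is.
    let new: SocketAddr = local_addr()?;

    assert_eq!(old, new);
    Ok(())
}
```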

View File

@ -18,16 +18,16 @@
//! Those invariants shall be true at any time.
//!
//! 1. A node that has no parents and no handles can no longer be cancelled.
//! This is important during both cancellation and refcounting.
//! This is important during both cancellation and refcounting.
//!
//! 2. If node B *is* or *was* a child of node A, then node B was created *after* node A.
//! This is important for deadlock safety, as it is used for lock order.
//! Node B can only become the child of node A in two ways:
//! - being created with `child_node()`, in which case it is trivially true that
//! node A already existed when node B was created
//! - being moved A->C->B to A->B because node C was removed in `decrease_handle_refcount()`
//! or `cancel()`. In this case the invariant still holds, as B was younger than C, and C
//! was younger than A, therefore B is also younger than A.
//! This is important for deadlock safety, as it is used for lock order.
//! Node B can only become the child of node A in two ways:
//! - being created with `child_node()`, in which case it is trivially true that
//! node A already existed when node B was created
//! - being moved A->C->B to A->B because node C was removed in `decrease_handle_refcount()`
//! or `cancel()`. In this case the invariant still holds, as B was younger than C, and C
//! was younger than A, therefore B is also younger than A.
//!
//! 3. If two nodes are both unlocked and node A is the parent of node B, then node B is a child of
//! node A. It is important to always restore that invariant before dropping the lock of a node.
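
This hunk, and the similar doc-comment hunks in the timing wheel, `fs`, `lib.rs`, `net`, `ScheduledIo`, and `JoinSet` files below, change only leading whitespace: continuation lines of numbered and bulleted doc items are de-indented so they line up with the item text, which is why the before/after lines read identically here. To the best of my knowledge this is the adjustment clippy's `doc_overindented_list_items` lint asks for. A small hypothetical doc comment shows the shape:

```rust
// Over-indented continuation (what newer clippy warns about):
//
//   /// 1. A node that has no parents and no handles can no longer be cancelled.
//   ///        This is important during both cancellation and refcounting.
//
// Aligned with the item text (what this commit switches to):

/// 1. A node that has no parents and no handles can no longer be cancelled.
///    This is important during both cancellation and refcounting.
struct Invariants;

fn main() {
    let _ = Invariants;
}
```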

View File

@ -51,10 +51,7 @@ impl<T: Stack> Level<T> {
pub(crate) fn next_expiration(&self, now: u64) -> Option<Expiration> {
// Use the `occupied` bit field to get the index of the next slot that
// needs to be processed.
let slot = match self.next_occupied_slot(now) {
Some(slot) => slot,
None => return None,
};
let slot = self.next_occupied_slot(now)?;
// From the slot index, calculate the `Instant` at which it needs to be
// processed. This value *must* be in the future with respect to `now`.
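
The `match` that returned early on `None` is replaced by the `?` operator, which does the same thing on an `Option` inside a function that returns `Option` (clippy's `question_mark` lint nudges toward this). A reduced sketch with a hypothetical `next_occupied_slot` and plain `usize` values in place of the wheel's types:

```rust
fn next_occupied_slot(now: u64) -> Option<usize> {
    if now % 2 == 0 { Some(3) } else { None }
}

fn next_expiration(now: u64) -> Option<usize> {
    // Old form:
    // let slot = match next_occupied_slot(now) {
    //     Some(slot) => slot,
    //     None => return None,
    // };

    // New form: `?` on an `Option` returns `None` early in exactly
    // the same way.
    let slot = next_occupied_slot(now)?;
    Some(slot + 1)
}

fn main() {
    assert_eq!(next_expiration(2), Some(4));
    assert_eq!(next_expiration(3), None);
}
```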

View File

@ -73,8 +73,8 @@ where
/// # Arguments
///
/// * `when`: is the instant at which the entry should be fired. It is
/// represented as the number of milliseconds since the creation
/// of the timing wheel.
/// represented as the number of milliseconds since the creation
/// of the timing wheel.
///
/// * `item`: The item to insert into the wheel.
///

View File

@ -17,7 +17,6 @@ use std::path::Path;
/// # Ok(())
/// # }
/// ```
pub async fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<u64, std::io::Error> {
let from = from.as_ref().to_owned();
let to = to.as_ref().to_owned();

View File

@ -22,9 +22,9 @@ use std::path::Path;
/// limited to just these cases:
///
/// * If any directory in the path specified by `path` does not already exist
/// and it could not be created otherwise. The specific error conditions for
/// when a directory is being created (after it is determined to not exist) are
/// outlined by [`fs::create_dir`].
/// and it could not be created otherwise. The specific error conditions for
/// when a directory is being created (after it is determined to not exist) are
/// outlined by [`fs::create_dir`].
///
/// Notable exception is made for situations where any of the directories
/// specified in the `path` could not be created as it was being created concurrently.

View File

@ -316,15 +316,15 @@
//!
//! - `full`: Enables all features listed below except `test-util` and `tracing`.
//! - `rt`: Enables `tokio::spawn`, the current-thread scheduler,
//! and non-scheduler utilities.
//! and non-scheduler utilities.
//! - `rt-multi-thread`: Enables the heavier, multi-threaded, work-stealing scheduler.
//! - `io-util`: Enables the IO based `Ext` traits.
//! - `io-std`: Enable `Stdout`, `Stdin` and `Stderr` types.
//! - `net`: Enables `tokio::net` types such as `TcpStream`, `UnixStream` and
//! `UdpSocket`, as well as (on Unix-like systems) `AsyncFd` and (on
//! FreeBSD) `PollAio`.
//! `UdpSocket`, as well as (on Unix-like systems) `AsyncFd` and (on
//! FreeBSD) `PollAio`.
//! - `time`: Enables `tokio::time` types and allows the schedulers to enable
//! the built in timer.
//! the built in timer.
//! - `process`: Enables `tokio::process` types.
//! - `macros`: Enables `#[tokio::main]` and `#[tokio::test]` macros.
//! - `sync`: Enables all `tokio::sync` types.
@ -332,10 +332,10 @@
//! - `fs`: Enables `tokio::fs` types.
//! - `test-util`: Enables testing based infrastructure for the Tokio runtime.
//! - `parking_lot`: As a potential optimization, use the `_parking_lot_` crate's
//! synchronization primitives internally. Also, this
//! dependency is necessary to construct some of our primitives
//! in a `const` context. `MSRV` may increase according to the
//! `_parking_lot_` release in use.
//! synchronization primitives internally. Also, this
//! dependency is necessary to construct some of our primitives
//! in a `const` context. `MSRV` may increase according to the
//! `_parking_lot_` release in use.
//!
//! _Note: `AsyncRead` and `AsyncWrite` traits do not require any features and are
//! always available._

View File

@ -10,9 +10,9 @@
//! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP
//! * [`UdpSocket`] provides functionality for communication over UDP
//! * [`UnixListener`] and [`UnixStream`] provide functionality for communication over a
//! Unix Domain Stream Socket **(available on Unix only)**
//! Unix Domain Stream Socket **(available on Unix only)**
//! * [`UnixDatagram`] provides functionality for communication
//! over Unix Domain Datagram Socket **(available on Unix only)**
//! over Unix Domain Datagram Socket **(available on Unix only)**
//! * [`tokio::net::unix::pipe`] for FIFO pipes **(available on Unix only)**
//! * [`tokio::net::windows::named_pipe`] for Named Pipes **(available on Windows only)**
//!

View File

@ -198,13 +198,13 @@ impl<T: IntoRawFd> From<T> for Pipe {
}
}
impl<'a> io::Read for &'a Pipe {
impl io::Read for &Pipe {
fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
(&self.fd).read(bytes)
}
}
impl<'a> io::Write for &'a Pipe {
impl io::Write for &Pipe {
fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
(&self.fd).write(bytes)
}
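
`impl<'a> io::Read for &'a Pipe` becomes `impl io::Read for &Pipe`: the named lifetime can be elided with no change in meaning, and clippy's `needless_lifetimes` lint now covers impl headers like this, as far as I know. A standalone sketch with a hypothetical wrapper type (not tokio's actual `Pipe`):

```rust
use std::io::{self, Read};

struct Pipe {
    data: Vec<u8>,
}

// Old header: `impl<'a> io::Read for &'a Pipe { ... }`
// New header: the lifetime is elided; the impl is unchanged.
impl io::Read for &Pipe {
    fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
        let n = bytes.len().min(self.data.len());
        bytes[..n].copy_from_slice(&self.data[..n]);
        Ok(n)
    }
}

fn main() -> io::Result<()> {
    let pipe = Pipe { data: b"hello".to_vec() };
    let mut buf = [0u8; 5];
    // Reading through a shared reference, which is what the impl allows.
    (&pipe).read_exact(&mut buf)?;
    assert_eq!(&buf, b"hello");
    Ok(())
}
```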

View File

@ -203,7 +203,7 @@ impl ScheduledIo {
///
/// # Arguments
/// - `tick`: whether setting the tick or trying to clear readiness for a
/// specific tick.
/// specific tick.
/// - `f`: a closure returning a new readiness value given the previous
/// readiness.
pub(super) fn set_readiness(&self, tick_op: Tick, f: impl Fn(Ready) -> Ready) {

View File

@ -502,7 +502,7 @@ impl Semaphore {
.as_ref()
.map_or(true, |waker| !waker.will_wake(cx.waker()))
{
old_waker = std::mem::replace(waker, Some(cx.waker().clone()));
old_waker = waker.replace(cx.waker().clone());
}
});
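
Swapping `std::mem::replace(waker, Some(new))` for `waker.replace(new)`: `Option::replace` stores the new value and hands back the old `Option` in one call, so the explicit `Some` wrapping at the call site goes away (presumably prompted by one of clippy's `mem_replace_*` lints). The same rewrite appears in the oneshot `Receiver` and `Notified` hunks below. A minimal sketch:

```rust
fn main() {
    let mut old_style: Option<u32> = Some(1);
    let mut new_style: Option<u32> = Some(1);

    // Old form: `mem::replace` with an explicit `Some` wrapper.
    let prev_old = std::mem::replace(&mut old_style, Some(2));

    // New form: `Option::replace` does the same swap and returns the
    // previous contents.
    let prev_new = new_style.replace(2);

    assert_eq!(prev_old, prev_new);
    assert_eq!(old_style, new_style);
    assert_eq!(new_style, Some(2));
}
```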

View File

@ -1273,10 +1273,7 @@ impl<T> Receiver<T> {
match (*ptr).waker {
Some(ref w) if w.will_wake(waker) => {}
_ => {
old_waker = std::mem::replace(
&mut (*ptr).waker,
Some(waker.clone()),
);
old_waker = (*ptr).waker.replace(waker.clone());
}
}

View File

@ -1109,7 +1109,7 @@ impl Notified<'_> {
None => true,
};
if should_update {
old_waker = std::mem::replace(&mut *v, Some(waker.clone()));
old_waker = (*v).replace(waker.clone());
}
}
});

View File

@ -1094,7 +1094,7 @@ impl<T> From<T> for RwLock<T> {
}
}
impl<T: ?Sized> Default for RwLock<T>
impl<T> Default for RwLock<T>
where
T: Default,
{
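
The `?Sized` relaxation is dropped from this impl because the `T: Default` bound already forces `T` to be sized (`Default::default()` returns `Self` by value), so `?Sized` had no effect; clippy flags this, possibly via `needless_maybe_sized`. A minimal sketch with a hypothetical wrapper in place of `RwLock`:

```rust
// `value` is the last field, so the struct itself may stay `?Sized`.
struct Wrapper<T: ?Sized> {
    value: T,
}

// Old header: `impl<T: ?Sized> Default for Wrapper<T> where T: Default`
// New header: the `?Sized` bound is dropped because `T: Default`
// already implies `T: Sized`.
impl<T> Default for Wrapper<T>
where
    T: Default,
{
    fn default() -> Self {
        Wrapper { value: T::default() }
    }
}

fn main() {
    let w: Wrapper<u32> = Wrapper::default();
    assert_eq!(w.value, 0);
}
```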

View File

@ -472,11 +472,11 @@ impl<T: 'static> JoinSet<T> {
/// This function returns:
///
/// * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is
/// available right now.
/// available right now.
/// * `Poll::Ready(Some(Ok(value)))` if one of the tasks in this `JoinSet` has completed.
/// The `value` is the return value of one of the tasks that completed.
/// The `value` is the return value of one of the tasks that completed.
/// * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been
/// aborted. The `err` is the `JoinError` from the panicked/aborted task.
/// aborted. The `err` is the `JoinError` from the panicked/aborted task.
/// * `Poll::Ready(None)` if the `JoinSet` is empty.
///
/// Note that this method may return `Poll::Pending` even if one of the tasks has completed.
@ -526,12 +526,12 @@ impl<T: 'static> JoinSet<T> {
/// This function returns:
///
/// * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is
/// available right now.
/// available right now.
/// * `Poll::Ready(Some(Ok((id, value))))` if one of the tasks in this `JoinSet` has completed.
/// The `value` is the return value of one of the tasks that completed, and
/// The `value` is the return value of one of the tasks that completed, and
/// `id` is the [task ID] of that task.
/// * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been
/// aborted. The `err` is the `JoinError` from the panicked/aborted task.
/// aborted. The `err` is the `JoinError` from the panicked/aborted task.
/// * `Poll::Ready(None)` if the `JoinSet` is empty.
///
/// Note that this method may return `Poll::Pending` even if one of the tasks has completed.

View File

@ -15,7 +15,7 @@ impl AsyncRead for R {
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
buf.put_slice(&[b'z']);
buf.put_slice(b"z");
Poll::Ready(Ok(()))
}
}
@ -68,12 +68,10 @@ fn method_delegation() {
assert_eq!(1, rw.read(&mut buf).await.unwrap());
assert_eq!(b'z', buf[0]);
assert_eq!(1, rw.write(&[b'x']).await.unwrap());
assert_eq!(1, rw.write(b"x").await.unwrap());
assert_eq!(
2,
rw.write_vectored(&[io::IoSlice::new(&[b'x'])])
.await
.unwrap()
rw.write_vectored(&[io::IoSlice::new(b"x")]).await.unwrap()
);
assert!(rw.is_write_vectored());
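
These test hunks replace hard-coded slices of byte literals such as `&[b'z']` with the equivalent byte-string literals `b"z"`, which is what newer clippy prefers (what looks like the `byte_char_slices` lint). The data is identical either way, as this sketch shows:

```rust
fn main() {
    // A slice of byte literals and a byte-string literal are the same
    // `&[u8]` data; clippy prefers the shorter byte-string spelling.
    let as_array: &[u8] = &[b'z'];
    let as_bytes: &[u8] = b"z";

    assert_eq!(as_array, as_bytes);

    // The same applies at call sites that take `&[u8]`:
    let slice = std::io::IoSlice::new(b"x");
    assert_eq!(slice.len(), 1);
}
```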

View File

@ -19,7 +19,7 @@ impl AsyncRead for RW {
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
buf.put_slice(&[b'z']);
buf.put_slice(b"z");
Poll::Ready(Ok(()))
}
}

View File

@ -17,7 +17,7 @@ impl AsyncRead for RW {
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
buf.put_slice(&[b'z']);
buf.put_slice(b"z");
Poll::Ready(Ok(()))
}
}
@ -101,12 +101,10 @@ fn method_delegation() {
assert_eq!(1, r.read(&mut buf).await.unwrap());
assert_eq!(b'z', buf[0]);
assert_eq!(1, w.write(&[b'x']).await.unwrap());
assert_eq!(1, w.write(b"x").await.unwrap());
assert_eq!(
2,
w.write_vectored(&[io::IoSlice::new(&[b'x'])])
.await
.unwrap()
w.write_vectored(&[io::IoSlice::new(b"x")]).await.unwrap()
);
assert!(w.is_write_vectored());

View File

@ -177,7 +177,7 @@ mod block_in_place_cases {
Ok(()) => {}
Err(err) if err.is_panic() => std::panic::resume_unwind(err.into_panic()),
Err(err) if err.is_cancelled() => panic!("task cancelled"),
Err(err) => panic!("{:?}", err),
Err(err) => panic!("{err:?}"),
}
}