ci: fix clippy warnings triggered under specific cfg (#7495)

This commit is contained in:
Motoyuki Kimura 2025-08-04 19:36:17 +09:00 committed by GitHub
parent 2403b91d75
commit 0922aa2a0b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 58 additions and 41 deletions

View File

@@ -832,6 +832,11 @@ jobs:
clippy:
name: clippy
runs-on: ubuntu-latest
strategy:
matrix:
rustflags:
- ""
- "--cfg tokio_unstable --cfg tokio_taskdump --cfg tokio_uring -Dwarnings"
steps:
- uses: actions/checkout@v4
- name: Install Rust ${{ env.rust_clippy }}
@@ -841,9 +846,10 @@ jobs:
components: clippy
- uses: Swatinem/rust-cache@v2
# Run clippy
- name: "clippy --all"
- name: "clippy --all ${{ matrix.rustflags }}"
run: cargo clippy --all --tests --all-features --no-deps
env:
RUSTFLAGS: ${{ matrix.rustflags }}
docs:
name: docs
runs-on: ${{ matrix.run.os }}

View File

@@ -507,6 +507,7 @@ impl File {
/// # Ok(())
/// # }
/// ```
#[allow(clippy::result_large_err)]
pub fn try_into_std(mut self) -> Result<StdFile, Self> {
match Arc::try_unwrap(self.std) {
Ok(file) => Ok(file),

View File

@@ -61,7 +61,7 @@ impl BacktraceSymbol {
let name = sym.name();
Self {
name: name.as_ref().map(|name| name.as_bytes().into()),
name_demangled: name.map(|name| format!("{}", name).into()),
name_demangled: name.map(|name| format!("{name}").into()),
addr: sym.addr().map(Address),
filename: sym.filename().map(From::from),
lineno: sym.lineno(),

View File

@@ -14,7 +14,7 @@ use std::{io, mem, task::Waker};
const DEFAULT_RING_SIZE: u32 = 256;
#[repr(usize)]
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum State {
Uninitialized = 0,
Initialized = 1,
@@ -22,8 +22,8 @@ enum State {
}
impl State {
fn as_usize(self) -> usize {
self as usize
fn as_usize(&self) -> usize {
*self as usize
}
fn from_usize(value: usize) -> Self {
@@ -96,10 +96,10 @@ impl UringContext {
ops.remove(idx);
}
Some(other) => {
panic!("unexpected lifecycle for slot {}: {:?}", idx, other);
panic!("unexpected lifecycle for slot {idx}: {other:?}");
}
None => {
panic!("no op at index {}", idx);
panic!("no op at index {idx}");
}
}
}
@@ -293,7 +293,7 @@ impl Handle {
// We can safely remove the entry from the slab, as it has already been completed.
ops.remove(index);
}
prev => panic!("Unexpected state: {:?}", prev),
prev => panic!("Unexpected state: {prev:?}"),
};
}
}

View File

@@ -243,7 +243,7 @@ impl HistogramBuilder {
}
pub(crate) fn legacy_mut(&mut self, f: impl Fn(&mut LegacyBuilder)) {
let legacy = self.legacy.get_or_insert_with(|| LegacyBuilder::default());
let legacy = self.legacy.get_or_insert_with(LegacyBuilder::default);
f(legacy);
}

View File

@@ -50,7 +50,7 @@ impl LogHistogram {
/// - If `bucket_offset` is greater than the specified number of buckets, `(n - p + 1) * 2^p`
fn from_n_p(n: u32, p: u32, bucket_offset: usize) -> Self {
assert!(n >= p, "{n} (n) must be at least as large as {p} (p)");
let num_buckets = ((n - p + 1) * 1 << p) as usize - bucket_offset;
let num_buckets = ((n - p + 1) << p) as usize - bucket_offset;
Self {
num_buckets,
p,
@@ -59,7 +59,7 @@
}
fn truncate_to_max_value(&self, max_value: u64) -> LogHistogram {
let mut hist = self.clone();
let mut hist = *self;
while hist.max_value() >= max_value {
hist.num_buckets -= 1;
}
@@ -79,11 +79,7 @@ impl LogHistogram {
pub(crate) fn value_to_bucket(&self, value: u64) -> usize {
let index = bucket_index(value, self.p);
let offset_bucket = if index < self.bucket_offset as u64 {
0
} else {
index - self.bucket_offset as u64
};
let offset_bucket = index.saturating_sub(self.bucket_offset as u64);
let max = self.num_buckets - 1;
offset_bucket.min(max as u64) as usize
}
@@ -180,7 +176,7 @@ impl LogHistogramBuilder {
assert!(max_error > 0.0, "max_error must be greater than 0");
assert!(max_error < 1.0, "max_error must be less than 1");
let mut p = 2;
while 2_f64.powf(-1.0 * p as f64) > max_error && p <= MAX_PRECISION {
while 2_f64.powf(-(p as f64)) > max_error && p <= MAX_PRECISION {
p += 1;
}
self.precision = Some(p);
@@ -434,9 +430,7 @@ mod test {
assert_eq!(
config.value_to_bucket(i as u64),
i,
"{} should be in bucket {}",
i,
i
"{i} should be in bucket {i}"
);
}

View File

@@ -7,7 +7,7 @@ impl Handle {
let trace_status = &self.shared.trace_status;
// If a dump is in progress, block.
trace_status.start_trace_request(&self).await;
trace_status.start_trace_request(self).await;
let result = loop {
if let Some(result) = trace_status.take_result() {
@@ -19,7 +19,7 @@ impl Handle {
};
// Allow other queued dumps to proceed.
trace_status.end_trace_request(&self).await;
trace_status.end_trace_request(self).await;
result
}

View File

@@ -249,7 +249,7 @@ pub(crate) struct Notified&lt;S: 'static&gt;(Task&lt;S&gt;);
impl<S> Notified<S> {
#[cfg(all(tokio_unstable, feature = "rt-multi-thread"))]
#[inline]
pub(crate) fn task_meta<'task, 'meta>(&'task self) -> crate::runtime::TaskMeta<'meta> {
pub(crate) fn task_meta<'meta>(&self) -> crate::runtime::TaskMeta<'meta> {
self.0.task_meta()
}
}
@@ -270,7 +270,7 @@ pub(crate) struct LocalNotified&lt;S: 'static&gt; {
impl<S> LocalNotified<S> {
#[cfg(tokio_unstable)]
#[inline]
pub(crate) fn task_meta<'task, 'meta>(&'task self) -> crate::runtime::TaskMeta<'meta> {
pub(crate) fn task_meta<'meta>(&self) -> crate::runtime::TaskMeta<'meta> {
self.task.task_meta()
}
}
@@ -441,7 +441,7 @@ impl&lt;S: 'static&gt; Task&lt;S&gt; {
// the compiler infers the lifetimes to be the same, and considers the task
// to be borrowed for the lifetime of the returned `TaskMeta`.
#[cfg(tokio_unstable)]
pub(crate) fn task_meta<'task, 'meta>(&'task self) -> crate::runtime::TaskMeta<'meta> {
pub(crate) fn task_meta<'meta>(&self) -> crate::runtime::TaskMeta<'meta> {
crate::runtime::TaskMeta {
id: self.id(),
spawned_at: self.spawned_at().into(),

View File

@@ -56,17 +56,17 @@ impl Tree {
is_last: bool,
prefix: &str,
) -> fmt::Result {
let root_fmt = format!("{}", root);
let root_fmt = format!("{root}");
let current;
let next;
if is_last {
current = format!("{prefix}└╼\u{a0}{root_fmt}");
next = format!("{}\u{a0}\u{a0}\u{a0}", prefix);
next = format!("{prefix}\u{a0}\u{a0}\u{a0}");
} else {
current = format!("{prefix}├╼\u{a0}{root_fmt}");
next = format!("{}\u{a0}\u{a0}", prefix);
next = format!("{prefix}\u{a0}\u{a0}");
}
write!(f, "{}", {

View File

@@ -655,22 +655,40 @@ mod unstable {
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
fn deterministic_select_multi_thread() {
let seed = b"bytes used to generate seed";
let (tx, rx) = std::sync::mpsc::channel();
let rt1 = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.on_thread_park(move || tx.send(()).unwrap())
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
// This makes sure that `enter_runtime()` from worker thread is called before the one from main thread,
// ensuring that the RNG state is consistent. See also https://github.com/tokio-rs/tokio/pull/7495.
rx.recv().unwrap();
let rt1_values = rt1.block_on(async {
let _ = tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) }).await;
tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) })
.await
.unwrap()
});
let (tx, rx) = std::sync::mpsc::channel();
let rt2 = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.on_thread_park(move || tx.send(()).unwrap())
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
// This makes sure that `enter_runtime()` from worker thread is called before the one from main thread,
// ensuring that the RNG state is consistent. See also https://github.com/tokio-rs/tokio/pull/7495.
rx.recv().unwrap();
let rt2_values = rt2.block_on(async {
let _ = tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) }).await;
tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) })
.await
.unwrap()
});
assert_eq!(rt1_values, rt2_values);

View File

@@ -34,16 +34,14 @@ fn spawn_task_hook_fires() {
let count_realized = count.load(Ordering::SeqCst);
assert_eq!(
TASKS, count_realized,
"Total number of spawned task hook invocations was incorrect, expected {TASKS}, got {}",
count_realized
"Total number of spawned task hook invocations was incorrect, expected {TASKS}, got {count_realized}"
);
let count_ids_realized = ids.lock().unwrap().len();
assert_eq!(
TASKS, count_ids_realized,
"Total number of spawned task hook invocations was incorrect, expected {TASKS}, got {}",
count_realized
"Total number of spawned task hook invocations was incorrect, expected {TASKS}, got {count_realized}"
);
}
@@ -180,7 +178,7 @@ fn mk_spawn_location_hook(
event: &'static str,
count: &Arc<AtomicUsize>,
) -> impl Fn(&tokio::runtime::TaskMeta<'_>) {
let count = Arc::clone(&count);
let count = Arc::clone(count);
move |data| {
eprintln!("{event} ({:?}): {:?}", data.id(), data.spawned_at());
// Assert that the spawn location is in this file.

View File

@@ -334,7 +334,7 @@ async fn try_join_next_with_id() {
count += 1;
joined.insert(id);
}
Some(Err(err)) => panic!("failed: {}", err),
Some(Err(err)) => panic!("failed: {err}"),
None => {
break;
}

View File

@@ -92,8 +92,8 @@ async fn task_trace_self() {
for line in good_line {
let s = format!("{}:{}:", file!(), line);
assert!(log.lock().unwrap().iter().any(|x| {
eprintln!("{}", x);
format!("{}", x).contains(&s)
eprintln!("{x}");
format!("{x}").contains(&s)
}));
}
for line in bad_line {
@@ -102,6 +102,6 @@
.lock()
.unwrap()
.iter()
.any(|x| format!("{}", x).contains(&s)));
.any(|x| format!("{x}").contains(&s)));
}
}

View File

@@ -181,6 +181,6 @@ fn expect_task_named(name: &str) -> NewSpan {
.named("runtime.spawn")
.with_target("tokio::task")
.with_fields(
expect::field("task.name").with_value(&tracing::field::debug(format_args!("{}", name))),
expect::field("task.name").with_value(&tracing::field::debug(format_args!("{name}"))),
)
}