Mirror of https://github.com/tower-rs/tower.git, synced 2025-09-27 13:00:43 +00:00
tower: fix annoying clippy lints (#639)
This fixes a bunch of minor clippy lints. None of them were particularly major, but I was getting tired of the warnings showing up in VS Code.

The one lint that had to be ignored rather than fixed is `clippy::bool_assert_comparison`, which triggers on the `tower_test::assert_request_eq!` macro. The lint fires when code is written as `assert_eq!(whatever, true)` rather than simply `assert!(whatever)`. Here, it occurs because the macro makes an assertion about a request value, and in _some_ tests the request type is `bool`. We can't change the macro to use `assert!`, because in most cases, when the request is not `bool`, we actually do need `assert_eq!`, so that warning is ignored instead.
Commit: db43c07e96 (parent: 665834c7a6)
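For context, a minimal, self-contained sketch (not from the tower codebase) of why `clippy::bool_assert_comparison` fires, and why a generic macro can't simply switch to `assert!`:

```rust
fn main() {
    let ready = true;

    // clippy::bool_assert_comparison flags comparing a bool against a
    // bool literal, e.g.:
    //     assert_eq!(ready, true);
    // and suggests the simpler form:
    assert!(ready);

    // But a macro like `assert_request_eq!` is generic over the request
    // type; for any non-bool request, `assert_eq!` is the only option:
    let request = "hello";
    assert_eq!(request, "hello");
}
```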
```diff
@@ -34,8 +34,13 @@ macro_rules! assert_request_eq {
             Some(r) => r,
             None => panic!("expected a request but none was received."),
         };

-        assert_eq!(actual, $expect, $($arg)*);
+        // In some cases, this may be used with `bool` as the `Request` type, in
+        // which case, clippy emits a warning. However, this can't be changed to
+        // `assert!`, because the request type may *not* be `bool`...
+        #[allow(clippy::bool_assert_comparison)]
+        {
+            assert_eq!(actual, $expect, $($arg)*);
+        }

         send_response
     }};
 }
```
```diff
@@ -51,7 +51,7 @@ async fn main() {
     println!("ENDPOINT_CAPACITY={}", ENDPOINT_CAPACITY);
     print!("MAX_ENDPOINT_LATENCIES=[");
     for max in &MAX_ENDPOINT_LATENCIES {
-        let l = max.as_secs() * 1_000 + u64::from(max.subsec_nanos() / 1_000 / 1_000);
+        let l = max.as_secs() * 1_000 + u64::from(max.subsec_millis());
         print!("{}ms, ", l);
     }
     println!("]");
```
```diff
@@ -122,7 +122,7 @@ fn gen_disco() -> impl Discover<
     let svc = tower::service_fn(move |_| {
         let start = Instant::now();

-        let maxms = u64::from(latency.subsec_nanos() / 1_000 / 1_000)
+        let maxms = u64::from(latency.subsec_millis())
             .saturating_add(latency.as_secs().saturating_mul(1_000));
         let latency = Duration::from_millis(rand::thread_rng().gen_range(0..maxms));

```
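The two latency hunks above replace a hand-rolled nanosecond division with `Duration::subsec_millis()`; this looks like clippy's `duration_subsec` lint. A quick sketch of the equivalence:

```rust
use std::time::Duration;

fn main() {
    let latency = Duration::new(2, 345_000_000); // 2.345 s

    // The hand-rolled form and the helper return the same value:
    assert_eq!(latency.subsec_nanos() / 1_000 / 1_000, 345);
    assert_eq!(latency.subsec_millis(), 345);

    // Total milliseconds, computed the way the example code does:
    let ms = latency.as_secs() * 1_000 + u64::from(latency.subsec_millis());
    assert_eq!(ms, 2_345);
}
```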
```diff
@@ -45,7 +45,7 @@ async fn single_endpoint() {
     handle.send_error("endpoint lost");
     assert_pending!(svc.poll_ready());
     assert!(
-        svc.get_ref().len() == 0,
+        svc.get_ref().is_empty(),
         "balancer must drop failed endpoints"
     );
 }
```
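The balancer test fix corresponds to clippy's `len_zero` lint, which prefers `is_empty()` over comparing `len()` to zero. A minimal illustration:

```rust
fn main() {
    let endpoints: Vec<u32> = Vec::new();

    // clippy::len_zero flags this form...
    assert!(endpoints.len() == 0);

    // ...in favor of the clearer equivalent:
    assert!(endpoints.is_empty());
}
```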
```diff
@@ -114,7 +114,7 @@ where
     fn call(&mut self, request: Request) -> Self::Future {
         ResponseFuture::new(match self.predicate.check(request) {
             Ok(request) => Either::Right(self.inner.call(request).err_into()),
-            Err(e) => Either::Left(futures_util::future::ready(Err(e.into()))),
+            Err(e) => Either::Left(futures_util::future::ready(Err(e))),
         })
     }
 }
```
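Dropping the `.into()` in the filter's error arm suggests the error was already the target type, the pattern clippy reports as `useless_conversion`. A standalone sketch with a stand-in type:

```rust
fn main() {
    // When the source and target types are identical, `.into()` is an
    // identity conversion, and clippy::useless_conversion flags it:
    let e = String::from("predicate rejected the request");
    let err: String = e.into(); // clippy: useless conversion

    // The value could simply be used as `e` directly.
    assert_eq!(err, "predicate rejected the request");
}
```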
```diff
@@ -73,7 +73,7 @@ where
         match self.state {
             State::Ready { .. } => return Poll::Ready(ready!(self.inner.poll_ready(cx))),
             State::Limited => {
-                if let Poll::Pending = Pin::new(&mut self.sleep).poll(cx) {
+                if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                     tracing::trace!("rate limit exceeded; sleeping.");
                     return Poll::Pending;
                 }
```
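Rewriting `if let Poll::Pending = ...` as `.is_pending()` matches clippy's `redundant_pattern_matching` lint. A minimal sketch using `std::task::Poll` directly:

```rust
use std::task::Poll;

fn main() {
    let state: Poll<()> = Poll::Pending;

    // clippy::redundant_pattern_matching flags this form:
    //     if let Poll::Pending = state { ... }
    // and suggests the helper method instead:
    if state.is_pending() {
        println!("still pending");
    }
}
```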
```diff
@@ -143,7 +143,10 @@ where
         if self.not_ready.is_empty() {
             return Poll::Ready(Ok(()));
         } else {
-            if let Poll::Pending = self.services[self.not_ready[0]].poll_ready(cx)? {
+            if self.services[self.not_ready[0]]
+                .poll_ready(cx)?
+                .is_pending()
+            {
                 return Poll::Pending;
             }

```
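This hunk applies the same `is_pending()` rewrite as the rate-limit hunk above; it grows from 7 to 10 lines presumably only because rustfmt breaks the longer indexing expression across several lines.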
```diff
@@ -30,7 +30,7 @@ async fn builder_service() {

     let fut = client.ready().await.unwrap().call("hello");
     assert_request_eq!(handle, true).send_response("world");
-    assert_eq!(fut.await.unwrap(), true);
+    assert!(fut.await.unwrap());
 }

 #[derive(Debug, Clone, Default)]
```
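Here, unlike inside the generic macro, the awaited response is known to be `bool`, so the `assert_eq!(..., true)` really can collapse to `assert!`, exactly the fix the lint suggests; only the macro-internal case needed the `#[allow]`.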