mirror of
https://github.com/tower-rs/tower.git
docs pass (#490)
This branch makes the following changes:

* New `lib.rs` docs for `tower`, which should hopefully provide a better explanation of Tower's core abstractions & their relationships
* Nicer docs for `ServiceBuilder`
* Added `#[doc(cfg(...))]` attributes for feature flagged APIs
* Example improvements
* Fixing a bunch of broken intra-rustdoc links

Signed-off-by: Eliza Weisman <eliza@buoyant.io>
parent bef0ade3cb
commit fdd66e5305
@@ -58,9 +58,12 @@ use std::fmt;
/// Ok::<_, Infallible>(request.to_uppercase())
/// });
///
/// // Wrap our servic in a `LogService` so requests are logged.
/// // Wrap our service in a `LogService` so requests are logged.
/// let wrapped_service = log_layer.layer(uppercase_service);
/// ```
///
/// [`Service`]: https://docs.rs/tower-service/latest/tower_service/trait.Service.html
/// [`Layer::layer`]: crate::Layer::layer
pub fn layer_fn<T>(f: T) -> LayerFn<T> {
LayerFn { f }
}
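For context, here is a minimal, self-contained sketch of using `layer_fn` to build a layer from a closure. It is not part of the commit: it assumes the `timeout` and `util` features, a Tokio runtime, and the `tower::layer::layer_fn` re-export, and it uses `Timeout` in place of the `LogService` from the doc example above.

```rust
use std::time::Duration;
use tower::layer::layer_fn;
use tower::timeout::Timeout;
use tower::{service_fn, Layer, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // A `Layer` built from a closure: wrap any service in a 10-second `Timeout`.
    let timeout_layer = layer_fn(|service| Timeout::new(service, Duration::from_secs(10)));

    // A simple service that upper-cases its request.
    let uppercase_service = service_fn(|request: String| async move {
        Ok::<_, tower::BoxError>(request.to_uppercase())
    });

    // Apply the layer to the service, then drive one request through it.
    let wrapped_service = timeout_layer.layer(uppercase_service);
    let response = wrapped_service.oneshot("hello".to_string()).await?;
    assert_eq!(response, "HELLO");
    Ok(())
}
```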
@@ -23,6 +23,7 @@
/// assert_eq!(response.await.unwrap(), "world");
/// # }
/// ```
/// [`SendResponse`]: crate::mock::SendResponse
#[macro_export]
macro_rules! assert_request_eq {
($mock_handle:expr, $expect:expr) => {
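A complete version of the usage the macro docs above sketch might look as follows. Treat it as an illustrative sketch of the `tower-test` mock flow, assuming `tower`, `tower-test`, and `tokio` as dependencies.

```rust
use tower::Service;
use tower_test::{assert_request_eq, mock};

#[tokio::main]
async fn main() {
    // A mock service plus the handle used to drive it from the test side.
    let (mut service, mut handle) = mock::pair::<&str, &str>();

    // Issue a request; hold on to the response future without awaiting it yet.
    let response = service.call("hello");

    // Assert the mock saw the expected request and send back a canned response.
    assert_request_eq!(handle, "hello").send_response("world");

    assert_eq!(response.await.unwrap(), "world");
}
```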
@@ -16,9 +16,9 @@
//!
//! Second, [`pool`] implements a dynamically sized pool of services. It estimates the overall
//! current load by tracking successful and unsuccessful calls to `poll_ready`, and uses an
//! exponentially weighted moving average to add (using [`tower::make_service::MakeService`]) or
//! remove (by dropping) services in response to increases or decreases in load. Use this if you
//! are able to dynamically add more service endpoints to the system to handle added load.
//! exponentially weighted moving average to add (using [`MakeService`]) or remove (by dropping)
//! services in response to increases or decreases in load. Use this if you are able to
//! dynamically add more service endpoints to the system to handle added load.
//!
//! # Examples
//!
@@ -52,6 +52,7 @@
//! }
//! # }
//! ```
//! [`MakeService`]: crate::MakeService

pub mod error;
pub mod p2c;

@@ -7,12 +7,17 @@ use tower_layer::Layer;
///
/// This construction may seem a little odd at first glance. This is not a layer that takes
/// requests and produces responses in the traditional sense. Instead, it is more like
/// [`MakeService`](tower::make_service::MakeService) in that it takes service _descriptors_ (see
/// `Target` on `MakeService`) and produces _services_. Since [`Balance`] spreads requests across a
/// _set_ of services, the inner service should produce a [`Discover`], not just a single
/// [`MakeService`] in that it takes service _descriptors_ (see `Target` on `MakeService`)
/// and produces _services_. Since [`Balance`] spreads requests across a _set_ of services,
/// the inner service should produce a [`Discover`], not just a single
/// [`Service`], given a service descriptor.
///
/// See the [module-level documentation](..) for details on load balancing.
/// See the [module-level documentation](crate::balance) for details on load balancing.
///
/// [`Balance`]: crate::balance::p2c::Balance
/// [`Discover`]: crate::discover::Discover
/// [`MakeService`]: crate::MakeService
/// [`Service`]: crate::Service
#[derive(Clone)]
pub struct MakeBalanceLayer<D, Req> {
_marker: PhantomData<fn(D, Req)>,

@@ -13,12 +13,16 @@ use tower_service::Service;

/// Constructs load balancers over dynamic service sets produced by a wrapped "inner" service.
///
/// This is effectively an implementation of [`MakeService`](tower::make_service::MakeService),
/// except that it forwards the service descriptors (`Target`) to an inner service (`S`), and
/// expects that service to produce a service set in the form of a [`Discover`]. It then wraps the
/// service set in a [`Balance`] before returning it as the "made" service.
/// This is effectively an implementation of [`MakeService`] except that it forwards the service
/// descriptors (`Target`) to an inner service (`S`), and expects that service to produce a
/// service set in the form of a [`Discover`]. It then wraps the service set in a [`Balance`]
/// before returning it as the "made" service.
///
/// See the [module-level documentation](..) for details on load balancing.
/// See the [module-level documentation](crate::balance) for details on load balancing.
///
/// [`MakeService`]: crate::MakeService
/// [`Discover`]: crate::discover::Discover
/// [`Balance`]: crate::balance::p2c::Balance
#[derive(Clone, Debug)]
pub struct MakeBalance<S, Req> {
inner: S,
@@ -26,6 +30,8 @@ pub struct MakeBalance<S, Req> {
}

/// A [`Balance`] in the making.
///
/// [`Balance`]: crate::balance::p2c::Balance
#[pin_project]
#[derive(Debug)]
pub struct MakeFuture<F, Req> {

@@ -3,7 +3,7 @@
//! It is a simple but robust technique for spreading load across services with only inexact load
//! measurements. As its name implies, whenever a request comes in, it samples two ready services
//! at random, and issues the request to whichever service is less loaded. How loaded a service is
//! is determined by the return value of [`Load`](tower::load::Load).
//! is determined by the return value of [`Load`](crate::load::Load).
//!
//! As described in the [Finagle Guide][finagle]:
//!
@@ -16,9 +16,9 @@
//!
//! The balance service and layer implementations rely on _service discovery_ to provide the
//! underlying set of services to balance requests across. This happens through the
//! [`Discover`](tower::discover::Discover) trait, which is essentially a `Stream` that indicates
//! [`Discover`](crate::discover::Discover) trait, which is essentially a `Stream` that indicates
//! when services become available or go away. If you have a fixed set of services, consider using
//! [`ServiceList`](tower::discover::ServiceList).
//! [`ServiceList`](crate::discover::ServiceList).
//!
//! Since the load balancer needs to perform _random_ choices, the constructors in this module
//! usually come in two forms: one that uses randomness provided by the operating system, and one
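To make the discovery story above concrete, a rough sketch of balancing over a fixed set of services could look like the following. The constructor names (`Balance::new`, `Constant::new`) and the feature set (`balance`, `discover`, `load`, `util`) are assumptions about the 0.4-era API rather than something this commit shows.

```rust
use tower::balance::p2c::Balance;
use tower::discover::ServiceList;
use tower::load::Constant;
use tower::{service_fn, Service, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // Two backends; `Constant` assigns each a fixed load value so the balancer
    // can compare them (real code would use a live estimator instead).
    let backends: Vec<_> = ["a", "b"]
        .iter()
        .map(|name| {
            let name = name.to_string();
            Constant::new(
                service_fn(move |req: String| {
                    let name = name.clone();
                    async move { Ok::<_, tower::BoxError>(format!("{}: {}", name, req)) }
                }),
                1,
            )
        })
        .collect();

    // A fixed `Discover` implementation over the list of services.
    let discover = ServiceList::new(backends);

    // Power-of-two-choices balancer using OS-provided randomness.
    let mut balance = Balance::new(discover);

    let response = balance.ready_and().await?.call("hello".to_string()).await?;
    println!("{}", response);
    Ok(())
}
```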
@@ -4,16 +4,15 @@ use tower_layer::{Identity, Layer, Stack};

use std::fmt;

/// Declaratively construct Service values.
/// Declaratively construct [`Service`] values.
///
/// `ServiceBuilder` provides a [builder-like interface][builder] for composing
/// layers to be applied to a `Service`.
/// layers to be applied to a [`Service`].
///
/// # Service
///
/// A [`Service`](tower_service::Service) is a trait representing an
/// asynchronous function of a request to a response. It is similar to `async
/// fn(Request) -> Result<Response, Error>`.
/// A [`Service`] is a trait representing an asynchronous function of a request
/// to a response. It is similar to `async fn(Request) -> Result<Response, Error>`.
///
/// A `Service` is typically bound to a single transport, such as a TCP
/// connection. It defines how _all_ inbound or outbound requests are handled
@@ -33,7 +32,7 @@ use std::fmt;
/// # // to say that it should only be run with cfg(feature = "...")
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// #[cfg(all(feature = "buffer", feature = "limit"))]
/// # #[cfg(all(feature = "buffer", feature = "limit"))]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .buffer(100)
@@ -52,7 +51,7 @@ use std::fmt;
/// ```
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// #[cfg(all(feature = "buffer", feature = "limit"))]
/// # #[cfg(all(feature = "buffer", feature = "limit"))]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .concurrency_limit(10)
@@ -75,7 +74,7 @@ use std::fmt;
/// # use tower::builder::ServiceBuilder;
/// # #[cfg(feature = "limit")]
/// # use tower::limit::concurrency::ConcurrencyLimitLayer;
/// #[cfg(feature = "limit")]
/// # #[cfg(feature = "limit")]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .concurrency_limit(5)
@@ -91,7 +90,7 @@ use std::fmt;
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// # use std::time::Duration;
/// #[cfg(all(feature = "buffer", feature = "limit"))]
/// # #[cfg(all(feature = "buffer", feature = "limit"))]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .buffer(5)
@@ -101,6 +100,7 @@ use std::fmt;
/// # ;
/// # }
/// ```
/// [`Service`]: crate::Service
#[derive(Clone)]
pub struct ServiceBuilder<L> {
layer: L,
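Putting the builder snippets above together, a runnable sketch might look like the following. It assumes the `buffer`, `limit`, `timeout`, and `util` features and a Tokio runtime with timers enabled.

```rust
use std::time::Duration;
use tower::{Service, ServiceBuilder, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // Compose a middleware stack around a `service_fn`. Layers added first
    // wrap the ones added later, so `buffer` is the outermost middleware here.
    let mut svc = ServiceBuilder::new()
        .buffer(100)
        .concurrency_limit(10)
        .rate_limit(50, Duration::from_secs(1))
        .timeout(Duration::from_secs(5))
        .service(tower::service_fn(|name: String| async move {
            Ok::<_, tower::BoxError>(format!("hello, {}", name))
        }));

    // Wait for readiness, then call the composed service.
    let response = svc.ready_and().await?.call("tower".to_string()).await?;
    assert_eq!(response, "hello, tower");
    Ok(())
}
```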
@@ -117,14 +117,25 @@ impl ServiceBuilder<Identity> {

impl<L> ServiceBuilder<L> {
/// Add a new layer `T` into the `ServiceBuilder`.
///
/// This wraps the inner service with the service provided by a user-defined
/// [`Layer`]. The provided layer must implement the [`Layer`] trait.
///
/// [`Layer`]: crate::Layer
pub fn layer<T>(self, layer: T) -> ServiceBuilder<Stack<T, L>> {
ServiceBuilder {
layer: Stack::new(layer, self.layer),
}
}

/// Buffer requests when when the next layer is out of capacity.
/// Buffer requests when when the next layer is not ready.
///
/// This wraps the inner service with an instance of the [`Buffer`]
/// middleware.
///
/// [`Buffer`]: crate::buffer
#[cfg(feature = "buffer")]
#[cfg_attr(docsrs, doc(cfg(feature = "buffer")))]
pub fn buffer<Request>(
self,
bound: usize,
@@ -137,7 +148,13 @@ impl<L> ServiceBuilder<L> {
/// A request is in-flight from the time the request is received until the
/// response future completes. This includes the time spent in the next
/// layers.
///
/// This wraps the inner service with an instance of the
/// [`ConcurrencyLimit`] middleware.
///
/// [`ConcurrencyLimit`]: crate::limit::concurrency
#[cfg(feature = "limit")]
#[cfg_attr(docsrs, doc(cfg(feature = "limit")))]
pub fn concurrency_limit(
self,
max: usize,
@@ -147,19 +164,33 @@ impl<L> ServiceBuilder<L> {

/// Drop requests when the next layer is unable to respond to requests.
///
/// Usually, when a layer or service does not have capacity to process a
/// request (i.e., `poll_ready` returns `NotReady`), the caller waits until
/// Usually, when a service or middleware does not have capacity to process a
/// request (i.e., [`poll_ready`] returns [`Pending`]), the caller waits until
/// capacity becomes available.
///
/// `load_shed` immediately responds with an error when the next layer is
/// [`LoadShed`] immediately responds with an error when the next layer is
/// out of capacity.
///
/// This wraps the inner service with an instance of the [`LoadShed`]
/// middleware.
///
/// [`LoadShed`]: crate::load_shed
/// [`poll_ready`]: crate::Service::poll_ready
/// [`Pending`]: std::task::Poll::Pending
#[cfg(feature = "load-shed")]
#[cfg_attr(docsrs, doc(cfg(feature = "load-shed")))]
pub fn load_shed(self) -> ServiceBuilder<Stack<crate::load_shed::LoadShedLayer, L>> {
self.layer(crate::load_shed::LoadShedLayer::new())
}

/// Limit requests to at most `num` per the given duration
/// Limit requests to at most `num` per the given duration.
///
/// This wraps the inner service with an instance of the [`RateLimit`]
/// middleware.
///
/// [`RateLimit`]: crate::limit::rate
#[cfg(feature = "limit")]
#[cfg_attr(docsrs, doc(cfg(feature = "limit")))]
pub fn rate_limit(
self,
num: u64,
@@ -168,12 +199,18 @@ impl<L> ServiceBuilder<L> {
self.layer(crate::limit::RateLimitLayer::new(num, per))
}

/// Retry failed requests.
/// Retry failed requests according to the given [retry policy][policy].
///
/// `policy` must implement [`Policy`].
/// `policy` determines which failed requests will be retried. It must
/// implement the [`retry::Policy`][policy] trait.
///
/// [`Policy`]: ../retry/trait.Policy.html
/// This wraps the inner service with an instance of the [`Retry`]
/// middleware.
///
/// [`Retry`]: crate::retry
/// [policy]: crate::retry::Policy
#[cfg(feature = "retry")]
#[cfg_attr(docsrs, doc(cfg(feature = "retry")))]
pub fn retry<P>(self, policy: P) -> ServiceBuilder<Stack<crate::retry::RetryLayer<P>, L>> {
self.layer(crate::retry::RetryLayer::new(policy))
}
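Since `retry` takes a policy, a minimal policy sketch is shown below. The `Policy` trait shape here (methods taking `&self`, a `clone_request` hook) reflects the 0.4-era API as best I recall it, so treat the exact signatures as an assumption; it also assumes the `retry` feature and `futures-util` for a ready-made future type.

```rust
use futures_util::future;
use tower::retry::Policy;

/// Retry failed requests up to a fixed number of times.
#[derive(Clone)]
struct Attempts(usize);

impl<Req: Clone, Res, E> Policy<Req, Res, E> for Attempts {
    type Future = future::Ready<Self>;

    fn retry(&self, _req: &Req, result: Result<&Res, &E>) -> Option<Self::Future> {
        match result {
            // Successful responses are never retried.
            Ok(_) => None,
            // Errors are retried while there is budget left.
            Err(_) if self.0 > 0 => Some(future::ready(Attempts(self.0 - 1))),
            Err(_) => None,
        }
    }

    fn clone_request(&self, req: &Req) -> Option<Req> {
        // The request must be cloned so it can be re-sent on a retry.
        Some(req.clone())
    }
}

// Hypothetical usage: ServiceBuilder::new().retry(Attempts(3)).service(svc);
```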
@@ -182,7 +219,13 @@ impl<L> ServiceBuilder<L> {
///
/// If the next layer takes more than `timeout` to respond to a request,
/// processing is terminated and an error is returned.
///
/// This wraps the inner service with an instance of the [`timeout`]
/// middleware.
///
/// [`timeout`]: crate::timeout
#[cfg(feature = "timeout")]
#[cfg_attr(docsrs, doc(cfg(feature = "timeout")))]
pub fn timeout(
self,
timeout: std::time::Duration,
@@ -191,7 +234,67 @@ impl<L> ServiceBuilder<L> {
}

/// Map one request type to another.
///
/// This wraps the inner service with an instance of the [`MapRequest`]
/// middleware.
///
/// # Examples
///
/// Changing the type of a request:
///
/// ```rust
/// use tower::ServiceBuilder;
/// use tower::ServiceExt;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), ()> {
/// // Suppose we have some `Service` whose request type is `String`:
/// let string_svc = tower::service_fn(|request: String| async move {
/// println!("request: {}", request);
/// Ok(())
/// });
///
/// // ...but we want to call that service with a `usize`. What do we do?
///
/// let usize_svc = ServiceBuilder::new()
/// // Add a middlware that converts the request type to a `String`:
/// .map_request(|request: usize| format!("{}", request))
/// // ...and wrap the string service with that middleware:
/// .service(string_svc);
///
/// // Now, we can call that service with a `usize`:
/// usize_svc.oneshot(42).await?;
/// # Ok(())
/// # }
/// ```
///
/// Modifying the request value:
///
/// ```rust
/// use tower::ServiceBuilder;
/// use tower::ServiceExt;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), ()> {
/// // A service that takes a number and returns it:
/// let svc = tower::service_fn(|request: usize| async move {
/// Ok(request)
/// });
///
/// let svc = ServiceBuilder::new()
/// // Add a middleware that adds 1 to each request
/// .map_request(|request: usize| request + 1)
/// .service(svc);
///
/// let response = svc.oneshot(1).await?;
/// assert_eq!(response, 2);
/// # Ok(())
/// # }
/// ```
///
/// [`MapRequest`]: crate::util::MapRequest
#[cfg(feature = "util")]
#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
pub fn map_request<F, R1, R2>(
self,
f: F,
@@ -203,7 +306,16 @@ impl<L> ServiceBuilder<L> {
}

/// Fallibly one request type to another, or to an error.
///
/// This wraps the inner service with an instance of the [`TryMapRequest`]
/// middleware.
///
/// See the documentation for the [`try_map_request` combinator] for details.
///
/// [`TryMapRequest`]: crate::util::MapResponse
/// [`try_map_request` combinator]: crate::util::ServiceExt::try_map_request
#[cfg(feature = "util")]
#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
pub fn try_map_request<F, R1, R2, E>(
self,
f: F,
@@ -215,7 +327,16 @@ impl<L> ServiceBuilder<L> {
}

/// Map one response type to another.
///
/// This wraps the inner service with an instance of the [`MapResponse`]
/// middleware.
///
/// See the documentation for the [`map_response` combinator] for details.
///
/// [`MapResponse`]: crate::util::MapResponse
/// [`map_response` combinator]: crate::util::ServiceExt::map_response
#[cfg(feature = "util")]
#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
pub fn map_response<F>(
self,
f: F,
@@ -224,7 +345,16 @@ impl<L> ServiceBuilder<L> {
}

/// Map one error type to another.
///
/// This wraps the inner service with an instance of the [`MapErr`]
/// middleware.
///
/// See the documentation for the [`map_err` combinator] for details.
///
/// [`MapErr`]: crate::util::MapErr
/// [`map_err` combinator]: crate::util::ServiceExt::map_err
#[cfg(feature = "util")]
#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
pub fn map_err<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapErrLayer<F>, L>> {
self.layer(crate::util::MapErrLayer::new(f))
}
@@ -232,6 +362,9 @@ impl<L> ServiceBuilder<L> {
/// Apply a function after the service, regardless of whether the future
/// succeeds or fails.
///
/// This wraps the inner service with an instance of the [`Then`]
/// middleware.
///
/// This is similar to the [`map_response`] and [`map_err] functions,
/// except that the *same* function is invoked when the service's future
/// completes, whether it completes successfully or fails. This function
@@ -240,19 +373,25 @@ impl<L> ServiceBuilder<L> {
///
/// See the documentation for the [`then` combinator] for details.
///
/// [`Then`]: crate::util::Then
/// [`then` combinator]: crate::util::ServiceExt::then
/// [`map_response`]: ServiceBuilder::map_response
/// [`map_err`]: ServiceBuilder::map_err
#[cfg(feature = "util")]
#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
pub fn then<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::ThenLayer<F>, L>> {
self.layer(crate::util::ThenLayer::new(f))
}

/// Obtains the underlying `Layer` implementation.
/// Returns the underlying `Layer` implementation.
pub fn into_inner(self) -> L {
self.layer
}

/// Wrap the service `S` with the layers.
/// Wrap the service `S` with the middleware provided by this
/// `ServiceBuilder`'s [`Layer`]s, returning a new `Service`.
///
/// [`Layer`]: crate::Layer
pub fn service<S>(&self, service: S) -> L::Service
where
L: Layer<S>,
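Because `service` takes `&self`, one builder can stamp the same middleware stack onto several services. A small sketch of that, assuming the `limit` and `util` features:

```rust
use tower::{Service, ServiceBuilder, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // The builder is not consumed by `service`, so it can wrap multiple services.
    let builder = ServiceBuilder::new().concurrency_limit(5);

    let mut double = builder.service(tower::service_fn(|x: u64| async move {
        Ok::<_, tower::BoxError>(x * 2)
    }));
    let mut square = builder.service(tower::service_fn(|x: u64| async move {
        Ok::<_, tower::BoxError>(x * x)
    }));

    assert_eq!(double.ready_and().await?.call(4).await?, 8);
    assert_eq!(square.ready_and().await?.call(4).await?, 16);
    Ok(())
}
```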
@@ -8,11 +8,85 @@
#![allow(elided_lifetimes_in_paths)]
#![cfg_attr(docsrs, feature(doc_cfg))]

//! `fn(Request) -> Future<Response>`
//! `async fn(Request) -> Result<Response, Error>`
//!
//! # Overview
//!
//! Tower is a library of modular and reusable components for building
//! robust networking clients and servers.

//!
//! Tower provides a simple core abstraction, the [`Service`] trait, which
//! represents an asynchronous function taking a request and returning either a
//! response or an error. This abstraction can be used to model both clients and
//! servers.
//!
//! Generic components, like [timeouts], [rate limiting], and [load balancing],
//! can be modeled as [`Service`]s that wrap some inner service and apply
//! additional behavior before or after the inner service is called. This allows
//! implementing these components in a protocol-agnostic, composable way. Typically,
//! such services are referred to as _middleware_.
//!
//! An additional abstraction, the [`Layer`] trait, is used to compose
//! middleware with [`Service`]s. If a [`Service`] can be thought of as an
//! asynchronous function from a request type to a response type, a [`Layer`] is
//! a function taking a [`Service`] of one type and returning a [`Service`] of a
//! different type. The [`ServiceBuilder`] type is used to add middleware to a
//! service by composing it with multiple multiple [`Layer`]s.
//!
//! ## The Tower Ecosystem
//!
//! Tower is made up of the following crates:
//!
//! * [`tower`] (this crate)
//! * [`tower-service`]
//! * [`tower-layer`]
//! * [`tower-test`]
//!
//! Since the [`Service`] and [`Layer`] traits are important integration points
//! for all libraries using Tower, they are kept as stable as possible, and
//! breaking changes are made rarely. Therefore, they are defined in separate
//! crates, [`tower-service`] and [`tower-layer`]. This crate contains
//! re-exports of those core traits, implementations of commonly-used
//! middleware, and [utilities] for working with [`Service`]s and [`Layer`]s.
//! Finally, the [`tower-test`] crate provides tools for testing programs using
//! Tower.
//!
//! # Usage
//!
//! The various middleware implementations provided by this crate are feature
//! flagged, so that users can only compile the parts of Tower they need. By
//! default, all the optional middleware are disabled.
//!
//! To get started using all of Tower's optional middleware, add this to your
//! `Cargo.toml`:
//!
//! ```toml
//! tower = { version = "0.4", features = ["full"] }
//! ```
//!
//! Alternatively, you can only enable some features. For example, to enable
//! only the [`retry`] and [`timeout`] middleware, write:
//!
//! ```toml
//! tower = { version = "0.4", features = ["retry", "timeout"] }
//! ```
//!
//! See [here](#modules) for a complete list of all middleware provided by
//! Tower.
//!
//! [`Service`]: crate::Service
//! [`Layer]: crate::Layer
//! [timeouts]: crate::timeout
//! [rate limiting]: crate::limit::rate
//! [load balancing]: crate::balance
//! [`ServiceBuilder`]: crate::ServiceBuilder
//! [utilities]: crate::ServiceExt
//! [`tower`]: https://crates.io/crates/tower
//! [`tower-service`]: https://crates.io/crates/tower-service
//! [`tower-layer`]: https://crates.io/crates/tower-layer
//! [`tower-test`]: https://crates.io/crates/tower-test
//! [`retry`]: crate::retry
//! [`timeout`]: crate::timeout
#[cfg(feature = "balance")]
#[cfg_attr(docsrs, doc(cfg(feature = "balance")))]
pub mod balance;
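The overview above describes a `Service` as an asynchronous function from a request to a `Result`. As a minimal illustration (not part of this commit), a hand-written implementation can look like this, using a boxed future for brevity:

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tower::Service;

/// A `Service` that greets whoever is named in the request.
struct HelloService;

impl Service<String> for HelloService {
    type Response = String;
    type Error = std::convert::Infallible;
    // A boxed future keeps the example short; real middleware often names
    // a concrete future type instead.
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Always ready: this service has no capacity limits of its own.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: String) -> Self::Future {
        Box::pin(async move { Ok(format!("hello, {}", request)) })
    }
}
```

Driving it with `ServiceExt::oneshot("tower".to_string())` would then yield `Ok("hello, tower")`.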
@@ -72,6 +146,10 @@ pub use self::util::{service_fn, ServiceExt};

#[doc(inline)]
pub use crate::builder::ServiceBuilder;
#[cfg(feature = "make")]
#[cfg_attr(docsrs, doc(cfg(feature = "make")))]
#[doc(inline)]
pub use crate::make::MakeService;
#[doc(inline)]
pub use tower_layer::Layer;
#[doc(inline)]

@@ -71,6 +71,7 @@ where
}

#[cfg(feature = "load")]
#[cfg_attr(docsrs, doc(cfg(feature = "load")))]
impl<S> crate::load::Load for ConcurrencyLimit<S>
where
S: crate::load::Load,

@@ -119,6 +119,7 @@ where
}

#[cfg(feature = "load")]
#[cfg_attr(docsrs, doc(cfg(feature = "load")))]
impl<S> crate::load::Load for RateLimit<S>
where
S: crate::load::Load,

@@ -59,6 +59,7 @@ where

/// Proxies `Discover` such that all changes are wrapped with a constant load.
#[cfg(feature = "discover")]
#[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
impl<D: Discover + Unpin, M: Copy> Stream for Constant<D, M> {
type Item = Result<Change<D::Key, Constant<D::Service, M>>, D::Error>;

@@ -11,7 +11,7 @@
//! balance services depending on their load. Which load metric to use depends on your exact
//! use-case, but the ones above should get you quite far!
//!
//! When the `discover` feature is enabled, wrapper types for [`tower::discover::Discover`] that
//! When the `discover` feature is enabled, wrapper types for [`Discover`] that
//! wrap the discovered services with the given load estimator are also provided.
//!
//! # When does a request complete?
@@ -27,7 +27,7 @@
//! `CompleteOnOnResponse` is what you would normally expect for a request-response cycle: when the
//! response is produced, the request is considered "finished", and load goes down. This can be
//! overriden by your own user-defined type to track more complex request completion semantics. See
//! the documentation for [`tower::load::completion`] for more details.
//! the documentation for [`completion`] for more details.
//!
//! # Examples
//!
@@ -53,6 +53,8 @@
//! }
//! }
//! ```
//! [`tower::balance`]: crate::balance
//! [`Discover`]: crate::discover::Discover
// TODO: a custom completion example would be good here

pub mod completion;
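As a tiny illustration of the `Load` trait discussed above, the sketch below assigns a fixed metric with `Constant`. `Constant::new` and the `load`/`util` features are assumptions about the 0.4-era API; a real balancer would more likely use `PendingRequests` or `PeakEwma` to estimate load from live traffic.

```rust
use tower::load::{Constant, Load};
use tower::service_fn;

fn main() {
    // Wrap a service with a fixed, hand-assigned load value. A balancer can
    // then compare `load()` across services to pick the less loaded one.
    let svc = service_fn(|req: String| async move { Ok::<_, ()>(req) });
    let loaded = Constant::new(svc, 3_u32);

    // `Load::load` exposes the metric the balancer compares.
    assert_eq!(loaded.load(), 3);
}
```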
@@ -52,6 +52,7 @@ pub struct PeakEwma<S, C = CompleteOnResponse> {
#[pin_project]
#[derive(Debug)]
#[cfg(feature = "discover")]
#[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
pub struct PeakEwmaDiscover<D, C = CompleteOnResponse> {
#[pin]
discover: D,
@@ -183,6 +184,7 @@ impl<D, C> PeakEwmaDiscover<D, C> {
}

#[cfg(feature = "discover")]
#[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
impl<D, C> Stream for PeakEwmaDiscover<D, C>
where
D: Discover,

@@ -31,6 +31,7 @@ struct RefCount(Arc<()>);
#[pin_project]
#[derive(Debug)]
#[cfg(feature = "discover")]
#[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
pub struct PendingRequestsDiscover<D, C = CompleteOnResponse> {
#[pin]
discover: D,

@@ -41,7 +41,10 @@ impl<T, U, E> BoxService<T, U, E> {
BoxService { inner }
}

/// Returns a [`Layer`] for wrapping a [`Service`] in a `BoxService` middleware.
/// Returns a [`Layer`] for wrapping a [`Service`] in a `BoxService`
/// middleware.
///
/// [`Layer`]: crate::Layer
pub fn layer<S>() -> LayerFn<fn(S) -> Self>
where
S: Service<T, Response = U, Error = E> + Send + 'static,
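To show what the boxing above buys, here is a short sketch (assuming the `util` feature) that erases a concrete service type behind `BoxService`, so callers only see the request, response, and error types:

```rust
use tower::util::BoxService;
use tower::{service_fn, BoxError, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), BoxError> {
    // Erase the concrete `ServiceFn` type behind a trait object.
    let svc: BoxService<String, String, BoxError> =
        BoxService::new(service_fn(|req: String| async move {
            Ok::<_, BoxError>(req.to_uppercase())
        }));

    let response = svc.oneshot("hello".to_string()).await?;
    assert_eq!(response, "HELLO");
    Ok(())
}
```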
@@ -36,6 +36,8 @@ impl<T, U, E> UnsyncBoxService<T, U, E> {
}

/// Returns a [`Layer`] for wrapping a [`Service`] in an `UnsyncBoxService` middleware.
///
/// [`Layer`]: crate::Layer
pub fn layer<S>() -> LayerFn<fn(S) -> Self>
where
S: Service<T, Response = U, Error = E> + 'static,

@@ -4,7 +4,7 @@ use tower_service::Service;

/// Service returned by the [`MapRequest`] combinator.
///
/// [`MapRequest`]: crate::util::ServiceExt::MapRequest
/// [`MapRequest`]: crate::util::ServiceExt::map_request
#[derive(Clone, Debug)]
pub struct MapRequest<S, F> {
inner: S,

@@ -48,8 +48,8 @@ pub mod future {
pub use super::map_err::MapErrFuture;
pub use super::map_response::MapResponseFuture;
pub use super::map_result::MapResultFuture;
pub use super::then::ThenFuture;
pub use super::optional::future as optional;
pub use super::then::ThenFuture;
}

/// An extension trait for `Service`s that provides a variety of convenient
@@ -154,7 +154,12 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
///
/// // Call the new service
/// let id = 13;
/// let name = new_service.call(id).await.unwrap();
/// let name = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await?;
/// # Ok::<(), u8>(())
/// # };
/// # }
/// ```
@@ -216,7 +221,13 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
///
/// // Call the new service
/// let id = 13;
/// let code = new_service.call(id).await.unwrap_err();
/// let code = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await
/// .unwrap_err();
/// # Ok::<(), u32>(())
/// # };
/// # }
/// ```
@@ -311,7 +322,12 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
///
/// // Call the new service
/// let id = 13;
/// let name = new_service.call(id).await.unwrap();
/// let name = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await?;
/// # Ok::<(), DbError>(())
/// # };
/// # }
/// ```
@@ -378,9 +394,10 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
/// let id = 13;
/// let record = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await
/// .unwrap();
/// .await?;
/// # Ok::<(), BoxError>(())
/// # };
/// # }
/// ```
@@ -426,11 +443,18 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
///
/// // Call the new service
/// let id = 13;
/// let response = new_service.ready_and().call(id).await;
/// let response = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await;
/// # response
/// # };
/// # }
/// ```
///
/// [`map_response`]: ServiceExt::map_response
/// [`map_err`]: ServiceExt::map_err
/// [`Error`]: crate::Service::Error
/// [`Response`]: crate::Service::Response
/// [`poll_ready`]: crate::Service::poll_ready
@@ -486,7 +510,12 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
///
/// // Call the new service
/// let id = 13;
/// let response = new_service.call(id).await;
/// let response = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await;
/// # response
/// # };
/// # }
/// ```
@@ -545,7 +574,12 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
///
/// // Call the new service
/// let id = "13";
/// let response = new_service.call(id).await;
/// let response = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await;
/// # response
/// # };
/// # }
/// ```
@@ -610,7 +644,7 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
/// # }
/// #
/// # fn call(&mut self, request: u32) -> Self::Future {
/// # futures_util::future::ready(Ok(())))
/// # futures_util::future::ready(Ok(()))
/// # }
/// # }
/// #
@@ -631,7 +665,7 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
/// let mut new_service = service.then(|result| async move {
/// match result {
/// Ok(record) => Ok(record),
/// Err(e) => recover_from_error(e).await
/// Err(e) => recover_from_error(e).await,
/// }
/// });
///
@@ -639,9 +673,10 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
/// let id = 13;
/// let record = new_service
/// .ready_and()
/// .await?
/// .call(id)
/// .await
/// .unwrap();
/// .await?;
/// # Ok::<(), DbError>(())
/// # };
/// # }
/// ```
@@ -649,7 +684,7 @@ pub trait ServiceExt<Request>: tower_service::Service<Request> {
/// [`Future`]: crate::Service::Future
/// [`Output`]: std::future::Future::Output
/// [`futures` crate]: https://docs.rs/futures
/// [`FuturesExt::then`]: https://docs.rs/futures/latest/futures/future/trait.FutureExt.html#method.then
/// [`FutureExt::then`]: https://docs.rs/futures/latest/futures/future/trait.FutureExt.html#method.then
/// [`Error`]: crate::Service::Error
/// [`Response`]: crate::Service::Response
/// [`poll_ready`]: crate::Service::poll_ready