mirror of https://github.com/rust-lang/cargo.git

Use context instead of with_context

parent: b66cad8038
commit: 5ca1add0b0
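The change below is mechanical: anyhow's `with_context` takes a closure so that building the error message is deferred until a failure actually occurs, while `context` takes the message value directly. When the message is a plain string literal there is nothing to defer, so `context` is the simpler call. A minimal sketch of both forms (the `read_len` helper and its message strings are hypothetical, for illustration only; only the `.context`/`.with_context` pattern mirrors this commit):

use anyhow::{Context, Result};
use std::fs;

// Hypothetical helper illustrating the pattern changed in this commit.
fn read_len(path: &str) -> Result<usize> {
    // Before this commit, call sites looked like:
    //     fs::read_to_string(path).with_context(|| "failed to read input file")?
    // After: the literal is passed directly; no closure needed.
    let text = fs::read_to_string(path).context("failed to read input file")?;

    // `with_context` is still the right tool when the message is built at
    // runtime, since the `format!` then only runs on the error path.
    let n: usize = text
        .trim()
        .parse()
        .with_context(|| format!("`{path}` did not contain a number"))?;
    Ok(n)
}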
@@ -100,8 +100,8 @@ impl<'a, 'gctx> BuildRunner<'a, 'gctx> {
         let jobserver = match bcx.gctx.jobserver_from_env() {
             Some(c) => c.clone(),
             None => {
-                let client = Client::new(bcx.jobs() as usize)
-                    .with_context(|| "failed to create jobserver")?;
+                let client =
+                    Client::new(bcx.jobs() as usize).context("failed to create jobserver")?;
                 client.acquire_raw()?;
                 client
             }
@@ -354,11 +354,11 @@ impl<'a, 'gctx> BuildRunner<'a, 'gctx> {
             .unwrap()
             .host
             .prepare()
-            .with_context(|| "couldn't prepare build directories")?;
+            .context("couldn't prepare build directories")?;
         for target in self.files.as_mut().unwrap().target.values_mut() {
             target
                 .prepare()
-                .with_context(|| "couldn't prepare build directories")?;
+                .context("couldn't prepare build directories")?;
         }

         let files = self.files.as_ref().unwrap();
@@ -431,7 +431,7 @@ fn build_work(build_runner: &mut BuildRunner<'_, '_>, unit: &Unit) -> CargoResul
         // If we have an old build directory, then just move it into place,
         // otherwise create it!
         paths::create_dir_all(&script_out_dir)
-            .with_context(|| "failed to create script output directory for build command")?;
+            .context("failed to create script output directory for build command")?;

         // For all our native lib dependencies, pick up their metadata to pass
         // along to this custom build command. We're also careful to augment our
@@ -211,9 +211,9 @@ impl OnDiskReports {
         report_file
             .file()
             .read_to_string(&mut file_contents)
-            .with_context(|| "failed to read report")?;
+            .context("failed to read report")?;
         let on_disk_reports: OnDiskReports =
-            serde_json::from_str(&file_contents).with_context(|| "failed to load report")?;
+            serde_json::from_str(&file_contents).context("failed to load report")?;
         if on_disk_reports.version != ON_DISK_VERSION {
             bail!("unable to read reports; reports were saved from a future version of Cargo");
         }
@@ -513,7 +513,7 @@ impl<'gctx> JobQueue<'gctx> {
             .into_helper_thread(move |token| {
                 messages.push(Message::Token(token));
             })
-            .with_context(|| "failed to create helper thread for jobserver management")?;
+            .context("failed to create helper thread for jobserver management")?;

         // Create a helper thread to manage the diagnostics for rustfix if
         // necessary.
@@ -700,7 +700,7 @@ impl<'gctx> DrainState<'gctx> {
                     .push(FutureIncompatReportPackage { package_id, items });
             }
             Message::Token(acquired_token) => {
-                let token = acquired_token.with_context(|| "failed to acquire jobserver token")?;
+                let token = acquired_token.context("failed to acquire jobserver token")?;
                 self.tokens.push(token);
             }
         }
@@ -299,7 +299,7 @@ impl<'gctx> Timings<'gctx> {
             .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap());
         if self.report_html {
             self.report_html(build_runner, error)
-                .with_context(|| "failed to save timing report")?;
+                .context("failed to save timing report")?;
         }
         Ok(())
     }
@@ -543,7 +543,7 @@ impl GlobalCacheTracker {
     /// Deletes files from the global cache based on the given options.
    pub fn clean(&mut self, clean_ctx: &mut CleanContext<'_>, gc_opts: &GcOpts) -> CargoResult<()> {
        self.clean_inner(clean_ctx, gc_opts)
-            .with_context(|| "failed to clean entries from the global cache")
+            .context("failed to clean entries from the global cache")
    }

    #[tracing::instrument(skip_all)]
@@ -575,7 +575,7 @@ impl GlobalCacheTracker {
                 gc_opts.is_download_cache_size_set(),
                 &mut delete_paths,
             )
-            .with_context(|| "failed to sync tracking database")?
+            .context("failed to sync tracking database")?
         }
         if let Some(max_age) = gc_opts.max_index_age {
             let max_age = now - max_age.as_secs();
@@ -393,7 +393,7 @@ impl<'gctx> PackageSet<'gctx> {
         let multiplexing = gctx.http_config()?.multiplexing.unwrap_or(true);
         multi
             .pipelining(false, multiplexing)
-            .with_context(|| "failed to enable multiplexing/pipelining in curl")?;
+            .context("failed to enable multiplexing/pipelining in curl")?;

         // let's not flood crates.io with connections
         multi.set_max_host_connections(2)?;
@@ -681,7 +681,7 @@ impl<'a, 'gctx> Downloads<'a, 'gctx> {
             .ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?;
         let pkg = source
             .download(id)
-            .with_context(|| "unable to get packages from source")?;
+            .context("unable to get packages from source")?;
         let (url, descriptor, authorization) = match pkg {
             MaybePackage::Ready(pkg) => {
                 debug!("{} doesn't need a download", id);
@@ -951,7 +951,7 @@ impl<'a, 'gctx> Downloads<'a, 'gctx> {
             self.set
                 .multi
                 .perform()
-                .with_context(|| "failed to perform http requests")
+                .context("failed to perform http requests")
         })?;
         debug!(target: "network", "handles remaining: {}", n);
         let results = &mut self.results;
@@ -981,7 +981,7 @@ impl<'a, 'gctx> Downloads<'a, 'gctx> {
             self.set
                 .multi
                 .wait(&mut [], timeout)
-                .with_context(|| "failed to wait on curl `Multi`")?;
+                .context("failed to wait on curl `Multi`")?;
         }
     }
 }
@@ -147,13 +147,13 @@ fn create_package(
         .status("Packaging", pkg.package_id().to_string())?;
     dst.file().set_len(0)?;
     let uncompressed_size = tar(ws, pkg, local_reg, ar_files, dst.file(), &filename)
-        .with_context(|| "failed to prepare local package for uploading")?;
+        .context("failed to prepare local package for uploading")?;

     dst.seek(SeekFrom::Start(0))?;
     let src_path = dst.path();
     let dst_path = dst.parent().join(&filename);
     fs::rename(&src_path, &dst_path)
-        .with_context(|| "failed to move temporary tarball into final location")?;
+        .context("failed to move temporary tarball into final location")?;

     let dst_metadata = dst
         .file()
@@ -331,7 +331,7 @@ pub fn package(ws: &Workspace<'_>, opts: &PackageOpts<'_>) -> CargoResult<Vec<Fi
     if opts.verify {
         for (pkg, opts, tarball) in &outputs {
             run_verify(ws, pkg, tarball, local_reg.as_ref(), opts)
-                .with_context(|| "failed to verify package tarball")?
+                .context("failed to verify package tarball")?
         }
     }

@@ -111,7 +111,7 @@ impl InstallTracker {
             if contents.is_empty() {
                 Ok(CrateListingV1::default())
             } else {
-                Ok(toml::from_str(&contents).with_context(|| "invalid TOML found for metadata")?)
+                Ok(toml::from_str(&contents).context("invalid TOML found for metadata")?)
             }
         })()
         .with_context(|| {
@@ -127,8 +127,7 @@ impl InstallTracker {
         let mut v2 = if contents.is_empty() {
             CrateListingV2::default()
         } else {
-            serde_json::from_str(&contents)
-                .with_context(|| "invalid JSON found for metadata")?
+            serde_json::from_str(&contents).context("invalid JSON found for metadata")?
         };
         v2.sync_v1(&v1);
         Ok(v2)
@@ -32,7 +32,7 @@ pub fn vendor(ws: &Workspace<'_>, opts: &VendorOptions<'_>) -> CargoResult<()> {
     }
     let workspaces = extra_workspaces.iter().chain(Some(ws)).collect::<Vec<_>>();
     let _lock = gctx.acquire_package_cache_lock(CacheLockMode::MutateExclusive)?;
-    let vendor_config = sync(gctx, &workspaces, opts).with_context(|| "failed to sync")?;
+    let vendor_config = sync(gctx, &workspaces, opts).context("failed to sync")?;

     if gctx.shell().verbosity() != Verbosity::Quiet {
         if vendor_config.source.is_empty() {
@@ -113,11 +113,11 @@ fn sync(
     // crate to work with.
     for ws in workspaces {
         let (packages, resolve) =
-            ops::resolve_ws(ws, dry_run).with_context(|| "failed to load pkg lockfile")?;
+            ops::resolve_ws(ws, dry_run).context("failed to load pkg lockfile")?;

         packages
             .get_many(resolve.iter())
-            .with_context(|| "failed to download packages")?;
+            .context("failed to download packages")?;

         for pkg in resolve.iter() {
             // Don't delete actual source code!
@@ -145,11 +145,11 @@ fn sync(
     // tables about them.
     for ws in workspaces {
         let (packages, resolve) =
-            ops::resolve_ws(ws, dry_run).with_context(|| "failed to load pkg lockfile")?;
+            ops::resolve_ws(ws, dry_run).context("failed to load pkg lockfile")?;

         packages
             .get_many(resolve.iter())
-            .with_context(|| "failed to download packages")?;
+            .context("failed to download packages")?;

         for pkg in resolve.iter() {
             // No need to vendor path crates since they're already in the
@@ -161,7 +161,7 @@ fn sync(
                 pkg,
                 packages
                     .get_one(pkg)
-                    .with_context(|| "failed to fetch package")?
+                    .context("failed to fetch package")?
                     .clone(),
             );

@@ -268,7 +268,7 @@ impl<'gctx> HttpRegistry<'gctx> {

         self.multi
             .pipelining(false, self.multiplexing)
-            .with_context(|| "failed to enable multiplexing/pipelining in curl")?;
+            .context("failed to enable multiplexing/pipelining in curl")?;

         // let's not flood the server with connections
         self.multi.set_max_host_connections(2)?;
@@ -802,7 +802,7 @@ impl<'gctx> RegistryData for HttpRegistry<'gctx> {
         let remaining_in_multi = tls::set(&self.downloads, || {
             self.multi
                 .perform()
-                .with_context(|| "failed to perform http requests")
+                .context("failed to perform http requests")
         })?;
         trace!(target: "network", "{} transfers remaining", remaining_in_multi);

@@ -823,7 +823,7 @@ impl<'gctx> RegistryData for HttpRegistry<'gctx> {
             .unwrap_or_else(|| Duration::new(1, 0));
             self.multi
                 .wait(&mut [], timeout)
-                .with_context(|| "failed to wait on curl `Multi`")?;
+                .context("failed to wait on curl `Multi`")?;
         }
     }
 }
@@ -642,10 +642,10 @@ impl<'gctx> RegistrySource<'gctx> {
         let prefix = unpack_dir.file_name().unwrap();
         let parent = unpack_dir.parent().unwrap();
         for entry in tar.entries()? {
-            let mut entry = entry.with_context(|| "failed to iterate over archive")?;
+            let mut entry = entry.context("failed to iterate over archive")?;
             let entry_path = entry
                 .path()
-                .with_context(|| "failed to read entry path")?
+                .context("failed to read entry path")?
                 .into_owned();

             // We're going to unpack this tarball into the global source
@@ -308,7 +308,7 @@ impl RecursiveLock {
                     self.is_exclusive = true;
                     return Ok(());
                 } else {
-                    return Err(e).with_context(|| "failed to acquire package cache lock");
+                    return Err(e).context("failed to acquire package cache lock");
                 }
             }
         }
@@ -331,7 +331,7 @@ impl RecursiveLock {
                     self.is_exclusive = true;
                     return Ok(result);
                 } else {
-                    return Err(e).with_context(|| "failed to acquire package cache lock");
+                    return Err(e).context("failed to acquire package cache lock");
                 }
             }
         }
@@ -335,8 +335,8 @@ impl GlobalContext {
     /// any config files from disk. Those will be loaded lazily as-needed.
     pub fn default() -> CargoResult<GlobalContext> {
         let shell = Shell::new();
-        let cwd = env::current_dir()
-            .with_context(|| "couldn't get the current directory of the process")?;
+        let cwd =
+            env::current_dir().context("couldn't get the current directory of the process")?;
         let homedir = homedir(&cwd).ok_or_else(|| {
             anyhow!(
                 "Cargo couldn't find your home directory. \
@@ -496,7 +496,7 @@ impl GlobalContext {
             let exe = from_env()
                 .or_else(|_| from_current_exe())
                 .or_else(|_| from_argv())
-                .with_context(|| "couldn't get the path to cargo executable")?;
+                .context("couldn't get the path to cargo executable")?;
             Ok(exe)
         })
         .map(AsRef::as_ref)
@@ -569,8 +569,8 @@ impl GlobalContext {
     ///
     /// There is not a need to also call [`Self::reload_rooted_at`].
     pub fn reload_cwd(&mut self) -> CargoResult<()> {
-        let cwd = env::current_dir()
-            .with_context(|| "couldn't get the current directory of the process")?;
+        let cwd =
+            env::current_dir().context("couldn't get the current directory of the process")?;
         let homedir = homedir(&cwd).ok_or_else(|| {
             anyhow!(
                 "Cargo couldn't find your home directory. \
@@ -1166,7 +1166,7 @@ impl GlobalContext {
             result.push(cv);
             Ok(())
         })
-        .with_context(|| "could not load Cargo configuration")?;
+        .context("could not load Cargo configuration")?;
         Ok(result)
     }

@@ -1206,7 +1206,7 @@ impl GlobalContext {
             })?;
             Ok(())
         })
-        .with_context(|| "could not load Cargo configuration")?;
+        .context("could not load Cargo configuration")?;

         match cfg {
             CV::Table(map, _) => Ok(map),
@@ -1495,7 +1495,7 @@ impl GlobalContext {
         };
         let tmp_table = self
             .load_includes(tmp_table, &mut HashSet::new(), WhyLoad::Cli)
-            .with_context(|| "failed to load --config include".to_string())?;
+            .context("failed to load --config include".to_string())?;
         loaded_args
             .merge(tmp_table, true)
             .with_context(|| format!("failed to merge --config argument `{arg}`"))?;
@@ -268,7 +268,7 @@ pub struct StartedServer {
 impl RustfixDiagnosticServer {
     pub fn new() -> Result<Self, Error> {
         let listener = TcpListener::bind(&LOCALHOST[..])
-            .with_context(|| "failed to bind TCP listener to manage locking")?;
+            .context("failed to bind TCP listener to manage locking")?;
         let addr = listener.local_addr()?;

         Ok(RustfixDiagnosticServer { listener, addr })
@@ -47,7 +47,7 @@ struct ServerClient {
 impl LockServer {
     pub fn new() -> Result<LockServer, Error> {
         let listener = TcpListener::bind(&LOCALHOST[..])
-            .with_context(|| "failed to bind TCP listener to manage locking")?;
+            .context("failed to bind TCP listener to manage locking")?;
         let addr = listener.local_addr()?;
         Ok(LockServer {
             listener,
@@ -159,15 +159,15 @@ impl Drop for LockServerStarted {
 impl LockServerClient {
     pub fn lock(addr: &SocketAddr, name: impl AsRef<[u8]>) -> Result<LockServerClient, Error> {
         let mut client =
-            TcpStream::connect(&addr).with_context(|| "failed to connect to parent lock server")?;
+            TcpStream::connect(&addr).context("failed to connect to parent lock server")?;
         client
             .write_all(name.as_ref())
             .and_then(|_| client.write_all(b"\n"))
-            .with_context(|| "failed to write to lock server")?;
+            .context("failed to write to lock server")?;
         let mut buf = [0];
         client
             .read_exact(&mut buf)
-            .with_context(|| "failed to acquire lock")?;
+            .context("failed to acquire lock")?;
         Ok(LockServerClient { _socket: client })
     }
 }
@@ -1131,7 +1131,7 @@ pub fn to_real_manifest(
     {
         let edition: Edition = edition
             .parse()
-            .with_context(|| "failed to parse the `edition` key")?;
+            .context("failed to parse the `edition` key")?;
         if let Some(pkg_msrv) = &rust_version {
             if let Some(edition_msrv) = edition.first_version() {
                 let edition_msrv = RustVersion::try_from(edition_msrv).unwrap();
@@ -853,7 +853,7 @@ fn configure(toml: &TomlTarget, target: &mut Target) -> CargoResult<()> {
         target.set_edition(
             edition
                 .parse()
-                .with_context(|| "failed to parse the `edition` key")?,
+                .context("failed to parse the `edition` key")?,
         );
     }
     Ok(())