mirror of https://github.com/rust-lang/cargo.git (synced 2025-10-01 11:30:39 +00:00)

Use context instead of with_context

This commit is contained in:
parent b66cad8038
commit 5ca1add0b0
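This commit swaps anyhow's `with_context` for `context` wherever the error message is a constant string. Both methods come from anyhow's `Context` trait (Cargo's `CargoResult` is an `anyhow::Result`): `context` takes the message value directly, while `with_context` takes a closure that is only invoked on the error path. The closure only pays off when building the message costs something, such as a `format!` call; for a `&'static str` it is pure noise. A minimal sketch of the pattern — the `read_report` helper and its path are hypothetical, for illustration only:

    use anyhow::{Context, Result};
    use std::fs;

    fn read_report(path: &str) -> Result<String> {
        // Before: a closure around a constant message buys nothing,
        // since a &'static str is free to construct.
        // fs::read_to_string(path).with_context(|| "failed to read report")

        // After: pass the constant message directly.
        let contents = fs::read_to_string(path).context("failed to read report")?;

        // `with_context` remains the right tool when the message is built
        // with `format!`, deferring the allocation to the error path.
        fs::metadata(path).with_context(|| format!("failed to stat `{path}`"))?;

        Ok(contents)
    }

Consistent with that rule, the diff below leaves formatted messages such as `.with_context(|| format!("failed to merge --config argument `{arg}`"))?` untouched and only rewrites the constant-string call sites.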
@@ -100,8 +100,8 @@ impl<'a, 'gctx> BuildRunner<'a, 'gctx> {
         let jobserver = match bcx.gctx.jobserver_from_env() {
             Some(c) => c.clone(),
             None => {
-                let client = Client::new(bcx.jobs() as usize)
-                    .with_context(|| "failed to create jobserver")?;
+                let client =
+                    Client::new(bcx.jobs() as usize).context("failed to create jobserver")?;
                 client.acquire_raw()?;
                 client
             }
@@ -354,11 +354,11 @@ impl<'a, 'gctx> BuildRunner<'a, 'gctx> {
             .unwrap()
             .host
             .prepare()
-            .with_context(|| "couldn't prepare build directories")?;
+            .context("couldn't prepare build directories")?;
         for target in self.files.as_mut().unwrap().target.values_mut() {
             target
                 .prepare()
-                .with_context(|| "couldn't prepare build directories")?;
+                .context("couldn't prepare build directories")?;
         }

         let files = self.files.as_ref().unwrap();
@@ -431,7 +431,7 @@ fn build_work(build_runner: &mut BuildRunner<'_, '_>, unit: &Unit) -> CargoResult
     // If we have an old build directory, then just move it into place,
     // otherwise create it!
     paths::create_dir_all(&script_out_dir)
-        .with_context(|| "failed to create script output directory for build command")?;
+        .context("failed to create script output directory for build command")?;

     // For all our native lib dependencies, pick up their metadata to pass
     // along to this custom build command. We're also careful to augment our
@@ -211,9 +211,9 @@ impl OnDiskReports {
         report_file
             .file()
             .read_to_string(&mut file_contents)
-            .with_context(|| "failed to read report")?;
+            .context("failed to read report")?;
         let on_disk_reports: OnDiskReports =
-            serde_json::from_str(&file_contents).with_context(|| "failed to load report")?;
+            serde_json::from_str(&file_contents).context("failed to load report")?;
         if on_disk_reports.version != ON_DISK_VERSION {
             bail!("unable to read reports; reports were saved from a future version of Cargo");
         }
@@ -513,7 +513,7 @@ impl<'gctx> JobQueue<'gctx> {
             .into_helper_thread(move |token| {
                 messages.push(Message::Token(token));
             })
-            .with_context(|| "failed to create helper thread for jobserver management")?;
+            .context("failed to create helper thread for jobserver management")?;

         // Create a helper thread to manage the diagnostics for rustfix if
         // necessary.
@@ -700,7 +700,7 @@ impl<'gctx> DrainState<'gctx> {
                     .push(FutureIncompatReportPackage { package_id, items });
             }
             Message::Token(acquired_token) => {
-                let token = acquired_token.with_context(|| "failed to acquire jobserver token")?;
+                let token = acquired_token.context("failed to acquire jobserver token")?;
                 self.tokens.push(token);
             }
         }
@@ -299,7 +299,7 @@ impl<'gctx> Timings<'gctx> {
             .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap());
         if self.report_html {
             self.report_html(build_runner, error)
-                .with_context(|| "failed to save timing report")?;
+                .context("failed to save timing report")?;
         }
         Ok(())
     }
@@ -543,7 +543,7 @@ impl GlobalCacheTracker {
     /// Deletes files from the global cache based on the given options.
     pub fn clean(&mut self, clean_ctx: &mut CleanContext<'_>, gc_opts: &GcOpts) -> CargoResult<()> {
         self.clean_inner(clean_ctx, gc_opts)
-            .with_context(|| "failed to clean entries from the global cache")
+            .context("failed to clean entries from the global cache")
     }

     #[tracing::instrument(skip_all)]
@@ -575,7 +575,7 @@ impl GlobalCacheTracker {
                 gc_opts.is_download_cache_size_set(),
                 &mut delete_paths,
             )
-            .with_context(|| "failed to sync tracking database")?
+            .context("failed to sync tracking database")?
         }
         if let Some(max_age) = gc_opts.max_index_age {
             let max_age = now - max_age.as_secs();
@@ -393,7 +393,7 @@ impl<'gctx> PackageSet<'gctx> {
         let multiplexing = gctx.http_config()?.multiplexing.unwrap_or(true);
         multi
             .pipelining(false, multiplexing)
-            .with_context(|| "failed to enable multiplexing/pipelining in curl")?;
+            .context("failed to enable multiplexing/pipelining in curl")?;

         // let's not flood crates.io with connections
         multi.set_max_host_connections(2)?;
@@ -681,7 +681,7 @@ impl<'a, 'gctx> Downloads<'a, 'gctx> {
             .ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?;
         let pkg = source
             .download(id)
-            .with_context(|| "unable to get packages from source")?;
+            .context("unable to get packages from source")?;
         let (url, descriptor, authorization) = match pkg {
             MaybePackage::Ready(pkg) => {
                 debug!("{} doesn't need a download", id);
@@ -951,7 +951,7 @@ impl<'a, 'gctx> Downloads<'a, 'gctx> {
             self.set
                 .multi
                 .perform()
-                .with_context(|| "failed to perform http requests")
+                .context("failed to perform http requests")
         })?;
         debug!(target: "network", "handles remaining: {}", n);
         let results = &mut self.results;
@@ -981,7 +981,7 @@ impl<'a, 'gctx> Downloads<'a, 'gctx> {
                 self.set
                     .multi
                     .wait(&mut [], timeout)
-                    .with_context(|| "failed to wait on curl `Multi`")?;
+                    .context("failed to wait on curl `Multi`")?;
             }
         }
     }
@@ -147,13 +147,13 @@ fn create_package(
         .status("Packaging", pkg.package_id().to_string())?;
     dst.file().set_len(0)?;
     let uncompressed_size = tar(ws, pkg, local_reg, ar_files, dst.file(), &filename)
-        .with_context(|| "failed to prepare local package for uploading")?;
+        .context("failed to prepare local package for uploading")?;

     dst.seek(SeekFrom::Start(0))?;
     let src_path = dst.path();
     let dst_path = dst.parent().join(&filename);
     fs::rename(&src_path, &dst_path)
-        .with_context(|| "failed to move temporary tarball into final location")?;
+        .context("failed to move temporary tarball into final location")?;

     let dst_metadata = dst
         .file()
@@ -331,7 +331,7 @@ pub fn package(ws: &Workspace<'_>, opts: &PackageOpts<'_>) -> CargoResult<Vec<FileLock>> {
     if opts.verify {
         for (pkg, opts, tarball) in &outputs {
             run_verify(ws, pkg, tarball, local_reg.as_ref(), opts)
-                .with_context(|| "failed to verify package tarball")?
+                .context("failed to verify package tarball")?
         }
     }

@@ -111,7 +111,7 @@ impl InstallTracker {
             if contents.is_empty() {
                 Ok(CrateListingV1::default())
             } else {
-                Ok(toml::from_str(&contents).with_context(|| "invalid TOML found for metadata")?)
+                Ok(toml::from_str(&contents).context("invalid TOML found for metadata")?)
             }
         })()
         .with_context(|| {
@@ -127,8 +127,7 @@ impl InstallTracker {
         let mut v2 = if contents.is_empty() {
             CrateListingV2::default()
         } else {
-            serde_json::from_str(&contents)
-                .with_context(|| "invalid JSON found for metadata")?
+            serde_json::from_str(&contents).context("invalid JSON found for metadata")?
         };
         v2.sync_v1(&v1);
         Ok(v2)
@@ -32,7 +32,7 @@ pub fn vendor(ws: &Workspace<'_>, opts: &VendorOptions<'_>) -> CargoResult<()> {
     }
     let workspaces = extra_workspaces.iter().chain(Some(ws)).collect::<Vec<_>>();
     let _lock = gctx.acquire_package_cache_lock(CacheLockMode::MutateExclusive)?;
-    let vendor_config = sync(gctx, &workspaces, opts).with_context(|| "failed to sync")?;
+    let vendor_config = sync(gctx, &workspaces, opts).context("failed to sync")?;

     if gctx.shell().verbosity() != Verbosity::Quiet {
         if vendor_config.source.is_empty() {
@@ -113,11 +113,11 @@ fn sync(
     // crate to work with.
     for ws in workspaces {
         let (packages, resolve) =
-            ops::resolve_ws(ws, dry_run).with_context(|| "failed to load pkg lockfile")?;
+            ops::resolve_ws(ws, dry_run).context("failed to load pkg lockfile")?;

         packages
             .get_many(resolve.iter())
-            .with_context(|| "failed to download packages")?;
+            .context("failed to download packages")?;

         for pkg in resolve.iter() {
             // Don't delete actual source code!
@@ -145,11 +145,11 @@ fn sync(
     // tables about them.
     for ws in workspaces {
         let (packages, resolve) =
-            ops::resolve_ws(ws, dry_run).with_context(|| "failed to load pkg lockfile")?;
+            ops::resolve_ws(ws, dry_run).context("failed to load pkg lockfile")?;

         packages
             .get_many(resolve.iter())
-            .with_context(|| "failed to download packages")?;
+            .context("failed to download packages")?;

         for pkg in resolve.iter() {
             // No need to vendor path crates since they're already in the
@@ -161,7 +161,7 @@ fn sync(
                 pkg,
                 packages
                     .get_one(pkg)
-                    .with_context(|| "failed to fetch package")?
+                    .context("failed to fetch package")?
                     .clone(),
             );

@@ -268,7 +268,7 @@ impl<'gctx> HttpRegistry<'gctx> {

         self.multi
             .pipelining(false, self.multiplexing)
-            .with_context(|| "failed to enable multiplexing/pipelining in curl")?;
+            .context("failed to enable multiplexing/pipelining in curl")?;

         // let's not flood the server with connections
         self.multi.set_max_host_connections(2)?;
@@ -802,7 +802,7 @@ impl<'gctx> RegistryData for HttpRegistry<'gctx> {
         let remaining_in_multi = tls::set(&self.downloads, || {
             self.multi
                 .perform()
-                .with_context(|| "failed to perform http requests")
+                .context("failed to perform http requests")
         })?;
         trace!(target: "network", "{} transfers remaining", remaining_in_multi);

@@ -823,7 +823,7 @@ impl<'gctx> RegistryData for HttpRegistry<'gctx> {
                 .unwrap_or_else(|| Duration::new(1, 0));
             self.multi
                 .wait(&mut [], timeout)
-                .with_context(|| "failed to wait on curl `Multi`")?;
+                .context("failed to wait on curl `Multi`")?;
         }
     }
 }
@@ -642,10 +642,10 @@ impl<'gctx> RegistrySource<'gctx> {
         let prefix = unpack_dir.file_name().unwrap();
         let parent = unpack_dir.parent().unwrap();
         for entry in tar.entries()? {
-            let mut entry = entry.with_context(|| "failed to iterate over archive")?;
+            let mut entry = entry.context("failed to iterate over archive")?;
             let entry_path = entry
                 .path()
-                .with_context(|| "failed to read entry path")?
+                .context("failed to read entry path")?
                 .into_owned();

             // We're going to unpack this tarball into the global source
@@ -308,7 +308,7 @@ impl RecursiveLock {
                 self.is_exclusive = true;
                 return Ok(());
             } else {
-                return Err(e).with_context(|| "failed to acquire package cache lock");
+                return Err(e).context("failed to acquire package cache lock");
             }
         }
     }
@@ -331,7 +331,7 @@ impl RecursiveLock {
                 self.is_exclusive = true;
                 return Ok(result);
             } else {
-                return Err(e).with_context(|| "failed to acquire package cache lock");
+                return Err(e).context("failed to acquire package cache lock");
             }
         }
     }
@@ -335,8 +335,8 @@ impl GlobalContext {
     /// any config files from disk. Those will be loaded lazily as-needed.
     pub fn default() -> CargoResult<GlobalContext> {
         let shell = Shell::new();
-        let cwd = env::current_dir()
-            .with_context(|| "couldn't get the current directory of the process")?;
+        let cwd =
+            env::current_dir().context("couldn't get the current directory of the process")?;
         let homedir = homedir(&cwd).ok_or_else(|| {
             anyhow!(
                 "Cargo couldn't find your home directory. \
@@ -496,7 +496,7 @@ impl GlobalContext {
             let exe = from_env()
                 .or_else(|_| from_current_exe())
                 .or_else(|_| from_argv())
-                .with_context(|| "couldn't get the path to cargo executable")?;
+                .context("couldn't get the path to cargo executable")?;
             Ok(exe)
         })
         .map(AsRef::as_ref)
@@ -569,8 +569,8 @@ impl GlobalContext {
     ///
     /// There is not a need to also call [`Self::reload_rooted_at`].
     pub fn reload_cwd(&mut self) -> CargoResult<()> {
-        let cwd = env::current_dir()
-            .with_context(|| "couldn't get the current directory of the process")?;
+        let cwd =
+            env::current_dir().context("couldn't get the current directory of the process")?;
         let homedir = homedir(&cwd).ok_or_else(|| {
             anyhow!(
                 "Cargo couldn't find your home directory. \
@@ -1166,7 +1166,7 @@ impl GlobalContext {
                 result.push(cv);
                 Ok(())
             })
-            .with_context(|| "could not load Cargo configuration")?;
+            .context("could not load Cargo configuration")?;
         Ok(result)
     }

@@ -1206,7 +1206,7 @@ impl GlobalContext {
                 })?;
                 Ok(())
             })
-            .with_context(|| "could not load Cargo configuration")?;
+            .context("could not load Cargo configuration")?;

         match cfg {
             CV::Table(map, _) => Ok(map),
@@ -1495,7 +1495,7 @@ impl GlobalContext {
         };
         let tmp_table = self
             .load_includes(tmp_table, &mut HashSet::new(), WhyLoad::Cli)
-            .with_context(|| "failed to load --config include".to_string())?;
+            .context("failed to load --config include".to_string())?;
         loaded_args
             .merge(tmp_table, true)
             .with_context(|| format!("failed to merge --config argument `{arg}`"))?;
@@ -268,7 +268,7 @@ pub struct StartedServer {
 impl RustfixDiagnosticServer {
     pub fn new() -> Result<Self, Error> {
         let listener = TcpListener::bind(&LOCALHOST[..])
-            .with_context(|| "failed to bind TCP listener to manage locking")?;
+            .context("failed to bind TCP listener to manage locking")?;
         let addr = listener.local_addr()?;

         Ok(RustfixDiagnosticServer { listener, addr })
@@ -47,7 +47,7 @@ struct ServerClient {
 impl LockServer {
     pub fn new() -> Result<LockServer, Error> {
         let listener = TcpListener::bind(&LOCALHOST[..])
-            .with_context(|| "failed to bind TCP listener to manage locking")?;
+            .context("failed to bind TCP listener to manage locking")?;
         let addr = listener.local_addr()?;
         Ok(LockServer {
             listener,
@@ -159,15 +159,15 @@ impl Drop for LockServerStarted {
 impl LockServerClient {
     pub fn lock(addr: &SocketAddr, name: impl AsRef<[u8]>) -> Result<LockServerClient, Error> {
         let mut client =
-            TcpStream::connect(&addr).with_context(|| "failed to connect to parent lock server")?;
+            TcpStream::connect(&addr).context("failed to connect to parent lock server")?;
         client
             .write_all(name.as_ref())
             .and_then(|_| client.write_all(b"\n"))
-            .with_context(|| "failed to write to lock server")?;
+            .context("failed to write to lock server")?;
         let mut buf = [0];
         client
             .read_exact(&mut buf)
-            .with_context(|| "failed to acquire lock")?;
+            .context("failed to acquire lock")?;
         Ok(LockServerClient { _socket: client })
     }
 }
@@ -1131,7 +1131,7 @@ pub fn to_real_manifest(
     {
         let edition: Edition = edition
             .parse()
-            .with_context(|| "failed to parse the `edition` key")?;
+            .context("failed to parse the `edition` key")?;
         if let Some(pkg_msrv) = &rust_version {
             if let Some(edition_msrv) = edition.first_version() {
                 let edition_msrv = RustVersion::try_from(edition_msrv).unwrap();
@@ -853,7 +853,7 @@ fn configure(toml: &TomlTarget, target: &mut Target) -> CargoResult<()> {
         target.set_edition(
             edition
                 .parse()
-                .with_context(|| "failed to parse the `edition` key")?,
+                .context("failed to parse the `edition` key")?,
         );
     }
     Ok(())