Auto merge of #6005 - alexcrichton:download-parallel, r=ehuss

Download crates in parallel with HTTP/2

This PR revives some of the work of https://github.com/rust-lang/cargo/pull/5161 by refactoring Cargo to make it much easier to add parallel downloads, and then does just that using the `curl` crate's new `http2` feature, which compiles `nghttp2` as a backend.

The primary refactoring done here is to remove the concept of "download this one package" from deep within a `Source`. A `Source` still has a `download` method, but it is now largely non-blocking: if a crate needs to be downloaded, the method immediately returns the information needed to fetch it. The `PackageSet` abstraction is now the central location for all parallel downloads, and all of its users have been refactored to be amenable to parallel downloads.
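Concretely, the new `Source` contract (condensed from the diff below) looks like this: `download` either hands back a ready package or says what to fetch, and `finish_download` turns the fetched bytes into a `Package`:

```rust
/// What `Source::download` now returns: either the package itself, or
/// the information `PackageSet` needs to download it in parallel.
pub enum MaybePackage {
    Ready(Package),
    Download {
        url: String,        // where to fetch the crate from
        descriptor: String, // human-readable name for progress output
    },
}

pub trait Source {
    /// Largely non-blocking: returns immediately with either the package
    /// or the URL/descriptor needed to download it.
    fn download(&mut self, package: &PackageId) -> CargoResult<MaybePackage>;

    /// Called once the raw bytes have been fetched (by `PackageSet`'s
    /// shared curl `Multi` handle) to produce the actual `Package`.
    fn finish_download(&mut self, package: &PackageId, contents: Vec<u8>)
        -> CargoResult<Package>;

    // ... remaining methods unchanged ...
}
```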

Many more details are in the commits...
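As a quick orientation before the diff, here is a minimal sketch of how a caller drives parallel downloads through the new `PackageSet` API; it is essentially what the new `get_many` below does (the `fetch_all` name is just for illustration):

```rust
// Sketch only: mirrors `PackageSet::get_many` from this PR.
fn fetch_all<'a, 'cfg>(
    set: &'a PackageSet<'cfg>,
    ids: &[PackageId],
) -> CargoResult<Vec<&'a Package>> {
    let mut pkgs = Vec::new();
    // Claim the (single) download session for this `PackageSet`.
    let mut downloads = set.enable_download()?;
    for id in ids {
        // `start` is non-blocking: `Some(pkg)` if the package is already
        // cached or ready, `None` if it was queued onto the curl `Multi`.
        pkgs.extend(downloads.start(id)?);
    }
    // Block until every queued transfer completes; retries and progress
    // reporting are handled inside `wait`.
    while downloads.remaining() > 0 {
        pkgs.push(downloads.wait()?);
    }
    // (The real `get_many` also marks the session successful so the
    // `Drop` impl prints a download summary.)
    Ok(pkgs)
}
```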
This commit is contained in:
bors 2018-09-18 22:21:31 +00:00
commit 57ac39287b
42 changed files with 1264 additions and 448 deletions

View File

@ -18,10 +18,11 @@ path = "src/cargo/lib.rs"
[dependencies]
atty = "0.2"
bytesize = "1.0"
crates-io = { path = "src/crates-io", version = "0.20" }
crossbeam-utils = "0.5"
crypto-hash = "0.3.1"
curl = "0.4.13"
curl = { version = "0.4.17", features = ['http2'] }
env_logger = "0.5.11"
failure = "0.1.2"
filetime = "0.2"

View File

@ -5,7 +5,7 @@ use std::str;
use core::profiles::Profiles;
use core::{Dependency, Workspace};
use core::{Package, PackageId, PackageSet, Resolve};
use core::{PackageId, PackageSet, Resolve};
use util::errors::CargoResult;
use util::{profile, Cfg, CfgExpr, Config, Rustc};
@ -107,11 +107,6 @@ impl<'a, 'cfg> BuildContext<'a, 'cfg> {
platform.matches(name, info.cfg())
}
/// Gets a package for the given package id.
pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
self.packages.get(id)
}
/// Get the user-specified linker for a particular host or target
pub fn linker(&self, kind: Kind) -> Option<&Path> {
self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
@ -198,18 +193,6 @@ impl<'a, 'cfg> BuildContext<'a, 'cfg> {
pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec<String>> {
self.extra_compiler_args.get(unit)
}
/// Return the list of filenames read by cargo to generate the BuildContext
/// (all Cargo.toml, etc).
pub fn inputs(&self) -> CargoResult<Vec<PathBuf>> {
let mut inputs = Vec::new();
for id in self.packages.package_ids() {
let pkg = self.get_package(id)?;
inputs.push(pkg.manifest_path().to_path_buf());
}
inputs.sort();
Ok(inputs)
}
}
/// Information required to build for a target

View File

@ -99,6 +99,7 @@ pub struct Context<'a, 'cfg: 'a> {
primary_packages: HashSet<&'a PackageId>,
unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
files: Option<CompilationFiles<'a, 'cfg>>,
package_cache: HashMap<&'a PackageId, &'a Package>,
}
impl<'a, 'cfg> Context<'a, 'cfg> {
@ -133,6 +134,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
primary_packages: HashSet::new(),
unit_dependencies: HashMap::new(),
files: None,
package_cache: HashMap::new(),
})
}
@ -165,7 +167,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
queue.execute(&mut self, &mut plan)?;
if build_plan {
plan.set_inputs(self.bcx.inputs()?);
plan.set_inputs(self.inputs()?);
plan.output_plan();
}
@ -326,7 +328,12 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
};
self.primary_packages.extend(units.iter().map(|u| u.pkg.package_id()));
build_unit_dependencies(units, self.bcx, &mut self.unit_dependencies)?;
build_unit_dependencies(
units,
self.bcx,
&mut self.unit_dependencies,
&mut self.package_cache,
)?;
self.build_used_in_plugin_map(units)?;
let files = CompilationFiles::new(
units,
@ -495,6 +502,25 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
pub fn is_primary_package(&self, unit: &Unit<'a>) -> bool {
self.primary_packages.contains(unit.pkg.package_id())
}
/// Gets a package for the given package id.
pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
self.package_cache.get(id)
.cloned()
.ok_or_else(|| format_err!("failed to find {}", id))
}
/// Return the list of filenames read by cargo to generate the BuildContext
/// (all Cargo.toml, etc).
pub fn inputs(&self) -> CargoResult<Vec<PathBuf>> {
let mut inputs = Vec::new();
for id in self.bcx.packages.package_ids() {
let pkg = self.get_package(id)?;
inputs.push(pkg.manifest_path().to_path_buf());
}
inputs.sort();
Ok(inputs)
}
}
#[derive(Default)]

View File

@ -15,22 +15,44 @@
//! (for example, with and without tests), so we actually build a dependency
//! graph of `Unit`s, which capture these properties.
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use CargoResult;
use core::dependency::Kind as DepKind;
use core::profiles::ProfileFor;
use core::{Package, Target};
use core::{Package, Target, PackageId};
use core::package::Downloads;
use super::{BuildContext, CompileMode, Kind, Unit};
struct State<'a: 'tmp, 'cfg: 'a, 'tmp> {
bcx: &'tmp BuildContext<'a, 'cfg>,
deps: &'tmp mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
pkgs: RefCell<&'tmp mut HashMap<&'a PackageId, &'a Package>>,
waiting_on_download: HashSet<&'a PackageId>,
downloads: Downloads<'a, 'cfg>,
}
pub fn build_unit_dependencies<'a, 'cfg>(
roots: &[Unit<'a>],
bcx: &BuildContext<'a, 'cfg>,
deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
pkgs: &mut HashMap<&'a PackageId, &'a Package>,
) -> CargoResult<()> {
assert!(deps.is_empty(), "can only build unit deps once");
let mut state = State {
bcx,
deps,
pkgs: RefCell::new(pkgs),
waiting_on_download: HashSet::new(),
downloads: bcx.packages.enable_download()?,
};
loop {
for unit in roots.iter() {
state.get(unit.pkg.package_id())?;
// Dependencies of tests/benches should not have `panic` set.
// We check the global test mode to see if we are running in `cargo
// test` in which case we ensure all dependencies have `panic`
@ -42,19 +64,26 @@ pub fn build_unit_dependencies<'a, 'cfg>(
} else {
ProfileFor::Any
};
deps_of(unit, bcx, deps, profile_for)?;
deps_of(unit, &mut state, profile_for)?;
}
trace!("ALL UNIT DEPENDENCIES {:#?}", deps);
connect_run_custom_build_deps(bcx, deps);
if state.waiting_on_download.len() > 0 {
state.finish_some_downloads()?;
state.deps.clear();
} else {
break
}
}
trace!("ALL UNIT DEPENDENCIES {:#?}", state.deps);
connect_run_custom_build_deps(&mut state);
Ok(())
}
fn deps_of<'a, 'cfg>(
fn deps_of<'a, 'cfg, 'tmp>(
unit: &Unit<'a>,
bcx: &BuildContext<'a, 'cfg>,
deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
state: &mut State<'a, 'cfg, 'tmp>,
profile_for: ProfileFor,
) -> CargoResult<()> {
// Currently the `deps` map does not include `profile_for`. This should
@ -63,12 +92,12 @@ fn deps_of<'a, 'cfg>(
// `TestDependency`. `CustomBuild` should also be fine since if the
// requested unit's settings are the same as `Any`, `CustomBuild` can't
// affect anything else in the hierarchy.
if !deps.contains_key(unit) {
let unit_deps = compute_deps(unit, bcx, profile_for)?;
if !state.deps.contains_key(unit) {
let unit_deps = compute_deps(unit, state, profile_for)?;
let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect();
deps.insert(*unit, to_insert);
state.deps.insert(*unit, to_insert);
for (unit, profile_for) in unit_deps {
deps_of(&unit, bcx, deps, profile_for)?;
deps_of(&unit, state, profile_for)?;
}
}
Ok(())
@ -78,21 +107,22 @@ fn deps_of<'a, 'cfg>(
/// for that package.
/// This returns a vec of `(Unit, ProfileFor)` pairs. The `ProfileFor`
/// is the profile type that should be used for dependencies of the unit.
fn compute_deps<'a, 'cfg>(
fn compute_deps<'a, 'cfg, 'tmp>(
unit: &Unit<'a>,
bcx: &BuildContext<'a, 'cfg>,
state: &mut State<'a, 'cfg, 'tmp>,
profile_for: ProfileFor,
) -> CargoResult<Vec<(Unit<'a>, ProfileFor)>> {
if unit.mode.is_run_custom_build() {
return compute_deps_custom_build(unit, bcx);
return compute_deps_custom_build(unit, state.bcx);
} else if unit.mode.is_doc() && !unit.mode.is_any_test() {
// Note: This does not include Doctest.
return compute_deps_doc(unit, bcx);
return compute_deps_doc(unit, state);
}
let bcx = state.bcx;
let id = unit.pkg.package_id();
let deps = bcx.resolve.deps(id);
let mut ret = deps.filter(|&(_id, deps)| {
let deps = bcx.resolve.deps(id)
.filter(|&(_id, deps)| {
assert!(!deps.is_empty());
deps.iter().any(|dep| {
// If this target is a build command, then we only want build
@ -104,8 +134,10 @@ fn compute_deps<'a, 'cfg>(
// If this dependency is *not* a transitive dependency, then it
// only applies to test/example targets
if !dep.is_transitive() && !unit.target.is_test() && !unit.target.is_example()
&& !unit.mode.is_any_test()
if !dep.is_transitive() &&
!unit.target.is_test() &&
!unit.target.is_example() &&
!unit.mode.is_any_test()
{
return false;
}
@ -118,7 +150,9 @@ fn compute_deps<'a, 'cfg>(
// If the dependency is optional, then we're only activating it
// if the corresponding feature was activated
if dep.is_optional() && !bcx.resolve.features(id).contains(&*dep.name_in_toml()) {
if dep.is_optional() &&
!bcx.resolve.features(id).contains(&*dep.name_in_toml())
{
return false;
}
@ -126,15 +160,29 @@ fn compute_deps<'a, 'cfg>(
// actually used!
true
})
}).filter_map(|(id, _)| match bcx.get_package(id) {
Ok(pkg) => pkg.targets().iter().find(|t| t.is_lib()).map(|t| {
let mode = check_or_build_mode(unit.mode, t);
let unit = new_unit(bcx, pkg, t, profile_for, unit.kind.for_target(t), mode);
Ok((unit, profile_for))
}),
Err(e) => Some(Err(e)),
})
.collect::<CargoResult<Vec<_>>>()?;
});
let mut ret = Vec::new();
for (id, _) in deps {
let pkg = match state.get(id)? {
Some(pkg) => pkg,
None => continue,
};
let lib = match pkg.targets().iter().find(|t| t.is_lib()) {
Some(t) => t,
None => continue,
};
let mode = check_or_build_mode(unit.mode, lib);
let unit = new_unit(
bcx,
pkg,
lib,
profile_for,
unit.kind.for_target(lib),
mode,
);
ret.push((unit, profile_for));
}
// If this target is a build script, then what we've collected so far is
// all we need. If this isn't a build script, then it depends on the
@ -221,10 +269,11 @@ fn compute_deps_custom_build<'a, 'cfg>(
}
/// Returns the dependencies necessary to document a package
fn compute_deps_doc<'a, 'cfg>(
fn compute_deps_doc<'a, 'cfg, 'tmp>(
unit: &Unit<'a>,
bcx: &BuildContext<'a, 'cfg>,
state: &mut State<'a, 'cfg, 'tmp>,
) -> CargoResult<Vec<(Unit<'a>, ProfileFor)>> {
let bcx = state.bcx;
let deps = bcx.resolve
.deps(unit.pkg.package_id())
.filter(|&(_id, deps)| {
@ -232,15 +281,17 @@ fn compute_deps_doc<'a, 'cfg>(
DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind),
_ => false,
})
})
.map(|(id, _deps)| bcx.get_package(id));
});
// To document a library, we depend on dependencies actually being
// built. If we're documenting *all* libraries, then we also depend on
// the documentation of the library being built.
let mut ret = Vec::new();
for dep in deps {
let dep = dep?;
for (id, _deps) in deps {
let dep = match state.get(id)? {
Some(dep) => dep,
None => continue,
};
let lib = match dep.targets().iter().find(|t| t.is_lib()) {
Some(lib) => lib,
None => continue,
@ -288,7 +339,14 @@ fn maybe_lib<'a>(
) -> Option<(Unit<'a>, ProfileFor)> {
unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| {
let mode = check_or_build_mode(unit.mode, t);
let unit = new_unit(bcx, unit.pkg, t, profile_for, unit.kind.for_target(t), mode);
let unit = new_unit(
bcx,
unit.pkg,
t,
profile_for,
unit.kind.for_target(t),
mode,
);
(unit, profile_for)
})
}
@ -373,10 +431,7 @@ fn new_unit<'a>(
///
/// Here we take the entire `deps` map and add more dependencies from execution
/// of one build script to execution of another build script.
fn connect_run_custom_build_deps<'a>(
bcx: &BuildContext,
deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
) {
fn connect_run_custom_build_deps(state: &mut State) {
let mut new_deps = Vec::new();
{
@ -386,7 +441,7 @@ fn connect_run_custom_build_deps<'a>(
// have the build script as the key and the library would be in the
// value's set.
let mut reverse_deps = HashMap::new();
for (unit, deps) in deps.iter() {
for (unit, deps) in state.deps.iter() {
for dep in deps {
if dep.mode == CompileMode::RunCustomBuild {
reverse_deps.entry(dep)
@ -405,7 +460,7 @@ fn connect_run_custom_build_deps<'a>(
// `links`, then we depend on that package's build script! Here we use
// `dep_build_script` to manufacture an appropriate build script unit to
// depend on.
for unit in deps.keys().filter(|k| k.mode == CompileMode::RunCustomBuild) {
for unit in state.deps.keys().filter(|k| k.mode == CompileMode::RunCustomBuild) {
let reverse_deps = match reverse_deps.get(unit) {
Some(set) => set,
None => continue,
@ -413,13 +468,13 @@ fn connect_run_custom_build_deps<'a>(
let to_add = reverse_deps
.iter()
.flat_map(|reverse_dep| deps[reverse_dep].iter())
.flat_map(|reverse_dep| state.deps[reverse_dep].iter())
.filter(|other| {
other.pkg != unit.pkg &&
other.target.linkable() &&
other.pkg.manifest().links().is_some()
})
.filter_map(|other| dep_build_script(other, bcx).map(|p| p.0))
.filter_map(|other| dep_build_script(other, state.bcx).map(|p| p.0))
.collect::<HashSet<_>>();
if !to_add.is_empty() {
@ -430,6 +485,50 @@ fn connect_run_custom_build_deps<'a>(
// And finally, add in all the missing dependencies!
for (unit, new_deps) in new_deps {
deps.get_mut(&unit).unwrap().extend(new_deps);
state.deps.get_mut(&unit).unwrap().extend(new_deps);
}
}
impl<'a, 'cfg, 'tmp> State<'a, 'cfg, 'tmp> {
fn get(&mut self, id: &'a PackageId) -> CargoResult<Option<&'a Package>> {
let mut pkgs = self.pkgs.borrow_mut();
if let Some(pkg) = pkgs.get(id) {
return Ok(Some(pkg))
}
if !self.waiting_on_download.insert(id) {
return Ok(None)
}
if let Some(pkg) = self.downloads.start(id)? {
pkgs.insert(id, pkg);
self.waiting_on_download.remove(id);
return Ok(Some(pkg))
}
Ok(None)
}
/// Completes at least one download, possibly waiting for more to complete.
///
/// This function will block the current thread waiting for at least one
/// crate to finish downloading. The function may continue to download more
/// crates if it looks like there's a long enough queue of crates to keep
/// downloading. When only a handful of packages remain this function
/// returns, in the hope that we'll then be able to push more packages
/// into the download queue.
fn finish_some_downloads(&mut self) -> CargoResult<()> {
assert!(self.downloads.remaining() > 0);
loop {
let pkg = self.downloads.wait()?;
self.waiting_on_download.remove(pkg.package_id());
self.pkgs.borrow_mut().insert(pkg.package_id(), pkg);
// Arbitrarily choose that having 5 or more packages downloading
// concurrently is enough to "fill the network pipe". If we have
// fewer than that, recompute the whole unit dependency graph and
// try to find some more packages to download.
if self.downloads.remaining() < 5 {
break
}
}
Ok(())
}
}

View File

@ -14,6 +14,7 @@ use jobserver::{Acquired, HelperThread};
use core::profiles::Profile;
use core::{PackageId, Target, TargetKind};
use handle_error;
use util;
use util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder};
use util::{Config, DependencyQueue, Dirty, Fresh, Freshness};
use util::{Progress, ProgressStyle};
@ -368,16 +369,7 @@ impl<'a> JobQueue<'a> {
opt_type += " + debuginfo";
}
let time_elapsed = {
let duration = cx.bcx.config.creation_time().elapsed();
let secs = duration.as_secs();
if secs >= 60 {
format!("{}m {:02}s", secs / 60, secs % 60)
} else {
format!("{}.{:02}s", secs, duration.subsec_nanos() / 10_000_000)
}
};
let time_elapsed = util::elapsed(cx.bcx.config.creation_time().elapsed());
if self.queue.is_empty() {
let message = format!(
@ -535,7 +527,7 @@ impl<'a> Key<'a> {
fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) -> CargoResult<Vec<Key<'a>>> {
let unit = Unit {
pkg: cx.bcx.get_package(self.pkg)?,
pkg: cx.get_package(self.pkg)?,
target: self.target,
profile: self.profile,
kind: self.kind,

View File

@ -1,19 +1,27 @@
use std::cell::{Ref, RefCell};
use std::collections::HashMap;
use std::cell::{Ref, RefCell, Cell};
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::hash;
use std::mem;
use std::path::{Path, PathBuf};
use std::time::{Instant, Duration};
use bytesize::ByteSize;
use curl::easy::{Easy, HttpVersion};
use curl::multi::{Multi, EasyHandle};
use lazycell::LazyCell;
use semver::Version;
use serde::ser;
use toml;
use lazycell::LazyCell;
use core::{Dependency, Manifest, PackageId, SourceId, Target};
use core::{FeatureMap, SourceMap, Summary};
use core::source::MaybePackage;
use core::interning::InternedString;
use util::{internal, lev_distance, Config};
use util::errors::{CargoResult, CargoResultExt};
use ops;
use util::{self, internal, lev_distance, Config, Progress, ProgressStyle};
use util::errors::{CargoResult, CargoResultExt, HttpNot200};
use util::network::Retry;
/// Information about a package that is available somewhere in the file system.
///
@ -236,46 +244,491 @@ impl hash::Hash for Package {
}
}
#[derive(Debug)]
pub struct PackageSet<'cfg> {
packages: HashMap<PackageId, LazyCell<Package>>,
sources: RefCell<SourceMap<'cfg>>,
config: &'cfg Config,
multi: Multi,
downloading: Cell<bool>,
multiplexing: bool,
}
pub struct Downloads<'a, 'cfg: 'a> {
set: &'a PackageSet<'cfg>,
pending: HashMap<usize, (Download, EasyHandle)>,
pending_ids: HashSet<PackageId>,
results: Vec<(usize, CargoResult<()>)>,
next: usize,
retry: Retry<'cfg>,
progress: RefCell<Option<Progress<'cfg>>>,
downloads_finished: usize,
downloaded_bytes: u64,
largest: (u64, String),
start: Instant,
success: bool,
}
struct Download {
token: usize,
id: PackageId,
data: RefCell<Vec<u8>>,
url: String,
descriptor: String,
total: Cell<u64>,
current: Cell<u64>,
start: Instant,
}
impl<'cfg> PackageSet<'cfg> {
pub fn new(package_ids: &[PackageId], sources: SourceMap<'cfg>) -> PackageSet<'cfg> {
PackageSet {
pub fn new(
package_ids: &[PackageId],
sources: SourceMap<'cfg>,
config: &'cfg Config,
) -> CargoResult<PackageSet<'cfg>> {
// We've enabled the `http2` feature of `curl` in Cargo, so treat
// failures here as fatal, as they would indicate a build-time problem.
//
// Note that the multiplexing support is pretty new, so we're leaving it
// off by default temporarily.
//
// Also note that pipelining is disabled as curl authors have indicated
// that it's buggy, and we've empirically seen that it's buggy with HTTP
// proxies.
let mut multi = Multi::new();
let multiplexing = config.get::<Option<bool>>("http.multiplexing")?
.unwrap_or(false);
multi.pipelining(false, multiplexing)
.chain_err(|| "failed to enable multiplexing/pipelining in curl")?;
// let's not flood crates.io with connections
multi.set_max_host_connections(2)?;
Ok(PackageSet {
packages: package_ids
.iter()
.map(|id| (id.clone(), LazyCell::new()))
.collect(),
sources: RefCell::new(sources),
}
config,
multi,
downloading: Cell::new(false),
multiplexing,
})
}
pub fn package_ids<'a>(&'a self) -> Box<Iterator<Item = &'a PackageId> + 'a> {
Box::new(self.packages.keys())
}
pub fn get(&self, id: &PackageId) -> CargoResult<&Package> {
let slot = self.packages
.get(id)
.ok_or_else(|| internal(format!("couldn't find `{}` in package set", id)))?;
if let Some(pkg) = slot.borrow() {
return Ok(pkg);
pub fn enable_download<'a>(&'a self) -> CargoResult<Downloads<'a, 'cfg>> {
assert!(!self.downloading.replace(true));
Ok(Downloads {
start: Instant::now(),
set: self,
next: 0,
pending: HashMap::new(),
pending_ids: HashSet::new(),
results: Vec::new(),
retry: Retry::new(self.config)?,
progress: RefCell::new(Some(Progress::with_style(
"Downloading",
ProgressStyle::Ratio,
self.config,
))),
downloads_finished: 0,
downloaded_bytes: 0,
largest: (0, String::new()),
success: false,
})
}
let mut sources = self.sources.borrow_mut();
let source = sources
.get_mut(id.source_id())
.ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?;
let pkg = source
.download(id)
.chain_err(|| format_err!("unable to get packages from source"))?;
assert!(slot.fill(pkg).is_ok());
Ok(slot.borrow().unwrap())
pub fn get_one(&self, id: &PackageId) -> CargoResult<&Package> {
Ok(self.get_many(Some(id))?.remove(0))
}
pub fn get_many<'a>(&self, ids: impl IntoIterator<Item = &'a PackageId>)
-> CargoResult<Vec<&Package>>
{
let mut pkgs = Vec::new();
let mut downloads = self.enable_download()?;
for id in ids {
pkgs.extend(downloads.start(id)?);
}
while downloads.remaining() > 0 {
pkgs.push(downloads.wait()?);
}
downloads.success = true;
Ok(pkgs)
}
pub fn sources(&self) -> Ref<SourceMap<'cfg>> {
self.sources.borrow()
}
}
impl<'a, 'cfg> Downloads<'a, 'cfg> {
/// Starts to download the package for the `id` specified.
///
/// Returns `None` if the package is queued up for download and will
/// eventually be returned from `wait`. Returns `Some(pkg)` if
/// the package is ready and doesn't need to be downloaded.
pub fn start(&mut self, id: &PackageId) -> CargoResult<Option<&'a Package>> {
// First up see if we've already cached this package, in which case
// there's nothing to do.
let slot = self.set.packages
.get(id)
.ok_or_else(|| internal(format!("couldn't find `{}` in package set", id)))?;
if let Some(pkg) = slot.borrow() {
return Ok(Some(pkg));
}
// Ask the original source for this `PackageId` for the corresponding
// package. That may immediately come back and tell us that the package
// is ready, or it could tell us that it needs to be downloaded.
let mut sources = self.set.sources.borrow_mut();
let source = sources
.get_mut(id.source_id())
.ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?;
let pkg = source
.download(id)
.chain_err(|| format_err!("unable to get packages from source"))?;
let (url, descriptor) = match pkg {
MaybePackage::Ready(pkg) => {
debug!("{} doesn't need a download", id);
assert!(slot.fill(pkg).is_ok());
return Ok(Some(slot.borrow().unwrap()))
}
MaybePackage::Download { url, descriptor } => (url, descriptor),
};
// Ok we're going to download this crate, so let's set up all our
// internal state and hand off an `Easy` handle to our libcurl `Multi`
// handle. This won't actually start the transfer, but later it'll
// happen during `wait`.
let token = self.next;
self.next += 1;
debug!("downloading {} as {}", id, token);
assert!(self.pending_ids.insert(id.clone()));
let mut handle = ops::http_handle(self.set.config)?;
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?; // follow redirects
// Enable HTTP/2 to be used as it'll allow true multiplexing which makes
// downloads much faster. Currently Cargo requests the `http2` feature
// of the `curl` crate which means it should always be built in, so
// treat it as a fatal error if HTTP/2 support isn't found.
if self.set.multiplexing {
handle.http_version(HttpVersion::V2)
.chain_err(|| "failed to enable HTTP2, is curl not built right?")?;
}
// This is an option to `libcurl` which indicates that if there's a
// bunch of parallel requests to the same host they all wait until the
// pipelining status of the host is known. This means that we won't
// initiate dozens of connections to crates.io, but rather only one.
// Once the main one is opened we learn whether pipelining or
// multiplexing is possible with static.crates.io. All in all this
// reduces the number of connections made to a more manageable count.
handle.pipewait(true)?;
handle.write_function(move |buf| {
debug!("{} - {} bytes of data", token, buf.len());
tls::with(|downloads| {
if let Some(downloads) = downloads {
downloads.pending[&token].0.data
.borrow_mut()
.extend_from_slice(buf);
}
});
Ok(buf.len())
})?;
handle.progress(true)?;
handle.progress_function(move |dl_total, dl_cur, _, _| {
tls::with(|downloads| {
let downloads = match downloads {
Some(d) => d,
None => return false,
};
let dl = &downloads.pending[&token].0;
dl.total.set(dl_total as u64);
dl.current.set(dl_cur as u64);
downloads.tick(WhyTick::DownloadUpdate).is_ok()
})
})?;
// If the progress bar isn't enabled then it may be a while before the
// first crate finishes downloading so we inform immediately that we're
// downloading crates here.
if self.downloads_finished == 0 &&
self.pending.len() == 0 &&
!self.progress.borrow().as_ref().unwrap().is_enabled()
{
self.set.config.shell().status("Downloading", "crates ...")?;
}
let dl = Download {
token,
data: RefCell::new(Vec::new()),
id: id.clone(),
url,
descriptor,
total: Cell::new(0),
current: Cell::new(0),
start: Instant::now(),
};
self.enqueue(dl, handle)?;
self.tick(WhyTick::DownloadStarted)?;
Ok(None)
}
/// Returns the number of crates that are still downloading
pub fn remaining(&self) -> usize {
self.pending.len()
}
/// Blocks the current thread waiting for a package to finish downloading.
///
/// This method will wait for a previously enqueued package to finish
/// downloading and return a reference to it after it's done downloading.
///
/// # Panics
///
/// This function will panic if there are no remaining downloads.
pub fn wait(&mut self) -> CargoResult<&'a Package> {
let (dl, data) = loop {
assert_eq!(self.pending.len(), self.pending_ids.len());
let (token, result) = self.wait_for_curl()?;
debug!("{} finished with {:?}", token, result);
let (mut dl, handle) = self.pending.remove(&token)
.expect("got a token for a non-in-progress transfer");
let data = mem::replace(&mut *dl.data.borrow_mut(), Vec::new());
let mut handle = self.set.multi.remove(handle)?;
self.pending_ids.remove(&dl.id);
// Check if this was a spurious error. If it was a spurious error
// then we want to re-enqueue our request for another attempt and
// then we wait for another request to finish.
let ret = {
self.retry.try(|| {
result?;
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&dl.url);
return Err(HttpNot200 {
code,
url: url.to_string(),
}.into())
}
Ok(())
}).chain_err(|| {
format!("failed to download from `{}`", dl.url)
})?
};
match ret {
Some(()) => break (dl, data),
None => {
self.pending_ids.insert(dl.id.clone());
self.enqueue(dl, handle)?
}
}
};
// If the progress bar isn't enabled then we still want to provide some
// semblance of progress as crates finish downloading.
if !self.progress.borrow().as_ref().unwrap().is_enabled() {
self.set.config.shell().status("Downloaded", &dl.descriptor)?;
}
self.downloads_finished += 1;
self.downloaded_bytes += dl.total.get();
if dl.total.get() > self.largest.0 {
self.largest = (dl.total.get(), dl.id.name().to_string());
}
// We're about to synchronously extract the crate below. While we're
// doing that our download progress won't actually be updated, nor do we
// have a great view into the progress of the extraction. Let's prepare
// the user for this CPU-heavy step if it looks like it'll take some
// time.
if dl.total.get() < ByteSize::kb(400).0 {
self.tick(WhyTick::DownloadFinished)?;
} else {
self.tick(WhyTick::Extracting(&dl.id.name()))?;
}
// Inform the original source that the download is finished which
// should allow us to actually get the package and fill it in now.
let mut sources = self.set.sources.borrow_mut();
let source = sources
.get_mut(dl.id.source_id())
.ok_or_else(|| internal(format!("couldn't find source for `{}`", dl.id)))?;
let pkg = source.finish_download(&dl.id, data)?;
let slot = &self.set.packages[&dl.id];
assert!(slot.fill(pkg).is_ok());
Ok(slot.borrow().unwrap())
}
fn enqueue(&mut self, dl: Download, handle: Easy) -> CargoResult<()> {
let mut handle = self.set.multi.add(handle)?;
handle.set_token(dl.token)?;
self.pending.insert(dl.token, (dl, handle));
Ok(())
}
fn wait_for_curl(&mut self) -> CargoResult<(usize, CargoResult<()>)> {
// This is the main workhorse loop. We use libcurl's portable `wait`
// method to actually perform blocking. This isn't necessarily too
// efficient in terms of fd management, but we should only be juggling
// a few anyway.
//
// Here we start off by asking the `multi` handle to do some work via
// the `perform` method. This will actually do I/O work (nonblocking)
// and attempt to make progress. Afterwards we ask about the `messages`
// contained in the handle which will inform us if anything has finished
// transferring.
//
// If we've got a finished transfer after all that work we break out
// and process the finished transfer at the end. Otherwise we need to
// actually block waiting for I/O to happen, which we achieve with the
// `wait` method on `multi`.
loop {
let n = tls::set(self, || {
self.set.multi.perform()
.chain_err(|| "failed to perform http requests")
})?;
debug!("handles remaining: {}", n);
let results = &mut self.results;
let pending = &self.pending;
self.set.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let handle = &pending[&token].1;
if let Some(result) = msg.result_for(&handle) {
results.push((token, result.map_err(|e| e.into())));
} else {
debug!("message without a result (?)");
}
});
if let Some(pair) = results.pop() {
break Ok(pair)
}
assert!(self.pending.len() > 0);
self.set.multi.wait(&mut [], Duration::new(60, 0))
.chain_err(|| "failed to wait on curl `Multi`")?;
}
}
fn tick(&self, why: WhyTick) -> CargoResult<()> {
let mut progress = self.progress.borrow_mut();
let progress = progress.as_mut().unwrap();
if let WhyTick::DownloadUpdate = why {
if !progress.update_allowed() {
return Ok(())
}
}
let mut msg = format!("{} crates", self.pending.len());
match why {
WhyTick::Extracting(krate) => {
msg.push_str(&format!(", extracting {} ...", krate));
}
_ => {
let mut dur = Duration::new(0, 0);
let mut remaining = 0;
for (dl, _) in self.pending.values() {
dur += dl.start.elapsed();
// If the total/current numbers look off, just throw out the
// data point; it sounds like curl doesn't have the true
// information yet.
if dl.total.get() >= dl.current.get() {
remaining += dl.total.get() - dl.current.get();
}
}
if remaining > 0 && dur > Duration::from_millis(500) {
msg.push_str(&format!(", remaining bytes: {}", ByteSize(remaining)));
}
}
}
progress.print_now(&msg)
}
}
enum WhyTick<'a> {
DownloadStarted,
DownloadUpdate,
DownloadFinished,
Extracting(&'a str),
}
impl<'a, 'cfg> Drop for Downloads<'a, 'cfg> {
fn drop(&mut self) {
self.set.downloading.set(false);
let progress = self.progress.get_mut().take().unwrap();
// Don't print a download summary if we're not using a progress bar,
// we've already printed lots of `Downloading...` items.
if !progress.is_enabled() {
return
}
// If we didn't download anything, no need for a summary
if self.downloads_finished == 0 {
return
}
// If an error happened, let's not clutter up the output
if !self.success {
return
}
let mut status = format!("{} crates ({}) in {}",
self.downloads_finished,
ByteSize(self.downloaded_bytes),
util::elapsed(self.start.elapsed()));
if self.largest.0 > ByteSize::mb(1).0 {
status.push_str(&format!(
" (largest was `{}` at {})",
self.largest.1,
ByteSize(self.largest.0),
));
}
drop(self.set.config.shell().status("Downloaded", status));
}
}
mod tls {
use std::cell::Cell;
use super::Downloads;
thread_local!(static PTR: Cell<usize> = Cell::new(0));
pub(crate) fn with<R>(f: impl FnOnce(Option<&Downloads>) -> R) -> R {
let ptr = PTR.with(|p| p.get());
if ptr == 0 {
f(None)
} else {
unsafe {
f(Some(&*(ptr as *const Downloads)))
}
}
}
pub(crate) fn set<R>(dl: &Downloads, f: impl FnOnce() -> R) -> R {
struct Reset<'a, T: Copy + 'a>(&'a Cell<T>, T);
impl<'a, T: Copy> Drop for Reset<'a, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
PTR.with(|p| {
let _reset = Reset(p, p.get());
p.set(dl as *const Downloads as usize);
f()
})
}
}

View File

@ -37,6 +37,7 @@ pub trait Registry {
/// a `Source`. Each `Source` in the map has been updated (using network
/// operations if necessary) and is ready to be queried for packages.
pub struct PackageRegistry<'cfg> {
config: &'cfg Config,
sources: SourceMap<'cfg>,
// A list of sources which are considered "overrides" which take precedent
@ -81,6 +82,7 @@ impl<'cfg> PackageRegistry<'cfg> {
pub fn new(config: &'cfg Config) -> CargoResult<PackageRegistry<'cfg>> {
let source_config = SourceConfigMap::new(config)?;
Ok(PackageRegistry {
config,
sources: SourceMap::new(),
source_ids: HashMap::new(),
overrides: Vec::new(),
@ -92,9 +94,9 @@ impl<'cfg> PackageRegistry<'cfg> {
})
}
pub fn get(self, package_ids: &[PackageId]) -> PackageSet<'cfg> {
pub fn get(self, package_ids: &[PackageId]) -> CargoResult<PackageSet<'cfg>> {
trace!("getting packages; sources={}", self.sources.len());
PackageSet::new(package_ids, self.sources)
PackageSet::new(package_ids, self.sources, self.config)
}
fn ensure_loaded(&mut self, namespace: &SourceId, kind: Kind) -> CargoResult<()> {

View File

@ -49,7 +49,10 @@ pub trait Source {
/// The download method fetches the full package for each name and
/// version specified.
fn download(&mut self, package: &PackageId) -> CargoResult<Package>;
fn download(&mut self, package: &PackageId) -> CargoResult<MaybePackage>;
fn finish_download(&mut self, package: &PackageId, contents: Vec<u8>)
-> CargoResult<Package>;
/// Generates a unique string which represents the fingerprint of the
/// current state of the source.
@ -74,6 +77,14 @@ pub trait Source {
}
}
pub enum MaybePackage {
Ready(Package),
Download {
url: String,
descriptor: String,
}
}
impl<'a, T: Source + ?Sized + 'a> Source for Box<T> {
/// Forwards to `Source::supports_checksums`
fn supports_checksums(&self) -> bool {
@ -111,10 +122,14 @@ impl<'a, T: Source + ?Sized + 'a> Source for Box<T> {
}
/// Forwards to `Source::download`
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
fn download(&mut self, id: &PackageId) -> CargoResult<MaybePackage> {
(**self).download(id)
}
fn finish_download(&mut self, id: &PackageId, data: Vec<u8>) -> CargoResult<Package> {
(**self).finish_download(id, data)
}
/// Forwards to `Source::fingerprint`
fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {
(**self).fingerprint(pkg)
@ -126,6 +141,52 @@ impl<'a, T: Source + ?Sized + 'a> Source for Box<T> {
}
}
impl<'a, T: Source + ?Sized + 'a> Source for &'a mut T {
fn supports_checksums(&self) -> bool {
(**self).supports_checksums()
}
fn requires_precise(&self) -> bool {
(**self).requires_precise()
}
fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> {
(**self).query(dep, f)
}
fn fuzzy_query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> {
(**self).fuzzy_query(dep, f)
}
fn source_id(&self) -> &SourceId {
(**self).source_id()
}
fn replaced_source_id(&self) -> &SourceId {
(**self).replaced_source_id()
}
fn update(&mut self) -> CargoResult<()> {
(**self).update()
}
fn download(&mut self, id: &PackageId) -> CargoResult<MaybePackage> {
(**self).download(id)
}
fn finish_download(&mut self, id: &PackageId, data: Vec<u8>) -> CargoResult<Package> {
(**self).finish_download(id, data)
}
fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {
(**self).fingerprint(pkg)
}
fn verify(&self, pkg: &PackageId) -> CargoResult<()> {
(**self).verify(pkg)
}
}
/// A `HashMap` of `SourceId` -> `Box<Source>`
#[derive(Default)]
pub struct SourceMap<'src> {

View File

@ -15,6 +15,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(wrong_self_convention))] // perhaps Rc should be special cased in Clippy?
extern crate atty;
extern crate bytesize;
extern crate clap;
#[cfg(target_os = "macos")]
extern crate core_foundation;

View File

@ -52,7 +52,7 @@ pub fn clean(ws: &Workspace, opts: &CleanOptions) -> CargoResult<()> {
for spec in opts.spec.iter() {
// Translate the spec to a Package
let pkgid = resolve.query(spec)?;
let pkg = packages.get(pkgid)?;
let pkg = packages.get_one(pkgid)?;
// Generate all relevant `Unit` targets for this package
for target in pkg.targets() {

View File

@ -243,15 +243,19 @@ pub fn compile_ws<'a>(
let resolve = ops::resolve_ws_with_method(ws, source, method, &specs)?;
let (packages, resolve_with_overrides) = resolve;
let to_builds = specs
.iter()
.map(|p| {
let pkgid = p.query(resolve_with_overrides.iter())?;
let p = packages.get(pkgid)?;
p.manifest().print_teapot(ws.config());
Ok(p)
})
let to_build_ids = specs.iter()
.map(|s| s.query(resolve_with_overrides.iter()))
.collect::<CargoResult<Vec<_>>>()?;
let mut to_builds = packages.get_many(to_build_ids)?;
// The ordering here affects some error messages coming out of cargo, so
// let's be test and CLI friendly by always printing in the same order if
// there's an error.
to_builds.sort_by_key(|p| p.package_id());
for pkg in to_builds.iter() {
pkg.manifest().print_teapot(ws.config());
}
let (extra_args, extra_args_name) = match (target_rustc_args, target_rustdoc_args) {
(&Some(ref args), _) => (Some(args.clone()), "rustc"),

View File

@ -31,13 +31,10 @@ pub fn doc(ws: &Workspace, options: &DocOptions) -> CargoResult<()> {
)?;
let (packages, resolve_with_overrides) = resolve;
let pkgs = specs
.iter()
.map(|p| {
let pkgid = p.query(resolve_with_overrides.iter())?;
packages.get(pkgid)
})
let ids = specs.iter()
.map(|s| s.query(resolve_with_overrides.iter()))
.collect::<CargoResult<Vec<_>>>()?;
let pkgs = packages.get_many(ids)?;
let mut lib_names = HashMap::new();
let mut bin_names = HashMap::new();

View File

@ -27,13 +27,14 @@ pub fn fetch<'a>(
{
let mut fetched_packages = HashSet::new();
let mut deps_to_fetch = ws.members().map(|p| p.package_id()).collect::<Vec<_>>();
let mut to_download = Vec::new();
while let Some(id) = deps_to_fetch.pop() {
if !fetched_packages.insert(id) {
continue;
}
packages.get(id)?;
to_download.push(id.clone());
let deps = resolve.deps(id)
.filter(|&(_id, deps)| {
deps.iter()
@ -57,6 +58,7 @@ pub fn fetch<'a>(
.map(|(id, _deps)| id);
deps_to_fetch.extend(deps);
}
packages.get_many(&to_download)?;
}
Ok((resolve, packages))

View File

@ -12,6 +12,8 @@ use toml;
use core::{Dependency, Edition, Package, PackageIdSpec, Source, SourceId};
use core::{PackageId, Workspace};
use core::source::SourceMap;
use core::package::PackageSet;
use core::compiler::{DefaultExecutor, Executor};
use ops::{self, CompileFilter};
use sources::{GitSource, PathSource, SourceConfigMap};
@ -499,22 +501,28 @@ where
source.source_id(),
)?;
let deps = source.query_vec(&dep)?;
match deps.iter().map(|p| p.package_id()).max() {
Some(pkgid) => {
let pkg = source.download(pkgid)?;
Ok((pkg, Box::new(source)))
}
let pkgid = match deps.iter().map(|p| p.package_id()).max() {
Some(pkgid) => pkgid,
None => {
let vers_info = vers.map(|v| format!(" with version `{}`", v))
.unwrap_or_default();
Err(format_err!(
bail!(
"could not find `{}` in {}{}",
name,
source.source_id(),
vers_info
))
}
)
}
};
let pkg = {
let mut map = SourceMap::new();
map.insert(Box::new(&mut source));
PackageSet::new(&[pkgid.clone()], map, config)?
.get_one(&pkgid)?
.clone()
};
Ok((pkg, Box::new(source)))
}
None => {
let candidates = list_all(&mut source)?;

View File

@ -1,7 +1,9 @@
use std::collections::HashMap;
use serde::ser;
use core::resolver::Resolve;
use core::{Package, PackageId, Workspace, PackageSet};
use core::{Package, PackageId, Workspace};
use ops::{self, Packages};
use util::CargoResult;
@ -18,7 +20,7 @@ pub struct OutputMetadataOptions {
/// Loads the manifest, resolves the dependencies of the project to the concrete
/// used versions - considering overrides - and writes all dependencies in a JSON
/// format to stdout.
pub fn output_metadata<'a>(ws: &'a Workspace, opt: &OutputMetadataOptions) -> CargoResult<ExportInfo<'a>> {
pub fn output_metadata(ws: &Workspace, opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
if opt.version != VERSION {
bail!(
"metadata version {} not supported, only {} is currently supported",
@ -33,7 +35,7 @@ pub fn output_metadata<'a>(ws: &'a Workspace, opt: &OutputMetadataOptions) -> Ca
}
}
fn metadata_no_deps<'a>(ws: &'a Workspace, _opt: &OutputMetadataOptions) -> CargoResult<ExportInfo<'a>> {
fn metadata_no_deps(ws: &Workspace, _opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
Ok(ExportInfo {
packages: ws.members().cloned().collect(),
workspace_members: ws.members().map(|pkg| pkg.package_id().clone()).collect(),
@ -44,9 +46,9 @@ fn metadata_no_deps<'a>(ws: &'a Workspace, _opt: &OutputMetadataOptions) -> Carg
})
}
fn metadata_full<'a>(ws: &'a Workspace, opt: &OutputMetadataOptions) -> CargoResult<ExportInfo<'a>> {
fn metadata_full(ws: &Workspace, opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
let specs = Packages::All.to_package_id_specs(ws)?;
let deps = ops::resolve_ws_precisely(
let (package_set, resolve) = ops::resolve_ws_precisely(
ws,
None,
&opt.features,
@ -54,18 +56,16 @@ fn metadata_full<'a>(ws: &'a Workspace, opt: &OutputMetadataOptions) -> CargoRes
opt.no_default_features,
&specs,
)?;
let (package_set, resolve) = deps;
let packages = package_set
.package_ids()
.map(|i| package_set.get(i).map(|p| p.clone()))
.collect::<CargoResult<Vec<_>>>()?;
let mut packages = HashMap::new();
for pkg in package_set.get_many(package_set.package_ids())? {
packages.insert(pkg.package_id().clone(), pkg.clone());
}
Ok(ExportInfo {
packages,
packages: packages.values().map(|p| (*p).clone()).collect(),
workspace_members: ws.members().map(|pkg| pkg.package_id().clone()).collect(),
resolve: Some(MetadataResolve {
resolve: (package_set, resolve),
resolve: (packages, resolve),
root: ws.current_opt().map(|pkg| pkg.package_id().clone()),
}),
target_directory: ws.target_dir().display().to_string(),
@ -75,10 +75,10 @@ fn metadata_full<'a>(ws: &'a Workspace, opt: &OutputMetadataOptions) -> CargoRes
}
#[derive(Serialize)]
pub struct ExportInfo<'a> {
pub struct ExportInfo {
packages: Vec<Package>,
workspace_members: Vec<PackageId>,
resolve: Option<MetadataResolve<'a>>,
resolve: Option<MetadataResolve>,
target_directory: String,
version: u32,
workspace_root: String,
@ -88,13 +88,13 @@ pub struct ExportInfo<'a> {
/// The one from lockfile does not fit because it uses a non-standard
/// format for `PackageId`s
#[derive(Serialize)]
struct MetadataResolve<'a> {
struct MetadataResolve {
#[serde(rename = "nodes", serialize_with = "serialize_resolve")]
resolve: (PackageSet<'a>, Resolve),
resolve: (HashMap<PackageId, Package>, Resolve),
root: Option<PackageId>,
}
fn serialize_resolve<S>((package_set, resolve): &(PackageSet, Resolve), s: S) -> Result<S::Ok, S::Error>
fn serialize_resolve<S>((packages, resolve): &(HashMap<PackageId, Package>, Resolve), s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
@ -119,7 +119,7 @@ where
dependencies: resolve.deps(id).map(|(pkg, _deps)| pkg).collect(),
deps: resolve.deps(id)
.map(|(pkg, _deps)| {
let name = package_set.get(pkg).ok()
let name = packages.get(pkg)
.and_then(|pkg| pkg.targets().iter().find(|t| t.is_lib()))
.and_then(|lib_target| {
resolve.extern_crate_name(id, pkg, lib_target).ok()

View File

@ -16,7 +16,7 @@ use util::profile;
pub fn resolve_ws<'a>(ws: &Workspace<'a>) -> CargoResult<(PackageSet<'a>, Resolve)> {
let mut registry = PackageRegistry::new(ws.config())?;
let resolve = resolve_with_registry(ws, &mut registry, true)?;
let packages = get_resolved_packages(&resolve, registry);
let packages = get_resolved_packages(&resolve, registry)?;
Ok((packages, resolve))
}
@ -96,7 +96,7 @@ pub fn resolve_ws_with_method<'a>(
true,
)?;
let packages = get_resolved_packages(&resolved_with_overrides, registry);
let packages = get_resolved_packages(&resolved_with_overrides, registry)?;
Ok((packages, resolved_with_overrides))
}
@ -374,7 +374,7 @@ pub fn add_overrides<'a>(
pub fn get_resolved_packages<'a>(
resolve: &Resolve,
registry: PackageRegistry<'a>,
) -> PackageSet<'a> {
) -> CargoResult<PackageSet<'a>> {
let ids: Vec<PackageId> = resolve.iter().cloned().collect();
registry.get(&ids)
}

View File

@ -9,6 +9,7 @@ use hex;
use serde_json;
use core::{Dependency, Package, PackageId, Source, SourceId, Summary};
use core::source::MaybePackage;
use sources::PathSource;
use util::{Config, Sha256};
use util::errors::{CargoResult, CargoResultExt};
@ -150,14 +151,19 @@ impl<'cfg> Source for DirectorySource<'cfg> {
Ok(())
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
fn download(&mut self, id: &PackageId) -> CargoResult<MaybePackage> {
self.packages
.get(id)
.map(|p| &p.0)
.cloned()
.map(MaybePackage::Ready)
.ok_or_else(|| format_err!("failed to find package with id: {}", id))
}
fn finish_download(&mut self, _id: &PackageId, _data: Vec<u8>) -> CargoResult<Package> {
panic!("no downloads to do")
}
fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {
Ok(pkg.package_id().version().to_string())
}

View File

@ -2,7 +2,7 @@ use std::fmt::{self, Debug, Formatter};
use url::Url;
use core::source::{Source, SourceId};
use core::source::{Source, SourceId, MaybePackage};
use core::GitReference;
use core::{Dependency, Package, PackageId, Summary};
use util::Config;
@ -210,7 +210,7 @@ impl<'cfg> Source for GitSource<'cfg> {
self.path_source.as_mut().unwrap().update()
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
fn download(&mut self, id: &PackageId) -> CargoResult<MaybePackage> {
trace!(
"getting packages for package id `{}` from `{:?}`",
id,
@ -222,6 +222,10 @@ impl<'cfg> Source for GitSource<'cfg> {
.download(id)
}
fn finish_download(&mut self, _id: &PackageId, _data: Vec<u8>) -> CargoResult<Package> {
panic!("no download should have started")
}
fn fingerprint(&self, _pkg: &Package) -> CargoResult<String> {
Ok(self.rev.as_ref().unwrap().to_string())
}

View File

@ -9,6 +9,7 @@ use ignore::Match;
use ignore::gitignore::GitignoreBuilder;
use core::{Dependency, Package, PackageId, Source, SourceId, Summary};
use core::source::MaybePackage;
use ops;
use util::{self, internal, CargoResult};
use util::paths;
@ -540,14 +541,19 @@ impl<'cfg> Source for PathSource<'cfg> {
Ok(())
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
fn download(&mut self, id: &PackageId) -> CargoResult<MaybePackage> {
trace!("getting packages; id={}", id);
let pkg = self.packages.iter().find(|pkg| pkg.package_id() == id);
pkg.cloned()
.map(MaybePackage::Ready)
.ok_or_else(|| internal(format!("failed to find {} in path source", id)))
}
fn finish_download(&mut self, _id: &PackageId, _data: Vec<u8>) -> CargoResult<Package> {
panic!("no download should have started")
}
fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {
let (max, max_path) = self.last_modified_file(pkg)?;
Ok(format!("{} ({})", max, max_path.display()))

View File

@ -4,10 +4,9 @@ use std::path::Path;
use core::PackageId;
use hex;
use sources::registry::{RegistryConfig, RegistryData};
use util::FileLock;
use sources::registry::{RegistryConfig, RegistryData, MaybeLock};
use util::paths;
use util::{Config, Filesystem, Sha256};
use util::{Config, Filesystem, Sha256, FileLock};
use util::errors::{CargoResult, CargoResultExt};
pub struct LocalRegistry<'cfg> {
@ -70,7 +69,7 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult<FileLock> {
fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult<MaybeLock> {
let crate_file = format!("{}-{}.crate", pkg.name(), pkg.version());
let mut crate_file = self.root.open_ro(&crate_file, self.config, "crate file")?;
@ -78,7 +77,7 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
// checksum below as it is in theory already verified.
let dst = format!("{}-{}", pkg.name(), pkg.version());
if self.src_path.join(dst).into_path_unlocked().exists() {
return Ok(crate_file);
return Ok(MaybeLock::Ready(crate_file));
}
self.config.shell().status("Unpacking", pkg)?;
@ -102,6 +101,12 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
crate_file.seek(SeekFrom::Start(0))?;
Ok(crate_file)
Ok(MaybeLock::Ready(crate_file))
}
fn finish_download(&mut self, _pkg: &PackageId, _checksum: &str, _data: &[u8])
-> CargoResult<FileLock>
{
panic!("this source doesn't download")
}
}

View File

@ -170,6 +170,7 @@ use serde_json;
use tar::Archive;
use core::dependency::{Dependency, Kind};
use core::source::MaybePackage;
use core::{Package, PackageId, Source, SourceId, Summary};
use sources::PathSource;
use util::errors::CargoResultExt;
@ -347,13 +348,20 @@ pub trait RegistryData {
) -> CargoResult<()>;
fn config(&mut self) -> CargoResult<Option<RegistryConfig>>;
fn update_index(&mut self) -> CargoResult<()>;
fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult<FileLock>;
fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult<MaybeLock>;
fn finish_download(&mut self, pkg: &PackageId, checksum: &str, data: &[u8])
-> CargoResult<FileLock>;
fn is_crate_downloaded(&self, _pkg: &PackageId) -> bool {
true
}
}
pub enum MaybeLock {
Ready(FileLock),
Download { url: String, descriptor: String }
}
mod index;
mod local;
mod remote;
@ -462,6 +470,34 @@ impl<'cfg> RegistrySource<'cfg> {
index::RegistryIndex::new(&self.source_id, path, self.config, self.index_locked);
Ok(())
}
fn get_pkg(&mut self, package: &PackageId, path: FileLock) -> CargoResult<Package> {
let path = self
.unpack_package(package, &path)
.chain_err(|| internal(format!("failed to unpack package `{}`", package)))?;
let mut src = PathSource::new(&path, &self.source_id, self.config);
src.update()?;
let pkg = match src.download(package)? {
MaybePackage::Ready(pkg) => pkg,
MaybePackage::Download { .. } => unreachable!(),
};
// Unfortunately the index and the actual Cargo.toml in the index can
// differ due to historical Cargo bugs. To paper over these we trash the
// *summary* loaded from the Cargo.toml we just downloaded with the one
// we loaded from the index.
let summaries = self
.index
.summaries(package.name().as_str(), &mut *self.ops)?;
let summary = summaries
.iter()
.map(|s| &s.0)
.find(|s| s.package_id() == package)
.expect("summary not found");
let mut manifest = pkg.manifest().clone();
manifest.set_summary(summary.clone());
Ok(Package::new(manifest, pkg.manifest_path()))
}
}
impl<'cfg> Source for RegistrySource<'cfg> {
@ -526,31 +562,24 @@ impl<'cfg> Source for RegistrySource<'cfg> {
Ok(())
}
fn download(&mut self, package: &PackageId) -> CargoResult<Package> {
fn download(&mut self, package: &PackageId) -> CargoResult<MaybePackage> {
let hash = self.index.hash(package, &mut *self.ops)?;
let path = self.ops.download(package, &hash)?;
let path = self
.unpack_package(package, &path)
.chain_err(|| internal(format!("failed to unpack package `{}`", package)))?;
let mut src = PathSource::new(&path, &self.source_id, self.config);
src.update()?;
let pkg = src.download(package)?;
match self.ops.download(package, &hash)? {
MaybeLock::Ready(file) => {
self.get_pkg(package, file).map(MaybePackage::Ready)
}
MaybeLock::Download { url, descriptor } => {
Ok(MaybePackage::Download { url, descriptor })
}
}
}
// Unfortunately the index and the actual Cargo.toml in the index can
// differ due to historical Cargo bugs. To paper over these we trash the
// *summary* loaded from the Cargo.toml we just downloaded with the one
// we loaded from the index.
let summaries = self
.index
.summaries(package.name().as_str(), &mut *self.ops)?;
let summary = summaries
.iter()
.map(|s| &s.0)
.find(|s| s.package_id() == package)
.expect("summary not found");
let mut manifest = pkg.manifest().clone();
manifest.set_summary(summary.clone());
Ok(Package::new(manifest, pkg.manifest_path()))
fn finish_download(&mut self, package: &PackageId, data: Vec<u8>)
-> CargoResult<Package>
{
let hash = self.index.hash(package, &mut *self.ops)?;
let file = self.ops.finish_download(package, &hash, &data)?;
self.get_pkg(package, file)
}
fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {

View File

@ -14,10 +14,10 @@ use lazycell::LazyCell;
use core::{PackageId, SourceId};
use sources::git;
use sources::registry::{RegistryConfig, RegistryData, CRATE_TEMPLATE, INDEX_LOCK, VERSION_TEMPLATE};
use util::network;
use sources::registry::MaybeLock;
use util::{FileLock, Filesystem};
use util::{Config, Progress, Sha256, ToUrl};
use util::errors::{CargoResult, CargoResultExt, HttpNot200};
use util::{Config, Sha256};
use util::errors::{CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
@ -122,6 +122,10 @@ impl<'cfg> RemoteRegistry<'cfg> {
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
fn filename(&self, pkg: &PackageId) -> String {
format!("{}-{}.crate", pkg.name(), pkg.version())
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
@ -206,9 +210,8 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
fn download(&mut self, pkg: &PackageId, _checksum: &str) -> CargoResult<MaybeLock> {
let filename = self.filename(pkg);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
@ -216,18 +219,12 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
if let Ok(dst) = self.cache_path.open_ro(&filename, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst);
return Ok(MaybeLock::Ready(dst));
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst);
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.clone();
@ -235,56 +232,29 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
write!(url, "/{}/{}/download", CRATE_TEMPLATE, VERSION_TEMPLATE).unwrap();
}
let url = url.replace(CRATE_TEMPLATE, &*pkg.name())
.replace(VERSION_TEMPLATE, &pkg.version().to_string())
.to_url()?;
.replace(VERSION_TEMPLATE, &pkg.version().to_string());
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we resume either from the start or the middle on the
// next attempt
let url = url.to_string();
let mut handle = self.config.http()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
let mut pb = Progress::new("Fetch", self.config);
Ok(MaybeLock::Download { url, descriptor: pkg.to_string() })
}
fn finish_download(&mut self, pkg: &PackageId, checksum: &str, data: &[u8])
-> CargoResult<FileLock>
{
handle.progress(true)?;
let mut handle = handle.transfer();
handle.progress_function(|dl_total, dl_cur, _, _| {
pb.tick(dl_cur as usize, dl_total as usize).is_ok()
})?;
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform().chain_err(|| {
format!("failed to download from `{}`", url)
})?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(HttpNot200 {
code,
url: url.to_string(),
}.into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
let mut state = Sha256::new();
state.update(data);
if hex::encode(state.finish()) != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
let filename = self.filename(pkg);
let mut dst = self.cache_path.open_rw(&filename, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst);
}
dst.write_all(data)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}

View File

@ -1,4 +1,5 @@
use core::{Dependency, Package, PackageId, Source, SourceId, Summary};
use core::source::MaybePackage;
use util::errors::{CargoResult, CargoResultExt};
pub struct ReplacedSource<'cfg> {
@ -71,11 +72,26 @@ impl<'cfg> Source for ReplacedSource<'cfg> {
Ok(())
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
fn download(&mut self, id: &PackageId) -> CargoResult<MaybePackage> {
let id = id.with_source_id(&self.replace_with);
let pkg = self.inner
.download(&id)
.chain_err(|| format!("failed to download replaced source {}", self.to_replace))?;
Ok(match pkg {
MaybePackage::Ready(pkg) => {
MaybePackage::Ready(pkg.map_source(&self.replace_with, &self.to_replace))
}
other @ MaybePackage::Download { .. } => other,
})
}
fn finish_download(&mut self, id: &PackageId, data: Vec<u8>)
-> CargoResult<Package>
{
let id = id.with_source_id(&self.replace_with);
let pkg = self.inner
.finish_download(&id, data)
.chain_err(|| format!("failed to download replaced source {}", self.to_replace))?;
Ok(pkg.map_source(&self.replace_with, &self.to_replace))
}

View File

@ -1,3 +1,5 @@
use std::time::Duration;
pub use self::cfg::{Cfg, CfgExpr};
pub use self::config::{homedir, Config, ConfigValue};
pub use self::dependency_queue::{DependencyQueue, Dirty, Fresh, Freshness};
@ -46,3 +48,13 @@ mod read2;
mod progress;
mod lockserver;
pub mod diagnostic_server;
pub fn elapsed(duration: Duration) -> String {
let secs = duration.as_secs();
if secs >= 60 {
format!("{}m {:02}s", secs / 60, secs % 60)
} else {
format!("{}.{:02}s", secs, duration.subsec_nanos() / 10_000_000)
}
}
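For reference, the format this helper produces, as two worked examples (assuming the function exactly as written above):

use std::time::Duration;

assert_eq!(elapsed(Duration::new(125, 0)), "2m 05s");        // >= 60s: minutes plus zero-padded seconds
assert_eq!(elapsed(Duration::new(2, 500_000_000)), "2.50s"); // < 60s: seconds plus centiseconds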

View File

@ -6,6 +6,38 @@ use failure::Error;
use util::Config;
use util::errors::{CargoResult, HttpNot200};
pub struct Retry<'a> {
config: &'a Config,
remaining: u32,
}
impl<'a> Retry<'a> {
pub fn new(config: &'a Config) -> CargoResult<Retry<'a>> {
Ok(Retry {
config,
remaining: config.get::<Option<u32>>("net.retry")?.unwrap_or(2),
})
}
pub fn try<T>(&mut self, f: impl FnOnce() -> CargoResult<T>)
-> CargoResult<Option<T>>
{
match f() {
Err(ref e) if maybe_spurious(e) && self.remaining > 0 => {
let msg = format!(
"spurious network error ({} tries \
remaining): {}",
self.remaining, e
);
self.config.shell().warn(msg)?;
self.remaining -= 1;
Ok(None)
}
other => other.map(Some),
}
}
}
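The struct form exists so callers that cannot funnel all their work through a single closure — the parallel download loop, notably — can ask per-error whether to go around again. A minimal sketch of driving it by hand, where `fetch` stands in for any fallible network operation (hypothetical name):

let mut retry = Retry::new(config)?;
let value = loop {
    // `?` propagates non-spurious errors; Ok(None) means the error was
    // spurious, a warning was printed, and a retry is still available.
    if let Some(v) = retry.try(|| fetch())? {
        break v;
    }
};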
fn maybe_spurious(err: &Error) -> bool {
for e in err.iter_chain() {
if let Some(git_err) = e.downcast_ref::<git2::Error>() {
@ -48,21 +80,10 @@ pub fn with_retry<T, F>(config: &Config, mut callback: F) -> CargoResult<T>
where
F: FnMut() -> CargoResult<T>,
{
let mut remaining = config.get::<Option<u32>>("net.retry")?.unwrap_or(2);
let mut retry = Retry::new(config)?;
loop {
match callback() {
Ok(ret) => return Ok(ret),
Err(ref e) if maybe_spurious(e) && remaining > 0 => {
let msg = format!(
"spurious network error ({} tries \
remaining): {}",
remaining, e
);
config.shell().warn(msg)?;
remaining -= 1;
}
//todo impl from
Err(e) => return Err(e),
if let Some(ret) = retry.try(&mut callback)? {
return Ok(ret)
}
}
}

View File

@ -16,13 +16,17 @@ pub enum ProgressStyle {
Ratio,
}
struct Throttle {
first: bool,
last_update: Instant,
}
struct State<'cfg> {
config: &'cfg Config,
format: Format,
first: bool,
last_update: Instant,
name: String,
done: bool,
throttle: Throttle,
}
struct Format {
@ -50,10 +54,9 @@ impl<'cfg> Progress<'cfg> {
max_width: n,
max_print: 80,
},
first: true,
last_update: Instant::now(),
name: name.to_string(),
done: false,
throttle: Throttle::new(),
}),
}
}
@ -62,36 +65,19 @@ impl<'cfg> Progress<'cfg> {
self.state = None;
}
pub fn is_enabled(&self) -> bool {
self.state.is_some()
}
pub fn new(name: &str, cfg: &'cfg Config) -> Progress<'cfg> {
Self::with_style(name, ProgressStyle::Percentage, cfg)
}
pub fn tick(&mut self, cur: usize, max: usize) -> CargoResult<()> {
match self.state {
Some(ref mut s) => s.tick(cur, max, "", true),
None => Ok(()),
}
}
pub fn clear(&mut self) {
if let Some(ref mut s) = self.state {
s.clear();
}
}
pub fn tick_now(&mut self, cur: usize, max: usize, msg: &str) -> CargoResult<()> {
match self.state {
Some(ref mut s) => s.tick(cur, max, msg, false),
None => Ok(()),
}
}
}
impl<'cfg> State<'cfg> {
fn tick(&mut self, cur: usize, max: usize, msg: &str, throttle: bool) -> CargoResult<()> {
if self.done {
return Ok(());
}
let s = match &mut self.state {
Some(s) => s,
None => return Ok(()),
};
// Don't update too often as it can cause excessive performance loss
// just putting stuff onto the terminal. We also want to avoid
@ -105,36 +91,110 @@ impl<'cfg> State<'cfg> {
// 2. If we've drawn something, then we rate limit ourselves to only
// draw to the console every so often. Currently there's a 100ms
// delay between updates.
if throttle {
if !s.throttle.allowed() {
return Ok(())
}
s.tick(cur, max, "")
}
pub fn tick_now(&mut self, cur: usize, max: usize, msg: &str) -> CargoResult<()> {
match self.state {
Some(ref mut s) => s.tick(cur, max, msg),
None => Ok(()),
}
}
pub fn update_allowed(&mut self) -> bool {
match &mut self.state {
Some(s) => s.throttle.allowed(),
None => false,
}
}
pub fn print_now(&mut self, msg: &str) -> CargoResult<()> {
match &mut self.state {
Some(s) => s.print("", msg),
None => Ok(()),
}
}
pub fn clear(&mut self) {
if let Some(ref mut s) = self.state {
s.clear();
}
}
}
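The new `update_allowed`/`print_now` pair splits the throttle check from the drawing, so a caller can skip building a status string that the throttle would discard anyway; a hedged sketch, with `format_status` as a hypothetical helper:

if progress.update_allowed() {
    // Only pay for formatting when a redraw will actually happen.
    let msg = format_status(&downloads);
    progress.print_now(&msg)?;
}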
impl Throttle {
fn new() -> Throttle {
Throttle {
first: true,
last_update: Instant::now(),
}
}
fn allowed(&mut self) -> bool {
if self.first {
let delay = Duration::from_millis(500);
if self.last_update.elapsed() < delay {
return Ok(());
return false
}
self.first = false;
} else {
let interval = Duration::from_millis(100);
if self.last_update.elapsed() < interval {
return Ok(());
return false
}
}
self.last_update = Instant::now();
self.update();
true
}
if cur == max {
fn update(&mut self) {
self.first = false;
self.last_update = Instant::now();
}
}
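Extracting `Throttle` from `State` lets the download code reuse the same pacing. A sketch of the observable behavior, assuming only the methods above:

let mut throttle = Throttle::new();
for event in 0..10_000 {
    // false during the first 500ms (fast operations never flash a bar),
    // then true at most once per 100ms window thereafter.
    if throttle.allowed() {
        println!("processed {} events so far", event);
    }
}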
impl<'cfg> State<'cfg> {
fn tick(&mut self, cur: usize, max: usize, msg: &str) -> CargoResult<()> {
if self.done {
return Ok(());
}
if max > 0 && cur == max {
self.done = true;
}
// Write out a pretty header, then the progress bar itself, and then
// return back to the beginning of the line for the next print.
self.try_update_max_width();
if let Some(string) = self.format.progress_status(cur, max, msg) {
self.config.shell().status_header(&self.name)?;
write!(self.config.shell().err(), "{}\r", string)?;
if let Some(pbar) = self.format.progress(cur, max) {
self.print(&pbar, msg)?;
}
Ok(())
}
fn print(&mut self, prefix: &str, msg: &str) -> CargoResult<()> {
self.throttle.update();
self.try_update_max_width();
// make sure we have enough room for the header
if self.format.max_width < 15 {
return Ok(())
}
self.config.shell().status_header(&self.name)?;
let mut line = prefix.to_string();
self.format.render(&mut line, msg);
while line.len() < self.format.max_width - 15 {
line.push(' ');
}
write!(self.config.shell().err(), "{}\r", line)?;
Ok(())
}
fn clear(&mut self) {
self.try_update_max_width();
let blank = " ".repeat(self.format.max_width);
@ -149,7 +209,7 @@ impl<'cfg> State<'cfg> {
}
impl Format {
fn progress_status(&self, cur: usize, max: usize, msg: &str) -> Option<String> {
fn progress(&self, cur: usize, max: usize) -> Option<String> {
// Render the percentage at the far right and then figure how long the
// progress bar is
let pct = (cur as f64) / (max as f64);
@ -188,9 +248,15 @@ impl Format {
string.push_str("]");
string.push_str(&stats);
let mut avail_msg_len = self.max_width - self.width();
Some(string)
}
fn render(&self, string: &mut String, msg: &str) {
let mut avail_msg_len = self.max_width - string.len() - 15;
let mut ellipsis_pos = 0;
if avail_msg_len > 3 {
if avail_msg_len <= 3 {
return
}
for c in msg.chars() {
let display_width = c.width().unwrap_or(0);
if avail_msg_len >= display_width {
@ -207,7 +273,11 @@ impl Format {
}
}
Some(string)
#[cfg(test)]
fn progress_status(&self, cur: usize, max: usize, msg: &str) -> Option<String> {
let mut ret = self.progress(cur, max)?;
self.render(&mut ret, msg);
Some(ret)
}
fn width(&self) -> usize {

View File

@ -101,6 +101,7 @@ timeout = 30 # Timeout for each HTTP request, in seconds
cainfo = "cert.pem" # Path to Certificate Authority (CA) bundle (optional)
check-revoke = true # Indicates whether SSL certs are checked for revocation
low-speed-limit = 5 # Lower threshold for bytes/sec (10 = default, 0 = disabled)
multiplexing = false # Whether to use HTTP/2 multiplexing where possible
[build]
jobs = 1 # number of parallel jobs, defaults to # of CPUs
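For anyone trying the new parallel download path, opting in is a one-line config change; a sketch (the key belongs to the `[http]` table documented above):

# .cargo/config
[http]
multiplexing = true  # reuse one HTTP/2 connection for concurrent downloads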

View File

@ -57,7 +57,8 @@ fn depend_on_alt_registry() {
.with_stderr(&format!(
"\
[UPDATING] `{reg}` index
[DOWNLOADING] bar v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -110,8 +111,9 @@ fn depend_on_alt_registry_depends_on_same_registry_no_index() {
.with_stderr(&format!(
"\
[UPDATING] `{reg}` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] baz v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] foo v0.0.1 ([CWD])
@ -152,8 +154,9 @@ fn depend_on_alt_registry_depends_on_same_registry() {
.with_stderr(&format!(
"\
[UPDATING] `{reg}` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] baz v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] foo v0.0.1 ([CWD])
@ -195,8 +198,9 @@ fn depend_on_alt_registry_depends_on_crates_io() {
"\
[UPDATING] `{alt_reg}` index
[UPDATING] `{reg}` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] baz v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] foo v0.0.1 ([CWD])
@ -363,8 +367,8 @@ fn alt_registry_and_crates_io_deps() {
)).with_stderr_contains(&format!(
"[UPDATING] `{}` index",
registry::registry_path().to_str().unwrap()))
.with_stderr_contains("[DOWNLOADING] crates_io_dep v0.0.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADING] alt_reg_dep v0.1.0 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADED] crates_io_dep v0.0.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADED] alt_reg_dep v0.1.0 (registry `[ROOT][..]`)")
.with_stderr_contains("[COMPILING] alt_reg_dep v0.1.0 (registry `[ROOT][..]`)")
.with_stderr_contains("[COMPILING] crates_io_dep v0.0.1")
.with_stderr_contains("[COMPILING] foo v0.0.1 ([CWD])")

View File

@ -3569,11 +3569,12 @@ fn build_all_member_dependency_same_name() {
p.cargo("build --all")
.with_stderr(
"[..] Updating `[..]` index\n\
[..] Downloading a v0.1.0 ([..])\n\
[..] Compiling a v0.1.0\n\
[..] Compiling a v0.1.0 ([..])\n\
[..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n",
"[UPDATING] `[..]` index\n\
[DOWNLOADING] crates ...\n\
[DOWNLOADED] a v0.1.0 ([..])\n\
[COMPILING] a v0.1.0\n\
[COMPILING] a v0.1.0 ([..])\n\
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n",
).run();
}

View File

@ -2707,7 +2707,8 @@ fn warnings_hidden_for_upstream() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 ([..])
[COMPILING] bar v0.1.0
[RUNNING] `rustc [..]`
[RUNNING] `[..]`
@ -2761,7 +2762,8 @@ fn warnings_printed_on_vv() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 ([..])
[COMPILING] bar v0.1.0
[RUNNING] `rustc [..]`
[RUNNING] `[..]`

View File

@ -223,8 +223,9 @@ fn works_through_the_registry() {
.with_stderr(
"\
[UPDATING] [..] index
[DOWNLOADING] [..]
[DOWNLOADING] [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
[DOWNLOADED] [..]
[COMPILING] baz v0.1.0
[COMPILING] bar v0.1.0
[COMPILING] foo v0.0.1 ([..])
@ -267,7 +268,8 @@ fn ignore_version_from_other_platform() {
.with_stderr(
"\
[UPDATING] [..] index
[DOWNLOADING] [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
[COMPILING] bar v0.1.0
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]

View File

@ -331,7 +331,8 @@ fn crates_io_then_directory() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 ([..])
[COMPILING] bar v0.1.0
[COMPILING] foo v0.1.0 ([CWD])
[FINISHED] [..]

View File

@ -53,8 +53,8 @@ fn fetch_all_platform_dependencies_when_no_target_is_given() {
.build();
p.cargo("fetch")
.with_stderr_contains("[..] Downloading d1 v1.2.3 [..]")
.with_stderr_contains("[..] Downloading d2 v0.1.2 [..]")
.with_stderr_contains("[DOWNLOADED] d1 v1.2.3 [..]")
.with_stderr_contains("[DOWNLOADED] d2 v0.1.2 [..]")
.run();
}
@ -100,13 +100,13 @@ fn fetch_platform_specific_dependencies() {
p.cargo("fetch --target")
.arg(&host)
.with_stderr_contains("[..] Downloading d1 v1.2.3 [..]")
.with_stderr_does_not_contain("[..] Downloading d2 v0.1.2 [..]")
.with_stderr_contains("[DOWNLOADED] d1 v1.2.3 [..]")
.with_stderr_does_not_contain("[DOWNLOADED] d2 v0.1.2 [..]")
.run();
p.cargo("fetch --target")
.arg(&target)
.with_stderr_contains("[..] Downloading d2 v0.1.2[..]")
.with_stderr_does_not_contain("[..] Downloading d1 v1.2.3 [..]")
.with_stderr_contains("[DOWNLOADED] d2 v0.1.2[..]")
.with_stderr_does_not_contain("[DOWNLOADED] d1 v1.2.3 [..]")
.run();
}

View File

@ -2359,8 +2359,8 @@ fn include_overrides_gitignore() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] filetime [..]
[DOWNLOADING] libc [..]
[DOWNLOADED] filetime [..]
[DOWNLOADED] libc [..]
[COMPILING] libc [..]
[RUNNING] `rustc --crate-name libc [..]`
[COMPILING] filetime [..]

View File

@ -27,7 +27,8 @@ fn simple() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] foo v0.0.1 (registry [..])
[DOWNLOADING] crates ...
[DOWNLOADED] foo v0.0.1 (registry [..])
[INSTALLING] foo v0.0.1
[COMPILING] foo v0.0.1
[FINISHED] release [optimized] target(s) in [..]
@ -53,12 +54,14 @@ fn multiple_pkgs() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] foo v0.0.1 (registry `[CWD]/registry`)
[DOWNLOADING] crates ...
[DOWNLOADED] foo v0.0.1 (registry `[CWD]/registry`)
[INSTALLING] foo v0.0.1
[COMPILING] foo v0.0.1
[FINISHED] release [optimized] target(s) in [..]
[INSTALLING] [CWD]/home/.cargo/bin/foo[EXE]
[DOWNLOADING] bar v0.0.2 (registry `[CWD]/registry`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.2 (registry `[CWD]/registry`)
[INSTALLING] bar v0.0.2
[COMPILING] bar v0.0.2
[FINISHED] release [optimized] target(s) in [..]
@ -97,7 +100,8 @@ fn pick_max_version() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] foo v0.2.1 (registry [..])
[DOWNLOADING] crates ...
[DOWNLOADED] foo v0.2.1 (registry [..])
[INSTALLING] foo v0.2.1
[COMPILING] foo v0.2.1
[FINISHED] release [optimized] target(s) in [..]
@ -1004,7 +1008,7 @@ fn vers_precise() {
pkg("foo", "0.1.2");
cargo_process("install foo --vers 0.1.1")
.with_stderr_contains("[DOWNLOADING] foo v0.1.1 (registry [..])")
.with_stderr_contains("[DOWNLOADED] foo v0.1.1 (registry [..])")
.run();
}
@ -1014,7 +1018,7 @@ fn version_too() {
pkg("foo", "0.1.2");
cargo_process("install foo --version 0.1.1")
.with_stderr_contains("[DOWNLOADING] foo v0.1.1 (registry [..])")
.with_stderr_contains("[DOWNLOADED] foo v0.1.1 (registry [..])")
.run();
}

View File

@ -185,7 +185,8 @@ fn transitive() {
"\
[UPDATING] `[ROOT][..]` index
[UPDATING] git repository `[..]`
[DOWNLOADING] baz v0.2.0 (registry [..])
[DOWNLOADING] crates ...
[DOWNLOADED] baz v0.2.0 (registry [..])
[COMPILING] bar v0.1.0 (file://[..])
[COMPILING] baz v0.2.0
[COMPILING] foo v0.0.1 ([CWD])
@ -338,8 +339,9 @@ fn use_a_spec_to_select() {
"\
[UPDATING] `[ROOT][..]` index
[UPDATING] git repository `[..]`
[DOWNLOADING] [..]
[DOWNLOADING] [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
[DOWNLOADED] [..]
[COMPILING] [..]
[COMPILING] [..]
[COMPILING] [..]
@ -395,7 +397,8 @@ fn override_adds_some_deps() {
"\
[UPDATING] `[ROOT][..]` index
[UPDATING] git repository `[..]`
[DOWNLOADING] baz v0.1.1 (registry [..])
[DOWNLOADING] crates ...
[DOWNLOADED] baz v0.1.1 (registry [..])
[COMPILING] baz v0.1.1
[COMPILING] bar v0.1.0 ([..])
[COMPILING] foo v0.0.1 ([CWD])
@ -832,7 +835,8 @@ documented online at the url below for more information.
https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#overriding-dependencies
[DOWNLOADING] [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
[COMPILING] [..]
[COMPILING] [..]
[COMPILING] [..]

View File

@ -51,7 +51,8 @@ fn replace() {
.with_stderr(
"\
[UPDATING] `[ROOT][..]` index
[DOWNLOADING] baz v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] baz v0.1.0 ([..])
[COMPILING] bar v0.1.0 ([CWD]/bar)
[COMPILING] baz v0.1.0
[COMPILING] foo v0.0.1 ([CWD])
@ -217,7 +218,8 @@ fn unused() {
.with_stderr(
"\
[UPDATING] `[ROOT][..]` index
[DOWNLOADING] bar v0.1.0 [..]
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 [..]
[COMPILING] bar v0.1.0
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -275,7 +277,8 @@ fn unused_git() {
"\
[UPDATING] git repository `file://[..]`
[UPDATING] `[ROOT][..]` index
[DOWNLOADING] bar v0.1.0 [..]
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 [..]
[COMPILING] bar v0.1.0
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -309,7 +312,8 @@ fn add_patch() {
.with_stderr(
"\
[UPDATING] `[ROOT][..]` index
[DOWNLOADING] bar v0.1.0 [..]
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 [..]
[COMPILING] bar v0.1.0
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -368,7 +372,8 @@ fn add_ignored_patch() {
.with_stderr(
"\
[UPDATING] `[ROOT][..]` index
[DOWNLOADING] bar v0.1.0 [..]
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 [..]
[COMPILING] bar v0.1.0
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -535,7 +540,8 @@ fn new_major() {
.with_stderr(
"\
[UPDATING] `[ROOT][..]` index
[DOWNLOADING] bar v0.2.0 [..]
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.2.0 [..]
[COMPILING] bar v0.2.0
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]

View File

@ -40,7 +40,8 @@ fn simple() {
.with_stderr(&format!(
"\
[UPDATING] `{reg}` index
[DOWNLOADING] bar v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -85,8 +86,9 @@ fn deps() {
.with_stderr(&format!(
"\
[UPDATING] `{reg}` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] baz v0.0.1
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
@ -265,11 +267,9 @@ fn bad_cksum() {
.with_stderr(
"\
[UPDATING] [..] index
[DOWNLOADING] bad-cksum [..]
[ERROR] unable to get packages from source
Caused by:
failed to download replaced source registry `https://[..]`
[DOWNLOADING] crates ...
[DOWNLOADED] bad-cksum [..]
[ERROR] failed to download replaced source registry `https://[..]`
Caused by:
failed to verify the checksum of `bad-cksum v0.0.1 (registry `[ROOT][..]`)`
@ -312,7 +312,8 @@ required by package `foo v0.0.1 ([..])`
.with_stderr(format!(
"\
[UPDATING] `{reg}` index
[DOWNLOADING] notyet v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] notyet v0.0.1 (registry `[ROOT][..]`)
[COMPILING] notyet v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -367,7 +368,8 @@ required by package `foo v0.0.1 ([..])`
[PACKAGING] foo v0.0.1 ([CWD])
[VERIFYING] foo v0.0.1 ([CWD])
[UPDATING] `[..]` index
[DOWNLOADING] notyet v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] notyet v0.0.1 (registry `[ROOT][..]`)
[COMPILING] notyet v0.0.1
[COMPILING] foo v0.0.1 ([CWD][..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -398,7 +400,8 @@ fn lockfile_locks() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -435,8 +438,9 @@ fn lockfile_locks_transitively() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] baz v0.0.1
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
@ -480,8 +484,9 @@ fn yanks_are_not_used() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] baz v0.0.1
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
@ -587,7 +592,8 @@ fn update_with_lockfile_if_packages_missing() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.1 (registry `[ROOT][..]`)
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
",
).run();
@ -630,7 +636,8 @@ fn update_lockfile() {
p.cargo("build")
.with_stderr(
"\
[DOWNLOADING] [..] v0.0.2 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.2 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.2
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -650,7 +657,8 @@ fn update_lockfile() {
p.cargo("build")
.with_stderr(
"\
[DOWNLOADING] [..] v0.0.3 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.3 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.3
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -728,7 +736,8 @@ fn dev_dependency_not_used() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] [..] v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] [..] v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -812,7 +821,8 @@ fn updating_a_dep() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.1 (registry `[ROOT][..]`)
[COMPILING] bar v0.0.1
[COMPILING] a v0.0.1 ([CWD]/a)
[COMPILING] foo v0.0.1 ([CWD])
@ -838,7 +848,8 @@ fn updating_a_dep() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.1.0 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.1.0 (registry `[ROOT][..]`)
[COMPILING] bar v0.1.0
[COMPILING] a v0.0.1 ([CWD]/a)
[COMPILING] foo v0.0.1 ([CWD])
@ -892,7 +903,8 @@ fn git_and_registry_dep() {
"\
[UPDATING] [..]
[UPDATING] [..]
[DOWNLOADING] a v0.0.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] a v0.0.1 (registry `[ROOT][..]`)
[COMPILING] a v0.0.1
[COMPILING] b v0.0.1 ([..])
[COMPILING] foo v0.0.1 ([CWD])
@ -965,7 +977,8 @@ fn update_publish_then_update() {
.with_stderr(
"\
[UPDATING] [..]
[DOWNLOADING] a v0.1.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] a v0.1.1 (registry `[ROOT][..]`)
[COMPILING] a v0.1.1
[COMPILING] foo v0.5.0 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -996,7 +1009,8 @@ fn fetch_downloads() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] a v0.1.0 (registry [..])
[DOWNLOADING] crates ...
[DOWNLOADED] a v0.1.0 (registry [..])
",
).run();
}
@ -1036,7 +1050,8 @@ fn update_transitive_dependency() {
p.cargo("build")
.with_stderr(
"\
[DOWNLOADING] b v0.1.1 (registry `[ROOT][..]`)
[DOWNLOADING] crates ...
[DOWNLOADED] b v0.1.1 (registry `[ROOT][..]`)
[COMPILING] b v0.1.1
[COMPILING] a v0.1.0
[COMPILING] foo v0.5.0 ([..])
@ -1139,9 +1154,9 @@ fn update_multiple_packages() {
).run();
p.cargo("build")
.with_stderr_contains("[DOWNLOADING] a v0.1.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADING] b v0.1.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADING] c v0.1.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADED] a v0.1.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADED] b v0.1.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[DOWNLOADED] c v0.1.1 (registry `[ROOT][..]`)")
.with_stderr_contains("[COMPILING] a v0.1.1")
.with_stderr_contains("[COMPILING] b v0.1.1")
.with_stderr_contains("[COMPILING] c v0.1.1")
@ -1266,7 +1281,8 @@ fn only_download_relevant() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] baz v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] baz v0.1.0 ([..])
[COMPILING] baz v0.1.0
[COMPILING] bar v0.5.0 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s
@ -1509,7 +1525,8 @@ update to a fixed version or contact the upstream maintainer about
this warning.
[UPDATING] [..]
[DOWNLOADING] [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
[COMPILING] [..]
[COMPILING] [..]
[FINISHED] [..]
@ -1554,7 +1571,8 @@ fn old_version_req_upstream() {
.with_stderr(
"\
[UPDATING] [..]
[DOWNLOADING] [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
warning: parsed version requirement `0.2*` is no longer valid
Previous versions of Cargo accepted this malformed requirement,
@ -1661,11 +1679,9 @@ fn bad_and_or_malicious_packages_rejected() {
.with_stderr(
"\
[UPDATING] [..]
[DOWNLOADING] [..]
error: unable to get packages from source
Caused by:
failed to download [..]
[DOWNLOADING] crates ...
[DOWNLOADED] [..]
error: failed to download [..]
Caused by:
failed to unpack [..]

View File

@ -248,7 +248,8 @@ fn rename_twice() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] foo v0.1.0 (registry [..])
[DOWNLOADING] crates ...
[DOWNLOADED] foo v0.1.0 (registry [..])
error: multiple dependencies listed for the same crate must all have the same \
name, but the dependency on `foo v0.1.0` is listed as having different names
",

View File

@ -1389,6 +1389,7 @@ fn substitute_macros(input: &str) -> String {
("[DOCTEST]", " Doc-tests"),
("[PACKAGING]", " Packaging"),
("[DOWNLOADING]", " Downloading"),
("[DOWNLOADED]", " Downloaded"),
("[UPLOADING]", " Uploading"),
("[VERIFYING]", " Verifying"),
("[ARCHIVING]", " Archiving"),

View File

@ -59,7 +59,8 @@ fn no_warning_on_success() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] bar v0.0.1 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] bar v0.0.1 ([..])
[COMPILING] bar v0.0.1
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -79,7 +80,7 @@ fn no_warning_on_bin_failure() {
.with_stderr_does_not_contain(&format!("[WARNING] {}", WARNING1))
.with_stderr_does_not_contain(&format!("[WARNING] {}", WARNING2))
.with_stderr_contains("[UPDATING] `[..]` index")
.with_stderr_contains("[DOWNLOADING] bar v0.0.1 ([..])")
.with_stderr_contains("[DOWNLOADED] bar v0.0.1 ([..])")
.with_stderr_contains("[COMPILING] bar v0.0.1")
.with_stderr_contains("[COMPILING] foo v0.0.1 ([..])")
.run();
@ -96,7 +97,7 @@ fn warning_on_lib_failure() {
.with_stderr_does_not_contain("hidden stderr")
.with_stderr_does_not_contain("[COMPILING] foo v0.0.1 ([..])")
.with_stderr_contains("[UPDATING] `[..]` index")
.with_stderr_contains("[DOWNLOADING] bar v0.0.1 ([..])")
.with_stderr_contains("[DOWNLOADED] bar v0.0.1 ([..])")
.with_stderr_contains("[COMPILING] bar v0.0.1")
.with_stderr_contains(&format!("[WARNING] {}", WARNING1))
.with_stderr_contains(&format!("[WARNING] {}", WARNING2))

View File

@ -557,7 +557,8 @@ fn share_dependencies() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] dep1 v0.1.3 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] dep1 v0.1.3 ([..])
[COMPILING] dep1 v0.1.3
[COMPILING] foo v0.1.0 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -600,7 +601,8 @@ fn fetch_fetches_all() {
.with_stderr(
"\
[UPDATING] `[..]` index
[DOWNLOADING] dep1 v0.1.3 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] dep1 v0.1.3 ([..])
",
).run();
}
@ -650,7 +652,8 @@ fn lock_works_for_everyone() {
p.cargo("build")
.with_stderr(
"\
[DOWNLOADING] dep2 v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] dep2 v0.1.0 ([..])
[COMPILING] dep2 v0.1.0
[COMPILING] foo v0.1.0 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
@ -661,7 +664,8 @@ fn lock_works_for_everyone() {
.cwd(p.root().join("bar"))
.with_stderr(
"\
[DOWNLOADING] dep1 v0.1.0 ([..])
[DOWNLOADING] crates ...
[DOWNLOADED] dep1 v0.1.0 ([..])
[COMPILING] dep1 v0.1.0
[COMPILING] bar v0.1.0 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]