Don't abort resolution on transitive updates

This commit is directed at fixing #4127, allowing the resolver to automatically
perform transitive updates when required. A few use cases and tagged links are
hanging off #4127 itself, but the crux of the issue happens when you either add
a dependency or update a version requirement in `Cargo.toml` which conflicts
with something listed in your `Cargo.lock`. In this case Cargo would previously
provide an obscure "cannot resolve" error, whereas this commit updates Cargo to
automatically perform a conservative re-resolution of the dependency graph.

It's hoped that this commit will help reduce the number of "unresolvable"
dependency graphs we've seen in the wild and otherwise make Cargo a little more
ergonomic to use as well. More details can be found in the source's comments!

Closes #4127
Closes #5182
Alex Crichton 2018-03-05 21:21:47 -08:00
parent 9659f56032
commit 51d235606a
15 changed files with 614 additions and 59 deletions


@ -343,9 +343,8 @@ impl Dependency {
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches_ignoring_source(&self, sum: &Summary) -> bool {
self.name() == sum.package_id().name()
&& self.version_req().matches(sum.package_id().version())
pub fn matches_ignoring_source(&self, id: &PackageId) -> bool {
self.name() == id.name() && self.version_req().matches(id.version())
}
/// Returns true if the package (`id`) can fulfill this dependency request.


@ -12,7 +12,6 @@ use lazycell::LazyCell;
use core::{Dependency, Manifest, PackageId, SourceId, Target};
use core::{SourceMap, Summary};
use core::interning::InternedString;
use ops;
use util::{internal, lev_distance, Config};
use util::errors::{CargoResult, CargoResultExt};


@ -434,7 +434,7 @@ impl<'cfg> Registry for PackageRegistry<'cfg> {
patches.extend(
extra
.iter()
.filter(|s| dep.matches_ignoring_source(s))
.filter(|s| dep.matches_ignoring_source(s.package_id()))
.cloned(),
);
}


@ -6,7 +6,7 @@ use serde::ser;
use serde::de;
use core::{Dependency, Package, PackageId, SourceId, Workspace};
use util::{internal, Config, Graph};
use util::{internal, Graph};
use util::errors::{CargoError, CargoResult, CargoResultExt};
use super::Resolve;
@ -188,7 +188,7 @@ impl EncodableResolve {
}
}
fn build_path_deps(ws: &Workspace) -> (HashMap<String, SourceId>) {
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a crate is *not* a path source, then we're probably in a situation
// such as `cargo install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not


@ -384,10 +384,39 @@ struct Context {
type Activations = HashMap<(InternedString, SourceId), Rc<Vec<Summary>>>;
/// Builds the list of all packages required to build the first argument.
///
/// * `summaries` - the list of package summaries along with how to resolve
/// their features. This is a list of all top-level packages that are intended
/// to be part of the lock file (resolve output). These typically are a list
/// of all workspace members.
///
/// * `replacements` - this is a list of `[replace]` directives found in the
/// root of the workspace. The list here is a `PackageIdSpec` of what to
/// replace and a `Dependency` to replace that with. In general it's not
/// recommended to use `[replace]` any more and use `[patch]` instead, which
/// is supported elsewhere.
///
/// * `registry` - this is the source from which all package summaries are
/// loaded. It's expected that this is extensively configured ahead of time
/// and is idempotent with our requests to it (aka returns the same results
/// for the same query every time). Typically this is an instance of a
/// `PackageRegistry`.
///
/// * `try_to_use` - this is a list of package ids which were previously found
/// in the lock file. We heuristically prefer the ids listed in `try_to_use`
/// when sorting candidates to activate, but otherwise this isn't used
/// anywhere else.
///
/// * `config` - a location to print warnings and such, or `None` if no warnings
/// should be printed
///
/// * `print_warnings` - whether or not to print backwards-compatibility
/// warnings and such
pub fn resolve(
summaries: &[(Summary, Method)],
replacements: &[(PackageIdSpec, Dependency)],
registry: &mut Registry,
try_to_use: &[&PackageId],
config: Option<&Config>,
print_warnings: bool,
) -> CargoResult<Resolve> {
@ -400,12 +429,8 @@ pub fn resolve(
warnings: RcList::new(),
};
let _p = profile::start("resolving");
let cx = activate_deps_loop(
cx,
&mut RegistryQueryer::new(registry, replacements),
summaries,
config,
)?;
let mut registry = RegistryQueryer::new(registry, replacements, try_to_use);
let cx = activate_deps_loop(cx, &mut registry, summaries, config)?;
let mut resolve = Resolve {
graph: cx.graph(),
@ -637,16 +662,22 @@ impl ConflictReason {
struct RegistryQueryer<'a> {
registry: &'a mut (Registry + 'a),
replacements: &'a [(PackageIdSpec, Dependency)],
try_to_use: HashSet<&'a PackageId>,
// TODO: with nll the Rc can be removed
cache: HashMap<Dependency, Rc<Vec<Candidate>>>,
}
impl<'a> RegistryQueryer<'a> {
fn new(registry: &'a mut Registry, replacements: &'a [(PackageIdSpec, Dependency)]) -> Self {
fn new(
registry: &'a mut Registry,
replacements: &'a [(PackageIdSpec, Dependency)],
try_to_use: &'a [&'a PackageId],
) -> Self {
RegistryQueryer {
registry,
replacements,
cache: HashMap::new(),
try_to_use: try_to_use.iter().cloned().collect(),
}
}
@ -739,9 +770,17 @@ impl<'a> RegistryQueryer<'a> {
candidate.replace = replace;
}
// When we attempt versions for a package, we'll want to start at
// the maximum version and work our way down.
ret.sort_unstable_by(|a, b| b.summary.version().cmp(a.summary.version()));
// When we attempt versions for a package we'll want to do so in a
// sorted fashion to pick the "best candidates" first. Currently we try
// prioritized summaries (those in `try_to_use`) and failing that we
// list everything from the maximum version to the lowest version.
ret.sort_unstable_by(|a, b| {
let a_in_previous = self.try_to_use.contains(a.summary.package_id());
let b_in_previous = self.try_to_use.contains(b.summary.package_id());
let a = (a_in_previous, a.summary.version());
let b = (b_in_previous, b.summary.version());
a.cmp(&b).reverse()
});
let out = Rc::new(ret);
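The tuple comparison above does the heavy lifting for conservative updates: Rust orders tuples lexicographically, so the `bool` derived from `try_to_use` dominates and the version only breaks ties, while `.reverse()` yields locked-candidates-first, then highest-version-first. A minimal standalone sketch of the same trick, with `(major, minor, patch)` triples standing in for real versions and a plain `HashSet` for `try_to_use`:

    use std::collections::HashSet;

    fn main() {
        // `locked` plays the role of `try_to_use`: ids from the old lock file.
        let locked: HashSet<(u32, u32, u32)> = [(0, 1, 0)].iter().cloned().collect();
        let mut candidates = vec![(0, 1, 0), (0, 1, 1), (0, 2, 0)];
        // Locked-ness dominates, version breaks ties, and `.reverse()` sorts
        // descending: locked first, then newest to oldest.
        candidates.sort_unstable_by(|a, b| {
            (locked.contains(a), a).cmp(&(locked.contains(b), b)).reverse()
        });
        // The previously locked 0.1.0 sorts ahead of the newer releases.
        assert_eq!(candidates, vec![(0, 1, 0), (0, 2, 0), (0, 1, 1)]);
    }

If the locked candidate fails to activate, the resolver simply moves on to the next entry in this order, which is what turns a hard "cannot resolve" error into a conservative re-resolution.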


@ -7,9 +7,11 @@ use std::slice;
use glob::glob;
use url::Url;
use core::{Dependency, PackageIdSpec, Profile, Profiles};
use core::registry::PackageRegistry;
use core::{EitherManifest, Package, SourceId, VirtualManifest};
use core::{Dependency, PackageIdSpec, Profile, Profiles};
use ops;
use sources::PathSource;
use util::errors::{CargoResult, CargoResultExt};
use util::paths;
use util::toml::read_manifest;
@ -70,6 +72,8 @@ pub struct Workspace<'cfg> {
// cases `false` also results in the non-enforcement of dev-dependencies.
require_optional_deps: bool,
// A cache of loaded packages for particular paths which is disjoint from
// `packages` up above, used in the `load` method down below.
loaded_packages: RefCell<HashMap<PathBuf, Package>>,
}
@ -692,6 +696,37 @@ impl<'cfg> Workspace<'cfg> {
loaded.insert(manifest_path.to_path_buf(), package.clone());
Ok(package)
}
/// Preload the provided registry with already loaded packages.
///
/// A workspace may load packages during construction/parsing/early phases
/// for various operations, and this preload step avoids doubly-loading and
/// parsing crates on the filesystem by inserting them all into the registry
/// with their in-memory formats.
pub fn preload(&self, registry: &mut PackageRegistry<'cfg>) {
// These can get weird as this generally represents a workspace during
// `cargo install`. Things like git repositories will actually have a
// `PathSource` with multiple entries in it, so the logic below is
// mostly just an optimization for normal `cargo build` in workspaces
// during development.
if self.is_ephemeral {
return;
}
for pkg in self.packages.packages.values() {
let pkg = match *pkg {
MaybePackage::Package(ref p) => p.clone(),
MaybePackage::Virtual(_) => continue,
};
let mut src = PathSource::new(
pkg.manifest_path(),
pkg.package_id().source_id(),
self.config,
);
src.preload_with(pkg);
registry.add_preloaded(Box::new(src));
}
}
}
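The `loaded_packages` field noted above is a memoization cache: `load` takes `&self`, so interior mutability via `RefCell` lets it record parsed manifests by path. A standalone sketch of that pattern, with a `String` and a hypothetical `Loader` type standing in for `Package` and `Workspace`:

    use std::cell::RefCell;
    use std::collections::HashMap;
    use std::path::{Path, PathBuf};

    struct Loader {
        // Interior mutability so a `&self` method can populate the cache.
        cache: RefCell<HashMap<PathBuf, String>>,
    }

    impl Loader {
        fn load(&self, path: &Path) -> String {
            if let Some(pkg) = self.cache.borrow().get(path) {
                return pkg.clone(); // already parsed once, reuse it
            }
            let pkg = format!("parsed manifest at {}", path.display()); // pretend parse
            self.cache.borrow_mut().insert(path.to_path_buf(), pkg.clone());
            pkg
        }
    }

    fn main() {
        let loader = Loader { cache: RefCell::new(HashMap::new()) };
        let first = loader.load(Path::new("foo/Cargo.toml"));
        let second = loader.load(Path::new("foo/Cargo.toml")); // served from cache
        assert_eq!(first, second);
    }

`preload` then hands these already-parsed packages to the `PackageRegistry`, so resolution never has to re-read them from disk.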
impl<'cfg> Packages<'cfg> {


@ -1082,7 +1082,7 @@ fn build_deps_args<'a, 'cfg>(
.pkg
.dependencies()
.iter()
.filter(|d| d.matches_ignoring_source(dep.pkg.summary()))
.filter(|d| d.matches_ignoring_source(dep.pkg.package_id()))
.filter_map(|d| d.rename())
.next();


@ -101,9 +101,9 @@ pub fn resolve_ws_with_method<'a>(
Ok((packages, resolved_with_overrides))
}
fn resolve_with_registry(
ws: &Workspace,
registry: &mut PackageRegistry,
fn resolve_with_registry<'cfg>(
ws: &Workspace<'cfg>,
registry: &mut PackageRegistry<'cfg>,
warn: bool,
) -> CargoResult<Resolve> {
let prev = ops::load_pkg_lockfile(ws)?;
@ -133,9 +133,9 @@ fn resolve_with_registry(
///
/// The previous resolve normally comes from a lockfile. This function does not
/// read or write lockfiles from the filesystem.
pub fn resolve_with_previous<'a>(
registry: &mut PackageRegistry,
ws: &Workspace,
pub fn resolve_with_previous<'a, 'cfg>(
registry: &mut PackageRegistry<'cfg>,
ws: &Workspace<'cfg>,
method: Method,
previous: Option<&'a Resolve>,
to_avoid: Option<&HashSet<&'a PackageId>>,
@ -169,31 +169,18 @@ pub fn resolve_with_previous<'a>(
// In the case where a previous instance of resolve is available, we
// want to lock as many packages as possible to the previous version
// without disturbing the graph structure. To this end we perform
// two actions here:
//
// 1. We inform the package registry of all locked packages. This
// involves informing it of both the locked package's id as well
// as the versions of all locked dependencies. The registry will
// then takes this information into account when it is queried.
//
// 2. The specified package's summary will have its dependencies
// modified to their precise variants. This will instruct the
// first step of the resolution process to not query for ranges
// but rather for precise dependency versions.
//
// This process must handle altered dependencies, however, as
// it's possible for a manifest to change over time to have
// dependencies added, removed, or modified to different version
// ranges. To deal with this, we only actually lock a dependency
// to the previously resolved version if the dependency listed
// still matches the locked version.
// without disturbing the graph structure.
let mut try_to_use = Vec::new();
if let Some(r) = previous {
trace!("previous: {:?}", r);
for node in r.iter().filter(keep) {
let deps = r.deps_not_replaced(node).filter(keep).cloned().collect();
registry.register_lock(node.clone(), deps);
}
register_previous_locks(ws, registry, r, keep);
// Everything in the previous lock file we want to keep is prioritized
// in dependency selection if it comes up, aka we want to have
// conservative updates.
try_to_use.extend(r.iter().filter(keep).inspect(|id| {
debug!("attempting to prefer {}", id);
}));
}
if register_patches {
@ -293,7 +280,15 @@ pub fn resolve_with_previous<'a>(
None => root_replace.to_vec(),
};
let mut resolved = resolver::resolve(&summaries, &replace, registry, Some(ws.config()), warn)?;
ws.preload(registry);
let mut resolved = resolver::resolve(
&summaries,
&replace,
registry,
&try_to_use,
Some(ws.config()),
warn,
)?;
resolved.register_used_patches(registry.patches());
if let Some(previous) = previous {
resolved.merge_from(previous)?;
@ -336,3 +331,145 @@ fn get_resolved_packages<'a>(resolve: &Resolve, registry: PackageRegistry<'a>) -
let ids: Vec<PackageId> = resolve.iter().cloned().collect();
registry.get(&ids)
}
/// In this function we're responsible for informing the `registry` of all
/// locked dependencies from the previous lock file we had, `resolve`.
///
/// This gets particularly tricky for a couple of reasons. The first is that we
/// want all updates to be conservative, so we actually want to take the
/// `resolve` into account (and avoid unnecessary registry updates and such).
/// The second, however, is that we want to be resilient to updates of
/// manifests. For example if a dependency is added or a version is changed we
/// want to make sure that we properly re-resolve (conservatively) instead of
/// providing an opaque error.
///
/// The logic here is somewhat subtle but there should be more comments below to
/// help out, and otherwise feel free to ask on IRC if there's questions!
///
/// Note that this function, at the time of this writing, is basically the
/// entire fix for #4127.
fn register_previous_locks<'a>(
ws: &Workspace,
registry: &mut PackageRegistry,
resolve: &'a Resolve,
keep: &Fn(&&'a PackageId) -> bool,
) {
// Ok so we've been passed in a `keep` function which basically says "if I
// return true then this package wasn't listed for an update on the command
// line". AKA if we run `cargo update -p foo` then `keep(bar)` will return
// `true`, whereas `keep(foo)` will return `false` (roughly).
//
// This isn't actually quite what we want, however. Instead we want to
// further refine this `keep` function with *all transitive dependencies* of
// the packages we're not keeping. For example consider a case like this:
//
// * There's a crate `log`
// * There's a crate `serde` which depends on `log`
//
// Let's say we then run `cargo update -p serde`. This may *also* want to
// update the `log` dependency as our newer version of `serde` may have a
// new minimum version required for `log`. Now this isn't always guaranteed
// to work. What'll happen here is we *won't* lock the `log` dependency nor
// the `log` crate itself, but we will inform the registry "please prefer
// this version of `log`". That way if our newer version of serde works with
// the older version of `log`, we conservatively won't update `log`. If,
// however, nothing else in the dependency graph depends on `log` and the
// newer version of `serde` requires a new version of `log` it'll get pulled
// in (as we didn't accidentally lock it to an old version).
let mut avoid_locking = HashSet::new();
for node in resolve.iter() {
if !keep(&node) {
add_deps(resolve, node, &mut avoid_locking);
}
}
// Ok but the above loop isn't the entire story! Updates to the dependency
// graph can come from two locations, the `cargo update` command or
// manifests themselves. For example a manifest on the filesystem may
// have been updated to have an updated version requirement on `serde`. In
// this case both `keep(serde)` and `keep(log)` return `true` (the `keep`
// that's an argument to this function). We, however, don't want to keep
// either of those! Otherwise we'll get obscure resolve errors about locked
// versions.
//
// To solve this problem we iterate over all packages with path sources
// (aka ones with manifests that are changing) and take a look at all of
// their dependencies. If any dependency does not match something in the
// previous lock file, then we're guaranteed that the main resolver will
// update the source of this dependency no matter what. Knowing this we
// poison all packages from the same source, forcing them all to get
// updated.
//
// This may seem like a heavy hammer, and it is! It means that if you change
// anything from crates.io then all of crates.io becomes unlocked. Note,
// however, that we still want conservative updates. This currently happens
// because the first candidate the resolver picks is the previously locked
// version, and only if that fails to activate do we move on and try
// a different version (giving the guise of conservative updates).
//
// For example let's say we had `serde = "0.1"` written in our lock file.
// When we later edit this to `serde = "0.1.3"` we don't want to lock serde
// at its old version, 0.1.1. Instead we want to allow it to update to
// `0.1.3` and update its own dependencies (like above). To do this *all
// crates from crates.io* are not locked (aka added to `avoid_locking`).
// For dependencies like `log`, their previous version in the lock file will
// come up first, before newer versions, if newer versions are available.
let mut path_deps = ws.members().cloned().collect::<Vec<_>>();
let mut visited = HashSet::new();
while let Some(member) = path_deps.pop() {
if !visited.insert(member.package_id().clone()) {
continue;
}
for dep in member.dependencies() {
let source = dep.source_id();
// If this is a path dependency then try to push it onto our
// worklist
if source.is_path() {
if let Ok(path) = source.url().to_file_path() {
if let Ok(pkg) = ws.load(&path.join("Cargo.toml")) {
path_deps.push(pkg);
}
continue;
}
}
// If we match *anything* in the dependency graph then we consider
// ourselves A-OK and assume that we'll resolve to that. If,
// however, nothing matches, then we poison the source of this
// dependency, unlocking everything from that source in the previous
// lock file.
if resolve.iter().any(|id| dep.matches_ignoring_source(id)) {
continue;
}
for id in resolve.iter().filter(|id| id.source_id() == source) {
add_deps(resolve, id, &mut avoid_locking);
}
}
}
// Alright now that we've got our new, fresh, shiny, and refined `keep`
// function, let's put it into action. Take a look at the previous lockfile,
// filter everything by this callback, and then shove everything else into
// the registry as a locked dependency.
let ref keep = |id: &&'a PackageId| keep(id) && !avoid_locking.contains(id);
for node in resolve.iter().filter(keep) {
let deps = resolve
.deps_not_replaced(node)
.filter(keep)
.cloned()
.collect();
registry.register_lock(node.clone(), deps);
}
/// recursively add `node` and all its transitive dependencies to `set`
fn add_deps<'a>(resolve: &'a Resolve, node: &'a PackageId, set: &mut HashSet<&'a PackageId>) {
if !set.insert(node) {
return;
}
debug!("ignoring any lock pointing directly at {}", node);
for dep in resolve.deps_not_replaced(node) {
add_deps(resolve, dep, set);
}
}
}
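Taken together, the refinement of `keep` is a transitive closure over the previous lock file. A toy sketch under simplified assumptions (string package ids and a plain `HashMap` standing in for the old `Resolve` graph):

    use std::collections::{HashMap, HashSet};

    // Recursively add `node` and all its transitive dependencies to `set`,
    // mirroring the `add_deps` helper above.
    fn add_deps<'a>(
        graph: &HashMap<&'a str, Vec<&'a str>>,
        node: &'a str,
        set: &mut HashSet<&'a str>,
    ) {
        if !set.insert(node) {
            return;
        }
        for &dep in &graph[node] {
            add_deps(graph, dep, set);
        }
    }

    fn main() {
        // Toy previous lock file: package -> dependencies.
        let mut graph: HashMap<&str, Vec<&str>> = HashMap::new();
        graph.insert("bar", vec!["serde", "log"]);
        graph.insert("serde", vec!["log"]);
        graph.insert("log", vec![]);

        // `cargo update -p serde`: the base `keep` rejects only `serde`...
        let updating: HashSet<&str> = ["serde"].iter().cloned().collect();

        // ...and the refinement also refuses to lock anything `serde`
        // transitively depends on.
        let mut avoid_locking: HashSet<&str> = HashSet::new();
        for &node in graph.keys() {
            if updating.contains(node) {
                add_deps(&graph, node, &mut avoid_locking);
            }
        }
        let keep = |id: &str| !updating.contains(id) && !avoid_locking.contains(id);

        assert!(keep("bar"));    // untouched, stays locked
        assert!(!keep("serde")); // explicitly updated
        assert!(!keep("log"));   // transitive dep of `serde`, left unlocked
    }

Only ids for which `keep` returns true are registered as hard locks; everything else is merely preferred via `try_to_use`, which is what lets a newer `serde` pull in a newer `log` when it has to.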


@ -53,6 +53,14 @@ impl<'cfg> PathSource<'cfg> {
}
}
pub fn preload_with(&mut self, pkg: Package) {
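// A preloaded source must be fresh: never updated, non-recursive, and empty.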
assert!(!self.updated);
assert!(!self.recursive);
assert!(self.packages.is_empty());
self.updated = true;
self.packages.push(pkg);
}
pub fn root_package(&mut self) -> CargoResult<Package> {
trace!("root_package; source={:?}", self);


@ -246,6 +246,18 @@ impl Project {
buffer
}
pub fn uncomment_root_manifest(&self) {
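// Note: this strips every `#` from Cargo.toml, so fixtures must use `#`
// only on lines meant to be uncommented by this helper.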
let mut contents = String::new();
fs::File::open(self.root().join("Cargo.toml"))
.unwrap()
.read_to_string(&mut contents)
.unwrap();
fs::File::create(self.root().join("Cargo.toml"))
.unwrap()
.write_all(contents.replace("#", "").as_bytes())
.unwrap();
}
fn get_lib_prefix(kind: &str) -> &str {
match kind {
"lib" | "rlib" => "lib",


@ -81,6 +81,7 @@ mod search;
mod small_fd_limits;
mod test;
mod tool_paths;
mod update;
mod verify_project;
mod version;
mod warn_on_failure;


@ -498,17 +498,22 @@ fn override_adds_some_deps() {
p.cargo("update")
.arg("-p")
.arg(&format!("{}#bar", foo.url())),
execs()
.with_status(0)
.with_stderr("[UPDATING] git repository `file://[..]`"),
execs().with_status(0).with_stderr(
"\
[UPDATING] git repository `file://[..]`
[UPDATING] registry `file://[..]`
",
),
);
assert_that(
p.cargo("update")
.arg("-p")
.arg("https://github.com/rust-lang/crates.io-index#bar"),
execs()
.with_status(0)
.with_stderr("[UPDATING] registry `file://[..]`"),
execs().with_status(0).with_stderr(
"\
[UPDATING] registry `file://[..]`
",
),
);
assert_that(p.cargo("build"), execs().with_status(0).with_stdout(""));


@ -1149,9 +1149,13 @@ fn update_backtracking_ok() {
assert_that(
p.cargo("update").arg("-p").arg("hyper"),
execs()
.with_status(0)
.with_stderr("[UPDATING] registry `[..]`"),
execs().with_status(0).with_stderr(
"\
[UPDATING] registry `[..]`
[UPDATING] hyper v0.6.5 -> v0.6.6
[UPDATING] openssl v0.1.0 -> v0.1.1
",
),
);
}


@ -35,7 +35,7 @@ fn resolve(
let mut registry = MyRegistry(registry);
let summary = Summary::new(pkg.clone(), deps, BTreeMap::new(), None).unwrap();
let method = Method::Everything;
let resolve = resolver::resolve(&[(summary, method)], &[], &mut registry, None, false)?;
let resolve = resolver::resolve(&[(summary, method)], &[], &mut registry, &[], None, false)?;
let res = resolve.iter().cloned().collect();
Ok(res)
}

tests/testsuite/update.rs (new file, 316 lines)

@ -0,0 +1,316 @@
use std::fs::File;
use std::io::prelude::*;
use cargotest::support::{execs, project};
use cargotest::support::registry::Package;
use hamcrest::assert_that;
#[test]
fn minor_update_two_places() {
Package::new("log", "0.1.0").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1"
foo = { path = "foo" }
"#,
)
.file("src/lib.rs", "")
.file(
"foo/Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1"
"#,
)
.file("foo/src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
File::create(p.root().join("foo/Cargo.toml"))
.unwrap()
.write_all(
br#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1.1"
"#,
)
.unwrap();
assert_that(p.cargo("build"), execs().with_status(0));
}
#[test]
fn transitive_minor_update() {
Package::new("log", "0.1.0").publish();
Package::new("serde", "0.1.0").dep("log", "0.1").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[dependencies]
serde = "0.1"
log = "0.1"
foo = { path = "foo" }
"#,
)
.file("src/lib.rs", "")
.file(
"foo/Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
serde = "0.1"
"#,
)
.file("foo/src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
Package::new("serde", "0.1.1").dep("log", "0.1.1").publish();
// Note that `serde` isn't actually updated here! The default behavior for
// `update` right now is to attempt to satisfy an update as conservatively
// as possible. In this case we previously locked the dependency graph to `log
// 0.1.0`, but nothing on the command line says we're allowed to update
// that. As a result the update of `serde` here shouldn't update to `serde
// 0.1.1` as that would also force an update to `log 0.1.1`.
//
// Also note that this is probably counterintuitive and weird. We may wish
// to change this one day.
assert_that(
p.cargo("update").arg("-p").arg("serde"),
execs().with_status(0).with_stderr(
"\
[UPDATING] registry `[..]`
",
),
);
}
#[test]
fn conservative() {
Package::new("log", "0.1.0").publish();
Package::new("serde", "0.1.0").dep("log", "0.1").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[dependencies]
serde = "0.1"
log = "0.1"
foo = { path = "foo" }
"#,
)
.file("src/lib.rs", "")
.file(
"foo/Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
serde = "0.1"
"#,
)
.file("foo/src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
Package::new("serde", "0.1.1").dep("log", "0.1").publish();
assert_that(
p.cargo("update").arg("-p").arg("serde"),
execs().with_status(0).with_stderr(
"\
[UPDATING] registry `[..]`
[UPDATING] serde v0.1.0 -> v0.1.1
",
),
);
}
#[test]
fn update_via_new_dep() {
Package::new("log", "0.1.0").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1"
# foo = { path = "foo" }
"#,
)
.file("src/lib.rs", "")
.file(
"foo/Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1.1"
"#,
)
.file("foo/src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
p.uncomment_root_manifest();
assert_that(
p.cargo("build").env("RUST_LOG", "cargo=trace"),
execs().with_status(0),
);
}
#[test]
fn update_via_new_member() {
Package::new("log", "0.1.0").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[workspace]
# members = [ "foo" ]
[dependencies]
log = "0.1"
"#,
)
.file("src/lib.rs", "")
.file(
"foo/Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1.1"
"#,
)
.file("foo/src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
p.uncomment_root_manifest();
assert_that(p.cargo("build"), execs().with_status(0));
}
#[test]
fn add_dep_deep_new_requirement() {
Package::new("log", "0.1.0").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[dependencies]
log = "0.1"
# bar = "0.1"
"#,
)
.file("src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
Package::new("bar", "0.1.0").dep("log", "0.1.1").publish();
p.uncomment_root_manifest();
assert_that(p.cargo("build"), execs().with_status(0));
}
#[test]
fn everything_real_deep() {
Package::new("log", "0.1.0").publish();
Package::new("foo", "0.1.0").dep("log", "0.1").publish();
let p = project("foo")
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[dependencies]
foo = "0.1"
# bar = "0.1"
"#,
)
.file("src/lib.rs", "")
.build();
assert_that(p.cargo("build"), execs().with_status(0));
Package::new("log", "0.1.1").publish();
Package::new("bar", "0.1.0").dep("log", "0.1.1").publish();
p.uncomment_root_manifest();
assert_that(p.cargo("build"), execs().with_status(0));
}