Multi-package publishing

Co-authored-by: Tor Hovland <55164+torhovland@users.noreply.github.com>
Co-authored-by: Ed Page <eopage@gmail.com>
Joe Neeman 2024-09-03 16:59:17 +07:00
parent 431d84a6bf
commit a016e5f5c2
6 changed files with 485 additions and 126 deletions

diff --git a/src/cargo/ops/cargo_package.rs b/src/cargo/ops/cargo_package.rs

@@ -93,30 +93,6 @@ struct GitVcsInfo {
     dirty: bool,
 }
 
-/// Packages a single package in a workspace, returning the resulting tar file.
-///
-/// # Panics
-/// Panics if `opts.list` is true. In that case you probably don't want to
-/// actually build the package tarball; you should just make and print the list
-/// of files. (We don't currently provide a public API for that, but see how
-/// [`package`] does it.)
-pub fn package_one(
-    ws: &Workspace<'_>,
-    pkg: &Package,
-    opts: &PackageOpts<'_>,
-) -> CargoResult<FileLock> {
-    assert!(!opts.list);
-
-    let ar_files = prepare_archive(ws, pkg, opts)?;
-    let tarball = create_package(ws, pkg, ar_files, None)?;
-
-    if opts.verify {
-        run_verify(ws, pkg, &tarball, None, opts)?;
-    }
-
-    Ok(tarball)
-}
-
 // Builds a tarball and places it in the output directory.
 fn create_package(
     ws: &Workspace<'_>,
@@ -193,6 +169,34 @@ pub fn package(ws: &Workspace<'_>, opts: &PackageOpts<'_>) -> CargoResult<Vec<FileLock>> {
     // So we need filter
     pkgs.retain(|(pkg, _feats)| specs.iter().any(|spec| spec.matches(pkg.package_id())));
 
+    Ok(do_package(ws, opts, pkgs)?
+        .into_iter()
+        .map(|x| x.2)
+        .collect())
+}
+
+/// Packages an entire workspace.
+///
+/// Returns the generated package files and the dependencies between them. If
+/// `opts.list` is true, skips generating package files and returns an empty
+/// list.
+pub(crate) fn package_with_dep_graph(
+    ws: &Workspace<'_>,
+    opts: &PackageOpts<'_>,
+    pkgs: Vec<(&Package, CliFeatures)>,
+) -> CargoResult<LocalDependencies<(CliFeatures, FileLock)>> {
+    let output = do_package(ws, opts, pkgs)?;
+
+    Ok(local_deps(output.into_iter().map(
+        |(pkg, opts, tarball)| (pkg, (opts.cli_features, tarball)),
+    )))
+}
+
+fn do_package<'a>(
+    ws: &Workspace<'_>,
+    opts: &PackageOpts<'a>,
+    pkgs: Vec<(&Package, CliFeatures)>,
+) -> CargoResult<Vec<(Package, PackageOpts<'a>, FileLock)>> {
     if ws
         .lock_root()
         .as_path_unlocked()
@@ -264,7 +268,7 @@ pub fn package(ws: &Workspace<'_>, opts: &PackageOpts<'_>) -> CargoResult<Vec<FileLock>> {
         }
     }
 
-    Ok(outputs.into_iter().map(|x| x.2).collect())
+    Ok(outputs)
 }
 
 /// Determine which registry the packages are for.
@@ -308,15 +312,14 @@ fn get_registry(
 }
 
 /// Just the part of the dependency graph that's between the packages we're packaging.
-/// (Is the package name a good key? Does it uniquely identify packages?)
 #[derive(Clone, Debug, Default)]
-struct LocalDependencies {
-    packages: HashMap<PackageId, (Package, CliFeatures)>,
-    graph: Graph<PackageId, ()>,
+pub(crate) struct LocalDependencies<T> {
+    pub packages: HashMap<PackageId, (Package, T)>,
+    pub graph: Graph<PackageId, ()>,
 }
 
-impl LocalDependencies {
-    fn sort(&self) -> Vec<(Package, CliFeatures)> {
+impl<T: Clone> LocalDependencies<T> {
+    pub fn sort(&self) -> Vec<(Package, T)> {
         self.graph
             .sort()
             .into_iter()
@@ -335,9 +338,10 @@ impl LocalDependencies {
 /// ignoring dev dependencies.
 ///
 /// We assume that the packages all belong to this workspace.
-fn local_deps(packages: impl Iterator<Item = (Package, CliFeatures)>) -> LocalDependencies {
-    let packages: HashMap<PackageId, (Package, CliFeatures)> =
-        packages.map(|pkg| (pkg.0.package_id(), pkg)).collect();
+fn local_deps<T>(packages: impl Iterator<Item = (Package, T)>) -> LocalDependencies<T> {
+    let packages: HashMap<PackageId, (Package, T)> = packages
+        .map(|(pkg, payload)| (pkg.package_id(), (pkg, payload)))
+        .collect();
 
     // Dependencies have source ids but not package ids. We draw an edge
     // whenever a dependency's source id matches one of our packages. This is
@@ -349,7 +353,7 @@ fn local_deps<T>(packages: impl Iterator<Item = (Package, T)>) -> LocalDependencies<T> {
         .collect();
 
     let mut graph = Graph::new();
-    for (pkg, _features) in packages.values() {
+    for (pkg, _payload) in packages.values() {
         graph.add(pkg.package_id());
         for dep in pkg.dependencies() {
             // Ignore local dev-dependencies because they aren't needed for intra-workspace
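
Aside (not part of the commit): a minimal sketch of how a caller might consume `package_with_dep_graph`, assuming `LocalDependencies::sort()` yields each package after the local packages it depends on; `handle` is a hypothetical callback:

    let local_deps = ops::cargo_package::package_with_dep_graph(ws, &opts, pkgs)?;
    for (pkg, (cli_features, tarball)) in local_deps.sort() {
        // `tarball` is the FileLock for the generated .crate file, paired
        // with the CliFeatures the package was built with.
        handle(&pkg, &cli_features, &tarball)?;
    }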

diff --git a/src/cargo/ops/mod.rs b/src/cargo/ops/mod.rs

@@ -10,7 +10,7 @@ pub use self::cargo_fetch::{fetch, FetchOptions};
 pub use self::cargo_install::{install, install_list};
 pub use self::cargo_new::{init, new, NewOptions, NewProjectKind, VersionControl};
 pub use self::cargo_output_metadata::{output_metadata, ExportInfo, OutputMetadataOptions};
-pub use self::cargo_package::{check_yanked, package, package_one, PackageOpts};
+pub use self::cargo_package::{check_yanked, package, PackageOpts};
 pub use self::cargo_pkgid::pkgid;
 pub use self::cargo_read_manifest::read_package;
 pub use self::cargo_run::run;

diff --git a/src/cargo/ops/registry/publish.rs b/src/cargo/ops/registry/publish.rs

@@ -3,8 +3,12 @@
 //! [1]: https://doc.rust-lang.org/nightly/cargo/reference/registry-web-api.html#publish
 
 use std::collections::BTreeMap;
+use std::collections::BTreeSet;
+use std::collections::HashMap;
 use std::collections::HashSet;
 use std::fs::File;
+use std::io::Seek;
+use std::io::SeekFrom;
 use std::time::Duration;
 
 use anyhow::bail;
@@ -15,6 +19,7 @@ use cargo_util::paths;
 use crates_io::NewCrate;
 use crates_io::NewCrateDependency;
 use crates_io::Registry;
+use itertools::Itertools;
 
 use crate::core::dependency::DepKind;
 use crate::core::manifest::ManifestMetadata;
@@ -28,6 +33,7 @@ use crate::core::Workspace;
 use crate::ops;
 use crate::ops::PackageOpts;
 use crate::ops::Packages;
+use crate::ops::RegistryOrIndex;
 use crate::sources::source::QueryKind;
 use crate::sources::source::Source;
 use crate::sources::SourceConfigMap;
@@ -36,13 +42,13 @@ use crate::util::auth;
 use crate::util::cache_lock::CacheLockMode;
 use crate::util::context::JobsConfig;
 use crate::util::toml::prepare_for_publish;
+use crate::util::Graph;
 use crate::util::Progress;
 use crate::util::ProgressStyle;
 use crate::CargoResult;
 use crate::GlobalContext;
 
 use super::super::check_dep_has_version;
-use super::RegistryOrIndex;
 
 pub struct PublishOpts<'gctx> {
     pub gctx: &'gctx GlobalContext,
@@ -59,16 +65,23 @@ pub struct PublishOpts<'gctx> {
 }
 
 pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> {
+    let multi_package_mode = ws.gctx().cli_unstable().package_workspace;
     let specs = opts.to_publish.to_package_id_specs(ws)?;
-    if specs.len() > 1 {
-        bail!("the `-p` argument must be specified to select a single package to publish")
-    }
-    if Packages::Default == opts.to_publish && ws.is_virtual() {
-        bail!("the `-p` argument must be specified in the root of a virtual workspace")
+
+    if !multi_package_mode {
+        if specs.len() > 1 {
+            bail!("the `-p` argument must be specified to select a single package to publish")
+        }
+        if Packages::Default == opts.to_publish && ws.is_virtual() {
+            bail!("the `-p` argument must be specified in the root of a virtual workspace")
+        }
     }
-    let member_ids = ws.members().map(|p| p.package_id());
-    // Check that the spec matches exactly one member.
-    specs[0].query(member_ids)?;
+
+    let member_ids: Vec<_> = ws.members().map(|p| p.package_id()).collect();
+    // Check that the specs match members.
+    for spec in &specs {
+        spec.query(member_ids.clone())?;
+    }
+
     let mut pkgs = ws.members_with_features(&specs, &opts.cli_features)?;
     // In `members_with_features_old`, it will add "current" package (determined by the cwd)
     // So we need filter
@@ -76,8 +89,6 @@ pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> {
         .into_iter()
         .filter(|(m, _)| specs.iter().any(|spec| spec.matches(m.package_id())))
         .collect();
-    // Double check. It is safe theoretically, unless logic has updated.
-    assert_eq!(pkgs.len(), 1);
 
     let just_pkgs: Vec<_> = pkgs.iter().map(|p| p.0).collect();
 
     let reg_or_index = match opts.reg_or_index.clone() {
@@ -103,9 +114,6 @@ pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> {
     // This is only used to confirm that we can create a token before we build the package.
     // This causes the credential provider to be called an extra time, but keeps the same order of errors.
-    let (pkg, cli_features) = pkgs.pop().unwrap();
-    let ver = pkg.version().to_string();
-
     let source_ids = super::get_source_id(opts.gctx, reg_or_index.as_ref())?;
     let mut registry = super::registry(
         opts.gctx,
@@ -115,82 +123,147 @@ pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> {
         true,
         Some(Operation::Read).filter(|_| !opts.dry_run),
     )?;
-    verify_dependencies(pkg, &registry, source_ids.original)?;
 
-    // Prepare a tarball, with a non-suppressible warning if metadata
-    // is missing since this is being put online.
-    let tarball = ops::package_one(
+    // Validate all the packages before publishing any of them.
+    for (pkg, _) in &pkgs {
+        verify_dependencies(pkg, &registry, source_ids.original)?;
+    }
+
+    let pkg_dep_graph = ops::cargo_package::package_with_dep_graph(
         ws,
-        pkg,
         &PackageOpts {
             gctx: opts.gctx,
             verify: opts.verify,
             list: false,
             check_metadata: true,
             allow_dirty: opts.allow_dirty,
-            to_package: Packages::Default,
+            // `package_with_dep_graph` ignores this field in favor of
+            // the already-resolved list of packages
+            to_package: ops::Packages::Default,
             targets: opts.targets.clone(),
             jobs: opts.jobs.clone(),
             keep_going: opts.keep_going,
-            cli_features,
-            reg_or_index,
+            cli_features: opts.cli_features.clone(),
+            reg_or_index: reg_or_index.clone(),
         },
+        pkgs,
     )?;
 
-    if !opts.dry_run {
-        let hash = cargo_util::Sha256::new()
-            .update_file(tarball.file())?
-            .finish_hex();
-        let operation = Operation::Publish {
-            name: pkg.name().as_str(),
-            vers: &ver,
-            cksum: &hash,
-        };
-        registry.set_token(Some(auth::auth_token(
-            &opts.gctx,
-            &source_ids.original,
-            None,
-            operation,
-            vec![],
-            false,
-        )?));
-    }
+    let mut plan = PublishPlan::new(&pkg_dep_graph.graph);
+    // May contain packages from previous rounds, as `wait_for_any_publish_confirmation` returns
+    // after it confirms any packages, not all packages, requiring us to handle the rest in the
+    // next iteration.
+    //
+    // As a side effect, any given package's "effective" timeout may be much larger.
+    let mut to_confirm = BTreeSet::new();
 
-    opts.gctx
-        .shell()
-        .status("Uploading", pkg.package_id().to_string())?;
-    transmit(
-        opts.gctx,
-        ws,
-        pkg,
-        tarball.file(),
-        &mut registry,
-        source_ids.original,
-        opts.dry_run,
-    )?;
-    if !opts.dry_run {
-        const DEFAULT_TIMEOUT: u64 = 60;
-        let timeout = if opts.gctx.cli_unstable().publish_timeout {
-            let timeout: Option<u64> = opts.gctx.get("publish.timeout")?;
-            timeout.unwrap_or(DEFAULT_TIMEOUT)
-        } else {
-            DEFAULT_TIMEOUT
-        };
-        if 0 < timeout {
-            let timeout = Duration::from_secs(timeout);
-            wait_for_publish(opts.gctx, source_ids.original, pkg, timeout)?;
-        }
-    }
+    while !plan.is_empty() {
+        // There might not be any ready package, if the previous confirmations
+        // didn't unlock a new one. For example, if `c` depends on `a` and
+        // `b`, and we uploaded `a` and `b` but only confirmed `a`, then on
+        // the following pass through the outer loop nothing will be ready for
+        // upload.
+        for pkg_id in plan.take_ready() {
+            let (pkg, (_features, tarball)) = &pkg_dep_graph.packages[&pkg_id];
+            opts.gctx.shell().status("Uploading", pkg.package_id())?;
+
+            if !opts.dry_run {
+                let ver = pkg.version().to_string();
+
+                tarball.file().seek(SeekFrom::Start(0))?;
+                let hash = cargo_util::Sha256::new()
+                    .update_file(tarball.file())?
+                    .finish_hex();
+                let operation = Operation::Publish {
+                    name: pkg.name().as_str(),
+                    vers: &ver,
+                    cksum: &hash,
+                };
+                registry.set_token(Some(auth::auth_token(
+                    &opts.gctx,
+                    &source_ids.original,
+                    None,
+                    operation,
+                    vec![],
+                    false,
+                )?));
+            }
+
+            transmit(
+                opts.gctx,
+                ws,
+                pkg,
+                tarball.file(),
+                &mut registry,
+                source_ids.original,
+                opts.dry_run,
+            )?;
+            to_confirm.insert(pkg_id);
+
+            if !opts.dry_run {
+                // Short does not include the registry name.
+                let short_pkg_description = format!("{} v{}", pkg.name(), pkg.version());
+                let source_description = source_ids.original.to_string();
+                ws.gctx().shell().status(
+                    "Uploaded",
+                    format!("{short_pkg_description} to {source_description}"),
+                )?;
+            }
+        }
+
+        let confirmed = if opts.dry_run {
+            to_confirm.clone()
+        } else {
+            const DEFAULT_TIMEOUT: u64 = 60;
+            let timeout = if opts.gctx.cli_unstable().publish_timeout {
+                let timeout: Option<u64> = opts.gctx.get("publish.timeout")?;
+                timeout.unwrap_or(DEFAULT_TIMEOUT)
+            } else {
+                DEFAULT_TIMEOUT
+            };
+            if 0 < timeout {
+                let timeout = Duration::from_secs(timeout);
+                wait_for_any_publish_confirmation(
+                    opts.gctx,
+                    source_ids.original,
+                    &to_confirm,
+                    timeout,
+                )?
+            } else {
+                BTreeSet::new()
+            }
+        };
+
+        if confirmed.is_empty() {
+            // If nothing finished, it means we timed out while waiting for confirmation.
+            // We're going to exit, but first we need to check: have we uploaded everything?
+            if plan.is_empty() {
+                // It's ok that we timed out, because nothing was waiting on dependencies to
+                // be confirmed.
+                break;
+            } else {
+                let failed_list = package_list(plan.iter(), "and");
+                bail!("unable to publish {failed_list} due to time out while waiting for published dependencies to be available.");
+            }
+        }
+
+        for id in &confirmed {
+            to_confirm.remove(id);
+        }
+        plan.mark_confirmed(confirmed);
+    }
 
     Ok(())
 }
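
Aside (not part of the commit): the confirmation wait above is driven by the `publish.timeout` configuration read via `opts.gctx.get("publish.timeout")`. A sketch of the corresponding config file, assuming the unstable `-Zpublish-timeout` gate shown in the code (the default is 60 seconds, and 0 skips waiting):

    # .cargo/config.toml
    [publish]
    timeout = 120  # seconds to wait per round of uploads; 0 disables waiting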
-fn wait_for_publish(
+/// Poll the registry for any packages that are ready for use.
+///
+/// Returns the subset of `pkgs` that are ready for use.
+/// This will be an empty set if we timed out before confirming anything.
+fn wait_for_any_publish_confirmation(
     gctx: &GlobalContext,
     registry_src: SourceId,
-    pkg: &Package,
+    pkgs: &BTreeSet<PackageId>,
     timeout: Duration,
-) -> CargoResult<()> {
+) -> CargoResult<BTreeSet<PackageId>> {
     let mut source = SourceConfigMap::empty(gctx)?.load(registry_src, &HashSet::new())?;
     // Disable the source's built-in progress bars. Repeatedly showing a bunch
     // of independent progress bars can be a little confusing. There is an
@@ -202,18 +275,14 @@ fn wait_for_publish(
     let sleep_time = Duration::from_secs(1);
     let max = timeout.as_secs() as usize;
     // Short does not include the registry name.
-    let short_pkg_description = format!("{} v{}", pkg.name(), pkg.version());
-    gctx.shell().status(
-        "Uploaded",
-        format!("{short_pkg_description} to {source_description}"),
-    )?;
+    let short_pkg_descriptions = package_list(pkgs.iter().copied(), "or");
     gctx.shell().note(format!(
-        "waiting for `{short_pkg_description}` to be available at {source_description}.\n\
+        "waiting for {short_pkg_descriptions} to be available at {source_description}.\n\
         You may press ctrl-c to skip waiting; the crate should be available shortly."
     ))?;
     let mut progress = Progress::with_style("Waiting", ProgressStyle::Ratio, gctx);
     progress.tick_now(0, max, "")?;
-    let is_available = loop {
+    let available = loop {
         {
             let _lock = gctx.acquire_package_cache_lock(CacheLockMode::DownloadExclusive)?;
             // Force re-fetching the source
@@ -223,34 +292,48 @@ fn wait_for_publish(
             // multiple times
             gctx.updated_sources().remove(&source.replaced_source_id());
             source.invalidate_cache();
-            if poll_one_package(registry_src, &pkg.package_id(), &mut source)? {
-                break true;
+            let mut available = BTreeSet::new();
+            for pkg in pkgs {
+                if poll_one_package(registry_src, pkg, &mut source)? {
+                    available.insert(*pkg);
+                }
+            }
+
+            // As soon as any package is available, break this loop so we can see if another
+            // one can be uploaded.
+            if !available.is_empty() {
+                break available;
             }
         }
 
         let elapsed = now.elapsed();
         if timeout < elapsed {
             gctx.shell().warn(format!(
-                "timed out waiting for `{short_pkg_description}` to be available in {source_description}",
+                "timed out waiting for {short_pkg_descriptions} to be available in {source_description}",
             ))?;
             gctx.shell().note(
                 "the registry may have a backlog that is delaying making the \
                 crate available. The crate should be available soon.",
             )?;
-            break false;
+            break BTreeSet::new();
         }
 
         progress.tick_now(elapsed.as_secs() as usize, max, "")?;
         std::thread::sleep(sleep_time);
     };
 
-    if is_available {
+    if !available.is_empty() {
+        let short_pkg_description = available
+            .iter()
+            .map(|pkg| format!("{} v{}", pkg.name(), pkg.version()))
+            .sorted()
+            .join(", ");
         gctx.shell().status(
             "Published",
             format!("{short_pkg_description} at {source_description}"),
         )?;
     }
 
-    Ok(())
+    Ok(available)
 }
 
 fn poll_one_package(
@@ -485,6 +568,87 @@ fn transmit(
     Ok(())
 }
 
+/// State for tracking dependencies during upload.
+struct PublishPlan {
+    /// Graph of publishable packages where the edges are `(dependency -> dependent)`
+    dependents: Graph<PackageId, ()>,
+    /// The weight of a package is the number of unpublished dependencies it has.
+    dependencies_count: HashMap<PackageId, usize>,
+}
+
+impl PublishPlan {
+    /// Given a package dependency graph, creates a `PublishPlan` for tracking state.
+    fn new(graph: &Graph<PackageId, ()>) -> Self {
+        let dependents = graph.reversed();
+        let dependencies_count: HashMap<_, _> = dependents
+            .iter()
+            .map(|id| (*id, graph.edges(id).count()))
+            .collect();
+        Self {
+            dependents,
+            dependencies_count,
+        }
+    }
+
+    fn iter(&self) -> impl Iterator<Item = PackageId> + '_ {
+        self.dependencies_count.iter().map(|(id, _)| *id)
+    }
+
+    fn is_empty(&self) -> bool {
+        self.dependencies_count.is_empty()
+    }
+
+    /// Returns the set of packages that are ready for publishing (i.e. have no outstanding dependencies).
+    ///
+    /// These will not be returned in future calls.
+    fn take_ready(&mut self) -> BTreeSet<PackageId> {
+        let ready: BTreeSet<_> = self
+            .dependencies_count
+            .iter()
+            .filter_map(|(id, weight)| (*weight == 0).then_some(*id))
+            .collect();
+        for pkg in &ready {
+            self.dependencies_count.remove(pkg);
+        }
+        ready
+    }
+
+    /// Packages confirmed to be available in the registry, potentially allowing additional
+    /// packages to be "ready".
+    fn mark_confirmed(&mut self, published: impl IntoIterator<Item = PackageId>) {
+        for id in published {
+            for (dependent_id, _) in self.dependents.edges(&id) {
+                if let Some(weight) = self.dependencies_count.get_mut(dependent_id) {
+                    *weight = weight.saturating_sub(1);
+                }
+            }
+        }
+    }
+}
+
+/// Format a collection of packages as a list
+///
+/// e.g. "foo v0.1.0, bar v0.2.0, and baz v0.3.0".
+///
+/// Note: the final separator (e.g. "and" in the previous example) can be chosen.
+fn package_list(pkgs: impl IntoIterator<Item = PackageId>, final_sep: &str) -> String {
+    let mut names: Vec<_> = pkgs
+        .into_iter()
+        .map(|pkg| format!("`{} v{}`", pkg.name(), pkg.version()))
+        .collect();
+    names.sort();
+
+    match &names[..] {
+        [] => String::new(),
+        [a] => a.clone(),
+        [a, b] => format!("{a} {final_sep} {b}"),
+        [names @ .., last] => {
+            format!("{}, {final_sep} {last}", names.join(", "))
+        }
+    }
+}
+
 fn validate_registry(pkgs: &[&Package], reg_or_index: Option<&RegistryOrIndex>) -> CargoResult<()> {
     for pkg in pkgs {
         if pkg.publish() == &Some(Vec::new()) {
@@ -518,3 +682,59 @@ fn validate_registry(pkgs: &[&Package], reg_or_index: Option<&RegistryOrIndex>)
 
     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::{
+        core::{PackageId, SourceId},
+        sources::CRATES_IO_INDEX,
+        util::{Graph, IntoUrl},
+    };
+
+    use super::PublishPlan;
+
+    fn pkg_id(name: &str) -> PackageId {
+        let loc = CRATES_IO_INDEX.into_url().unwrap();
+        PackageId::try_new(name, "1.0.0", SourceId::for_registry(&loc).unwrap()).unwrap()
+    }
+
+    #[test]
+    fn parallel_schedule() {
+        let mut graph: Graph<PackageId, ()> = Graph::new();
+        let a = pkg_id("a");
+        let b = pkg_id("b");
+        let c = pkg_id("c");
+        let d = pkg_id("d");
+        let e = pkg_id("e");
+
+        graph.add(a);
+        graph.add(b);
+        graph.add(c);
+        graph.add(d);
+        graph.add(e);
+        graph.link(a, c);
+        graph.link(b, c);
+        graph.link(c, d);
+        graph.link(c, e);
+
+        let mut order = PublishPlan::new(&graph);
+        let ready: Vec<_> = order.take_ready().into_iter().collect();
+        assert_eq!(ready, vec![d, e]);
+
+        order.mark_confirmed(vec![d]);
+        let ready: Vec<_> = order.take_ready().into_iter().collect();
+        assert!(ready.is_empty());
+
+        order.mark_confirmed(vec![e]);
+        let ready: Vec<_> = order.take_ready().into_iter().collect();
+        assert_eq!(ready, vec![c]);
+
+        order.mark_confirmed(vec![c]);
+        let ready: Vec<_> = order.take_ready().into_iter().collect();
+        assert_eq!(ready, vec![a, b]);
+
+        order.mark_confirmed(vec![a, b]);
+        let ready: Vec<_> = order.take_ready().into_iter().collect();
+        assert!(ready.is_empty());
+    }
+}
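
Aside (not part of the commit): a sketch of what `package_list` produces for each match arm, written as a hypothetical test reusing the `pkg_id` helper from the tests module above (which always uses version 1.0.0):

    #[test]
    fn package_list_separators() {
        use super::package_list;
        let (a, b, c) = (pkg_id("a"), pkg_id("b"), pkg_id("c"));
        // One name: no separator at all.
        assert_eq!(package_list([a], "and"), "`a v1.0.0`");
        // Two names: just the final separator, no comma.
        assert_eq!(package_list([a, b], "and"), "`a v1.0.0` and `b v1.0.0`");
        // Three or more: comma-separated, with the chosen final separator.
        assert_eq!(
            package_list([a, b, c], "or"),
            "`a v1.0.0`, `b v1.0.0`, or `c v1.0.0`"
        );
    }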

diff --git a/src/cargo/util/graph.rs b/src/cargo/util/graph.rs

@@ -25,6 +25,20 @@ impl<N: Eq + Ord + Clone, E: Default + Clone> Graph<N, E> {
             .or_insert_with(Default::default)
     }
 
+    /// Returns the graph obtained by reversing all edges.
+    pub fn reversed(&self) -> Graph<N, E> {
+        let mut ret = Graph::new();
+        for n in self.iter() {
+            ret.add(n.clone());
+            for (m, e) in self.edges(n) {
+                *ret.link(m.clone(), n.clone()) = e.clone();
+            }
+        }
+        ret
+    }
+
     pub fn contains<Q: ?Sized>(&self, k: &Q) -> bool
     where
         N: Borrow<Q>,
@@ -206,6 +220,19 @@ fn path_to_self() {
     assert_eq!(new.path_to_bottom(&0), vec![(&0, Some(&()))]);
 }
 
+#[test]
+fn reverse() {
+    let mut new: Graph<i32, ()> = Graph::new();
+    new.link(0, 1);
+    new.link(0, 2);
+
+    let mut expected: Graph<i32, ()> = Graph::new();
+    expected.add(0);
+    expected.link(1, 0);
+    expected.link(2, 0);
+    assert_eq!(new.reversed(), expected);
+}
+
 impl<N: Eq + Ord + Clone, E: Default + Clone> Default for Graph<N, E> {
     fn default() -> Graph<N, E> {
         Graph::new()
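
Aside (not part of the commit): `reversed()` is what lets `PublishPlan` turn the "depends on" graph into a "depended on by" graph. A minimal sketch of the edge flip, using only the `Graph` API shown above:

    let mut deps: Graph<&str, ()> = Graph::new();
    deps.link("app", "lib"); // edge: app -> lib ("app depends on lib")
    let dependents = deps.reversed();
    // After reversal the edge runs lib -> app.
    assert!(dependents.edges(&"lib").any(|(n, _)| *n == "app"));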

diff --git a/tests/testsuite/credential_process.rs b/tests/testsuite/credential_process.rs

@@ -76,8 +76,8 @@ fn publish() {
 {"v":1,"registry":{"index-url":"[..]","name":"alternative","headers":[..]},"kind":"get","operation":"read"}
 [PACKAGING] foo v0.1.0 ([ROOT]/foo)
 [PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
-{"v":1,"registry":{"index-url":"[..]","name":"alternative"},"kind":"get","operation":"publish","name":"foo","vers":"0.1.0","cksum":"[..]"}
 [UPLOADING] foo v0.1.0 ([ROOT]/foo)
+{"v":1,"registry":{"index-url":"[..]","name":"alternative"},"kind":"get","operation":"publish","name":"foo","vers":"0.1.0","cksum":"[..]"}
 [UPLOADED] foo v0.1.0 to registry `alternative`
 [NOTE] waiting for `foo v0.1.0` to be available at registry `alternative`.
 You may press ctrl-c [..]
@@ -529,8 +529,8 @@ fn token_caching() {
 {"v":1,"registry":{"index-url":"[..]","name":"alternative"},"kind":"get","operation":"read"}
 [PACKAGING] foo v0.1.0 ([ROOT]/foo)
 [PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
-{"v":1,"registry":{"index-url":"[..]","name":"alternative"},"kind":"get","operation":"publish","name":"foo","vers":"0.1.0","cksum":"[..]"}
 [UPLOADING] foo v0.1.0 ([ROOT]/foo)
+{"v":1,"registry":{"index-url":"[..]","name":"alternative"},"kind":"get","operation":"publish","name":"foo","vers":"0.1.0","cksum":"[..]"}
 [UPLOADED] foo v0.1.0 to registry `alternative`
 [NOTE] waiting [..]
 You may press ctrl-c [..]

diff --git a/tests/testsuite/publish.rs b/tests/testsuite/publish.rs

@@ -3173,6 +3173,7 @@ See https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata for more info.
 [PACKAGING] foo v0.0.1 ([ROOT]/foo)
 [PACKAGED] 4 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
 [UPLOADING] foo v0.0.1 ([ROOT]/foo)
+[UPLOADED] foo v0.0.1 to registry `crates-io`
 "#]])
         .run();
@@ -3305,7 +3306,26 @@ fn timeout_waiting_for_dependency_publish() {
         .masquerade_as_nightly_cargo(&["publish-timeout", "package-workspace"])
         .with_status(101)
         .with_stderr_data(str![[r#"
-[ERROR] the `-p` argument must be specified to select a single package to publish
+[UPDATING] crates.io index
+[WARNING] manifest has no documentation, homepage or repository.
+See https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata for more info.
+[PACKAGING] dep v0.0.1 ([ROOT]/foo/dep)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[WARNING] manifest has no documentation, homepage or repository.
+See https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata for more info.
+[PACKAGING] main v0.0.1 ([ROOT]/foo/main)
+[PACKAGED] 4 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[WARNING] manifest has no documentation, homepage or repository.
+See https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata for more info.
+[PACKAGING] other v0.0.1 ([ROOT]/foo/other)
+[PACKAGED] 4 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[UPLOADING] dep v0.0.1 ([ROOT]/foo/dep)
+[UPLOADED] dep v0.0.1 to registry `crates-io`
+[NOTE] waiting for `dep v0.0.1` to be available at registry `crates-io`.
+You may press ctrl-c to skip waiting; the crate should be available shortly.
+[WARNING] timed out waiting for `dep v0.0.1` to be available in registry `crates-io`
+[NOTE] the registry may have a backlog that is delaying making the crate available. The crate should be available soon.
+[ERROR] unable to publish `main v0.0.1` and `other v0.0.1` due to time out while waiting for published dependencies to be available.
 "#]])
         .run();
@@ -3588,10 +3608,47 @@ fn workspace_with_local_deps_nightly() {
     p.cargo("publish -Zpackage-workspace")
         .masquerade_as_nightly_cargo(&["package-workspace"])
-        .with_status(101)
         .replace_crates_io(registry.index_url())
         .with_stderr_data(str![[r#"
-[ERROR] the `-p` argument must be specified to select a single package to publish
+[UPDATING] crates.io index
+[PACKAGING] level3 v0.0.1 ([ROOT]/foo/level3)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[PACKAGING] level2 v0.0.1 ([ROOT]/foo/level2)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[PACKAGING] level1 v0.0.1 ([ROOT]/foo/level1)
+[UPDATING] crates.io index
+[PACKAGED] 4 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[VERIFYING] level3 v0.0.1 ([ROOT]/foo/level3)
+[COMPILING] level3 v0.0.1 ([ROOT]/foo/target/package/level3-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[VERIFYING] level2 v0.0.1 ([ROOT]/foo/level2)
+[UPDATING] crates.io index
+[UNPACKING] level3 v0.0.1 (registry `[ROOT]/foo/target/package/tmp-registry`)
+[COMPILING] level3 v0.0.1
+[COMPILING] level2 v0.0.1 ([ROOT]/foo/target/package/level2-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[VERIFYING] level1 v0.0.1 ([ROOT]/foo/level1)
+[UPDATING] crates.io index
+[UNPACKING] level2 v0.0.1 (registry `[ROOT]/foo/target/package/tmp-registry`)
+[COMPILING] level3 v0.0.1
+[COMPILING] level2 v0.0.1
+[COMPILING] level1 v0.0.1 ([ROOT]/foo/target/package/level1-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[UPLOADING] level3 v0.0.1 ([ROOT]/foo/level3)
+[UPLOADED] level3 v0.0.1 to registry `crates-io`
+[NOTE] waiting for `level3 v0.0.1` to be available at registry `crates-io`.
+You may press ctrl-c to skip waiting; the crate should be available shortly.
+[PUBLISHED] level3 v0.0.1 at registry `crates-io`
+[UPLOADING] level2 v0.0.1 ([ROOT]/foo/level2)
+[UPLOADED] level2 v0.0.1 to registry `crates-io`
+[NOTE] waiting for `level2 v0.0.1` to be available at registry `crates-io`.
+You may press ctrl-c to skip waiting; the crate should be available shortly.
+[PUBLISHED] level2 v0.0.1 at registry `crates-io`
+[UPLOADING] level1 v0.0.1 ([ROOT]/foo/level1)
+[UPLOADED] level1 v0.0.1 to registry `crates-io`
+[NOTE] waiting for `level1 v0.0.1` to be available at registry `crates-io`.
+You may press ctrl-c to skip waiting; the crate should be available shortly.
+[PUBLISHED] level1 v0.0.1 at registry `crates-io`
 "#]])
         .run();
@@ -3659,10 +3716,41 @@ fn workspace_parallel() {
     p.cargo("publish -Zpackage-workspace")
         .masquerade_as_nightly_cargo(&["package-workspace"])
         .replace_crates_io(registry.index_url())
-        .with_status(101)
         .with_stderr_data(
             str![[r#"
-[ERROR] the `-p` argument must be specified to select a single package to publish
+[UPDATING] crates.io index
+[PACKAGING] a v0.0.1 ([ROOT]/foo/a)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[PACKAGING] b v0.0.1 ([ROOT]/foo/b)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[PACKAGING] c v0.0.1 ([ROOT]/foo/c)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[VERIFYING] a v0.0.1 ([ROOT]/foo/a)
+[COMPILING] a v0.0.1 ([ROOT]/foo/target/package/a-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[VERIFYING] b v0.0.1 ([ROOT]/foo/b)
+[COMPILING] b v0.0.1 ([ROOT]/foo/target/package/b-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[VERIFYING] c v0.0.1 ([ROOT]/foo/c)
+[UPDATING] crates.io index
+[UNPACKING] a v0.0.1 (registry `[ROOT]/foo/target/package/tmp-registry`)
+[UNPACKING] b v0.0.1 (registry `[ROOT]/foo/target/package/tmp-registry`)
+[COMPILING] a v0.0.1
+[COMPILING] b v0.0.1
+[COMPILING] c v0.0.1 ([ROOT]/foo/target/package/c-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[UPLOADED] b v0.0.1 to registry `crates-io`
+[UPLOADED] a v0.0.1 to registry `crates-io`
+[NOTE] waiting for `a v0.0.1` or `b v0.0.1` to be available at registry `crates-io`.
+You may press ctrl-c to skip waiting; the crate should be available shortly.
+[PUBLISHED] a v0.0.1, b v0.0.1 at registry `crates-io`
+[UPLOADING] c v0.0.1 ([ROOT]/foo/c)
+[UPLOADED] c v0.0.1 to registry `crates-io`
+[NOTE] waiting for `c v0.0.1` to be available at registry `crates-io`.
+You may press ctrl-c to skip waiting; the crate should be available shortly.
+[PUBLISHED] c v0.0.1 at registry `crates-io`
+[UPLOADING] a v0.0.1 ([ROOT]/foo/a)
+[UPLOADING] b v0.0.1 ([ROOT]/foo/b)
 "#]]
             .unordered(),
@@ -3724,9 +3812,12 @@ fn workspace_missing_dependency() {
 [PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
 [VERIFYING] b v0.0.1 ([ROOT]/foo/b)
 [UPDATING] crates.io index
-[ERROR] no matching package named `a` found
-location searched: registry `crates-io`
-required by package `b v0.0.1 ([ROOT]/foo/target/package/b-0.0.1)`
+[ERROR] failed to verify package tarball
+
+Caused by:
+  no matching package named `a` found
+  location searched: registry `crates-io`
+  required by package `b v0.0.1 ([ROOT]/foo/target/package/b-0.0.1)`
 "#]])
         .run();
@@ -3756,7 +3847,23 @@ You may press ctrl-c to skip waiting; the crate should be available shortly.
         .replace_crates_io(registry.index_url())
         .with_status(101)
         .with_stderr_data(str![[r#"
-[ERROR] the `-p` argument must be specified to select a single package to publish
+[UPDATING] crates.io index
+[PACKAGING] a v0.0.1 ([ROOT]/foo/a)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[PACKAGING] b v0.0.1 ([ROOT]/foo/b)
+[PACKAGED] 3 files, [FILE_SIZE]B ([FILE_SIZE]B compressed)
+[VERIFYING] a v0.0.1 ([ROOT]/foo/a)
+[COMPILING] a v0.0.1 ([ROOT]/foo/target/package/a-0.0.1)
+[FINISHED] `dev` profile [unoptimized + debuginfo] target(s) in [ELAPSED]s
+[VERIFYING] b v0.0.1 ([ROOT]/foo/b)
+[UPDATING] crates.io index
+[ERROR] failed to verify package tarball
+
+Caused by:
+  failed to get `a` as a dependency of package `b v0.0.1 ([ROOT]/foo/target/package/b-0.0.1)`
+
+Caused by:
+  found a package in the remote registry and the local overlay: a@0.0.1
 "#]])
         .run();
@@ -3817,7 +3924,8 @@ fn one_unpublishable_package() {
         .masquerade_as_nightly_cargo(&["package-workspace"])
        .with_status(101)
        .with_stderr_data(str![[r#"
-[ERROR] the `-p` argument must be specified to select a single package to publish
+[ERROR] `main` cannot be published.
+`package.publish` must be set to `true` or a non-empty list in Cargo.toml to publish.
 "#]])
         .run();