diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index a1f2bba16..8504ef83b 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -69,7 +69,7 @@ use super::job::{ }; use super::timings::Timings; use super::{BuildContext, BuildPlan, CompileMode, Context, Unit}; -use crate::core::{PackageId, TargetKind}; +use crate::core::{PackageId, Shell, TargetKind}; use crate::util::diagnostic_server::{self, DiagnosticPrinter}; use crate::util::machine_message::{self, Message as _}; use crate::util::{self, internal, profile}; @@ -412,7 +412,6 @@ impl<'cfg> DrainState<'cfg> { cx: &mut Context<'_, '_>, jobserver_helper: &HelperThread, scope: &Scope<'_>, - has_errored: bool, ) -> CargoResult<()> { // Dequeue as much work as we can, learning about everything // possible that can run. Note that this is also the point where we @@ -425,11 +424,6 @@ impl<'cfg> DrainState<'cfg> { } } - // Do not actually spawn the new work if we've errored out - if has_errored { - return Ok(()); - } - // Now that we've learned of all possible work that we can execute // try to spawn it so long as we've got a jobserver token which says // we're able to perform some parallel work. 
@@ -487,7 +481,7 @@ impl<'cfg> DrainState<'cfg> { jobserver_helper: &HelperThread, plan: &mut BuildPlan, event: Message, - ) -> CargoResult<Option<anyhow::Error>> { + ) -> CargoResult<()> { match event { Message::Run(id, cmd) => { cx.bcx @@ -545,17 +539,7 @@ impl<'cfg> DrainState<'cfg> { Err(e) => { let msg = "The following warnings were emitted during compilation:"; self.emit_warnings(Some(msg), &unit, cx)?; - - if !self.active.is_empty() { - crate::display_error(&e, &mut *cx.bcx.config.shell()); - cx.bcx.config.shell().warn( - "build failed, waiting for other \ jobs to finish...", - )?; - return Ok(Some(anyhow::format_err!("build failed"))); - } else { - return Ok(Some(e)); - } + return Err(e); } } } @@ -590,7 +574,7 @@ impl<'cfg> DrainState<'cfg> { } } - Ok(None) + Ok(()) } // This will also tick the progress bar as appropriate @@ -651,8 +635,14 @@ impl<'cfg> DrainState<'cfg> { // successful and otherwise wait for pending work to finish if it failed // and then immediately return. let mut error = None; + // CAUTION! From here on out, do not use `?`. Every error must be handled + // in such a way that the loop is still allowed to drain event messages. loop { - self.spawn_work_if_possible(cx, jobserver_helper, scope, error.is_some())?; + if error.is_none() { + if let Err(e) = self.spawn_work_if_possible(cx, jobserver_helper, scope) { + self.handle_error(&mut cx.bcx.config.shell(), &mut error, e); + } + } // If after all that we're not actually running anything then we're // done! @@ -660,7 +650,9 @@ impl<'cfg> DrainState<'cfg> { break; } - self.grant_rustc_token_requests()?; + if let Err(e) = self.grant_rustc_token_requests() { + self.handle_error(&mut cx.bcx.config.shell(), &mut error, e); + } // And finally, before we block waiting for the next event, drop any // excess tokens we may have accidentally acquired. Due to how our // don't actually use, and if this happens just relinquish it back // to the jobserver itself.
for event in self.wait_for_events() { - if let Some(err) = self.handle_event(cx, jobserver_helper, plan, event)? { - error = Some(err); + if let Err(event_err) = self.handle_event(cx, jobserver_helper, plan, event) { + self.handle_error(&mut cx.bcx.config.shell(), &mut error, event_err); } } } @@ -694,13 +686,25 @@ } let time_elapsed = util::elapsed(cx.bcx.config.creation_time().elapsed()); - self.timings.finished(cx.bcx, &error)?; + if let Err(e) = self.timings.finished(cx.bcx, &error) { + if error.is_some() { + crate::display_error(&e, &mut cx.bcx.config.shell()); + } else { + return Err(e); + } + } if cx.bcx.build_config.emit_json() { let msg = machine_message::BuildFinished { success: error.is_none(), } .to_json_string(); - writeln!(cx.bcx.config.shell().out(), "{}", msg)?; + if let Err(e) = writeln!(cx.bcx.config.shell().out(), "{}", msg) { + if error.is_some() { + crate::display_error(&e.into(), &mut cx.bcx.config.shell()); + } else { + return Err(e.into()); + } + } } if let Some(e) = error { @@ -711,7 +715,8 @@ profile_name, opt_type, time_elapsed ); if !cx.bcx.build_config.build_plan { - cx.bcx.config.shell().status("Finished", message)?; + // It doesn't really matter if this fails. + drop(cx.bcx.config.shell().status("Finished", message)); } Ok(()) } else { @@ -720,6 +725,26 @@ } } + fn handle_error( + &self, + shell: &mut Shell, + err_state: &mut Option<anyhow::Error>, + new_err: anyhow::Error, + ) { + if err_state.is_some() { + // Already encountered one error.
+ log::warn!("{:?}", new_err); + } else { + if !self.active.is_empty() { + crate::display_error(&new_err, shell); + drop(shell.warn("build failed, waiting for other jobs to finish...")); + *err_state = Some(anyhow::format_err!("build failed")); + } else { + *err_state = Some(new_err); + } + } + } + // This also records CPU usage and marks concurrency; we roughly want to do // this as often as we spin on the events receiver (at least every 500ms or // so). diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 8302475de..17b72b261 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -8,7 +8,7 @@ use crate::core::compiler::BuildContext; use crate::core::PackageId; use crate::util::cpu::State; use crate::util::machine_message::{self, Message}; -use crate::util::{paths, CargoResult, Config}; +use crate::util::{paths, CargoResult, CargoResultExt, Config}; use std::collections::HashMap; use std::io::{BufWriter, Write}; use std::time::{Duration, Instant, SystemTime}; @@ -322,7 +322,8 @@ impl<'cfg> Timings<'cfg> { self.unit_times .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap()); if self.report_html { - self.report_html(bcx, error)?; + self.report_html(bcx, error) + .chain_err(|| "failed to save timing report")?; } Ok(()) } diff --git a/tests/testsuite/build.rs b/tests/testsuite/build.rs index dc987c6e6..6605e1f53 100644 --- a/tests/testsuite/build.rs +++ b/tests/testsuite/build.rs @@ -4,11 +4,13 @@ use cargo::util::paths::dylib_path_envvar; use cargo_test_support::paths::{root, CargoPathExt}; use cargo_test_support::registry::Package; use cargo_test_support::{ - basic_bin_manifest, basic_lib_manifest, basic_manifest, main_file, project, rustc_host, - sleep_ms, symlink_supported, t, Execs, ProjectBuilder, + basic_bin_manifest, basic_lib_manifest, basic_manifest, lines_match, main_file, project, + rustc_host, sleep_ms, symlink_supported, t, Execs, ProjectBuilder, }; use std::env; use 
std::fs; +use std::io::Read; +use std::process::Stdio; #[cargo_test] fn cargo_compile_simple() { @@ -4818,3 +4820,112 @@ fn user_specific_cfgs_are_filtered_out() { .run(); p.process(&p.bin("foo")).run(); } + +#[cargo_test] +fn close_output() { + // What happens when stdout or stderr is closed during a build. + + // Server to know when rustc has spawned. + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [lib] + proc-macro = true + "#, + ) + .file( + "src/lib.rs", + &r#" + use proc_macro::TokenStream; + use std::io::Read; + + #[proc_macro] + pub fn repro(_input: TokenStream) -> TokenStream { + println!("hello stdout!"); + eprintln!("hello stderr!"); + // Tell the test we have started. + let mut socket = std::net::TcpStream::connect("__ADDR__").unwrap(); + // Wait for the test to tell us to start printing. + let mut buf = [0]; + drop(socket.read_exact(&mut buf)); + let use_stderr = std::env::var("__CARGO_REPRO_STDERR").is_ok(); + for i in 0..10000 { + if use_stderr { + eprintln!("{}", i); + } else { + println!("{}", i); + } + std::thread::sleep(std::time::Duration::new(0, 1)); + } + TokenStream::new() + } + "# + .replace("__ADDR__", &addr.to_string()), + ) + .file( + "src/main.rs", + r#" + foo::repro!(); + + fn main() {} + "#, + ) + .build(); + + // The `stderr` flag here indicates if this should forcefully close stderr or stdout. + let spawn = |stderr: bool| { + let mut cmd = p.cargo("build").build_command(); + cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + if stderr { + cmd.env("__CARGO_REPRO_STDERR", "1"); + } + let mut child = cmd.spawn().unwrap(); + // Wait for proc macro to start. + let pm_conn = listener.accept().unwrap().0; + // Close stderr or stdout. 
+ if stderr { + drop(child.stderr.take()); + } else { + drop(child.stdout.take()); + } + // Tell the proc-macro to continue; + drop(pm_conn); + // Read the output from the other channel. + let out: &mut dyn Read = if stderr { + child.stdout.as_mut().unwrap() + } else { + child.stderr.as_mut().unwrap() + }; + let mut result = String::new(); + out.read_to_string(&mut result).unwrap(); + let status = child.wait().unwrap(); + assert!(!status.success()); + result + }; + + let stderr = spawn(false); + lines_match( + "\ +[COMPILING] foo [..] +hello stderr! +[ERROR] [..] +[WARNING] build failed, waiting for other jobs to finish... +[ERROR] build failed +", + &stderr, + ); + + // Try again with stderr. + p.build_dir().rm_rf(); + let stdout = spawn(true); + lines_match("hello_stdout!", &stdout); +}