Mirror of https://github.com/rust-lang/rust.git (synced 2024-10-31 14:31:55 +00:00)
Auto merge of #108659 - ferrocene:pa-test-metrics, r=Mark-Simulacrum
Include executed tests in the build metrics (and use a custom test display impl)

The main goal of this PR is to include all tests executed in CI in the build metrics JSON files. I need this for Ferrocene, and `@Mark-Simulacrum` expressed a desire for it as well, to ensure all tests are executed at least once somewhere in CI.

Unfortunately, implementing this required rewriting all of the code in bootstrap that renders test output to the console. libtest supports emitting JSON instead of raw text, which we can use to populate the build metrics, but doing so suppresses the console output, and (unlike rustc and Cargo) libtest does not include the rendered human-readable output as a JSON field. Because of that, this PR reimplements both the "pretty" format (one test per line, with `rust.verbose-tests = true`) and the "terse" format (the wall of dots, with `rust.verbose-tests = false`).

The current implementation should produce exactly the same output as libtest, except for benchmarks. libtest's benchmark output is broken in the "terse" format, and since that is our default I slightly improved how it is rendered.

Also, to reach parity with libtest I had to add support for colored output to bootstrap, using the same dependencies `annotate-snippets` uses. It is now possible to call `builder.colored_stdout(...)` and `builder.colored_stderr(...)` across all of bootstrap; the streams they hand out automatically respect the `--color` flag and whether the stream is a terminal.

I recommend reviewing the PR commit-by-commit.

r? `@Mark-Simulacrum`
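For reviewers unfamiliar with libtest's JSON mode, here is a minimal, self-contained sketch of the approach described above (the binary path and the handling are illustrative, not the actual bootstrap code): run a test binary with `-Z unstable-options --format json` and consume one JSON event per line, which is what the new `render_tests` module does while also recording each test in the metrics.

use std::io::{BufRead, BufReader};
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // libtest only emits JSON when unstable options are enabled.
    let mut child = Command::new("./my-test-binary")
        .args(["-Z", "unstable-options", "--format", "json"])
        .stdout(Stdio::piped())
        .spawn()?;

    for line in BufReader::new(child.stdout.take().unwrap()).lines() {
        let line = line?;
        // Each line is an event object, e.g. {"type":"test","event":"ok","name":"foo::bar"}.
        // Bootstrap parses these to drive both the console rendering and the build metrics.
        println!("{line}");
    }
    let _status = child.wait()?;
    Ok(())
}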
This commit is contained in: commit 66676820eb
@@ -11,6 +11,17 @@ dependencies = [
 "memchr",
]

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
 "hermit-abi",
 "libc",
 "winapi",
]

[[package]]
name = "autocfg"
version = "1.1.0"
@@ -36,6 +47,7 @@ dependencies = [
name = "bootstrap"
version = "0.0.0"
dependencies = [
 "atty",
 "build_helper",
 "cc",
 "cmake",
@@ -55,6 +67,7 @@ dependencies = [
 "sha2",
 "sysinfo",
 "tar",
 "termcolor",
 "toml",
 "walkdir",
 "windows",
@@ -636,6 +649,15 @@ dependencies = [
 "xattr",
]

[[package]]
name = "termcolor"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
dependencies = [
 "winapi-util",
]

[[package]]
name = "thread_local"
version = "1.1.4"
@@ -30,6 +30,7 @@ path = "bin/sccache-plus-cl.rs"
test = false

[dependencies]
atty = "0.2.14"
build_helper = { path = "../tools/build_helper" }
cmake = "0.1.38"
filetime = "0.2"
@@ -45,6 +46,7 @@ serde_derive = "1.0.137"
serde_json = "1.0.2"
sha2 = "0.10"
tar = "0.4"
termcolor = "1.2.0"
toml = "0.5"
ignore = "0.4.10"
opener = "0.5"
@@ -87,6 +87,9 @@ pub struct Config {
    pub patch_binaries_for_nix: bool,
    pub stage0_metadata: Stage0Metadata,

    pub stdout_is_tty: bool,
    pub stderr_is_tty: bool,

    pub on_fail: Option<String>,
    pub stage: u32,
    pub keep_stage: Vec<u32>,
@@ -825,6 +828,9 @@ impl Config {
        config.dist_include_mingw_linker = true;
        config.dist_compression_profile = "fast".into();

        config.stdout_is_tty = atty::is(atty::Stream::Stdout);
        config.stderr_is_tty = atty::is(atty::Stream::Stderr);

        // set by build.rs
        config.build = TargetSelection::from_user(&env!("BUILD_TRIPLE"));
@@ -55,6 +55,7 @@ mod format;
mod install;
mod metadata;
mod native;
mod render_tests;
mod run;
mod sanity;
mod setup;
@@ -88,6 +89,7 @@ pub use crate::builder::PathSet;
use crate::cache::{Interned, INTERNER};
pub use crate::config::Config;
pub use crate::flags::Subcommand;
use termcolor::{ColorChoice, StandardStream, WriteColor};

const LLVM_TOOLS: &[&str] = &[
    "llvm-cov", // used to generate coverage report
@@ -1582,6 +1584,31 @@ to download LLVM rather than building it.

        self.config.ninja_in_file
    }

    pub fn colored_stdout<R, F: FnOnce(&mut dyn WriteColor) -> R>(&self, f: F) -> R {
        self.colored_stream_inner(StandardStream::stdout, self.config.stdout_is_tty, f)
    }

    pub fn colored_stderr<R, F: FnOnce(&mut dyn WriteColor) -> R>(&self, f: F) -> R {
        self.colored_stream_inner(StandardStream::stderr, self.config.stderr_is_tty, f)
    }

    fn colored_stream_inner<R, F, C>(&self, constructor: C, is_tty: bool, f: F) -> R
    where
        C: Fn(ColorChoice) -> StandardStream,
        F: FnOnce(&mut dyn WriteColor) -> R,
    {
        let choice = match self.config.color {
            flags::Color::Always => ColorChoice::Always,
            flags::Color::Never => ColorChoice::Never,
            flags::Color::Auto if !is_tty => ColorChoice::Never,
            flags::Color::Auto => ColorChoice::Auto,
        };
        let mut stream = constructor(choice);
        let result = f(&mut stream);
        stream.reset().unwrap();
        result
    }
}

#[cfg(unix)]
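For context, the helpers above wrap the standard termcolor pattern. The following stand-alone sketch (using the same `termcolor` crate added as a dependency in this PR; the color and text are illustrative) shows the write/reset flow that `colored_stream_inner` performs around the closure:

use std::io::Write;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

fn main() -> std::io::Result<()> {
    // ColorChoice::Auto is what bootstrap selects for --color auto on a TTY.
    let mut stdout = StandardStream::stdout(ColorChoice::Auto);
    stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
    write!(stdout, "ok")?;
    // Reset the stream afterwards, like colored_stream_inner does.
    stdout.reset()?;
    writeln!(stdout)?;
    Ok(())
}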
@@ -51,6 +51,7 @@ impl BuildMetrics {
            duration_excluding_children_sec: Duration::ZERO,

            children: Vec::new(),
            tests: Vec::new(),
        });
    }

@@ -72,6 +73,16 @@ impl BuildMetrics {
        }
    }

    pub(crate) fn record_test(&self, name: &str, outcome: TestOutcome) {
        let mut state = self.state.borrow_mut();
        state
            .running_steps
            .last_mut()
            .unwrap()
            .tests
            .push(Test { name: name.to_string(), outcome });
    }

    fn collect_stats(&self, state: &mut MetricsState) {
        let step = state.running_steps.last_mut().unwrap();

@@ -125,6 +136,14 @@ impl BuildMetrics {
    }

    fn prepare_json_step(&self, step: StepMetrics) -> JsonNode {
        let mut children = Vec::new();
        children.extend(step.children.into_iter().map(|child| self.prepare_json_step(child)));
        children.extend(
            step.tests
                .into_iter()
                .map(|test| JsonNode::Test { name: test.name, outcome: test.outcome }),
        );

        JsonNode::RustbuildStep {
            type_: step.type_,
            debug_repr: step.debug_repr,
@@ -135,11 +154,7 @@
                    / step.duration_excluding_children_sec.as_secs_f64(),
            },

            children: step
                .children
                .into_iter()
                .map(|child| self.prepare_json_step(child))
                .collect(),
            children,
        }
    }
}
@@ -161,6 +176,12 @@ struct StepMetrics {
    duration_excluding_children_sec: Duration,

    children: Vec<StepMetrics>,
    tests: Vec<Test>,
}

struct Test {
    name: String,
    outcome: TestOutcome,
}

#[derive(Serialize, Deserialize)]
@@ -190,6 +211,19 @@ enum JsonNode {

        children: Vec<JsonNode>,
    },
    Test {
        name: String,
        #[serde(flatten)]
        outcome: TestOutcome,
    },
}

#[derive(Serialize, Deserialize)]
#[serde(tag = "outcome", rename_all = "snake_case")]
pub(crate) enum TestOutcome {
    Passed,
    Failed,
    Ignored { ignore_reason: Option<String> },
}

#[derive(Serialize, Deserialize)]
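To make the serde attributes above concrete, here is a small self-contained sketch (my own types, not the bootstrap ones; it assumes `serde` with the derive feature and `serde_json` as dependencies) showing how an internally tagged enum combined with `#[serde(flatten)]` produces a flat test node in the emitted JSON:

use serde::Serialize;

#[derive(Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
enum Node {
    Test {
        name: String,
        #[serde(flatten)]
        outcome: Outcome,
    },
}

#[derive(Serialize)]
#[serde(tag = "outcome", rename_all = "snake_case")]
enum Outcome {
    Ignored { ignore_reason: Option<String> },
}

fn main() {
    let node = Node::Test {
        name: "ui/example.rs".into(),
        outcome: Outcome::Ignored { ignore_reason: Some("needs a specific target".into()) },
    };
    // Serializes to a single flat object, roughly:
    // {"kind":"test","name":"ui/example.rs","outcome":"ignored","ignore_reason":"needs a specific target"}
    println!("{}", serde_json::to_string(&node).unwrap());
}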
src/bootstrap/render_tests.rs (new file, 376 lines)
@@ -0,0 +1,376 @@
//! This module renders the JSON output of libtest into a human-readable form, trying to be as
//! similar to libtest's native output as possible.
//!
//! This is needed because we need to use libtest in JSON mode to extract granular information
//! about the executed tests. Doing so suppresses the human-readable output, and (compared to Cargo
//! and rustc) libtest doesn't include the rendered human-readable output as a JSON field. We had
//! to reimplement all the rendering logic in this module because of that.

use crate::builder::Builder;
use std::io::{BufRead, BufReader, Write};
use std::process::{ChildStdout, Command, Stdio};
use std::time::Duration;
use termcolor::{Color, ColorSpec, WriteColor};

const TERSE_TESTS_PER_LINE: usize = 88;

pub(crate) fn add_flags_and_try_run_tests(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    if cmd.get_args().position(|arg| arg == "--").is_none() {
        cmd.arg("--");
    }
    cmd.args(&["-Z", "unstable-options", "--format", "json"]);

    try_run_tests(builder, cmd)
}

pub(crate) fn try_run_tests(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    if builder.config.dry_run() {
        return true;
    }

    if !run_tests(builder, cmd) {
        if builder.fail_fast {
            crate::detail_exit(1);
        } else {
            let mut failures = builder.delayed_failures.borrow_mut();
            failures.push(format!("{cmd:?}"));
            false
        }
    } else {
        true
    }
}

fn run_tests(builder: &Builder<'_>, cmd: &mut Command) -> bool {
    cmd.stdout(Stdio::piped());

    builder.verbose(&format!("running: {cmd:?}"));

    let mut process = cmd.spawn().unwrap();

    // This runs until the stdout of the child is closed, which means the child exited. We don't
    // run this on another thread since the builder is not Sync.
    Renderer::new(process.stdout.take().unwrap(), builder).render_all();

    let result = process.wait_with_output().unwrap();
    if !result.status.success() && builder.is_verbose() {
        println!(
            "\n\ncommand did not execute successfully: {cmd:?}\n\
             expected success, got: {}",
            result.status
        );
    }

    result.status.success()
}

struct Renderer<'a> {
    stdout: BufReader<ChildStdout>,
    failures: Vec<TestOutcome>,
    benches: Vec<BenchOutcome>,
    builder: &'a Builder<'a>,
    tests_count: Option<usize>,
    executed_tests: usize,
    terse_tests_in_line: usize,
}

impl<'a> Renderer<'a> {
    fn new(stdout: ChildStdout, builder: &'a Builder<'a>) -> Self {
        Self {
            stdout: BufReader::new(stdout),
            benches: Vec::new(),
            failures: Vec::new(),
            builder,
            tests_count: None,
            executed_tests: 0,
            terse_tests_in_line: 0,
        }
    }

    fn render_all(mut self) {
        let mut line = String::new();
        loop {
            line.clear();
            match self.stdout.read_line(&mut line) {
                Ok(_) => {}
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
            if line.is_empty() {
                break;
            }

            let trimmed = line.trim();
            if trimmed.starts_with("{") && trimmed.ends_with("}") {
                self.render_message(match serde_json::from_str(&trimmed) {
                    Ok(parsed) => parsed,
                    Err(err) => {
                        panic!("failed to parse libtest json output; error: {err}, line: {line:?}");
                    }
                });
            } else {
                // Handle non-JSON output, for example when --nocapture is passed.
                print!("{line}");
                let _ = std::io::stdout().flush();
            }
        }
    }

    fn render_test_outcome(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        self.executed_tests += 1;

        #[cfg(feature = "build-metrics")]
        self.builder.metrics.record_test(
            &test.name,
            match outcome {
                Outcome::Ok | Outcome::BenchOk => crate::metrics::TestOutcome::Passed,
                Outcome::Failed => crate::metrics::TestOutcome::Failed,
                Outcome::Ignored { reason } => crate::metrics::TestOutcome::Ignored {
                    ignore_reason: reason.map(|s| s.to_string()),
                },
            },
        );

        if self.builder.config.verbose_tests {
            self.render_test_outcome_verbose(outcome, test);
        } else {
            self.render_test_outcome_terse(outcome, test);
        }
    }

    fn render_test_outcome_verbose(&self, outcome: Outcome<'_>, test: &TestOutcome) {
        print!("test {} ... ", test.name);
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        if let Some(exec_time) = test.exec_time {
            print!(" ({exec_time:.2?})");
        }
        println!();
    }

    fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, _: &TestOutcome) {
        if self.terse_tests_in_line != 0 && self.terse_tests_in_line % TERSE_TESTS_PER_LINE == 0 {
            if let Some(total) = self.tests_count {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len());
                print!(" {executed}/{total}");
            }
            println!();
            self.terse_tests_in_line = 0;
        }

        self.terse_tests_in_line += 1;
        self.builder.colored_stdout(|stdout| outcome.write_short(stdout)).unwrap();
        let _ = std::io::stdout().flush();
    }

    fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) {
        // The terse output doesn't end with a newline, so we need to add it ourselves.
        if !self.builder.config.verbose_tests {
            println!();
        }

        if !self.failures.is_empty() {
            println!("\nfailures:\n");
            for failure in &self.failures {
                if let Some(stdout) = &failure.stdout {
                    println!("---- {} stdout ----", failure.name);
                    println!("{stdout}");
                }
            }

            println!("\nfailures:");
            for failure in &self.failures {
                println!(" {}", failure.name);
            }
        }

        if !self.benches.is_empty() {
            println!("\nbenchmarks:");

            let mut rows = Vec::new();
            for bench in &self.benches {
                rows.push((
                    &bench.name,
                    format!("{:.2?}/iter", Duration::from_nanos(bench.median)),
                    format!("+/- {:.2?}", Duration::from_nanos(bench.deviation)),
                ));
            }

            let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
            let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
            let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
            for row in &rows {
                println!(" {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
            }
        }

        print!("\ntest result: ");
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        println!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out; \
             finished in {:.2?}\n",
            suite.passed,
            suite.failed,
            suite.ignored,
            suite.measured,
            suite.filtered_out,
            Duration::from_secs_f64(suite.exec_time)
        );
    }

    fn render_message(&mut self, message: Message) {
        match message {
            Message::Suite(SuiteMessage::Started { test_count }) => {
                println!("\nrunning {test_count} tests");
                self.executed_tests = 0;
                self.terse_tests_in_line = 0;
                self.tests_count = Some(test_count);
            }
            Message::Suite(SuiteMessage::Ok(outcome)) => {
                self.render_suite_outcome(Outcome::Ok, &outcome);
            }
            Message::Suite(SuiteMessage::Failed(outcome)) => {
                self.render_suite_outcome(Outcome::Failed, &outcome);
            }
            Message::Bench(outcome) => {
                // The formatting for benchmarks doesn't replicate 1:1 the formatting libtest
                // outputs, mostly because libtest's formatting is broken in terse mode, which is
                // the default used by our monorepo. We use a different formatting instead:
                // successful benchmarks are just showed as "benchmarked"/"b", and the details are
                // outputted at the bottom like failures.
                let fake_test_outcome = TestOutcome {
                    name: outcome.name.clone(),
                    exec_time: None,
                    stdout: None,
                    message: None,
                };
                self.render_test_outcome(Outcome::BenchOk, &fake_test_outcome);
                self.benches.push(outcome);
            }
            Message::Test(TestMessage::Ok(outcome)) => {
                self.render_test_outcome(Outcome::Ok, &outcome);
            }
            Message::Test(TestMessage::Ignored(outcome)) => {
                self.render_test_outcome(
                    Outcome::Ignored { reason: outcome.message.as_deref() },
                    &outcome,
                );
            }
            Message::Test(TestMessage::Failed(outcome)) => {
                self.render_test_outcome(Outcome::Failed, &outcome);
                self.failures.push(outcome);
            }
            Message::Test(TestMessage::Timeout { name }) => {
                println!("test {name} has been running for a long time");
            }
            Message::Test(TestMessage::Started) => {} // Not useful
        }
    }
}

enum Outcome<'a> {
    Ok,
    BenchOk,
    Failed,
    Ignored { reason: Option<&'a str> },
}

impl Outcome<'_> {
    fn write_short(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, ".")?;
            }
            Outcome::BenchOk => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "b")?;
            }
            Outcome::Failed => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "F")?;
            }
            Outcome::Ignored { .. } => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "i")?;
            }
        }
        writer.reset()
    }

    fn write_long(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, "ok")?;
            }
            Outcome::BenchOk => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "benchmarked")?;
            }
            Outcome::Failed => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "FAILED")?;
            }
            Outcome::Ignored { reason } => {
                writer.set_color(&ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "ignored")?;
                if let Some(reason) = reason {
                    write!(writer, ", {reason}")?;
                }
            }
        }
        writer.reset()
    }
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum Message {
    Suite(SuiteMessage),
    Test(TestMessage),
    Bench(BenchOutcome),
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum SuiteMessage {
    Ok(SuiteOutcome),
    Failed(SuiteOutcome),
    Started { test_count: usize },
}

#[derive(serde_derive::Deserialize)]
struct SuiteOutcome {
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    filtered_out: usize,
    exec_time: f64,
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum TestMessage {
    Ok(TestOutcome),
    Failed(TestOutcome),
    Ignored(TestOutcome),
    Timeout { name: String },
    Started,
}

#[derive(serde_derive::Deserialize)]
struct BenchOutcome {
    name: String,
    median: u64,
    deviation: u64,
}

#[derive(serde_derive::Deserialize)]
struct TestOutcome {
    name: String,
    exec_time: Option<f64>,
    stdout: Option<String>,
    message: Option<String>,
}
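As a side note on the benchmark table rendered by render_suite_outcome above: the column widths are computed at runtime and spliced into the format string via `$`-suffixed width arguments. A tiny stand-alone sketch of that formatting technique (the sample rows are made up):

fn main() {
    let rows = [
        ("bench_push", "12.30ns/iter", "+/- 1.20ns"),
        ("bench_pop_many_items", "1.05µs/iter", "+/- 40.00ns"),
    ];
    // Compute each column's width from the data, as render_suite_outcome does.
    let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
    let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
    let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
    for row in &rows {
        // max_0$, max_1$, max_2$ are picked up from the local variables as pad widths.
        println!(" {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
    }
}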
@@ -20,6 +20,7 @@ use crate::dist;
use crate::doc::DocumentationFormat;
use crate::flags::Subcommand;
use crate::native;
use crate::render_tests::add_flags_and_try_run_tests;
use crate::tool::{self, SourceType, Tool};
use crate::toolstate::ToolState;
use crate::util::{self, add_link_lib_path, dylib_path, dylib_path_var, output, t};
@@ -123,7 +124,7 @@ impl Step for CrateJsonDocLint {
            SourceType::InTree,
            &[],
        );
        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -172,7 +173,7 @@ You can skip linkcheck with --exclude src/tools/linkchecker"
            SourceType::InTree,
            &[],
        );
        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());

        // Build all the default documentation.
        builder.default_doc(&[]);
@@ -333,7 +334,7 @@ impl Step for Cargo {

        cargo.env("PATH", &path_for_cargo(builder, compiler));

        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -392,7 +393,7 @@ impl Step for RustAnalyzer {
        cargo.add_rustc_lib_path(builder, compiler);
        cargo.arg("--").args(builder.config.cmd.test_args());

        builder.run(&mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -445,7 +446,7 @@ impl Step for Rustfmt {

        cargo.add_rustc_lib_path(builder, compiler);

        builder.run(&mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -496,7 +497,7 @@ impl Step for RustDemangler {

        cargo.add_rustc_lib_path(builder, compiler);

        builder.run(&mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -637,8 +638,7 @@ impl Step for Miri {
        // Forward test filters.
        cargo.arg("--").args(builder.config.cmd.test_args());

        let mut cargo = Command::from(cargo);
        builder.run(&mut cargo);
        add_flags_and_try_run_tests(builder, &mut cargo.into());

        // # Run `cargo miri test`.
        // This is just a smoke test (Miri's own CI invokes this in a bunch of different ways and ensures
@@ -711,7 +711,7 @@ impl Step for CompiletestTest {
        );
        cargo.allow_features("test");

        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -1193,7 +1193,7 @@ impl Step for TidySelfTest {
            SourceType::InTree,
            &[],
        );
        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -1620,9 +1620,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
            cmd.arg("--verbose");
        }

        if !builder.config.verbose_tests {
            cmd.arg("--quiet");
        }
        cmd.arg("--json");

        let mut llvm_components_passed = false;
        let mut copts_passed = false;
@@ -1773,7 +1771,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
            suite, mode, &compiler.host, target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cmd);
        crate::render_tests::try_run_tests(builder, &mut cmd);

        if let Some(compare_mode) = compare_mode {
            cmd.arg("--compare-mode").arg(compare_mode);
@@ -1782,7 +1780,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
                suite, mode, compare_mode, &compiler.host, target
            ));
            let _time = util::timeit(&builder);
            try_run(builder, &mut cmd);
            crate::render_tests::try_run_tests(builder, &mut cmd);
        }
    }
}
@@ -2184,9 +2182,8 @@ impl Step for Crate {
        cargo.arg("--");
        cargo.args(&builder.config.cmd.test_args());

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }
        cargo.arg("-Z").arg("unstable-options");
        cargo.arg("--format").arg("json");

        if target.contains("emscripten") {
            cargo.env(
@@ -2214,7 +2211,7 @@ impl Step for Crate {
            target
        ));
        let _time = util::timeit(&builder);
        try_run(builder, &mut cargo.into());
        crate::render_tests::try_run_tests(builder, &mut cargo.into());
    }
}

@@ -2334,7 +2331,7 @@ impl Step for CrateRustdoc {
        ));
        let _time = util::timeit(&builder);

        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -2395,17 +2392,13 @@ impl Step for CrateRustdocJsonTypes {
            cargo.arg("'-Ctarget-feature=-crt-static'");
        }

        if !builder.config.verbose_tests {
            cargo.arg("--quiet");
        }

        builder.info(&format!(
            "{} rustdoc-json-types stage{} ({} -> {})",
            test_kind, compiler.stage, &compiler.host, target
        ));
        let _time = util::timeit(&builder);

        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }
}

@@ -2574,7 +2567,7 @@ impl Step for Bootstrap {
        // rustbuild tests are racy on directory creation so just run them one at a time.
        // Since there's not many this shouldn't be a problem.
        cmd.arg("--test-threads=1");
        try_run(builder, &mut cmd);
        add_flags_and_try_run_tests(builder, &mut cmd);
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
@@ -2655,7 +2648,7 @@ impl Step for ReplacePlaceholderTest {
            SourceType::InTree,
            &[],
        );
        try_run(builder, &mut cargo.into());
        add_flags_and_try_run_tests(builder, &mut cargo.into());
    }

    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
@@ -9,7 +9,7 @@ use std::str::FromStr;

use crate::util::{add_dylib_path, PathBufExt};
use lazycell::LazyCell;
use test::ColorConfig;
use test::{ColorConfig, OutputFormat};

#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Mode {
@@ -337,7 +337,7 @@ pub struct Config {
    pub verbose: bool,

    /// Print one character per test instead of one line
    pub quiet: bool,
    pub format: OutputFormat,

    /// Whether to use colors in test.
    pub color: ColorConfig,
@@ -114,6 +114,7 @@ pub fn parse_config(args: Vec<String>) -> Config {
        )
        .optflag("", "quiet", "print one character per test instead of one line")
        .optopt("", "color", "coloring: auto, always, never", "WHEN")
        .optflag("", "json", "emit json output instead of plaintext output")
        .optopt("", "logfile", "file to log test execution to", "FILE")
        .optopt("", "target", "the target to build for", "TARGET")
        .optopt("", "host", "the host to build for", "HOST")
@@ -281,7 +282,12 @@ pub fn parse_config(args: Vec<String>) -> Config {
            && !opt_str2(matches.opt_str("adb-test-dir")).is_empty(),
        lldb_python_dir: matches.opt_str("lldb-python-dir"),
        verbose: matches.opt_present("verbose"),
        quiet: matches.opt_present("quiet"),
        format: match (matches.opt_present("quiet"), matches.opt_present("json")) {
            (true, true) => panic!("--quiet and --json are incompatible"),
            (true, false) => test::OutputFormat::Terse,
            (false, true) => test::OutputFormat::Json,
            (false, false) => test::OutputFormat::Pretty,
        },
        only_modified: matches.opt_present("only-modified"),
        color,
        remote_test_client: matches.opt_str("remote-test-client").map(PathBuf::from),
@@ -339,7 +345,7 @@ pub fn log_config(config: &Config) {
    logv(c, format!("ar: {}", config.ar));
    logv(c, format!("linker: {:?}", config.linker));
    logv(c, format!("verbose: {}", config.verbose));
    logv(c, format!("quiet: {}", config.quiet));
    logv(c, format!("format: {:?}", config.format));
    logv(c, "\n".to_string());
}

@@ -416,7 +422,7 @@ pub fn run_tests(config: Config) {
        // easy to miss which tests failed, and as such fail to reproduce
        // the failure locally.

        eprintln!(
        println!(
            "Some tests failed in compiletest suite={}{} mode={} host={} target={}",
            config.suite,
            config.compare_mode.map(|c| format!(" compare_mode={:?}", c)).unwrap_or_default(),
@@ -501,7 +507,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts {
        filters: config.filters.clone(),
        filter_exact: config.filter_exact,
        run_ignored: if config.run_ignored { test::RunIgnored::Yes } else { test::RunIgnored::No },
        format: if config.quiet { test::OutputFormat::Terse } else { test::OutputFormat::Pretty },
        format: config.format,
        logfile: config.logfile.clone(),
        run_tests: true,
        bench_benchmarks: true,