Auto merge of #79867 - tmandry:rollup-7mubs3b, r=tmandry
Rollup of 12 pull requests

Successful merges:

- #79732 (minor stylistic clippy cleanups)
- #79750 (Fix trimming of lint docs)
- #79777 (Remove `first_merge` from liveness debug logs)
- #79795 (Privatize some of libcore unicode_internals)
- #79803 (Update xsv to prevent random CI failures)
- #79810 (Account for gaps in def path table during decoding)
- #79818 (Fixes to Rust coverage)
- #79824 (Strip prefix instead of replacing it with empty string)
- #79826 (Simplify visit_{foreign,trait}_item)
- #79844 (Move RWUTable to a separate module)
- #79861 (Update LLVM submodule)
- #79862 (Remove tab-lock and replace it with ctrl+up/down arrows to switch between search result tabs)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
commit 1cc4107109
@@ -241,7 +241,7 @@ fn save_function_record(
 /// (functions referenced by other "used" or public items). Any other functions considered unused,
 /// or "Unreachable" were still parsed and processed through the MIR stage.
 ///
-/// We can find the unreachable functions by the set different of all MIR `DefId`s (`tcx` query
+/// We can find the unreachable functions by the set difference of all MIR `DefId`s (`tcx` query
 /// `mir_keys`) minus the codegenned `DefId`s (`tcx` query `collect_and_partition_mono_items`).
 ///
 /// *HOWEVER* the codegenned `DefId`s are partitioned across multiple `CodegenUnit`s (CGUs), and
@@ -121,7 +121,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
             (Some(ret_span), _) => {
                 let sup_future = self.future_return_type(scope_def_id_sup);
-                let (return_type, action) = if let Some(_) = sup_future {
+                let (return_type, action) = if sup_future.is_some() {
                     ("returned future", "held across an await point")
                 } else {
                     ("return type", "returned")
@@ -140,7 +140,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
             }
             (_, Some(ret_span)) => {
                 let sub_future = self.future_return_type(scope_def_id_sub);
-                let (return_type, action) = if let Some(_) = sub_future {
+                let (return_type, action) = if sub_future.is_some() {
                     ("returned future", "held across an await point")
                 } else {
                     ("return type", "returned")
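These two hunks apply the same stylistic cleanup (clippy's `redundant_pattern_matching`): `if let Some(_) = x` binds nothing, so `x.is_some()` says the same thing directly. A minimal standalone sketch of the pattern; the function body here is illustrative, not the compiler's:

```rust
// Hypothetical helper mirroring the diff's `sup_future`/`sub_future` logic.
fn describe(sup_future: Option<&str>) -> (&'static str, &'static str) {
    if sup_future.is_some() {
        ("returned future", "held across an await point")
    } else {
        ("return type", "returned")
    }
}

fn main() {
    assert_eq!(describe(Some("fut")).0, "returned future");
    assert_eq!(describe(None).0, "return type");
}
```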
@@ -131,7 +131,7 @@ impl NonCamelCaseTypes {
         let cc = to_camel_case(name);
         // We cannot provide meaningful suggestions
         // if the characters are in the category of "Lowercase Letter".
-        if name.to_string() != cc {
+        if *name != cc {
             err.span_suggestion(
                 ident.span,
                 "convert the identifier to upper camel case",
@@ -271,7 +271,7 @@ impl NonSnakeCase {
                 let mut err = lint.build(&msg);
                 // We cannot provide meaningful suggestions
                 // if the characters are in the category of "Uppercase Letter".
-                if name.to_string() != sc {
+                if *name != sc {
                     // We have a valid span in almost all cases, but we don't have one when linting a crate
                     // name provided via the command line.
                     if !ident.span.is_dummy() {
@@ -455,7 +455,7 @@ impl NonUpperCaseGlobals {
                 lint.build(&format!("{} `{}` should have an upper case name", sort, name));
             // We cannot provide meaningful suggestions
             // if the characters are in the category of "Lowercase Letter".
-            if name.to_string() != uc {
+            if *name != uc {
                 err.span_suggestion(
                     ident.span,
                     "convert the identifier to upper case",
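The three lint hunks above share one idea: `name.to_string() != cc` allocates a fresh `String` on every check, while `*name != cc` compares through a `PartialEq<str>`-style impl without allocating. A sketch with a hypothetical interned-symbol type standing in for rustc's `Symbol`:

```rust
// `Sym` is a stand-in, not rustc's type; only the comparison pattern matters.
struct Sym(&'static str);

impl PartialEq<str> for Sym {
    fn eq(&self, other: &str) -> bool {
        self.0 == other
    }
}

fn main() {
    let name = Sym("FooBar");
    let cc = String::from("FooBar"); // the computed upper-camel-case form
    assert!(name == *cc); // compares the &str contents directly, no allocation
}
```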
@@ -1553,6 +1553,8 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
             return Some(DefId { krate, index: def_index_guess });
         }

+        let is_proc_macro = self.is_proc_macro_crate();
+
         // Slow path: We need to find out the new `DefIndex` of the provided
         // `DefPathHash`, if its still exists. This requires decoding every `DefPathHash`
         // stored in this crate.
@@ -1561,9 +1563,12 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
             let mut map = FxHashMap::with_capacity_and_hasher(end_id as usize, Default::default());
             for i in 0..end_id {
                 let def_index = DefIndex::from_u32(i);
-                let hash =
-                    self.root.tables.def_path_hashes.get(self, def_index).unwrap().decode(self);
-                map.insert(hash, def_index);
+                // There may be gaps in the encoded table if we're decoding a proc-macro crate
+                if let Some(hash) = self.root.tables.def_path_hashes.get(self, def_index) {
+                    map.insert(hash.decode(self), def_index);
+                } else if !is_proc_macro {
+                    panic!("Missing def_path_hashes entry for {:?}", def_index);
+                }
             }
             map
         });
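The fix tolerates gaps in the encoded table only for proc-macro crates, and keeps the hard failure otherwise. A minimal model of that decision, with illustrative names and types (the real table holds encoded `DefPathHash` entries, not `u64`s):

```rust
use std::collections::HashMap;

// Slot i holds the hash for def-index i, if present; gaps are `None`.
fn build_index(table: &[Option<u64>], is_proc_macro: bool) -> HashMap<u64, usize> {
    let mut map = HashMap::with_capacity(table.len());
    for (def_index, slot) in table.iter().enumerate() {
        if let Some(hash) = slot {
            map.insert(*hash, def_index);
        } else if !is_proc_macro {
            // Gaps are only expected when decoding a proc-macro crate.
            panic!("Missing def_path_hashes entry for {:?}", def_index);
        }
    }
    map
}

fn main() {
    let table = [Some(10), None, Some(30)];
    let map = build_index(&table, true); // proc-macro crate: gap tolerated
    assert_eq!(map.get(&30), Some(&2));
}
```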
@@ -445,7 +445,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
             "highlight_if_we_cannot_match_hir_ty: type_name={:?} needle_fr={:?}",
             type_name, needle_fr
         );
-        if type_name.find(&format!("'{}", counter)).is_some() {
+        if type_name.contains(&format!("'{}", counter)) {
             // Only add a label if we can confirm that a region was labelled.
             RegionNameHighlight::CannotMatchHirTy(span, type_name)
         } else {
@@ -33,7 +33,7 @@ impl CoverageGraph {
         // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
         // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the
         // each predecessor of a BCB leader_bb should be in a unique BCB, and each successor of a
-        // BCB last_bb should bin in its own unique BCB. Therefore, collecting the BCBs using
+        // BCB last_bb should be in its own unique BCB. Therefore, collecting the BCBs using
         // `bb_to_bcb` should work without requiring a deduplication step.

         let successors = IndexVec::from_fn_n(
@@ -283,7 +283,9 @@ rustc_index::newtype_index! {
     }
 }

-/// A BasicCoverageBlockData (BCB) represents the maximal-length sequence of MIR BasicBlocks without
+/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
+///
+/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
 /// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
 /// altering the original MIR CFG.
 ///
@@ -88,6 +88,7 @@ struct Instrumentor<'a, 'tcx> {
     pass_name: &'a str,
     tcx: TyCtxt<'tcx>,
     mir_body: &'a mut mir::Body<'tcx>,
+    source_file: Lrc<SourceFile>,
     fn_sig_span: Span,
     body_span: Span,
     basic_coverage_blocks: CoverageGraph,
@@ -96,9 +97,13 @@ struct Instrumentor<'a, 'tcx> {
 impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
     fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
         let source_map = tcx.sess.source_map();
         let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, mir_body.source.def_id());
         let body_span = hir_body.value.span;
-        let fn_sig_span = match some_fn_sig {
+        let source_file = source_map.lookup_source_file(body_span.lo());
+        let fn_sig_span = match some_fn_sig.filter(|fn_sig| {
+            Lrc::ptr_eq(&source_file, &source_map.lookup_source_file(fn_sig.span.hi()))
+        }) {
             Some(fn_sig) => fn_sig.span.with_hi(body_span.lo()),
             None => body_span.shrink_to_lo(),
         };
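The added filter keeps `fn_sig_span` only when the signature lives in the same `SourceFile` as the body. Since `Lrc` is rustc's `Arc` alias, "same file" here is pointer identity of the shared handle, not name equality. The same idea with plain `Arc` and a stand-in `SourceFile`:

```rust
use std::sync::Arc;

struct SourceFile {
    name: String, // illustrative field
}

fn same_file(a: &Arc<SourceFile>, b: &Arc<SourceFile>) -> bool {
    Arc::ptr_eq(a, b) // identity of the allocation, not `name` equality
}

fn main() {
    let body_file = Arc::new(SourceFile { name: "lib.rs".into() });
    let sig_file = Arc::clone(&body_file);
    let other = Arc::new(SourceFile { name: "lib.rs".into() });
    assert!(same_file(&body_file, &sig_file));
    assert!(!same_file(&body_file, &other)); // same name, different file object
}
```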
@@ -108,6 +113,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
             pass_name,
             tcx,
             mir_body,
+            source_file,
             fn_sig_span,
             body_span,
             basic_coverage_blocks,
@@ -268,8 +274,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
         let tcx = self.tcx;
         let source_map = tcx.sess.source_map();
         let body_span = self.body_span;
-        let source_file = source_map.lookup_source_file(body_span.lo());
-        let file_name = Symbol::intern(&source_file.name.to_string());
+        let file_name = Symbol::intern(&self.source_file.name.to_string());

         let mut bcb_counters = IndexVec::from_elem_n(None, self.basic_coverage_blocks.num_nodes());
         for covspan in coverage_spans {
@@ -285,11 +290,20 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
                 bug!("Every BasicCoverageBlock should have a Counter or Expression");
             };
             graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind);
+
+            debug!(
+                "Calling make_code_region(file_name={}, source_file={:?}, span={}, body_span={})",
+                file_name,
+                self.source_file,
+                source_map.span_to_string(span),
+                source_map.span_to_string(body_span)
+            );
+
             inject_statement(
                 self.mir_body,
                 counter_kind,
                 self.bcb_last_bb(bcb),
-                Some(make_code_region(file_name, &source_file, span, body_span)),
+                Some(make_code_region(file_name, &self.source_file, span, body_span)),
             );
         }
     }
@@ -217,6 +217,27 @@ pub struct CoverageSpans<'a, 'tcx> {
 }

 impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
+    /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
+    /// counted.
+    ///
+    /// The basic steps are:
+    ///
+    /// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
+    ///    `BasicCoverageBlockData`.
+    /// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
+    ///    are sorted with longer spans before shorter spans; and equal spans are sorted
+    ///    (deterministically) based on "dominator" relationship (if any).
+    /// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
+    ///    if another span or spans are already counting the same code region), or should be merged
+    ///    into a broader combined span (because it represents a contiguous, non-branching, and
+    ///    uninterrupted region of source code).
+    ///
+    /// Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
+    /// closures have their own MIR, their `Span` in their enclosing function should be left
+    /// "uncovered".
+    ///
+    /// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
+    /// to be).
     pub(super) fn generate_coverage_spans(
         mir_body: &'a mir::Body<'tcx>,
         fn_sig_span: Span,
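Step 2 of the moved doc comment describes a concrete ordering: sort by starting position, with longer spans first on ties. A sketch of just that comparator, using a simple `(lo, hi)` pair rather than rustc's interned span type:

```rust
use std::cmp::Ordering;

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
struct Span {
    lo: u32,
    hi: u32,
}

fn sort_spans(spans: &mut [Span]) {
    spans.sort_unstable_by(|a, b| match a.lo.cmp(&b.lo) {
        // Equal starts: longer span (larger hi) comes first.
        Ordering::Equal => b.hi.cmp(&a.hi),
        other => other,
    });
}

fn main() {
    let mut spans = [Span { lo: 5, hi: 7 }, Span { lo: 5, hi: 9 }, Span { lo: 1, hi: 3 }];
    sort_spans(&mut spans);
    assert_eq!(spans[0], Span { lo: 1, hi: 3 });
    assert_eq!(spans[1], Span { lo: 5, hi: 9 }); // longer of the two ties
}
```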
@@ -247,27 +268,6 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
         coverage_spans.to_refined_spans()
     }

-    /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
-    /// counted.
-    ///
-    /// The basic steps are:
-    ///
-    /// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
-    ///    `BasicCoverageBlockData`.
-    /// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
-    ///    are sorted with longer spans before shorter spans; and equal spans are sorted
-    ///    (deterministically) based on "dominator" relationship (if any).
-    /// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
-    ///    if another span or spans are already counting the same code region), or should be merged
-    ///    into a broader combined span (because it represents a contiguous, non-branching, and
-    ///    uninterrupted region of source code).
-    ///
-    /// Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
-    /// closures have their own MIR, their `Span` in their enclosing function should be left
-    /// "uncovered".
-    ///
-    /// Note the resulting vector of `CoverageSpan`s does may not be fully sorted (and does not need
-    /// to be).
     fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
         let mut initial_spans = Vec::<CoverageSpan>::with_capacity(self.mir_body.num_nodes() * 2);
         for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
@@ -423,15 +423,11 @@ impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> {
     }

     fn visit_trait_item(&mut self, trait_item: &hir::TraitItem<'_>) {
-        match trait_item.kind {
-            hir::TraitItemKind::Const(_, Some(_))
-            | hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)) => {
-                if has_allow_dead_code_or_lang_attr(self.tcx, trait_item.hir_id, &trait_item.attrs)
-                {
-                    self.worklist.push(trait_item.hir_id);
-                }
-            }
-            _ => {}
+        use hir::TraitItemKind::{Const, Fn};
+        if matches!(trait_item.kind, Const(_, Some(_)) | Fn(_, hir::TraitFn::Provided(_)))
+            && has_allow_dead_code_or_lang_attr(self.tcx, trait_item.hir_id, &trait_item.attrs)
+        {
+            self.worklist.push(trait_item.hir_id);
         }
     }
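The refactor above in miniature: a match whose arms say "these patterns do X, everything else does nothing" collapses into `matches!` plus `&&`. The enum here is illustrative, not HIR's:

```rust
enum ItemKind {
    Const(Option<u32>),
    Fn(bool), // true = provided body
    Type,
}

fn is_seed(kind: &ItemKind, has_allow_attr: bool) -> bool {
    matches!(kind, ItemKind::Const(Some(_)) | ItemKind::Fn(true)) && has_allow_attr
}

fn main() {
    assert!(is_seed(&ItemKind::Fn(true), true));
    assert!(!is_seed(&ItemKind::Type, true));
    assert!(!is_seed(&ItemKind::Const(Some(1)), false));
}
```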
@@ -440,17 +436,11 @@ impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> {
     }

     fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem<'_>) {
-        match foreign_item.kind {
-            hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Fn(..) => {
-                if has_allow_dead_code_or_lang_attr(
-                    self.tcx,
-                    foreign_item.hir_id,
-                    &foreign_item.attrs,
-                ) {
-                    self.worklist.push(foreign_item.hir_id);
-                }
-            }
-            _ => {}
+        use hir::ForeignItemKind::{Fn, Static};
+        if matches!(foreign_item.kind, Static(..) | Fn(..))
+            && has_allow_dead_code_or_lang_attr(self.tcx, foreign_item.hir_id, &foreign_item.attrs)
+        {
+            self.worklist.push(foreign_item.hir_id);
         }
     }
 }
@@ -105,6 +105,8 @@ use std::io;
 use std::io::prelude::*;
 use std::rc::Rc;

+mod rwu_table;
+
 rustc_index::newtype_index! {
     pub struct Variable {
         DEBUG_FORMAT = "v({})",
@@ -468,149 +470,6 @@ impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> {
 // Actually we compute just a bit more than just liveness, but we use
 // the same basic propagation framework in all cases.

-#[derive(Clone, Copy)]
-struct RWU {
-    reader: bool,
-    writer: bool,
-    used: bool,
-}
-
-/// Conceptually, this is like a `Vec<Vec<RWU>>`. But the number of
-/// RWU`s can get very large, so it uses a more compact representation.
-struct RWUTable {
-    /// Total number of live nodes.
-    live_nodes: usize,
-    /// Total number of variables.
-    vars: usize,
-
-    /// A compressed representation of `RWU`s.
-    ///
-    /// Each word represents 2 different `RWU`s packed together. Each packed RWU
-    /// is stored in 4 bits: a reader bit, a writer bit, a used bit and a
-    /// padding bit.
-    ///
-    /// The data for each live node is contiguous and starts at a word boundary,
-    /// so there might be an unused space left.
-    words: Vec<u8>,
-    /// Number of words per each live node.
-    live_node_words: usize,
-}
-
-impl RWUTable {
-    const RWU_READER: u8 = 0b0001;
-    const RWU_WRITER: u8 = 0b0010;
-    const RWU_USED: u8 = 0b0100;
-    const RWU_MASK: u8 = 0b1111;
-
-    /// Size of packed RWU in bits.
-    const RWU_BITS: usize = 4;
-    /// Size of a word in bits.
-    const WORD_BITS: usize = std::mem::size_of::<u8>() * 8;
-    /// Number of packed RWUs that fit into a single word.
-    const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS;
-
-    fn new(live_nodes: usize, vars: usize) -> RWUTable {
-        let live_node_words = (vars + Self::WORD_RWU_COUNT - 1) / Self::WORD_RWU_COUNT;
-        Self { live_nodes, vars, live_node_words, words: vec![0u8; live_node_words * live_nodes] }
-    }
-
-    fn word_and_shift(&self, ln: LiveNode, var: Variable) -> (usize, u32) {
-        assert!(ln.index() < self.live_nodes);
-        assert!(var.index() < self.vars);
-
-        let var = var.index();
-        let word = var / Self::WORD_RWU_COUNT;
-        let shift = Self::RWU_BITS * (var % Self::WORD_RWU_COUNT);
-        (ln.index() * self.live_node_words + word, shift as u32)
-    }
-
-    fn pick2_rows_mut(&mut self, a: LiveNode, b: LiveNode) -> (&mut [u8], &mut [u8]) {
-        assert!(a.index() < self.live_nodes);
-        assert!(b.index() < self.live_nodes);
-        assert!(a != b);
-
-        let a_start = a.index() * self.live_node_words;
-        let b_start = b.index() * self.live_node_words;
-
-        unsafe {
-            let ptr = self.words.as_mut_ptr();
-            (
-                std::slice::from_raw_parts_mut(ptr.add(a_start), self.live_node_words),
-                std::slice::from_raw_parts_mut(ptr.add(b_start), self.live_node_words),
-            )
-        }
-    }
-
-    fn copy(&mut self, dst: LiveNode, src: LiveNode) {
-        if dst == src {
-            return;
-        }
-
-        let (dst_row, src_row) = self.pick2_rows_mut(dst, src);
-        dst_row.copy_from_slice(src_row);
-    }
-
-    /// Sets `dst` to the union of `dst` and `src`, returns true if `dst` was
-    /// changed.
-    fn union(&mut self, dst: LiveNode, src: LiveNode) -> bool {
-        if dst == src {
-            return false;
-        }
-
-        let mut changed = false;
-        let (dst_row, src_row) = self.pick2_rows_mut(dst, src);
-        for (dst_word, src_word) in dst_row.iter_mut().zip(src_row.iter()) {
-            let old = *dst_word;
-            let new = *dst_word | src_word;
-            *dst_word = new;
-            changed |= old != new;
-        }
-        changed
-    }
-
-    fn get_reader(&self, ln: LiveNode, var: Variable) -> bool {
-        let (word, shift) = self.word_and_shift(ln, var);
-        (self.words[word] >> shift) & Self::RWU_READER != 0
-    }
-
-    fn get_writer(&self, ln: LiveNode, var: Variable) -> bool {
-        let (word, shift) = self.word_and_shift(ln, var);
-        (self.words[word] >> shift) & Self::RWU_WRITER != 0
-    }
-
-    fn get_used(&self, ln: LiveNode, var: Variable) -> bool {
-        let (word, shift) = self.word_and_shift(ln, var);
-        (self.words[word] >> shift) & Self::RWU_USED != 0
-    }
-
-    fn get(&self, ln: LiveNode, var: Variable) -> RWU {
-        let (word, shift) = self.word_and_shift(ln, var);
-        let rwu_packed = self.words[word] >> shift;
-        RWU {
-            reader: rwu_packed & Self::RWU_READER != 0,
-            writer: rwu_packed & Self::RWU_WRITER != 0,
-            used: rwu_packed & Self::RWU_USED != 0,
-        }
-    }
-
-    fn set(&mut self, ln: LiveNode, var: Variable, rwu: RWU) {
-        let mut packed = 0;
-        if rwu.reader {
-            packed |= Self::RWU_READER;
-        }
-        if rwu.writer {
-            packed |= Self::RWU_WRITER;
-        }
-        if rwu.used {
-            packed |= Self::RWU_USED;
-        }
-
-        let (word, shift) = self.word_and_shift(ln, var);
-        let word = &mut self.words[word];
-        *word = (*word & !(Self::RWU_MASK << shift)) | (packed << shift)
-    }
-}
-
 const ACC_READ: u32 = 1;
 const ACC_WRITE: u32 = 2;
 const ACC_USE: u32 = 4;
@@ -623,7 +482,7 @@ struct Liveness<'a, 'tcx> {
     upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
     closure_captures: Option<&'tcx FxIndexMap<hir::HirId, ty::UpvarId>>,
     successors: IndexVec<LiveNode, Option<LiveNode>>,
-    rwu_table: RWUTable,
+    rwu_table: rwu_table::RWUTable,

     /// A live node representing a point of execution before closure entry &
     /// after closure exit. Used to calculate liveness of captured variables
@@ -661,7 +520,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
             upvars,
             closure_captures,
             successors: IndexVec::from_elem_n(None, num_live_nodes),
-            rwu_table: RWUTable::new(num_live_nodes, num_vars),
+            rwu_table: rwu_table::RWUTable::new(num_live_nodes, num_vars),
             closure_ln,
             exit_ln,
             break_ln: Default::default(),
@@ -781,19 +640,13 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
         debug!("init_from_succ(ln={}, succ={})", self.ln_str(ln), self.ln_str(succ_ln));
     }

-    fn merge_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode, first_merge: bool) -> bool {
+    fn merge_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode) -> bool {
         if ln == succ_ln {
             return false;
         }

         let changed = self.rwu_table.union(ln, succ_ln);
-        debug!(
-            "merge_from_succ(ln={:?}, succ={}, first_merge={}, changed={})",
-            ln,
-            self.ln_str(succ_ln),
-            first_merge,
-            changed
-        );
+        debug!("merge_from_succ(ln={:?}, succ={}, changed={})", ln, self.ln_str(succ_ln), changed);
         changed
     }
@@ -802,7 +655,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
     // this) so we just clear out all the data.
     fn define(&mut self, writer: LiveNode, var: Variable) {
         let used = self.rwu_table.get_used(writer, var);
-        self.rwu_table.set(writer, var, RWU { reader: false, writer: false, used });
+        self.rwu_table.set(writer, var, rwu_table::RWU { reader: false, writer: false, used });
         debug!("{:?} defines {:?}: {}", writer, var, self.ln_str(writer));
     }
@@ -893,7 +746,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
         };

         // Propagate through calls to the closure.
-        let mut first_merge = true;
         loop {
             self.init_from_succ(self.closure_ln, succ);
             for param in body.params {
@@ -903,10 +755,9 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
                 })
             }

-            if !self.merge_from_succ(self.exit_ln, self.closure_ln, first_merge) {
+            if !self.merge_from_succ(self.exit_ln, self.closure_ln) {
                 break;
             }
-            first_merge = false;
             assert_eq!(succ, self.propagate_through_expr(&body.value, self.exit_ln));
         }
@@ -1012,7 +863,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
                 //
                 let ln = self.live_node(expr.hir_id, expr.span);
                 self.init_empty(ln, succ);
-                let mut first_merge = true;
                 for arm in arms {
                     let body_succ = self.propagate_through_expr(&arm.body, succ);
@@ -1021,8 +871,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
                         body_succ,
                     );
                     let arm_succ = self.define_bindings_in_pat(&arm.pat, guard_succ);
-                    self.merge_from_succ(ln, arm_succ, first_merge);
-                    first_merge = false;
+                    self.merge_from_succ(ln, arm_succ);
                 }
                 self.propagate_through_expr(&e, ln)
             }
@@ -1133,7 +982,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {

                 let ln = self.live_node(expr.hir_id, expr.span);
                 self.init_from_succ(ln, succ);
-                self.merge_from_succ(ln, r_succ, false);
+                self.merge_from_succ(ln, r_succ);

                 self.propagate_through_expr(&l, ln)
             }
@@ -1377,7 +1226,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
         */

         // first iteration:
-        let mut first_merge = true;
         let ln = self.live_node(expr.hir_id, expr.span);
         self.init_empty(ln, succ);
         debug!("propagate_through_loop: using id for loop body {} {:?}", expr.hir_id, body);
@@ -1389,8 +1237,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
         let body_ln = self.propagate_through_block(body, ln);

         // repeat until fixed point is reached:
-        while self.merge_from_succ(ln, body_ln, first_merge) {
-            first_merge = false;
+        while self.merge_from_succ(ln, body_ln) {
            assert_eq!(body_ln, self.propagate_through_block(body, ln));
        }
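The loops that dropped `first_merge` are all fixed-point iterations: keep merging a node with its successor until `merge_from_succ` reports no change. A tiny model of that control flow, with a bitset standing in for a node's liveness facts (assumed semantics, not the compiler's types):

```rust
// Union `src` into `dst`; report whether `dst` changed, i.e. whether the
// fixed point has not yet been reached.
fn union_into(dst: &mut u32, src: u32) -> bool {
    let old = *dst;
    *dst |= src;
    *dst != old
}

fn main() {
    let mut loop_head = 0b0001;
    let body = 0b0110; // facts flowing back from the loop body
    let mut iterations = 0;
    while union_into(&mut loop_head, body) {
        iterations += 1;
    }
    assert_eq!(loop_head, 0b0111);
    assert_eq!(iterations, 1); // the second merge changes nothing, so the loop exits
}
```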
compiler/rustc_passes/src/liveness/rwu_table.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
+use crate::liveness::{LiveNode, Variable};
+
+#[derive(Clone, Copy)]
+pub(super) struct RWU {
+    pub(super) reader: bool,
+    pub(super) writer: bool,
+    pub(super) used: bool,
+}
+
+/// Conceptually, this is like a `Vec<Vec<RWU>>`. But the number of
+/// RWU`s can get very large, so it uses a more compact representation.
+pub(super) struct RWUTable {
+    /// Total number of live nodes.
+    live_nodes: usize,
+    /// Total number of variables.
+    vars: usize,
+
+    /// A compressed representation of `RWU`s.
+    ///
+    /// Each word represents 2 different `RWU`s packed together. Each packed RWU
+    /// is stored in 4 bits: a reader bit, a writer bit, a used bit and a
+    /// padding bit.
+    ///
+    /// The data for each live node is contiguous and starts at a word boundary,
+    /// so there might be an unused space left.
+    words: Vec<u8>,
+    /// Number of words per each live node.
+    live_node_words: usize,
+}
+
+impl RWUTable {
+    const RWU_READER: u8 = 0b0001;
+    const RWU_WRITER: u8 = 0b0010;
+    const RWU_USED: u8 = 0b0100;
+    const RWU_MASK: u8 = 0b1111;
+
+    /// Size of packed RWU in bits.
+    const RWU_BITS: usize = 4;
+    /// Size of a word in bits.
+    const WORD_BITS: usize = std::mem::size_of::<u8>() * 8;
+    /// Number of packed RWUs that fit into a single word.
+    const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS;
+
+    pub(super) fn new(live_nodes: usize, vars: usize) -> RWUTable {
+        let live_node_words = (vars + Self::WORD_RWU_COUNT - 1) / Self::WORD_RWU_COUNT;
+        Self { live_nodes, vars, live_node_words, words: vec![0u8; live_node_words * live_nodes] }
+    }
+
+    fn word_and_shift(&self, ln: LiveNode, var: Variable) -> (usize, u32) {
+        assert!(ln.index() < self.live_nodes);
+        assert!(var.index() < self.vars);
+
+        let var = var.index();
+        let word = var / Self::WORD_RWU_COUNT;
+        let shift = Self::RWU_BITS * (var % Self::WORD_RWU_COUNT);
+        (ln.index() * self.live_node_words + word, shift as u32)
+    }
+
+    fn pick2_rows_mut(&mut self, a: LiveNode, b: LiveNode) -> (&mut [u8], &mut [u8]) {
+        assert!(a.index() < self.live_nodes);
+        assert!(b.index() < self.live_nodes);
+        assert!(a != b);
+
+        let a_start = a.index() * self.live_node_words;
+        let b_start = b.index() * self.live_node_words;
+
+        unsafe {
+            let ptr = self.words.as_mut_ptr();
+            (
+                std::slice::from_raw_parts_mut(ptr.add(a_start), self.live_node_words),
+                std::slice::from_raw_parts_mut(ptr.add(b_start), self.live_node_words),
+            )
+        }
+    }
+
+    pub(super) fn copy(&mut self, dst: LiveNode, src: LiveNode) {
+        if dst == src {
+            return;
+        }
+
+        let (dst_row, src_row) = self.pick2_rows_mut(dst, src);
+        dst_row.copy_from_slice(src_row);
+    }
+
+    /// Sets `dst` to the union of `dst` and `src`, returns true if `dst` was
+    /// changed.
+    pub(super) fn union(&mut self, dst: LiveNode, src: LiveNode) -> bool {
+        if dst == src {
+            return false;
+        }
+
+        let mut changed = false;
+        let (dst_row, src_row) = self.pick2_rows_mut(dst, src);
+        for (dst_word, src_word) in dst_row.iter_mut().zip(src_row.iter()) {
+            let old = *dst_word;
+            let new = *dst_word | src_word;
+            *dst_word = new;
+            changed |= old != new;
+        }
+        changed
+    }
+
+    pub(super) fn get_reader(&self, ln: LiveNode, var: Variable) -> bool {
+        let (word, shift) = self.word_and_shift(ln, var);
+        (self.words[word] >> shift) & Self::RWU_READER != 0
+    }
+
+    pub(super) fn get_writer(&self, ln: LiveNode, var: Variable) -> bool {
+        let (word, shift) = self.word_and_shift(ln, var);
+        (self.words[word] >> shift) & Self::RWU_WRITER != 0
+    }
+
+    pub(super) fn get_used(&self, ln: LiveNode, var: Variable) -> bool {
+        let (word, shift) = self.word_and_shift(ln, var);
+        (self.words[word] >> shift) & Self::RWU_USED != 0
+    }
+
+    pub(super) fn get(&self, ln: LiveNode, var: Variable) -> RWU {
+        let (word, shift) = self.word_and_shift(ln, var);
+        let rwu_packed = self.words[word] >> shift;
+        RWU {
+            reader: rwu_packed & Self::RWU_READER != 0,
+            writer: rwu_packed & Self::RWU_WRITER != 0,
+            used: rwu_packed & Self::RWU_USED != 0,
+        }
+    }
+
+    pub(super) fn set(&mut self, ln: LiveNode, var: Variable, rwu: RWU) {
+        let mut packed = 0;
+        if rwu.reader {
+            packed |= Self::RWU_READER;
+        }
+        if rwu.writer {
+            packed |= Self::RWU_WRITER;
+        }
+        if rwu.used {
+            packed |= Self::RWU_USED;
+        }
+
+        let (word, shift) = self.word_and_shift(ln, var);
+        let word = &mut self.words[word];
+        *word = (*word & !(Self::RWU_MASK << shift)) | (packed << shift)
+    }
+}
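The packing scheme this file documents is easy to check standalone: each RWU occupies 4 bits (reader/writer/used plus one padding bit), so a `u8` word holds two of them, and `word_and_shift` is plain index arithmetic. A sketch reproducing just that arithmetic outside the compiler:

```rust
const RWU_BITS: usize = 4;
const WORD_BITS: usize = 8;
const WORD_RWU_COUNT: usize = WORD_BITS / RWU_BITS; // 2 RWUs per u8

// Mirrors RWUTable::word_and_shift with plain usizes instead of newtypes.
fn word_and_shift(var: usize, live_node: usize, live_node_words: usize) -> (usize, u32) {
    let word = var / WORD_RWU_COUNT;
    let shift = RWU_BITS * (var % WORD_RWU_COUNT);
    (live_node * live_node_words + word, shift as u32)
}

fn main() {
    // 5 variables need ceil(5 / 2) = 3 words per live node.
    let live_node_words = (5 + WORD_RWU_COUNT - 1) / WORD_RWU_COUNT;
    assert_eq!(live_node_words, 3);
    // Variable 3 of live node 1 lives in word 1*3 + 1 = 4, at bit offset 4.
    assert_eq!(word_and_shift(3, 1, live_node_words), (4, 4));
}
```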
@@ -548,11 +548,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // we may want to suggest removing a `&`.
         if sm.is_imported(expr.span) {
             if let Ok(src) = sm.span_to_snippet(sp) {
-                if let Some(src) = self.replace_prefix(&src, "&", "") {
+                if let Some(src) = src.strip_prefix('&') {
                     return Some((
                         sp,
                         "consider removing the borrow",
-                        src,
+                        src.to_string(),
                         Applicability::MachineApplicable,
                     ));
                 }
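`str::strip_prefix` returns `Some(rest)` only when the prefix is actually present, which is both clearer and cheaper than replacing the prefix with an empty string and comparing. The snippet text below is illustrative:

```rust
fn main() {
    let src = "&foo.bar";
    match src.strip_prefix('&') {
        Some(rest) => assert_eq!(rest, "foo.bar"), // borrow removed
        None => unreachable!("src starts with '&'"),
    }
    // Without the '&' prefix, strip_prefix signals that with None
    // instead of silently returning an unchanged string.
    assert_eq!("foo.bar".strip_prefix('&'), None);
}
```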
@@ -388,7 +388,7 @@ impl str {
 }

 fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
-    use core::unicode::derived_property::{Case_Ignorable, Cased};
+    use core::unicode::{Case_Ignorable, Cased};
     match iter.skip_while(|&c| Case_Ignorable(c)).next() {
         Some(c) => Cased(c),
         None => false,
@@ -18,17 +18,14 @@ mod unicode_data;
 pub const UNICODE_VERSION: (u8, u8, u8) = unicode_data::UNICODE_VERSION;

-// For use in liballoc, not re-exported in libstd.
-pub mod derived_property {
-    pub use super::{Case_Ignorable, Cased};
-}
+pub use unicode_data::{
+    case_ignorable::lookup as Case_Ignorable, cased::lookup as Cased, conversions,
+};

-pub use unicode_data::alphabetic::lookup as Alphabetic;
-pub use unicode_data::case_ignorable::lookup as Case_Ignorable;
-pub use unicode_data::cased::lookup as Cased;
-pub use unicode_data::cc::lookup as Cc;
-pub use unicode_data::conversions;
-pub use unicode_data::grapheme_extend::lookup as Grapheme_Extend;
-pub use unicode_data::lowercase::lookup as Lowercase;
-pub use unicode_data::n::lookup as N;
-pub use unicode_data::uppercase::lookup as Uppercase;
-pub use unicode_data::white_space::lookup as White_Space;
+pub(crate) use unicode_data::alphabetic::lookup as Alphabetic;
+pub(crate) use unicode_data::cc::lookup as Cc;
+pub(crate) use unicode_data::grapheme_extend::lookup as Grapheme_Extend;
+pub(crate) use unicode_data::lowercase::lookup as Lowercase;
+pub(crate) use unicode_data::n::lookup as N;
+pub(crate) use unicode_data::uppercase::lookup as Uppercase;
+pub(crate) use unicode_data::white_space::lookup as White_Space;
@@ -198,7 +198,7 @@ simply delete the `pre-commit` file from .git/hooks."
         };
     };

-    Ok(if should_install {
+    if should_install {
         let src = src_path.join("src").join("etc").join("pre-commit.sh");
         let git = t!(Command::new("git").args(&["rev-parse", "--git-common-dir"]).output().map(
             |output| {
@@ -217,5 +217,6 @@ simply delete the `pre-commit` file from .git/hooks."
         };
     } else {
         println!("Ok, skipping installation!");
-    })
+    }
+
+    Ok(())
 }
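The bootstrap fix above in miniature: wrapping a whole `if`/`else` in `Ok(...)` type-checks because both branches end in `()`, but it reads as if the branches produced a meaningful value. Running the `if` as a statement and returning `Ok(())` separately says what is meant. Names here are illustrative, not bootstrap's:

```rust
use std::io;

fn install(should_install: bool) -> io::Result<()> {
    if should_install {
        println!("installing hook...");
    } else {
        println!("Ok, skipping installation!");
    }
    Ok(())
}

fn main() {
    install(false).unwrap();
}
```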
@@ -213,19 +213,102 @@ Then run the `cov` tool, with the `profdata` file and all test binaries:
 $ cargo cov -- report \
     --use-color --ignore-filename-regex='/.cargo/registry' \
     --instr-profile=json5format.profdata \
-    target/debug/deps/lib-30768f9c53506dc5 \
-    target/debug/deps/json5format-fececd4653271682
+    --object target/debug/deps/lib-30768f9c53506dc5 \
+    --object target/debug/deps/json5format-fececd4653271682
 $ cargo cov -- show \
     --use-color --ignore-filename-regex='/.cargo/registry' \
     --instr-profile=json5format.profdata \
-    target/debug/deps/lib-30768f9c53506dc5 \
-    target/debug/deps/json5format-fececd4653271682 \
+    --object target/debug/deps/lib-30768f9c53506dc5 \
+    --object target/debug/deps/json5format-fececd4653271682 \
     --show-instantiations --show-line-counts-or-regions \
     --Xdemangler=rustfilt | less -R
 ```

 _Note the command line option `--ignore-filename-regex=/.cargo/registry`, which excludes the sources for dependencies from the coverage results._

+### Tips for listing the binaries automatically
+
+For `bash` users, one suggested way to automatically complete the `cov` command with the list of binaries is with a command like:
+
+```bash
+$ cargo cov -- report \
+    $( \
+      for file in \
+        $( \
+          RUSTFLAGS="-Zinstrument-coverage" \
+            cargo test --tests --no-run --message-format=json \
+              | jq -r "select(.profile.test == true) | .filenames[]" \
+              | grep -v dSYM - \
+        ); \
+      do \
+        printf "%s %s " -object $file; \
+      done \
+    ) \
+  --instr-profile=json5format.profdata --summary-only # and/or other options
+```
+
+Adding `--no-run --message-format=json` to the _same_ `cargo test` command used to run
+the tests (including the same environment variables and flags) generates output in a JSON
+format that `jq` can easily query.
+
+The `printf` command takes this list and generates the `--object <binary>` arguments
+for each listed test binary.
+
+### Including doc tests
+
+The previous examples run `cargo test` with `--tests`, which excludes doc tests.[^79417]
+
+To include doc tests in the coverage results, drop the `--tests` flag, and apply the
+`-Zinstrument-coverage` flag, and some doc-test-specific options in the
+`RUSTDOCFLAGS` environment variable. (The `cargo profdata` command does not change.)
+
+```bash
+$ RUSTFLAGS="-Zinstrument-coverage" \
+  RUSTDOCFLAGS="-Zinstrument-coverage -Zunstable-options --persist-doctests target/debug/doctestbins" \
+  LLVM_PROFILE_FILE="json5format-%m.profraw" \
+    cargo test
+$ cargo profdata -- merge \
+    -sparse json5format-*.profraw -o json5format.profdata
+```
+
+The `-Zunstable-options --persist-doctests` flag is required, to save the test binaries
+(with their coverage maps) for `llvm-cov`.
+
+```bash
+$ cargo cov -- report \
+    $( \
+      for file in \
+        $( \
+          RUSTFLAGS="-Zinstrument-coverage" \
+          RUSTDOCFLAGS="-Zinstrument-coverage -Zunstable-options --persist-doctests target/debug/doctestbins" \
+            cargo test --no-run --message-format=json \
+              | jq -r "select(.profile.test == true) | .filenames[]" \
+              | grep -v dSYM - \
+        ) \
+        target/debug/doctestbins/*/rust_out; \
+      do \
+        [[ -x $file ]] && printf "%s %s " -object $file; \
+      done \
+    ) \
+  --instr-profile=json5format.profdata --summary-only # and/or other options
+```
+
+Note, the differences in this `cargo cov` command, compared with the version without
+doc tests, include:
+
+* The `cargo test ... --no-run` command is updated with the same environment variables
+  and flags used to _build_ the tests, _including_ the doc tests. (`LLVM_PROFILE_FILE`
+  is only used when _running_ the tests.)
+* The file glob pattern `target/debug/doctestbins/*/rust_out` adds the `rust_out`
+  binaries generated for doc tests (note, however, that some `rust_out` files may not
+  be executable binaries).
+* `[[ -x $file ]] &&` filters the files passed on to the `printf`, to include only
+  executable binaries.
+
+[^79417]: There is ongoing work to resolve a known issue
+[(#79417)](https://github.com/rust-lang/rust/issues/79417) that doc test coverage
+generates incorrect source line numbers in `llvm-cov show` results.
+
 ## Other references

 Rust's implementation and workflow for source-based code coverage is based on the same library and tools used to implement [source-based code coverage in Clang]. (This document is partially based on the Clang guide.)
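For context on the doc-test section added above: each doc test is compiled to its own `rust_out` binary under the `--persist-doctests` directory. A doc test as small as this would produce one (hypothetical `mylib` crate, not part of the json5format example):

```rust
/// Adds two numbers.
///
/// ```
/// assert_eq!(mylib::add(2, 2), 4);
/// ```
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}
```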
@@ -391,7 +391,7 @@ impl<'a, I: Iterator<Item = Event<'a>>> Iterator for LinkReplacer<'a, I> {
                 _,
             ))) => {
                 debug!("saw end of shortcut link to {}", dest);
-                if self.links.iter().find(|&link| *link.href == **dest).is_some() {
+                if self.links.iter().any(|link| *link.href == **dest) {
                     assert!(self.shortcut_link.is_some(), "saw closing link without opening tag");
                     self.shortcut_link = None;
                 }
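Same simplification in standalone form: `any` asks the yes/no question directly and short-circuits, where `find(...).is_some()` builds and then discards the found item. Data here is illustrative:

```rust
fn main() {
    let hrefs = ["a", "b", "c"];
    let dest = "b";
    assert!(hrefs.iter().any(|href| *href == dest));
    // Equivalent but noisier:
    assert!(hrefs.iter().find(|&&href| href == dest).is_some());
}
```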
@@ -1469,16 +1469,21 @@ function defocusSearchBar() {
         });

         if (e.which === 38) { // up
-            if (!actives[currentTab].length ||
-                !actives[currentTab][0].previousElementSibling) {
-                return;
+            if (e.ctrlKey) { // Going through result tabs.
+                printTab(currentTab > 0 ? currentTab - 1 : 2);
+            } else {
+                if (!actives[currentTab].length ||
+                    !actives[currentTab][0].previousElementSibling) {
+                    return;
+                }
+                addClass(actives[currentTab][0].previousElementSibling, "highlighted");
+                removeClass(actives[currentTab][0], "highlighted");
             }
-
-            addClass(actives[currentTab][0].previousElementSibling, "highlighted");
-            removeClass(actives[currentTab][0], "highlighted");
             e.preventDefault();
         } else if (e.which === 40) { // down
-            if (!actives[currentTab].length) {
+            if (e.ctrlKey) { // Going through result tabs.
+                printTab(currentTab > 1 ? 0 : currentTab + 1);
+            } else if (!actives[currentTab].length) {
                 var results = document.getElementById("results").childNodes;
                 if (results.length > 0) {
                     var res = results[currentTab].getElementsByClassName("result");
@@ -1496,13 +1501,6 @@ function defocusSearchBar() {
                     document.location.href =
                         actives[currentTab][0].getElementsByTagName("a")[0].href;
             }
-        } else if (e.which === 9) { // tab
-            if (e.shiftKey) {
-                printTab(currentTab > 0 ? currentTab - 1 : 2);
-            } else {
-                printTab(currentTab > 1 ? 0 : currentTab + 1);
-            }
-            e.preventDefault();
         } else if (e.which === 16) { // shift
             // Does nothing, it's just to avoid losing "focus" on the highlighted element.
         } else if (actives[currentTab].length > 0) {
@@ -2898,11 +2896,14 @@ function defocusSearchBar() {
             ["T", "Focus the theme picker menu"],
             ["↑", "Move up in search results"],
             ["↓", "Move down in search results"],
-            ["↹", "Switch tab"],
+            ["ctrl + ↑ / ↓", "Switch result tab"],
             ["⏎", "Go to active search result"],
             ["+", "Expand all sections"],
             ["-", "Collapse all sections"],
-        ].map(x => "<dt><kbd>" + x[0] + "</kbd></dt><dd>" + x[1] + "</dd>").join("");
+        ].map(x => "<dt>" +
+                   x[0].split(" ")
+                       .map((y, index) => (index & 1) === 0 ? "<kbd>" + y + "</kbd>" : y)
+                       .join("") + "</dt><dd>" + x[1] + "</dd>").join("");
         var div_shortcuts = document.createElement("div");
         addClass(div_shortcuts, "shortcuts");
         div_shortcuts.innerHTML = "<h2>Keyboard Shortcuts</h2><dl>" + shortcuts + "</dl></div>";
@@ -1 +1 @@
-Subproject commit 7ade8dc4b84142abd3e6d1fb8a0f4111b0bbd571
+Subproject commit 8d78ad13896b955f630714f386a95ed91b237e3d
@@ -147,13 +147,19 @@ else
 # Note `llvm-cov show` output for some programs can vary, but can be ignored
 # by inserting `// ignore-llvm-cov-show-diffs` at the top of the source file.
 #
-# FIXME(richkadel): It looks like most past variations seem to have been mitigated. None of the
-# Rust test source samples have the `// ignore-llvm-cov-show-diffs` anymore. The main variation
-# I had seen (and is still present in the new `coverage/lib/used_crate.rs`) is the `llvm-cov show`
-# reporting of multiple instantiations of a generic function with different type substitutions.
-# For some reason, `llvm-cov show` can report these in a non-deterministic order, breaking the
-# `diff` comparision. I was able to work around the problem with `diff --ignore-matching-lines=RE`
+# FIXME(richkadel): None of the Rust test source samples have the
+# `// ignore-llvm-cov-show-diffs` anymore. This directive exists to work around a limitation
+# with `llvm-cov show`. When reporting coverage for multiple instantiations of a generic function,
+# with different type substitutions, `llvm-cov show` prints these in a non-deterministic order,
+# breaking the `diff` comparision.
+#
+# A partial workaround is implemented below, with `diff --ignore-matching-lines=RE`
 # to ignore each line prefixing each generic instantiation coverage code region.
+#
+# This workaround only works if the coverage counts are identical across all reported
+# instantiations. If there is no way to ensure this, you may need to apply the
+# `// ignore-llvm-cov-show-diffs` directive, and rely on the `.json` and counter
+# files for validating results have not changed.

 $(DIFF) --ignore-matching-lines='::<.*>.*:$$' \
 	expected_show_coverage.$@.txt "$(TMPDIR)"/actual_show_coverage.$@.txt || \
|
||||
$(call BIN,"$(TMPDIR)"/$@) \
|
||||
| "$(PYTHON)" $(BASEDIR)/prettify_json.py \
|
||||
> "$(TMPDIR)"/actual_export_coverage.$@.json
|
||||
# FIXME(richkadel): With the addition of `--ignore-matching-lines=RE` to ignore the
|
||||
# non-deterministically-ordered coverage results for multiple instantiations of generics with
|
||||
# differing type substitutions, I probably don't need the `.json` files anymore (and may not
|
||||
# need `prettify_json.py` either).
|
||||
|
||||
ifdef RUSTC_BLESS_TEST
|
||||
cp "$(TMPDIR)"/actual_export_coverage.$@.json expected_export_coverage.$@.json
|
||||
|
@@ -19,12 +19,12 @@
    18|      2|    println!("used_only_from_bin_crate_generic_function with {:?}", arg);
    19|      2|}
 ------------------
-| used_crate::used_only_from_bin_crate_generic_function::<&alloc::vec::Vec<i32>>:
+| used_crate::used_only_from_bin_crate_generic_function::<&str>:
 |   17|      1|pub fn used_only_from_bin_crate_generic_function<T: Debug>(arg: T) {
 |   18|      1|    println!("used_only_from_bin_crate_generic_function with {:?}", arg);
 |   19|      1|}
 ------------------
-| used_crate::used_only_from_bin_crate_generic_function::<&str>:
+| used_crate::used_only_from_bin_crate_generic_function::<&alloc::vec::Vec<i32>>:
 |   17|      1|pub fn used_only_from_bin_crate_generic_function<T: Debug>(arg: T) {
 |   18|      1|    println!("used_only_from_bin_crate_generic_function with {:?}", arg);
 |   19|      1|}
@@ -35,9 +35,6 @@ Counter in file 0 11:1 -> 11:2, (#2 + (#1 - #2))
 Counter in file 0 21:1 -> 21:23, #1
 Counter in file 0 67:5 -> 67:23, #1
 Counter in file 0 38:1 -> 38:19, #1
-Counter in file 0 29:1 -> 29:22, #1
-Counter in file 0 93:1 -> 101:2, #1
-Counter in file 0 91:1 -> 91:25, #1
 Counter in file 0 38:19 -> 42:12, #1
 Counter in file 0 43:9 -> 43:10, #3
 Counter in file 0 43:14 -> 43:18, (#1 + 0)
@@ -49,11 +46,14 @@ Counter in file 0 44:27 -> 44:32, #8
 Counter in file 0 44:36 -> 44:38, (#6 + 0)
 Counter in file 0 45:14 -> 45:16, #7
 Counter in file 0 47:1 -> 47:2, (#5 + (#6 + #7))
+Counter in file 0 29:1 -> 29:22, #1
+Counter in file 0 93:1 -> 101:2, #1
+Counter in file 0 91:1 -> 91:25, #1
 Counter in file 0 51:5 -> 52:18, #1
 Counter in file 0 53:13 -> 53:14, #2
 Counter in file 0 63:13 -> 63:14, (#1 - #2)
 Counter in file 0 65:5 -> 65:6, (#2 + (#1 - #2))
 Counter in file 0 13:20 -> 13:21, #1
 Counter in file 0 17:20 -> 17:21, #1
 Counter in file 0 49:1 -> 68:12, #1
 Counter in file 0 69:9 -> 69:10, #2
 Counter in file 0 69:14 -> 69:27, (#1 + 0)
@@ -69,8 +69,8 @@ Counter in file 0 86:14 -> 86:16, #2
 Counter in file 0 87:14 -> 87:16, #3
 Counter in file 0 89:1 -> 89:2, (#3 + (#2 + (#1 - (#3 + #2))))
-Counter in file 0 17:1 -> 17:20, #1
 Counter in file 0 17:20 -> 17:21, #1
 Counter in file 0 66:5 -> 66:23, #1
 Counter in file 0 13:20 -> 13:21, #1
 Counter in file 0 17:9 -> 17:10, #1
+Counter in file 0 17:9 -> 17:10, #1
 Counter in file 0 117:17 -> 117:19, #1
@@ -32,12 +32,12 @@ Combined regions:
     10:5 -> 12:6 (count=1)
 Segment at 10:5 (count = 1), RegionEntry
 Segment at 12:6 (count = 0), Skipped
-Emitting segments for function: _RNvXs_Cs4fqI2P2rA04_8genericsINtB4_8FireworklENtNtNtCs6HRHKMTmAen_4core3ops4drop4Drop4dropB4_
+Emitting segments for function: _RNvXs_Cs4fqI2P2rA04_8genericsINtB4_8FireworklENtNtNtCs3rFBWs28XFJ_4core3ops4drop4Drop4dropB4_
 Combined regions:
     17:5 -> 19:6 (count=1)
 Segment at 17:5 (count = 1), RegionEntry
 Segment at 19:6 (count = 0), Skipped
-Emitting segments for function: _RNvXs_Cs4fqI2P2rA04_8genericsINtB4_8FireworkdENtNtNtCs6HRHKMTmAen_4core3ops4drop4Drop4dropB4_
+Emitting segments for function: _RNvXs_Cs4fqI2P2rA04_8genericsINtB4_8FireworkdENtNtNtCs3rFBWs28XFJ_4core3ops4drop4Drop4dropB4_
 Combined regions:
     17:5 -> 19:6 (count=1)
 Segment at 17:5 (count = 1), RegionEntry
@@ -1,5 +1,5 @@
-Counter in file 0 25:1 -> 27:2, #1
-Counter in file 0 17:1 -> 19:2, #1
+Counter in file 0 25:1 -> 27:2, #1
+Counter in file 0 17:1 -> 19:2, #1
 Counter in file 0 5:1 -> 12:2, #1
 Counter in file 0 17:1 -> 19:2, 0
@@ -78,17 +78,17 @@ Segment at 51:1 (count = 0), RegionEntry
 Segment at 51:2 (count = 0), Skipped
 Segment at 53:1 (count = 1), RegionEntry
 Segment at 61:2 (count = 0), Skipped
-Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate41used_only_from_bin_crate_generic_functionRINtNtCsFAjihUSTht_5alloc3vec3VeclEECs4fqI2P2rA04_10uses_crate
 Combined regions:
     17:1 -> 19:2 (count=1)
 Segment at 17:1 (count = 1), RegionEntry
 Segment at 19:2 (count = 0), Skipped
 Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate41used_only_from_bin_crate_generic_functionReECs4fqI2P2rA04_10uses_crate
 Combined regions:
     17:1 -> 19:2 (count=1)
 Segment at 17:1 (count = 1), RegionEntry
 Segment at 19:2 (count = 0), Skipped
-Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate46used_only_from_this_lib_crate_generic_functionINtNtCsFAjihUSTht_5alloc3vec3VeclEEB2_
+Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate41used_only_from_bin_crate_generic_functionRINtNtCs3QflaznQylx_5alloc3vec3VeclEECs4fqI2P2rA04_10uses_crate
 Combined regions:
     17:1 -> 19:2 (count=1)
 Segment at 17:1 (count = 1), RegionEntry
 Segment at 19:2 (count = 0), Skipped
+Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate46used_only_from_this_lib_crate_generic_functionINtNtCs3QflaznQylx_5alloc3vec3VeclEEB2_
 Combined regions:
     21:1 -> 23:2 (count=1)
 Segment at 21:1 (count = 1), RegionEntry
@@ -98,7 +98,7 @@ Combined regions:
     21:1 -> 23:2 (count=1)
 Segment at 21:1 (count = 1), RegionEntry
 Segment at 23:2 (count = 0), Skipped
-Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate50used_from_bin_crate_and_lib_crate_generic_functionINtNtCsFAjihUSTht_5alloc3vec3VeclEECs4fqI2P2rA04_10uses_crate
+Emitting segments for function: _RINvCsbDqzXfLQacH_10used_crate50used_from_bin_crate_and_lib_crate_generic_functionINtNtCs3QflaznQylx_5alloc3vec3VeclEECs4fqI2P2rA04_10uses_crate
 Combined regions:
     25:1 -> 27:2 (count=1)
 Segment at 25:1 (count = 1), RegionEntry
@@ -36,7 +36,7 @@ const TEST_REPOS: &[Test] = &[
     Test {
         name: "xsv",
         repo: "https://github.com/BurntSushi/xsv",
-        sha: "66956b6bfd62d6ac767a6b6499c982eae20a2c9f",
+        sha: "3de6c04269a7d315f7e9864b9013451cd9580a08",
         lock: None,
         packages: &[],
     },
@@ -143,8 +143,8 @@ impl<'a> LintExtractor<'a> {
                 Some((lineno, line)) => {
                     let line = line.trim();
                     if let Some(text) = line.strip_prefix("/// ") {
-                        doc_lines.push(text.trim().to_string());
-                    } else if line.starts_with("///") {
+                        doc_lines.push(text.to_string());
+                    } else if line == "///" {
                         doc_lines.push("".to_string());
                     } else if line.starts_with("// ") {
                         // Ignore comments.
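Why the lint-docs fix matters: stripping the `"/// "` prefix must not also trim the rest of the line, or indented lines inside doc code blocks lose their leading spaces. Illustrative input, not a real lint doc:

```rust
fn main() {
    let line = "///     indented code";
    let text = line.strip_prefix("/// ").unwrap();
    assert_eq!(text, "    indented code"); // indentation preserved
    assert_eq!(text.trim(), "indented code"); // the old behavior broke this
}
```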