Mirror of https://github.com/rust-lang/rust.git, synced 2025-05-14 02:49:40 +00:00
Auto merge of #82780 - cjgillot:dep-stream, r=michaelwoerister
Stream the dep-graph to a file instead of storing it in memory. This is a reimplementation of #60035.

Instead of keeping the dep-graph in memory, the nodes are encoded into a temporary file as they are created. At the end of a successful compilation, this file is renamed to become the persistent dep-graph, which is decoded during the next compilation session. This two-file scheme avoids overwriting the dep-graph when a compilation fails or crashes.

The structure of the file is changed to a sequence of `(DepNode, Fingerprint, EdgesVec)`; the deserialization is responsible for converting this back into the more compressed representation. The `node_count` and `edge_count` are stored in the last 16 bytes of the file, in order to accurately reserve capacity for the vectors.

At the end of the compilation, the encoder is flushed and dropped. The graph is not usable after this point: any creation of a node will ICE. I had to retrofit the debugging options, which is not really pretty.
This commit is contained in: commit d474075a8f
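As a rough illustration of the scheme described in the commit message, here is a minimal, hypothetical sketch in plain Rust (not the actual rustc code): records are streamed to a staging file, the node and edge counts are appended as the last 16 bytes, and the staging file is renamed into place only on success, so a failed run never clobbers the previous graph. The `Record` type, the function names, and the on-disk layout below are illustrative assumptions, not the real `DepNode`/`Fingerprint` encoding.

use std::fs::{self, File};
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::path::Path;

// Stand-in for one streamed entry: `(DepNode, Fingerprint, EdgesVec)`.
struct Record {
    node: u64,
    fingerprint: u64,
    edges: Vec<u64>,
}

// Stream records into `staging`, append the node/edge counts as the last
// 16 bytes, then rename the staging file over `final_path`. A crashed or
// failed run never touches the previously persisted file.
fn stream_graph(staging: &Path, final_path: &Path, records: &[Record]) -> io::Result<()> {
    let mut file = File::create(staging)?;
    let (mut node_count, mut edge_count) = (0u64, 0u64);

    for r in records {
        // Encode each record as it arrives; nothing is retained in memory.
        file.write_all(&r.node.to_le_bytes())?;
        file.write_all(&r.fingerprint.to_le_bytes())?;
        file.write_all(&(r.edges.len() as u64).to_le_bytes())?;
        for e in &r.edges {
            file.write_all(&e.to_le_bytes())?;
        }
        node_count += 1;
        edge_count += r.edges.len() as u64;
    }

    // Trailer: the two counts occupy the final 16 bytes of the file.
    file.write_all(&node_count.to_le_bytes())?;
    file.write_all(&edge_count.to_le_bytes())?;
    file.flush()?;
    drop(file);

    // Publish atomically: the persistent dep-graph only changes on success.
    fs::rename(staging, final_path)
}

// Read just the trailer, so a decoder can reserve vector capacity up front.
fn read_counts(path: &Path) -> io::Result<(u64, u64)> {
    let mut file = File::open(path)?;
    file.seek(SeekFrom::End(-16))?;
    let (mut nodes, mut edges) = ([0u8; 8], [0u8; 8]);
    file.read_exact(&mut nodes)?;
    file.read_exact(&mut edges)?;
    Ok((u64::from_le_bytes(nodes), u64::from_le_bytes(edges)))
}

fn main() -> io::Result<()> {
    let records = vec![
        Record { node: 0, fingerprint: 0xabcd, edges: vec![] },
        Record { node: 1, fingerprint: 0x1234, edges: vec![0] },
    ];
    stream_graph(Path::new("dep-graph.part.bin"), Path::new("dep-graph.bin"), &records)?;
    let (n, e) = read_counts(Path::new("dep-graph.bin"))?;
    println!("nodes = {}, edges = {}", n, e);
    Ok(())
}

The rename is what makes the two-file scheme robust: the staging file absorbs all streaming writes, and the persistent file is only replaced once the trailer has been written and the encoder flushed, mirroring the `staging_dep_graph_path` / `dep_graph_path` split visible in the diff below.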
@@ -40,8 +40,9 @@ use rustc_graphviz as dot;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_middle::dep_graph::debug::{DepNodeFilter, EdgeFilter};
use rustc_middle::dep_graph::{DepGraphQuery, DepKind, DepNode, DepNodeExt};
use rustc_middle::dep_graph::{
DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter,
};
use rustc_middle::hir::map::Map;
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::{sym, Symbol};
@@ -54,7 +55,7 @@ use std::io::{BufWriter, Write};
pub fn assert_dep_graph(tcx: TyCtxt<'_>) {
tcx.dep_graph.with_ignore(|| {
if tcx.sess.opts.debugging_opts.dump_dep_graph {
dump_graph(tcx);
tcx.dep_graph.with_query(dump_graph);
}

if !tcx.sess.opts.debugging_opts.query_dep_graph {

@@ -200,29 +201,29 @@ fn check_paths<'tcx>(tcx: TyCtxt<'tcx>, if_this_changed: &Sources, then_this_wou
}
return;
}
let query = tcx.dep_graph.query();
for &(_, source_def_id, ref source_dep_node) in if_this_changed {
let dependents = query.transitive_predecessors(source_dep_node);
for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
if !dependents.contains(&target_dep_node) {
tcx.sess.span_err(
target_span,
&format!(
"no path from `{}` to `{}`",
tcx.def_path_str(source_def_id),
target_pass
),
);
} else {
tcx.sess.span_err(target_span, "OK");
tcx.dep_graph.with_query(|query| {
for &(_, source_def_id, ref source_dep_node) in if_this_changed {
let dependents = query.transitive_predecessors(source_dep_node);
for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
if !dependents.contains(&target_dep_node) {
tcx.sess.span_err(
target_span,
&format!(
"no path from `{}` to `{}`",
tcx.def_path_str(source_def_id),
target_pass
),
);
} else {
tcx.sess.span_err(target_span, "OK");
}
}
}
}
});
}

fn dump_graph(tcx: TyCtxt<'_>) {
fn dump_graph(query: &DepGraphQuery) {
let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| "dep_graph".to_string());
let query = tcx.dep_graph.query();

let nodes = match env::var("RUST_DEP_GRAPH_FILTER") {
Ok(string) => {
@@ -14,7 +14,7 @@ mod assert_dep_graph;
pub mod assert_module_sources;
mod persist;

pub use assert_dep_graph::assert_dep_graph;
use assert_dep_graph::assert_dep_graph;
pub use persist::copy_cgu_workproduct_to_incr_comp_cache_dir;
pub use persist::delete_workproduct_files;
pub use persist::finalize_session_directory;

@@ -26,4 +26,4 @@ pub use persist::prepare_session_directory;
pub use persist::save_dep_graph;
pub use persist::save_work_product_index;
pub use persist::LoadResult;
pub use persist::{load_dep_graph, DepGraphFuture};
pub use persist::{build_dep_graph, load_dep_graph, DepGraphFuture};
@@ -14,7 +14,6 @@
//! the required condition is not met.

use rustc_ast::{self as ast, Attribute, NestedMetaItem};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};

@@ -381,10 +380,7 @@ impl DirtyCleanVisitor<'tcx> {
fn assert_dirty(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_dirty({:?})", dep_node);

let current_fingerprint = self.get_fingerprint(&dep_node);
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);

if current_fingerprint == prev_fingerprint {
if self.tcx.dep_graph.is_green(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx
.sess

@@ -392,28 +388,10 @@ impl DirtyCleanVisitor<'tcx> {
}
}

fn get_fingerprint(&self, dep_node: &DepNode) -> Option<Fingerprint> {
if self.tcx.dep_graph.dep_node_exists(dep_node) {
let dep_node_index = self.tcx.dep_graph.dep_node_index_of(dep_node);
Some(self.tcx.dep_graph.fingerprint_of(dep_node_index))
} else {
None
}
}

fn assert_clean(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_clean({:?})", dep_node);

let current_fingerprint = self.get_fingerprint(&dep_node);
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);

// if the node wasn't previously evaluated and now is (or vice versa),
// then the node isn't actually clean or dirty.
if (current_fingerprint == None) ^ (prev_fingerprint == None) {
return;
}

if current_fingerprint != prev_fingerprint {
if self.tcx.dep_graph.is_red(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx
.sess
@@ -122,6 +122,7 @@ mod tests;

const LOCK_FILE_EXT: &str = ".lock";
const DEP_GRAPH_FILENAME: &str = "dep-graph.bin";
const STAGING_DEP_GRAPH_FILENAME: &str = "dep-graph.part.bin";
const WORK_PRODUCTS_FILENAME: &str = "work-products.bin";
const QUERY_CACHE_FILENAME: &str = "query-cache.bin";

@@ -134,6 +135,9 @@ const INT_ENCODE_BASE: usize = base_n::CASE_INSENSITIVE;
pub fn dep_graph_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME)
}
pub fn staging_dep_graph_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, STAGING_DEP_GRAPH_FILENAME)
}
pub fn dep_graph_path_from(incr_comp_session_dir: &Path) -> PathBuf {
in_incr_comp_dir(incr_comp_session_dir, DEP_GRAPH_FILENAME)
}
@@ -5,7 +5,7 @@ use rustc_hir::definitions::Definitions;
use rustc_middle::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
use rustc_middle::ty::query::OnDiskCache;
use rustc_serialize::opaque::Decoder;
use rustc_serialize::Decodable as RustcDecodable;
use rustc_serialize::Decodable;
use rustc_session::Session;
use std::path::Path;

@@ -120,7 +120,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
// Decode the list of work_products
let mut work_product_decoder = Decoder::new(&work_products_data[..], start_pos);
let work_products: Vec<SerializedWorkProduct> =
RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
Decodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
let msg = format!(
"Error decoding `work-products` from incremental \
compilation session directory: {}",
@@ -18,6 +18,7 @@ pub use fs::prepare_session_directory;
pub use load::load_query_result_cache;
pub use load::LoadResult;
pub use load::{load_dep_graph, DepGraphFuture};
pub use save::build_dep_graph;
pub use save::save_dep_graph;
pub use save::save_work_product_index;
pub use work_product::copy_cgu_workproduct_to_incr_comp_cache_dir;
@@ -1,6 +1,6 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::join;
use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId};
use rustc_middle::dep_graph::{DepGraph, PreviousDepGraph, WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_serialize::Encodable as RustcEncodable;

@@ -15,6 +15,9 @@ use super::file_format;
use super::fs::*;
use super::work_product;

/// Save and dump the DepGraph.
///
/// No query must be invoked after this function.
pub fn save_dep_graph(tcx: TyCtxt<'_>) {
debug!("save_dep_graph()");
tcx.dep_graph.with_ignore(|| {

@@ -29,6 +32,14 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {

let query_cache_path = query_cache_path(sess);
let dep_graph_path = dep_graph_path(sess);
let staging_dep_graph_path = staging_dep_graph_path(sess);

sess.time("assert_dep_graph", || crate::assert_dep_graph(tcx));
sess.time("check_dirty_clean", || dirty_clean::check_dirty_clean_annotations(tcx));

if sess.opts.debugging_opts.incremental_info {
tcx.dep_graph.print_incremental_info()
}

join(
move || {

@@ -36,16 +47,26 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
save_in(sess, query_cache_path, "query cache", |e| encode_query_cache(tcx, e));
});
},
|| {
move || {
sess.time("incr_comp_persist_dep_graph", || {
save_in(sess, dep_graph_path, "dependency graph", |e| {
sess.time("incr_comp_encode_dep_graph", || encode_dep_graph(tcx, e))
});
if let Err(err) = tcx.dep_graph.encode(&tcx.sess.prof) {
sess.err(&format!(
"failed to write dependency graph to `{}`: {}",
staging_dep_graph_path.display(),
err
));
}
if let Err(err) = fs::rename(&staging_dep_graph_path, &dep_graph_path) {
sess.err(&format!(
"failed to move dependency graph from `{}` to `{}`: {}",
staging_dep_graph_path.display(),
dep_graph_path.display(),
err
));
}
});
},
);

dirty_clean::check_dirty_clean_annotations(tcx);
})
}

@@ -92,7 +113,7 @@ pub fn save_work_product_index(
});
}

fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F)
pub(crate) fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F)
where
F: FnOnce(&mut FileEncoder) -> FileEncodeResult,
{

@@ -144,21 +165,6 @@ where
debug!("save: data written to disk successfully");
}
fn encode_dep_graph(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult {
// First encode the commandline arguments hash
tcx.sess.opts.dep_tracking_hash().encode(encoder)?;

if tcx.sess.opts.debugging_opts.incremental_info {
tcx.dep_graph.print_incremental_info();
}

// There is a tiny window between printing the incremental info above and encoding the dep
// graph below in which the dep graph could change, thus making the printed incremental info
// slightly out of date. If this matters to you, please feel free to submit a patch. :)

tcx.sess.time("incr_comp_encode_serialized_dep_graph", || tcx.dep_graph.encode(encoder))
}

fn encode_work_product_index(
work_products: &FxHashMap<WorkProductId, WorkProduct>,
encoder: &mut FileEncoder,

@@ -177,3 +183,56 @@ fn encode_work_product_index(
fn encode_query_cache(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult {
tcx.sess.time("incr_comp_serialize_result_cache", || tcx.serialize_query_result_cache(encoder))
}

pub fn build_dep_graph(
sess: &Session,
prev_graph: PreviousDepGraph,
prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
) -> Option<DepGraph> {
if sess.opts.incremental.is_none() {
// No incremental compilation.
return None;
}

// Stream the dep-graph to an alternate file, to avoid overwriting anything in case of errors.
let path_buf = staging_dep_graph_path(sess);

let mut encoder = match FileEncoder::new(&path_buf) {
Ok(encoder) => encoder,
Err(err) => {
sess.err(&format!(
"failed to create dependency graph at `{}`: {}",
path_buf.display(),
err
));
return None;
}
};

if let Err(err) = file_format::write_file_header(&mut encoder, sess.is_nightly_build()) {
sess.err(&format!(
"failed to write dependency graph header to `{}`: {}",
path_buf.display(),
err
));
return None;
}

// First encode the commandline arguments hash
if let Err(err) = sess.opts.dep_tracking_hash().encode(&mut encoder) {
sess.err(&format!(
"failed to write dependency graph hash `{}`: {}",
path_buf.display(),
err
));
return None;
}

Some(DepGraph::new(
prev_graph,
prev_work_products,
encoder,
sess.opts.debugging_opts.query_dep_graph,
sess.opts.debugging_opts.incremental_info,
))
}
@@ -1021,9 +1021,6 @@ pub fn start_codegen<'tcx>(
rustc_symbol_mangling::test::report_symbol_names(tcx);
}

tcx.sess.time("assert_dep_graph", || rustc_incremental::assert_dep_graph(tcx));
tcx.sess.time("serialize_dep_graph", || rustc_incremental::save_dep_graph(tcx));

info!("Post-codegen\n{:?}", tcx.debug_stats());

if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) {
@@ -207,7 +207,13 @@ impl<'tcx> Queries<'tcx> {
})
.open(self.session())
});
DepGraph::new(prev_graph, prev_work_products)

rustc_incremental::build_dep_graph(
self.session(),
prev_graph,
prev_work_products,
)
.unwrap_or_else(DepGraph::new_disabled)
}
})
})

@@ -435,6 +441,9 @@ impl Compiler {
if self.session().opts.debugging_opts.query_stats {
gcx.enter(rustc_query_impl::print_stats);
}

self.session()
.time("serialize_dep_graph", || gcx.enter(rustc_incremental::save_dep_graph));
}

_timer = Some(self.session().timer("free_global_ctxt"));
@@ -8,8 +8,8 @@ use rustc_session::Session;
mod dep_node;

pub use rustc_query_system::dep_graph::{
debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex,
WorkProduct, WorkProductId,
debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex,
SerializedDepNodeIndex, WorkProduct, WorkProductId,
};

crate use dep_node::make_compile_codegen_unit;

@@ -20,6 +20,7 @@ pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>;
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;

impl rustc_query_system::dep_graph::DepKind for DepKind {
const NULL: Self = DepKind::Null;
@@ -477,10 +477,7 @@ macro_rules! define_queries {
return
}

debug_assert!(tcx.dep_graph
.node_color(dep_node)
.map(|c| c.is_green())
.unwrap_or(false));
debug_assert!(tcx.dep_graph.is_green(dep_node));

let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
if queries::$name::cache_on_disk(tcx, &key, None) {
@@ -1,6 +1,8 @@
//! Code for debugging the dep-graph.

use super::{DepKind, DepNode};
use super::{DepKind, DepNode, DepNodeIndex};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use std::error::Error;

/// A dep-node filter goes from a user-defined string to a query over

@@ -34,13 +36,14 @@ impl DepNodeFilter {

/// A filter like `F -> G` where `F` and `G` are valid dep-node
/// filters. This can be used to test the source/target independently.
pub struct EdgeFilter {
pub struct EdgeFilter<K: DepKind> {
pub source: DepNodeFilter,
pub target: DepNodeFilter,
pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>,
}

impl EdgeFilter {
pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
impl<K: DepKind> EdgeFilter<K> {
pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> {
let parts: Vec<_> = test.split("->").collect();
if parts.len() != 2 {
Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into())

@@ -48,12 +51,13 @@ impl EdgeFilter {
Ok(EdgeFilter {
source: DepNodeFilter::new(parts[0]),
target: DepNodeFilter::new(parts[1]),
index_to_node: Lock::new(FxHashMap::default()),
})
}
}

#[cfg(debug_assertions)]
pub fn test<K: DepKind>(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
self.source.test(source) && self.target.test(target)
}
}
(File diff suppressed because it is too large.)
@@ -13,6 +13,7 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};

use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_serialize::{opaque::FileEncoder, Encodable};
use rustc_session::Session;

use std::fmt;

@@ -59,7 +60,7 @@ impl<T: DepContext> HasDepContext for T {
}

/// Describe the different families of dependency nodes.
pub trait DepKind: Copy + fmt::Debug + Eq + Hash {
pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
const NULL: Self;

/// Return whether this kind always requires evaluation.
@@ -1,34 +1,43 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
use rustc_index::vec::IndexVec;

use super::{DepKind, DepNode};
use super::{DepKind, DepNode, DepNodeIndex};

pub struct DepGraphQuery<K> {
pub graph: Graph<DepNode<K>, ()>,
pub indices: FxHashMap<DepNode<K>, NodeIndex>,
pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
}

impl<K: DepKind> DepGraphQuery<K> {
pub fn new(
nodes: &[DepNode<K>],
edge_list_indices: &[(usize, usize)],
edge_list_data: &[usize],
) -> DepGraphQuery<K> {
let mut graph = Graph::with_capacity(nodes.len(), edge_list_data.len());
let mut indices = FxHashMap::default();
for node in nodes {
indices.insert(*node, graph.add_node(*node));
}
pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
let node_count = prev_node_count + prev_node_count / 4;
let edge_count = 6 * node_count;

for (source, &(start, end)) in edge_list_indices.iter().enumerate() {
for &target in &edge_list_data[start..end] {
let source = indices[&nodes[source]];
let target = indices[&nodes[target]];
graph.add_edge(source, target, ());
let graph = Graph::with_capacity(node_count, edge_count);
let indices = FxHashMap::default();
let dep_index_to_index = IndexVec::new();

DepGraphQuery { graph, indices, dep_index_to_index }
}

pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
let source = self.graph.add_node(node);
if index.index() >= self.dep_index_to_index.len() {
self.dep_index_to_index.resize(index.index() + 1, None);
}
self.dep_index_to_index[index] = Some(source);
self.indices.insert(node, source);

for &target in edges.iter() {
let target = self.dep_index_to_index[target];
// We may miss the edges that are pushed while the `DepGraphQuery` is being accessed.
// Skip them to avoid issues.
if let Some(target) = target {
self.graph.add_edge(source, target, ());
}
}

DepGraphQuery { graph, indices }
}

pub fn nodes(&self) -> Vec<&DepNode<K>> {
@@ -1,9 +1,28 @@
//! The data that we will serialize and deserialize.
//!
//! The dep-graph is serialized as a sequence of NodeInfo, with the dependencies
//! specified inline. The total number of nodes and edges are stored as the last
//! 16 bytes of the file, so we can find them easily at decoding time.
//!
//! The serialisation is performed on-demand when each node is emitted. Using this
//! scheme, we do not need to keep the current graph in memory.
//!
//! The deserialisation is performed manually, in order to convert from the stored
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge count are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.

use super::{DepKind, DepNode};
use super::query::DepGraphQuery;
use super::{DepKind, DepNode, DepNodeIndex};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_index::vec::IndexVec;
use rustc_serialize::{Decodable, Decoder};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_index::vec::{Idx, IndexVec};
use rustc_serialize::opaque::{self, FileEncodeResult, FileEncoder, IntEncodedWithFixedSize};
use rustc_serialize::{Decodable, Decoder, Encodable};
use smallvec::SmallVec;
use std::convert::TryInto;

// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
// unused so that we can store multiple index types in `CompressedHybridIndex`,
@@ -50,78 +69,239 @@ impl<K: DepKind> SerializedDepGraph<K> {
}
}

impl<D: Decoder, K: DepKind + Decodable<D>> Decodable<D> for SerializedDepGraph<K> {
fn decode(d: &mut D) -> Result<SerializedDepGraph<K>, D::Error> {
// We used to serialize the dep graph by creating and serializing a `SerializedDepGraph`
// using data copied from the `DepGraph`. But copying created a large memory spike, so we
// now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we
// deserialize that data into a `SerializedDepGraph` in the next compilation session, we
// need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to
// be in sync. If you update this decoding, be sure to update the encoding, and vice-versa.
//
// We mimic the sequence of `Encode` and `Encodable` method calls used by the `DepGraph`'s
// `Encodable` implementation with the corresponding sequence of `Decode` and `Decodable`
// method calls. E.g. `Decode::read_struct` pairs with `Encode::emit_struct`, `DepNode`'s
// `decode` pairs with `DepNode`'s `encode`, and so on. Any decoding methods not associated
// with corresponding encoding methods called in `DepGraph`'s `Encodable` implementation
// are off limits, because we'd be relying on their implementation details.
//
// For example, because we know it happens to do the right thing, it's tempting to just use
// `IndexVec`'s `Decodable` implementation to decode into some of the collections below,
// even though `DepGraph` doesn't use its `Encodable` implementation. But the `IndexVec`
// implementation could change, and we'd have a bug.
//
// Variables below are explicitly typed so that anyone who changes the `SerializedDepGraph`
// representation without updating this function will encounter a compilation error, and
// know to update this and possibly the `DepGraph` `Encodable` implementation accordingly
// (the latter should serialize data in a format compatible with our representation).
impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder<'a>>
for SerializedDepGraph<K>
{
#[instrument(skip(d))]
fn decode(d: &mut opaque::Decoder<'a>) -> Result<SerializedDepGraph<K>, String> {
let start_position = d.position();

d.read_struct("SerializedDepGraph", 4, |d| {
let nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>> =
d.read_struct_field("nodes", 0, |d| {
// The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position());
d.set_position(d.data.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE);
debug!("position: {:?}", d.position());

let node_count = IntEncodedWithFixedSize::decode(d)?.0 as usize;
let edge_count = IntEncodedWithFixedSize::decode(d)?.0 as usize;
debug!(?node_count, ?edge_count);

debug!("position: {:?}", d.position());
d.set_position(start_position);
debug!("position: {:?}", d.position());

let mut nodes = IndexVec::with_capacity(node_count);
let mut fingerprints = IndexVec::with_capacity(node_count);
let mut edge_list_indices = IndexVec::with_capacity(node_count);
let mut edge_list_data = Vec::with_capacity(edge_count);

for _index in 0..node_count {
d.read_struct("NodeInfo", 3, |d| {
let dep_node: DepNode<K> = d.read_struct_field("node", 0, Decodable::decode)?;
let _i: SerializedDepNodeIndex = nodes.push(dep_node);
debug_assert_eq!(_i.index(), _index);

let fingerprint: Fingerprint =
d.read_struct_field("fingerprint", 1, Decodable::decode)?;
let _i: SerializedDepNodeIndex = fingerprints.push(fingerprint);
debug_assert_eq!(_i.index(), _index);

d.read_struct_field("edges", 2, |d| {
d.read_seq(|d, len| {
let mut v = IndexVec::with_capacity(len);
for i in 0..len {
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
let start = edge_list_data.len().try_into().unwrap();
for e in 0..len {
let edge = d.read_seq_elt(e, Decodable::decode)?;
edge_list_data.push(edge);
}
Ok(v)
let end = edge_list_data.len().try_into().unwrap();
let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end));
debug_assert_eq!(_i.index(), _index);
Ok(())
})
})?;
})
})?;
}

let fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint> =
d.read_struct_field("fingerprints", 1, |d| {
d.read_seq(|d, len| {
let mut v = IndexVec::with_capacity(len);
for i in 0..len {
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
}
Ok(v)
})
})?;

let edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)> = d
.read_struct_field("edge_list_indices", 2, |d| {
d.read_seq(|d, len| {
let mut v = IndexVec::with_capacity(len);
for i in 0..len {
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
}
Ok(v)
})
})?;

let edge_list_data: Vec<SerializedDepNodeIndex> =
d.read_struct_field("edge_list_data", 3, |d| {
d.read_seq(|d, len| {
let mut v = Vec::with_capacity(len);
for i in 0..len {
v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
}
Ok(v)
})
})?;

Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data })
})
Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data })
}
}
#[derive(Debug, Encodable, Decodable)]
pub struct NodeInfo<K: DepKind> {
node: DepNode<K>,
fingerprint: Fingerprint,
edges: SmallVec<[DepNodeIndex; 8]>,
}

struct Stat<K: DepKind> {
kind: K,
node_counter: u64,
edge_counter: u64,
}

struct EncoderState<K: DepKind> {
encoder: FileEncoder,
total_node_count: usize,
total_edge_count: usize,
result: FileEncodeResult,
stats: Option<FxHashMap<K, Stat<K>>>,
}

impl<K: DepKind> EncoderState<K> {
fn new(encoder: FileEncoder, record_stats: bool) -> Self {
Self {
encoder,
total_edge_count: 0,
total_node_count: 0,
result: Ok(()),
stats: if record_stats { Some(FxHashMap::default()) } else { None },
}
}

#[instrument(skip(self, record_graph))]
fn encode_node(
&mut self,
node: &NodeInfo<K>,
record_graph: &Option<Lock<DepGraphQuery<K>>>,
) -> DepNodeIndex {
let index = DepNodeIndex::new(self.total_node_count);
self.total_node_count += 1;

let edge_count = node.edges.len();
self.total_edge_count += edge_count;

if let Some(record_graph) = &record_graph {
// Do not ICE when a query is called from within `with_query`.
if let Some(record_graph) = &mut record_graph.try_lock() {
record_graph.push(index, node.node, &node.edges);
}
}

if let Some(stats) = &mut self.stats {
let kind = node.node.kind;

let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
stat.node_counter += 1;
stat.edge_counter += edge_count as u64;
}

debug!(?index, ?node);
let encoder = &mut self.encoder;
if self.result.is_ok() {
self.result = node.encode(encoder);
}
index
}

fn finish(self) -> FileEncodeResult {
let Self { mut encoder, total_node_count, total_edge_count, result, stats: _ } = self;
let () = result?;

let node_count = total_node_count.try_into().unwrap();
let edge_count = total_edge_count.try_into().unwrap();

debug!(?node_count, ?edge_count);
debug!("position: {:?}", encoder.position());
IntEncodedWithFixedSize(node_count).encode(&mut encoder)?;
IntEncodedWithFixedSize(edge_count).encode(&mut encoder)?;
debug!("position: {:?}", encoder.position());
// Drop the encoder so that nothing is written after the counts.
encoder.flush()
}
}
pub struct GraphEncoder<K: DepKind> {
status: Lock<EncoderState<K>>,
record_graph: Option<Lock<DepGraphQuery<K>>>,
}

impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
pub fn new(
encoder: FileEncoder,
prev_node_count: usize,
record_graph: bool,
record_stats: bool,
) -> Self {
let record_graph =
if record_graph { Some(Lock::new(DepGraphQuery::new(prev_node_count))) } else { None };
let status = Lock::new(EncoderState::new(encoder, record_stats));
GraphEncoder { status, record_graph }
}

pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
if let Some(record_graph) = &self.record_graph {
f(&record_graph.lock())
}
}

pub(crate) fn print_incremental_info(
&self,
total_read_count: u64,
total_duplicate_read_count: u64,
) {
let status = self.status.lock();
if let Some(record_stats) = &status.stats {
let mut stats: Vec<_> = record_stats.values().collect();
stats.sort_by_key(|s| -(s.node_counter as i64));

const SEPARATOR: &str = "[incremental] --------------------------------\
----------------------------------------------\
------------";

eprintln!("[incremental]");
eprintln!("[incremental] DepGraph Statistics");
eprintln!("{}", SEPARATOR);
eprintln!("[incremental]");
eprintln!("[incremental] Total Node Count: {}", status.total_node_count);
eprintln!("[incremental] Total Edge Count: {}", status.total_edge_count);

if cfg!(debug_assertions) {
eprintln!("[incremental] Total Edge Reads: {}", total_read_count);
eprintln!(
"[incremental] Total Duplicate Edge Reads: {}",
total_duplicate_read_count
);
}

eprintln!("[incremental]");
eprintln!(
"[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
"Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
);
eprintln!("{}", SEPARATOR);

for stat in stats {
let node_kind_ratio =
(100.0 * (stat.node_counter as f64)) / (status.total_node_count as f64);
let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);

eprintln!(
"[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
format!("{:?}", stat.kind),
node_kind_ratio,
stat.node_counter,
node_kind_avg_edges,
);
}

eprintln!("{}", SEPARATOR);
eprintln!("[incremental]");
}
}

pub(crate) fn send(
&self,
profiler: &SelfProfilerRef,
node: DepNode<K>,
fingerprint: Fingerprint,
edges: SmallVec<[DepNodeIndex; 8]>,
) -> DepNodeIndex {
let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph");
let node = NodeInfo { node, fingerprint, edges };
self.status.lock().encode_node(&node, &self.record_graph)
}

pub fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph");
self.status.into_inner().finish()
}
}
@@ -2,6 +2,7 @@
#![feature(const_fn)]
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(drain_filter)]
#![feature(hash_raw_entry)]
#![feature(iter_zip)]
#![feature(min_specialization)]
@@ -449,9 +449,11 @@ where

let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
tcx.start_query(job.id, diagnostics, || {
tcx.dep_context()
.dep_graph()
.with_anon_task(query.dep_kind, || query.compute(tcx, key))
tcx.dep_context().dep_graph().with_anon_task(
*tcx.dep_context(),
query.dep_kind,
|| query.compute(tcx, key),
)
})
});

@@ -537,7 +539,7 @@ where
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
}

result

@@ -560,7 +562,7 @@ where
//
// See issue #82920 for an example of a miscompilation that would get turned into
// an ICE by this check
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);

result
}

@@ -570,14 +572,12 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
tcx: CTX::DepContext,
result: &V,
dep_node: &DepNode<CTX::DepKind>,
dep_node_index: DepNodeIndex,
query: &QueryVtable<CTX, K, V>,
) where
CTX: QueryContext,
{
assert!(
Some(tcx.dep_graph().fingerprint_of(dep_node_index))
== tcx.dep_graph().prev_fingerprint_of(dep_node),
tcx.dep_graph().is_green(dep_node),
"fingerprint for green query instance not loaded from cache: {:?}",
dep_node,
);

@@ -588,9 +588,15 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
debug!("END verify_ich({:?})", dep_node);

let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index);
let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);

assert!(new_hash == old_hash, "found unstable fingerprints for {:?}: {:?}", dep_node, result);
assert_eq!(
Some(new_hash),
old_hash,
"found unstable fingerprints for {:?}: {:?}",
dep_node,
result
);
}

fn force_query_with_job<C, CTX>(
@@ -981,7 +981,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
OP: FnOnce(&mut Self) -> R,
{
let (result, dep_node) =
self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || op(self));
self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self));
self.tcx().dep_graph.read_index(dep_node);
(result, dep_node)
}
@@ -1,5 +1,5 @@
// check-pass
// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/issue-64964
// edition:2018

// Regression test for ICE related to `await`ing in a method + incr. comp. (#64964)

@@ -1,7 +1,7 @@
// Test that when a trait impl changes, fns whose body uses that trait
// must also be recompiled.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-assoc-type-codegen

#![feature(rustc_attrs)]
#![allow(warnings)]

@@ -1,7 +1,7 @@
// Test that immediate callers have to change when callee changes, but
// not callers' callers.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-caller-callee

#![feature(rustc_attrs)]
#![allow(dead_code)]

@@ -1,7 +1,7 @@
// Test cases where a changing struct appears in the signature of fns
// and methods.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-struct-signature

#![feature(rustc_attrs)]
#![allow(dead_code)]

@@ -1,7 +1,7 @@
// Test that adding an impl to a trait `Foo` DOES affect functions
// that only use `Bar` if they have methods in common.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-trait-impl-two-traits-same-method

#![feature(rustc_attrs)]
#![allow(dead_code)]

@@ -1,7 +1,7 @@
// Test that adding an impl to a trait `Foo` does not affect functions
// that only use `Bar`, so long as they do not have methods in common.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-trait-impl-two-traits

#![feature(rustc_attrs)]
#![allow(warnings)]

@@ -1,7 +1,7 @@
// Test that when a trait impl changes, fns whose body uses that trait
// must also be recompiled.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-trait-impl

#![feature(rustc_attrs)]
#![allow(warnings)]

@@ -1,6 +1,6 @@
// Test that changing what a `type` points to does not go unnoticed.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-type-alias

#![feature(rustc_attrs)]
#![allow(dead_code)]

@@ -1,7 +1,7 @@
// Test that changing what a `type` points to does not go unnoticed
// by the variance analysis.

// compile-flags: -Z query-dep-graph
// compile-flags: -Z query-dep-graph -C incremental=tmp/dep-graph-variance-alias

#![feature(rustc_attrs)]
#![allow(dead_code)]