mirror of https://github.com/rust-lang/rust.git (synced 2025-04-17 22:46:50 +00:00)

commit d7e73e4b1a

Auto merge of #44901 - michaelwoerister:on-demand-eval, r=nikomatsakis

incr.comp.: Switch to red/green change tracking, remove legacy system.

This PR finally switches incremental compilation to [red/green tracking](https://github.com/rust-lang/rust/issues/42293) and completely removes the legacy dependency-graph implementation -- which includes a few quite costly passes that are simply not needed with the new system anymore. There is still some documentation to be done, and there is certainly still lots of optimizing and tuning ahead -- but with this PR the foundation for red/green is in place. This has been in the making for a long time `:)`

r? @nikomatsakis

cc @alexcrichton, @rust-lang/compiler
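For orientation before the diff: under red/green tracking, every node of the previous session's dependency graph gets a color in the new session -- green if its result is unchanged, red if it changed -- and a node can be marked green *without* re-running its query when all of its dependencies can be marked green (otherwise the dependency's query is forced and the resulting fingerprint decides its color). The following is a minimal, self-contained sketch of that recursion; the `Node`/`Graph` types here are illustrative stand-ins, not the compiler's API, which appears in `DepGraph::try_mark_green` further down in this diff.

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Node(&'static str);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Color { Red, Green }

/// Toy model of the previous session's dep-graph (illustration only).
struct Graph {
    /// Edges recorded in the previous session: node -> its dependencies.
    deps: HashMap<Node, Vec<Node>>,
    /// Colors decided so far in the current session.
    colors: HashMap<Node, Color>,
    /// Input nodes whose values changed since the previous session.
    changed_inputs: Vec<Node>,
}

impl Graph {
    /// Try to mark `node` green without recomputing it: succeed exactly
    /// when every previous-session dependency can be marked green.
    fn try_mark_green(&mut self, node: Node) -> bool {
        if let Some(&c) = self.colors.get(&node) {
            return c == Color::Green; // already colored
        }
        let deps = self.deps.get(&node).cloned().unwrap_or_default();
        if deps.is_empty() {
            // An input: red exactly when its value changed.
            let green = !self.changed_inputs.contains(&node);
            self.colors.insert(node, if green { Color::Green } else { Color::Red });
            return green;
        }
        for dep in deps {
            if !self.try_mark_green(dep) {
                // One red dependency poisons `node`: its query must be
                // re-run. (The real compiler would also try to *force*
                // the dependency's query here before giving up.)
                self.colors.insert(node, Color::Red);
                return false;
            }
        }
        self.colors.insert(node, Color::Green);
        true
    }
}

fn main() {
    let mut g = Graph {
        deps: HashMap::from([(Node("typeck(foo)"), vec![Node("hir(foo)")])]),
        colors: HashMap::new(),
        changed_inputs: vec![], // nothing changed -> everything stays green
    };
    assert!(g.try_mark_green(Node("typeck(foo)")));
    println!("colors: {:?}", g.colors);
}
```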
@@ -65,8 +65,8 @@ use hir::map::DefPathHash;
use hir::{HirId, ItemLocalId};

use ich::Fingerprint;
use ty::{TyCtxt, Instance, InstanceDef};
use ty::fast_reject::SimplifiedType;
use ty::{TyCtxt, Instance, InstanceDef, ParamEnvAnd, Ty};
use ty::subst::Substs;
use rustc_data_structures::stable_hasher::{StableHasher, HashStable};
use ich::StableHashingContext;
use std::fmt;
@@ -347,7 +347,7 @@ impl fmt::Debug for DepNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.kind)?;

        if !self.kind.has_params() {
        if !self.kind.has_params() && !self.kind.is_anon() {
            return Ok(());
        }

@@ -356,14 +356,14 @@ impl fmt::Debug for DepNode {
        ::ty::tls::with_opt(|opt_tcx| {
            if let Some(tcx) = opt_tcx {
                if let Some(def_id) = self.extract_def_id(tcx) {
                    write!(f, "{}", tcx.item_path_str(def_id))?;
                    write!(f, "{}", tcx.def_path(def_id).to_string(tcx))?;
                } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*self) {
                    write!(f, "{}", s)?;
                } else {
                    write!(f, "{:?}", self.hash)?;
                    write!(f, "{}", self.hash)?;
                }
            } else {
                write!(f, "{:?}", self.hash)?;
                write!(f, "{}", self.hash)?;
            }
            Ok(())
        })?;
@@ -430,7 +430,6 @@ define_dep_nodes!( <'tcx>
    [] RegionScopeTree(DefId),
    [] Coherence,
    [] CoherenceInherentImplOverlapCheck,
    [] Resolve,
    [] CoherenceCheckTrait(DefId),
    [] PrivacyAccessLevels(CrateNum),

@@ -447,10 +446,8 @@ define_dep_nodes!( <'tcx>
    [] MirBorrowCheck(DefId),
    [] UnsafetyViolations(DefId),

    [] RvalueCheck(DefId),
    [] Reachability,
    [] MirKeys,
    [] TransWriteMetadata,
    [] CrateVariances,

    // Nodes representing bits of computed IR in the tcx. Each shared
@@ -484,32 +481,23 @@ define_dep_nodes!( <'tcx>
    [] TypeckBodiesKrate,
    [] TypeckTables(DefId),
    [] HasTypeckTables(DefId),
    [anon] ConstEval,
    [] ConstEval { param_env: ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)> },
    [] SymbolName(DefId),
    [] InstanceSymbolName { instance: Instance<'tcx> },
    [] SpecializationGraph(DefId),
    [] ObjectSafety(DefId),

    [anon] IsCopy,
    [anon] IsSized,
    [anon] IsFreeze,
    [anon] NeedsDrop,
    [anon] Layout,
    [] IsCopy { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> },
    [] IsSized { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> },
    [] IsFreeze { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> },
    [] NeedsDrop { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> },
    [] Layout { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> },

    // The set of impls for a given trait.
    [] TraitImpls(DefId),
    [] RelevantTraitImpls(DefId, SimplifiedType),

    [] AllLocalTraitImpls,

    // Nodes representing caches. To properly handle a true cache, we
    // don't use a DepTrackingMap, but rather we push a task node.
    // Otherwise the write into the map would be incorrectly
    // attributed to the first task that happened to fill the cache,
    // which would yield an overly conservative dep-graph.
    [] TraitItems(DefId),
    [] ReprHints(DefId),

    // Trait selection cache is a little funny. Given a trait
    // reference like `Foo: SomeTrait<Bar>`, there could be
    // arbitrarily many def-ids to map on in there (e.g., `Foo`,
@@ -537,10 +525,6 @@ define_dep_nodes!( <'tcx>
    // trait-select node.
    [anon] TraitSelect,

    // For proj. cache, we just keep a list of all def-ids, since it is
    // not a hotspot.
    [] ProjectionCache { def_ids: DefIdList },

    [] ParamEnv(DefId),
    [] DescribeDef(DefId),
    [] DefSpan(DefId),
@@ -598,7 +582,6 @@ define_dep_nodes!( <'tcx>
    [] MissingLangItems(CrateNum),
    [] ExternConstBody(DefId),
    [] VisibleParentMap,
    [] IsDirectExternCrate(CrateNum),
    [] MissingExternCrateItem(CrateNum),
    [] UsedCrateSource(CrateNum),
    [] PostorderCnums,
@@ -618,6 +601,9 @@ define_dep_nodes!( <'tcx>
    [] CodegenUnit(InternedString),
    [] CompileCodegenUnit(InternedString),
    [] OutputFilenames,

    // We use this for most things when incr. comp. is turned off.
    [] Null,
);

trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug {
@@ -719,40 +705,6 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId, De
    }
}


impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefIdList,) {
    const CAN_RECONSTRUCT_QUERY_KEY: bool = false;

    // We actually would not need to specialize the implementation of this
    // method but it's faster to combine the hashes than to instantiate a full
    // hashing context and stable-hashing state.
    fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint {
        let mut fingerprint = Fingerprint::zero();

        for &def_id in self.0.iter() {
            let def_path_hash = tcx.def_path_hash(def_id);
            fingerprint = fingerprint.combine(def_path_hash.0);
        }

        fingerprint
    }

    fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String {
        use std::fmt::Write;

        let mut s = String::new();
        write!(&mut s, "[").unwrap();

        for &def_id in self.0.iter() {
            write!(&mut s, "{}", tcx.def_path(def_id).to_string(tcx)).unwrap();
        }

        write!(&mut s, "]").unwrap();

        s
    }
}

impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (HirId,) {
    const CAN_RECONSTRUCT_QUERY_KEY: bool = false;

@@ -811,4 +763,3 @@ impl_stable_hash_for!(struct ::dep_graph::WorkProductId {
    hash
});

type DefIdList = Vec<DefId>;
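A pattern worth noting in the hunks above: several dep-nodes switch from `[anon]` to carrying their query key inline (e.g. `ConstEval`, `IsCopy`, and `Layout` now store a `param_env`). An anonymous node is identified only by a hash of its inputs, so the query behind it can never be re-executed from the node alone; embedding the key makes the node reconstructible, which `force_from_dep_node` (in the last file of this diff) relies on. A toy contrast, with hypothetical stand-in types rather than the compiler's:

```rust
/// Illustration only: two ways a dep-node can identify itself.
#[derive(Clone, Copy, Debug)]
enum DepNodeKey {
    /// Old `[anon]` style: only a hash of the inputs survives,
    /// so the original query key is unrecoverable.
    Anon { hash: u64 },
    /// New style: the query key (here a toy `param_env` id) is
    /// stored in the node itself.
    IsCopy { param_env: u32 },
}

/// Try to re-execute the query behind a node so it gets a color.
fn try_force(node: DepNodeKey) -> bool {
    match node {
        // Nothing we can do: the key needed to call the query is gone.
        DepNodeKey::Anon { .. } => false,
        // The key is embedded, so the query can simply be re-run.
        DepNodeKey::IsCopy { param_env } => {
            run_is_copy_query(param_env);
            true
        }
    }
}

fn run_is_copy_query(_param_env: u32) {
    // Stand-in for the real query provider.
}

fn main() {
    assert!(!try_force(DepNodeKey::Anon { hash: 0xdead_beef }));
    assert!(try_force(DepNodeKey::IsCopy { param_env: 42 }));
}
```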
@@ -1,266 +0,0 @@
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use ich::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::StableHasher;
use std::env;
use std::hash::Hash;
use std::mem;
use super::{DepGraphQuery, DepKind, DepNode};
use super::debug::EdgeFilter;

pub(super) struct DepGraphEdges {
    nodes: Vec<DepNode>,
    indices: FxHashMap<DepNode, DepNodeIndex>,
    edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>,
    task_stack: Vec<OpenTask>,
    forbidden_edge: Option<EdgeFilter>,

    // A set to help assert that no two tasks use the same DepNode. This is a
    // temporary measure. Once we load the previous dep-graph as readonly, this
    // check will fall out of the graph implementation naturally.
    opened_once: FxHashSet<DepNode>,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(super) struct DepNodeIndex {
    index: u32,
}

impl DepNodeIndex {

    pub const INVALID: DepNodeIndex = DepNodeIndex { index: ::std::u32::MAX };

    fn new(v: usize) -> DepNodeIndex {
        assert!((v & 0xFFFF_FFFF) == v);
        DepNodeIndex { index: v as u32 }
    }

    fn index(self) -> usize {
        self.index as usize
    }
}

#[derive(Clone, Debug, PartialEq)]
enum OpenTask {
    Regular {
        node: DepNode,
        reads: Vec<DepNode>,
        read_set: FxHashSet<DepNode>,
    },
    Anon {
        reads: Vec<DepNode>,
        read_set: FxHashSet<DepNode>,
    },
    Ignore,
}

impl DepGraphEdges {
    pub fn new() -> DepGraphEdges {
        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => {
                    match EdgeFilter::new(&s) {
                        Ok(f) => Some(f),
                        Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                    }
                }
                Err(_) => None,
            }
        } else {
            None
        };

        DepGraphEdges {
            nodes: vec![],
            indices: FxHashMap(),
            edges: FxHashSet(),
            task_stack: Vec::new(),
            forbidden_edge,
            opened_once: FxHashSet(),
        }
    }

    fn id(&self, index: DepNodeIndex) -> DepNode {
        self.nodes[index.index()]
    }

    pub fn push_ignore(&mut self) {
        self.task_stack.push(OpenTask::Ignore);
    }

    pub fn pop_ignore(&mut self) {
        let popped_node = self.task_stack.pop().unwrap();
        debug_assert_eq!(popped_node, OpenTask::Ignore);
    }

    pub fn push_task(&mut self, key: DepNode) {
        if !self.opened_once.insert(key) {
            bug!("Re-opened node {:?}", key)
        }

        self.task_stack.push(OpenTask::Regular {
            node: key,
            reads: Vec::new(),
            read_set: FxHashSet(),
        });
    }

    pub fn pop_task(&mut self, key: DepNode) -> DepNodeIndex {
        let popped_node = self.task_stack.pop().unwrap();

        if let OpenTask::Regular {
            node,
            read_set: _,
            reads
        } = popped_node {
            debug_assert_eq!(node, key);
            debug_assert!(!node.kind.is_input() || reads.is_empty());

            let target_id = self.get_or_create_node(node);

            for read in reads.into_iter() {
                let source_id = self.get_or_create_node(read);
                self.edges.insert((source_id, target_id));
            }

            target_id
        } else {
            bug!("pop_task() - Expected regular task to be popped")
        }
    }

    pub fn push_anon_task(&mut self) {
        self.task_stack.push(OpenTask::Anon {
            reads: Vec::new(),
            read_set: FxHashSet(),
        });
    }

    pub fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex {
        let popped_node = self.task_stack.pop().unwrap();

        if let OpenTask::Anon {
            read_set: _,
            reads
        } = popped_node {
            let mut fingerprint = Fingerprint::zero();
            let mut hasher = StableHasher::new();

            for read in reads.iter() {
                mem::discriminant(&read.kind).hash(&mut hasher);

                // Fingerprint::combine() is faster than sending Fingerprint
                // through the StableHasher (at least as long as StableHasher
                // is so slow).
                fingerprint = fingerprint.combine(read.hash);
            }

            fingerprint = fingerprint.combine(hasher.finish());

            let target_dep_node = DepNode {
                kind,
                hash: fingerprint,
            };

            if let Some(&index) = self.indices.get(&target_dep_node) {
                return index;
            }

            let target_id = self.get_or_create_node(target_dep_node);

            for read in reads.into_iter() {
                let source_id = self.get_or_create_node(read);
                self.edges.insert((source_id, target_id));
            }

            target_id
        } else {
            bug!("pop_anon_task() - Expected anonymous task to be popped")
        }
    }

    /// Indicates that the current task `C` reads `v` by adding an
    /// edge from `v` to `C`. If there is no current task, has no
    /// effect. Note that *reading* from tracked state is harmless if
    /// you are not in a task; what is bad is *writing* to tracked
    /// state (and leaking data that you read into a tracked task).
    pub fn read(&mut self, source: DepNode) {
        match self.task_stack.last_mut() {
            Some(&mut OpenTask::Regular {
                node: target,
                ref mut reads,
                ref mut read_set,
            }) => {
                if read_set.insert(source) {
                    reads.push(source);

                    if cfg!(debug_assertions) {
                        if let Some(ref forbidden_edge) = self.forbidden_edge {
                            if forbidden_edge.test(&source, &target) {
                                bug!("forbidden edge {:?} -> {:?} created", source, target)
                            }
                        }
                    }
                }
            }
            Some(&mut OpenTask::Anon {
                ref mut reads,
                ref mut read_set,
            }) => {
                if read_set.insert(source) {
                    reads.push(source);
                }
            }
            Some(&mut OpenTask::Ignore) | None => {
                // ignore
            }
        }
    }

    pub fn read_index(&mut self, source: DepNodeIndex) {
        let dep_node = self.nodes[source.index()];
        self.read(dep_node);
    }

    pub fn query(&self) -> DepGraphQuery {
        let edges: Vec<_> = self.edges.iter()
                                      .map(|&(i, j)| (self.id(i), self.id(j)))
                                      .collect();
        DepGraphQuery::new(&self.nodes, &edges)
    }

    #[inline]
    pub fn add_edge(&mut self, source: DepNode, target: DepNode) {
        let source = self.get_or_create_node(source);
        let target = self.get_or_create_node(target);
        self.edges.insert((source, target));
    }

    pub fn add_node(&mut self, node: DepNode) -> DepNodeIndex {
        self.get_or_create_node(node)
    }

    #[inline]
    fn get_or_create_node(&mut self, dep_node: DepNode) -> DepNodeIndex {
        let DepGraphEdges {
            ref mut indices,
            ref mut nodes,
            ..
        } = *self;

        *indices.entry(dep_node).or_insert_with(|| {
            let next_id = nodes.len();
            nodes.push(dep_node);
            DepNodeIndex::new(next_id)
        })
    }
}
@@ -14,17 +14,19 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use session::config::OutputType;
use std::cell::{Ref, RefCell};
use std::env;
use std::hash::Hash;
use std::rc::Rc;
use ty::TyCtxt;
use util::common::{ProfileQueriesMsg, profq_msg};

use ich::Fingerprint;

use super::debug::EdgeFilter;
use super::dep_node::{DepNode, DepKind, WorkProductId};
use super::query::DepGraphQuery;
use super::raii;
use super::safe::DepGraphSafe;
use super::edges::{self, DepGraphEdges};
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::prev::PreviousDepGraph;

@@ -42,28 +44,44 @@ pub struct DepGraph {
    fingerprints: Rc<RefCell<FxHashMap<DepNode, Fingerprint>>>
}

/// As a temporary measure, while transitioning to the new DepGraph
/// implementation, we maintain the old and the new dep-graph encoding in
/// parallel, so a DepNodeIndex actually contains two indices, one for each
/// version.

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DepNodeIndex {
    legacy: edges::DepNodeIndex,
    new: DepNodeIndexNew,
    index: u32,
}

impl Idx for DepNodeIndex {
    fn new(idx: usize) -> Self {
        debug_assert!((idx & 0xFFFF_FFFF) == idx);
        DepNodeIndex { index: idx as u32 }
    }
    fn index(self) -> usize {
        self.index as usize
    }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex {
        legacy: edges::DepNodeIndex::INVALID,
        new: DepNodeIndexNew::INVALID,
    const INVALID: DepNodeIndex = DepNodeIndex {
        index: ::std::u32::MAX,
    };
}

struct DepGraphData {
    /// The old, initial encoding of the dependency graph. This will soon go
    /// away.
    edges: RefCell<DepGraphEdges>,
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex)
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph into
@@ -74,6 +92,8 @@ struct DepGraphData {
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph,

    colors: RefCell<FxHashMap<DepNode, DepNodeColor>>,

    /// When we load, there may be `.o` files, cached mir, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
@@ -84,6 +104,9 @@ struct DepGraphData {
    work_products: RefCell<FxHashMap<WorkProductId, WorkProduct>>,

    dep_node_debug: RefCell<FxHashMap<DepNode, String>>,

    // Used for testing, only populated when -Zquery-dep-graph is specified.
    loaded_from_cache: RefCell<FxHashMap<DepNodeIndex, bool>>,
}

impl DepGraph {
@@ -93,10 +116,11 @@ impl DepGraph {
        data: Some(Rc::new(DepGraphData {
            previous_work_products: RefCell::new(FxHashMap()),
            work_products: RefCell::new(FxHashMap()),
            edges: RefCell::new(DepGraphEdges::new()),
            dep_node_debug: RefCell::new(FxHashMap()),
            current: RefCell::new(CurrentDepGraph::new()),
            previous: prev_graph,
            colors: RefCell::new(FxHashMap()),
            loaded_from_cache: RefCell::new(FxHashMap()),
        })),
        fingerprints: Rc::new(RefCell::new(FxHashMap())),
    }
@@ -116,12 +140,22 @@ impl DepGraph {
    }

    pub fn query(&self) -> DepGraphQuery {
        self.data.as_ref().unwrap().edges.borrow().query()
        let current_dep_graph = self.data.as_ref().unwrap().current.borrow();
        let nodes: Vec<_> = current_dep_graph.nodes.iter().cloned().collect();
        let mut edges = Vec::new();
        for (index, edge_targets) in current_dep_graph.edges.iter_enumerated() {
            let from = current_dep_graph.nodes[index];
            for &edge_target in edge_targets {
                let to = current_dep_graph.nodes[edge_target];
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }

    pub fn in_ignore<'graph>(&'graph self) -> Option<raii::IgnoreTask<'graph>> {
        self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.edges,
                                                            &data.current))
        self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.current))
    }

    pub fn with_ignore<OP,R>(&self, op: OP) -> R
@@ -168,7 +202,8 @@ impl DepGraph {
          R: HashStable<HCX>,
    {
        if let Some(ref data) = self.data {
            data.edges.borrow_mut().push_task(key);
            debug_assert!(!data.colors.borrow().contains_key(&key));

            data.current.borrow_mut().push_task(key);
            if cfg!(debug_assertions) {
                profq_msg(ProfileQueriesMsg::TaskBegin(key.clone()))
@@ -186,31 +221,52 @@ impl DepGraph {
                profq_msg(ProfileQueriesMsg::TaskEnd)
            };

            let dep_node_index_legacy = data.edges.borrow_mut().pop_task(key);
            let dep_node_index_new = data.current.borrow_mut().pop_task(key);
            let dep_node_index = data.current.borrow_mut().pop_task(key);

            let mut stable_hasher = StableHasher::new();
            result.hash_stable(&mut hcx, &mut stable_hasher);

            assert!(self.fingerprints
                        .borrow_mut()
                        .insert(key, stable_hasher.finish())
                        .is_none());
            let current_fingerprint = stable_hasher.finish();

            (result, DepNodeIndex {
                legacy: dep_node_index_legacy,
                new: dep_node_index_new,
            })
            // Store the current fingerprint
            {
                let old_value = self.fingerprints
                                    .borrow_mut()
                                    .insert(key, current_fingerprint);
                debug_assert!(old_value.is_none(),
                              "DepGraph::with_task() - Duplicate fingerprint \
                               insertion for {:?}", key);
            }

            // Determine the color of the new DepNode.
            {
                let prev_fingerprint = data.previous.fingerprint_of(&key);

                let color = if Some(current_fingerprint) == prev_fingerprint {
                    DepNodeColor::Green(dep_node_index)
                } else {
                    DepNodeColor::Red
                };

                let old_value = data.colors.borrow_mut().insert(key, color);
                debug_assert!(old_value.is_none(),
                              "DepGraph::with_task() - Duplicate DepNodeColor \
                               insertion for {:?}", key);
            }

            (result, dep_node_index)
        } else {
            if key.kind.fingerprint_needed_for_crate_hash() {
                let mut hcx = cx.create_stable_hashing_context();
                let result = task(cx, arg);
                let mut stable_hasher = StableHasher::new();
                result.hash_stable(&mut hcx, &mut stable_hasher);
                assert!(self.fingerprints
                            .borrow_mut()
                            .insert(key, stable_hasher.finish())
                            .is_none());
                let old_value = self.fingerprints
                                    .borrow_mut()
                                    .insert(key, stable_hasher.finish());
                debug_assert!(old_value.is_none(),
                              "DepGraph::with_task() - Duplicate fingerprint \
                               insertion for {:?}", key);
                (result, DepNodeIndex::INVALID)
            } else {
                (task(cx, arg), DepNodeIndex::INVALID)
@@ -224,15 +280,12 @@ impl DepGraph {
        where OP: FnOnce() -> R
    {
        if let Some(ref data) = self.data {
            data.edges.borrow_mut().push_anon_task();
            data.current.borrow_mut().push_anon_task();
            let result = op();
            let dep_node_index_legacy = data.edges.borrow_mut().pop_anon_task(dep_kind);
            let dep_node_index_new = data.current.borrow_mut().pop_anon_task(dep_kind);
            (result, DepNodeIndex {
                legacy: dep_node_index_legacy,
                new: dep_node_index_new,
            })
            let dep_node_index = data.current
                                     .borrow_mut()
                                     .pop_anon_task(dep_kind);
            (result, dep_node_index)
        } else {
            (op(), DepNodeIndex::INVALID)
        }
@@ -241,11 +294,9 @@ impl DepGraph {
    #[inline]
    pub fn read(&self, v: DepNode) {
        if let Some(ref data) = self.data {
            data.edges.borrow_mut().read(v);

            let mut current = data.current.borrow_mut();
            if let Some(&dep_node_index_new) = current.node_to_node_index.get(&v) {
                current.read_index(dep_node_index_new);
            if let Some(&dep_node_index) = current.node_to_node_index.get(&v) {
                current.read_index(dep_node_index);
            } else {
                bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
@@ -253,29 +304,17 @@ impl DepGraph {
    }

    #[inline]
    pub fn read_index(&self, v: DepNodeIndex) {
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.edges.borrow_mut().read_index(v.legacy);
            data.current.borrow_mut().read_index(v.new);
            data.current.borrow_mut().read_index(dep_node_index);
        }
    }

    /// Only to be used during graph loading
    #[inline]
    pub fn add_edge_directly(&self, source: DepNode, target: DepNode) {
        self.data.as_ref().unwrap().edges.borrow_mut().add_edge(source, target);
    }

    /// Only to be used during graph loading
    pub fn add_node_directly(&self, node: DepNode) {
        self.data.as_ref().unwrap().edges.borrow_mut().add_node(node);
    }

    pub fn fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
        self.fingerprints.borrow()[dep_node]
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
    pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

@@ -383,6 +422,194 @@ impl DepGraph {
            edge_list_data,
        }
    }

    pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        self.data.as_ref().and_then(|data| data.colors.borrow().get(dep_node).cloned())
    }

    pub fn try_mark_green(&self,
                          tcx: TyCtxt,
                          dep_node: &DepNode)
                          -> Option<DepNodeIndex> {
        debug!("try_mark_green({:?}) - BEGIN", dep_node);
        let data = self.data.as_ref().unwrap();

        debug_assert!(!data.colors.borrow().contains_key(dep_node));
        debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node));

        if dep_node.kind.is_input() {
            // We should only hit try_mark_green() for inputs that do not exist
            // anymore in the current compilation session. Existing inputs are
            // eagerly marked as either red/green before any queries are
            // executed.
            debug_assert!(dep_node.extract_def_id(tcx).is_none());
            debug!("try_mark_green({:?}) - END - DepNode is deleted input", dep_node);
            return None;
        }

        let (prev_deps, prev_dep_node_index) = match data.previous.edges_from(dep_node) {
            Some(prev) => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                prev
            }
            None => {
                // This DepNode did not exist in the previous compilation session,
                // so we cannot mark it as green.
                debug!("try_mark_green({:?}) - END - DepNode does not exist in \
                        current compilation session anymore", dep_node);
                return None
            }
        };

        let mut current_deps = Vec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

            let dep_dep_node_color = data.colors.borrow().get(dep_dep_node).cloned();
            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!("try_mark_green({:?}) --- found dependency {:?} to \
                            be immediately green", dep_node, dep_dep_node);
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!("try_mark_green({:?}) - END - dependency {:?} was \
                            immediately red", dep_node, dep_dep_node);
                    return None
                }
                None => {
                    if dep_dep_node.kind.is_input() {
                        // This input does not exist anymore.
                        debug_assert!(dep_dep_node.extract_def_id(tcx).is_none(),
                                      "Encountered input {:?} without color",
                                      dep_dep_node);
                        debug!("try_mark_green({:?}) - END - dependency {:?} \
                                was deleted input", dep_node, dep_dep_node);
                        return None;
                    }

                    debug!("try_mark_green({:?}) --- state of dependency {:?} \
                            is unknown, trying to mark it green", dep_node,
                            dep_dep_node);

                    // We don't know the state of this dependency. Let's try to
                    // mark it green.
                    if let Some(node_index) = self.try_mark_green(tcx, dep_dep_node) {
                        debug!("try_mark_green({:?}) --- managed to MARK \
                                dependency {:?} as green", dep_node, dep_dep_node);
                        current_deps.push(node_index);
                    } else {
                        // We failed to mark it green, so we try to force the query.
                        debug!("try_mark_green({:?}) --- trying to force \
                                dependency {:?}", dep_node, dep_dep_node);
                        if ::ty::maps::force_from_dep_node(tcx, dep_dep_node) {
                            let dep_dep_node_color = data.colors
                                                         .borrow()
                                                         .get(dep_dep_node)
                                                         .cloned();
                            match dep_dep_node_color {
                                Some(DepNodeColor::Green(node_index)) => {
                                    debug!("try_mark_green({:?}) --- managed to \
                                            FORCE dependency {:?} to green",
                                            dep_node, dep_dep_node);
                                    current_deps.push(node_index);
                                }
                                Some(DepNodeColor::Red) => {
                                    debug!("try_mark_green({:?}) - END - \
                                            dependency {:?} was red after forcing",
                                            dep_node,
                                            dep_dep_node);
                                    return None
                                }
                                None => {
                                    bug!("try_mark_green() - Forcing the DepNode \
                                          should have set its color")
                                }
                            }
                        } else {
                            // The DepNode could not be forced.
                            debug!("try_mark_green({:?}) - END - dependency {:?} \
                                    could not be forced", dep_node, dep_dep_node);
                            return None
                        }
                    }
                }
            }
        }


        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green. We do so by...

        // ... allocating an entry for it in the current dependency graph and
        // adding all the appropriate edges imported from the previous graph ...
        let dep_node_index = data.current
                                 .borrow_mut()
                                 .alloc_node(*dep_node, current_deps);

        // ... copying the fingerprint from the previous graph too, so we don't
        // have to recompute it ...
        let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);
        let old_fingerprint = self.fingerprints
                                  .borrow_mut()
                                  .insert(*dep_node, fingerprint);
        debug_assert!(old_fingerprint.is_none(),
                      "DepGraph::try_mark_green() - Duplicate fingerprint \
                       insertion for {:?}", dep_node);

        // ... and finally storing a "Green" entry in the color map.
        let old_color = data.colors
                            .borrow_mut()
                            .insert(*dep_node, DepNodeColor::Green(dep_node_index));
        debug_assert!(old_color.is_none(),
                      "DepGraph::try_mark_green() - Duplicate DepNodeColor \
                       insertion for {:?}", dep_node);

        debug!("try_mark_green({:?}) - END - successfully marked as green", dep_node.kind);
        Some(dep_node_index)
    }

    // Used in various assertions
    pub fn is_green(&self, dep_node_index: DepNodeIndex) -> bool {
        let dep_node = self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index];
        self.data.as_ref().unwrap().colors.borrow().get(&dep_node).map(|&color| {
            match color {
                DepNodeColor::Red => false,
                DepNodeColor::Green(_) => true,
            }
        }).unwrap_or(false)
    }

    pub fn mark_loaded_from_cache(&self, dep_node_index: DepNodeIndex, state: bool) {
        debug!("mark_loaded_from_cache({:?}, {})",
               self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index],
               state);

        self.data
            .as_ref()
            .unwrap()
            .loaded_from_cache
            .borrow_mut()
            .insert(dep_node_index, state);
    }

    pub fn was_loaded_from_cache(&self, dep_node: &DepNode) -> Option<bool> {
        let data = self.data.as_ref().unwrap();
        let dep_node_index = data.current.borrow().node_to_node_index[dep_node];
        data.loaded_from_cache.borrow().get(&dep_node_index).cloned()
    }
}

/// A "work product" is an intermediate result that we save into the
@@ -419,30 +646,62 @@ impl DepGraph {
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Extra hash used to decide if work-product is still suitable;
    /// note that this is *not* a hash of the work-product itself.
    /// See documentation on `WorkProduct` type for an example.
    pub input_hash: u64,

    /// Saved files associated with this CGU
    pub saved_files: Vec<(OutputType, String)>,
}

pub(super) struct CurrentDepGraph {
    nodes: IndexVec<DepNodeIndexNew, DepNode>,
    edges: IndexVec<DepNodeIndexNew, Vec<DepNodeIndexNew>>,
    node_to_node_index: FxHashMap<DepNode, DepNodeIndexNew>,

    nodes: IndexVec<DepNodeIndex, DepNode>,
    edges: IndexVec<DepNodeIndex, Vec<DepNodeIndex>>,
    node_to_node_index: FxHashMap<DepNode, DepNodeIndex>,
    task_stack: Vec<OpenTask>,
    forbidden_edge: Option<EdgeFilter>,

    // Anonymous DepNodes are nodes the ID of which we compute from the list of
    // their edges. This has the beneficial side-effect that multiple anonymous
    // nodes can be coalesced into one without changing the semantics of the
    // dependency graph. However, the merging of nodes can lead to a subtle
    // problem during red-green marking: The color of an anonymous node from
    // the current session might "shadow" the color of the node with the same
    // ID from the previous session. In order to side-step this problem, we make
    // sure that anon-node IDs allocated in different sessions don't overlap.
    // This is implemented by mixing a session-key into the ID fingerprint of
    // each anon node. The session-key is just a random number generated when
    // the DepGraph is created.
    anon_id_seed: Fingerprint,
}

impl CurrentDepGraph {
    fn new() -> CurrentDepGraph {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 +
                    duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => {
                    match EdgeFilter::new(&s) {
                        Ok(f) => Some(f),
                        Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                    }
                }
                Err(_) => None,
            }
        } else {
            None
        };

        CurrentDepGraph {
            nodes: IndexVec::new(),
            edges: IndexVec::new(),
            node_to_node_index: FxHashMap(),
            anon_id_seed: stable_hasher.finish(),
            task_stack: Vec::new(),
            forbidden_edge,
        }
    }

@@ -463,7 +722,7 @@ impl CurrentDepGraph {
        });
    }

    pub(super) fn pop_task(&mut self, key: DepNode) -> DepNodeIndexNew {
    pub(super) fn pop_task(&mut self, key: DepNode) -> DepNodeIndex {
        let popped_node = self.task_stack.pop().unwrap();

        if let OpenTask::Regular {
@@ -485,14 +744,14 @@ impl CurrentDepGraph {
        });
    }

    fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndexNew {
    fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex {
        let popped_node = self.task_stack.pop().unwrap();

        if let OpenTask::Anon {
            read_set: _,
            reads
        } = popped_node {
            let mut fingerprint = Fingerprint::zero();
            let mut fingerprint = self.anon_id_seed;
            let mut hasher = StableHasher::new();

            for &read in reads.iter() {
@@ -514,24 +773,35 @@ impl CurrentDepGraph {
            };

            if let Some(&index) = self.node_to_node_index.get(&target_dep_node) {
                return index;
                index
            } else {
                self.alloc_node(target_dep_node, reads)
            }

            self.alloc_node(target_dep_node, reads)
        } else {
            bug!("pop_anon_task() - Expected anonymous task to be popped")
        }
    }

    fn read_index(&mut self, source: DepNodeIndexNew) {
    fn read_index(&mut self, source: DepNodeIndex) {
        match self.task_stack.last_mut() {
            Some(&mut OpenTask::Regular {
                ref mut reads,
                ref mut read_set,
                node: _,
                node: ref target,
            }) => {
                if read_set.insert(source) {
                    reads.push(source);

                    if cfg!(debug_assertions) {
                        if let Some(ref forbidden_edge) = self.forbidden_edge {
                            let source = self.nodes[source];
                            if forbidden_edge.test(&source, &target) {
                                bug!("forbidden edge {:?} -> {:?} created",
                                     source,
                                     target)
                            }
                        }
                    }
                }
            }
            Some(&mut OpenTask::Anon {
@@ -550,12 +820,12 @@ impl CurrentDepGraph {

    fn alloc_node(&mut self,
                  dep_node: DepNode,
                  edges: Vec<DepNodeIndexNew>)
                  -> DepNodeIndexNew {
                  edges: Vec<DepNodeIndex>)
                  -> DepNodeIndex {
        debug_assert_eq!(self.edges.len(), self.nodes.len());
        debug_assert_eq!(self.node_to_node_index.len(), self.nodes.len());
        debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
        let dep_node_index = DepNodeIndexNew::new(self.nodes.len());
        let dep_node_index = DepNodeIndex::new(self.nodes.len());
        self.nodes.push(dep_node);
        self.node_to_node_index.insert(dep_node, dep_node_index);
        self.edges.push(edges);
@@ -563,38 +833,16 @@ impl CurrentDepGraph {
    }
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(super) struct DepNodeIndexNew {
    index: u32,
}

impl Idx for DepNodeIndexNew {
    fn new(v: usize) -> DepNodeIndexNew {
        assert!((v & 0xFFFF_FFFF) == v);
        DepNodeIndexNew { index: v as u32 }
    }

    fn index(self) -> usize {
        self.index as usize
    }
}

impl DepNodeIndexNew {
    const INVALID: DepNodeIndexNew = DepNodeIndexNew {
        index: ::std::u32::MAX,
    };
}

#[derive(Clone, Debug, PartialEq)]
enum OpenTask {
    Regular {
        node: DepNode,
        reads: Vec<DepNodeIndexNew>,
        read_set: FxHashSet<DepNodeIndexNew>,
        reads: Vec<DepNodeIndex>,
        read_set: FxHashSet<DepNodeIndex>,
    },
    Anon {
        reads: Vec<DepNodeIndexNew>,
        read_set: FxHashSet<DepNodeIndexNew>,
        reads: Vec<DepNodeIndex>,
        read_set: FxHashSet<DepNodeIndex>,
    },
    Ignore,
}
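The `anon_id_seed` comment in `CurrentDepGraph` above explains why anonymous node IDs from different sessions must not collide: a stale color could otherwise shadow a current node with the same ID. A small standalone sketch of that scheme follows (the `Fingerprint` here is a toy stand-in, not the compiler's type): starting the fingerprint from a per-session seed instead of zero keeps anon IDs from distinct sessions disjoint even for identical read sets.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::time::{SystemTime, UNIX_EPOCH};

/// Toy stand-in for the compiler's `Fingerprint` (illustration only).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Fingerprint(u64);

impl Fingerprint {
    /// Order-dependent mixing; any reasonable combiner works for the sketch.
    fn combine(self, other: Fingerprint) -> Fingerprint {
        Fingerprint(self.0.rotate_left(5) ^ other.0)
    }
}

/// Per-session seed derived from the current time, mirroring what
/// `CurrentDepGraph::new()` does with its `StableHasher`.
fn session_seed() -> Fingerprint {
    let d = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    let nanos = d.as_secs() * 1_000_000_000 + u64::from(d.subsec_nanos());
    let mut hasher = DefaultHasher::new();
    nanos.hash(&mut hasher);
    Fingerprint(hasher.finish())
}

/// The ID of an anonymous node: fold the IDs of everything it read into
/// the session seed. Two sessions reading the exact same set still get
/// different anon IDs, so a previous-session color can never be mistaken
/// for a current-session node.
fn anon_node_id(seed: Fingerprint, reads: &[Fingerprint]) -> Fingerprint {
    reads.iter().fold(seed, |acc, &r| acc.combine(r))
}

fn main() {
    let reads = [Fingerprint(1), Fingerprint(2)];
    println!("anon id this session: {:?}", anon_node_id(session_seed(), &reads));
}
```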
@@ -11,7 +11,6 @@
pub mod debug;
mod dep_node;
mod dep_tracking_map;
mod edges;
mod graph;
mod prev;
mod query;
@@ -21,7 +20,7 @@ mod serialized;

pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId};
pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex};
pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor};
pub use self::prev::PreviousDepGraph;
pub use self::query::DepGraphQuery;
pub use self::safe::AssertDepGraphSafe;
@@ -28,19 +28,33 @@ impl PreviousDepGraph {
        PreviousDepGraph { data, index }
    }

    pub fn with_edges_from<F>(&self, dep_node: &DepNode, mut f: F)
    where
        F: FnMut(&(DepNode, Fingerprint)),
    {
        let node_index = self.index[dep_node];
        self.data
            .edge_targets_from(node_index)
            .into_iter()
            .for_each(|&index| f(&self.data.nodes[index]));
    #[inline]
    pub fn edges_from(&self,
                      dep_node: &DepNode)
                      -> Option<(&[SerializedDepNodeIndex], SerializedDepNodeIndex)> {
        self.index
            .get(dep_node)
            .map(|&node_index| {
                (self.data.edge_targets_from(node_index), node_index)
            })
    }

    pub fn fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
        let node_index = self.index[dep_node];
        self.data.nodes[node_index].1
    #[inline]
    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
        self.data.nodes[dep_node_index].0
    }

    #[inline]
    pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
        self.index
            .get(dep_node)
            .map(|&node_index| self.data.nodes[node_index].1)
    }

    #[inline]
    pub fn fingerprint_by_index(&self,
                                dep_node_index: SerializedDepNodeIndex)
                                -> Fingerprint {
        self.data.nodes[dep_node_index].1
    }
}
@@ -8,33 +8,26 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use super::edges::DepGraphEdges;
use super::graph::CurrentDepGraph;

use std::cell::RefCell;

pub struct IgnoreTask<'graph> {
    legacy_graph: &'graph RefCell<DepGraphEdges>,
    new_graph: &'graph RefCell<CurrentDepGraph>,
    graph: &'graph RefCell<CurrentDepGraph>,
}

impl<'graph> IgnoreTask<'graph> {
    pub(super) fn new(legacy_graph: &'graph RefCell<DepGraphEdges>,
                      new_graph: &'graph RefCell<CurrentDepGraph>)
                      -> IgnoreTask<'graph> {
        legacy_graph.borrow_mut().push_ignore();
        new_graph.borrow_mut().push_ignore();
    pub(super) fn new(graph: &'graph RefCell<CurrentDepGraph>) -> IgnoreTask<'graph> {
        graph.borrow_mut().push_ignore();
        IgnoreTask {
            legacy_graph,
            new_graph,
            graph,
        }
    }
}

impl<'graph> Drop for IgnoreTask<'graph> {
    fn drop(&mut self) {
        self.legacy_graph.borrow_mut().pop_ignore();
        self.new_graph.borrow_mut().pop_ignore();
        self.graph.borrow_mut().pop_ignore();
    }
}
@@ -56,7 +56,10 @@ impl SerializedDepGraph {
        }
    }

    pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
    #[inline]
    pub fn edge_targets_from(&self,
                             source: SerializedDepNodeIndex)
                             -> &[SerializedDepNodeIndex] {
        let targets = self.edge_list_indices[source];
        &self.edge_list_data[targets.0 as usize..targets.1 as usize]
    }
@@ -43,7 +43,6 @@ use rustc_back::PanicStrategy;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::StableVec;
use std::cell::{RefCell, Cell};

use std::ops::Deref;
use std::rc::Rc;
@@ -57,6 +56,7 @@ use syntax::symbol::Symbol;
#[macro_use]
mod plumbing;
use self::plumbing::*;
pub use self::plumbing::force_from_dep_node;

mod keys;
pub use self::keys::Key;
@@ -377,9 +377,9 @@ fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
    DepConstructor::TypeckBodiesKrate
}

fn const_eval_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>)
fn const_eval_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>)
                             -> DepConstructor<'tcx> {
    DepConstructor::ConstEval
    DepConstructor::ConstEval { param_env }
}

fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
@@ -390,24 +390,24 @@ fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
    DepConstructor::CrateVariances
}

fn is_copy_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::IsCopy
fn is_copy_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::IsCopy { param_env }
}

fn is_sized_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::IsSized
fn is_sized_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::IsSized { param_env }
}

fn is_freeze_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::IsFreeze
fn is_freeze_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::IsFreeze { param_env }
}

fn needs_drop_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::NeedsDrop
fn needs_drop_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::NeedsDrop { param_env }
}

fn layout_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::Layout
fn layout_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
    DepConstructor::Layout { param_env }
}

fn lint_levels_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
@ -12,7 +12,7 @@
|
||||
//! that generate the actual methods on tcx which find and execute the
|
||||
//! provider, manage the caches, and so forth.
|
||||
|
||||
use dep_graph::{DepNodeIndex};
|
||||
use dep_graph::{DepNodeIndex, DepNode, DepKind};
|
||||
use errors::{Diagnostic, DiagnosticBuilder};
|
||||
use ty::{TyCtxt};
|
||||
use ty::maps::Query; // NB: actually generated by the macros in this file
|
||||
@ -36,6 +36,26 @@ pub(super) struct QueryValue<T> {
|
||||
pub(super) diagnostics: Option<Box<QueryDiagnostics>>,
|
||||
}
|
||||
|
||||
impl<T> QueryValue<T> {
|
||||
pub(super) fn new(value: T,
|
||||
dep_node_index: DepNodeIndex,
|
||||
diagnostics: Vec<Diagnostic>)
|
||||
-> QueryValue<T> {
|
||||
QueryValue {
|
||||
value,
|
||||
index: dep_node_index,
|
||||
diagnostics: if diagnostics.len() == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(Box::new(QueryDiagnostics {
|
||||
diagnostics,
|
||||
emitted_diagnostics: Cell::new(true),
|
||||
}))
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) struct QueryDiagnostics {
|
||||
pub(super) diagnostics: Vec<Diagnostic>,
|
||||
pub(super) emitted_diagnostics: Cell<bool>,
|
||||
@ -142,6 +162,10 @@ macro_rules! define_maps {
|
||||
(<$tcx:tt>
|
||||
$($(#[$attr:meta])*
|
||||
[$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
|
||||
|
||||
use dep_graph::DepNodeIndex;
|
||||
use std::cell::RefCell;
|
||||
|
||||
define_map_struct! {
|
||||
tcx: $tcx,
|
||||
input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
|
||||
@ -200,6 +224,7 @@ macro_rules! define_maps {
|
||||
}
|
||||
|
||||
impl<'a, $tcx, 'lcx> queries::$name<$tcx> {
|
||||
|
||||
#[allow(unused)]
|
||||
fn to_dep_node(tcx: TyCtxt<'a, $tcx, 'lcx>, key: &$K) -> DepNode {
|
||||
use dep_graph::DepConstructor::*;
|
||||
@ -207,12 +232,10 @@ macro_rules! define_maps {
|
||||
DepNode::new(tcx, $node(*key))
|
||||
}
|
||||
|
||||
fn try_get_with<F, R>(tcx: TyCtxt<'a, $tcx, 'lcx>,
|
||||
mut span: Span,
|
||||
key: $K,
|
||||
f: F)
|
||||
-> Result<R, CycleError<'a, $tcx>>
|
||||
where F: FnOnce(&$V) -> R
|
||||
fn try_get_with(tcx: TyCtxt<'a, $tcx, 'lcx>,
|
||||
mut span: Span,
|
||||
key: $K)
|
||||
-> Result<$V, CycleError<'a, $tcx>>
|
||||
{
|
||||
debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})",
|
||||
stringify!($name),
|
||||
@ -239,10 +262,8 @@ macro_rules! define_maps {
|
||||
}
|
||||
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
|
||||
tcx.dep_graph.read_index(value.index);
|
||||
return Ok(f(&value.value));
|
||||
return Ok((&value.value).clone());
|
||||
}
|
||||
// else, we are going to run the provider:
|
||||
profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin);
|
||||
|
||||
// FIXME(eddyb) Get more valid Span's on queries.
|
||||
// def_span guard is necessary to prevent a recursive loop,
|
||||
@ -251,70 +272,159 @@ macro_rules! define_maps {
|
||||
span = key.default_span(tcx)
|
||||
}
|
||||
|
||||
let dep_node = Self::to_dep_node(tcx, &key);
|
||||
let res = tcx.cycle_check(span, Query::$name(key), || {
|
||||
tcx.sess.diagnostic().track_diagnostics(|| {
|
||||
if dep_node.kind.is_anon() {
|
||||
tcx.dep_graph.with_anon_task(dep_node.kind, || {
|
||||
let provider = tcx.maps.providers[key.map_crate()].$name;
|
||||
provider(tcx.global_tcx(), key)
|
||||
})
|
||||
} else {
|
||||
fn run_provider<'a, 'tcx, 'lcx>(tcx: TyCtxt<'a, 'tcx, 'lcx>,
|
||||
key: $K)
|
||||
-> $V {
|
||||
let provider = tcx.maps.providers[key.map_crate()].$name;
|
||||
provider(tcx.global_tcx(), key)
|
||||
}
|
||||
// Fast path for when incr. comp. is off. `to_dep_node` is
|
||||
// expensive for some DepKinds.
|
||||
if !tcx.dep_graph.is_fully_enabled() {
|
||||
let null_dep_node = DepNode::new_no_params(::dep_graph::DepKind::Null);
|
||||
return Self::force(tcx, key, span, null_dep_node)
|
||||
.map(|(v, _)| v);
|
||||
}
|
||||
|
||||
tcx.dep_graph.with_task(dep_node, tcx, key, run_provider)
|
||||
}
|
||||
let dep_node = Self::to_dep_node(tcx, &key);
|
||||
|
||||
if dep_node.kind.is_anon() {
|
||||
profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin);
|
||||
|
||||
let res = tcx.cycle_check(span, Query::$name(key), || {
|
||||
tcx.sess.diagnostic().track_diagnostics(|| {
|
||||
tcx.dep_graph.with_anon_task(dep_node.kind, || {
|
||||
Self::compute_result(tcx.global_tcx(), key)
|
||||
})
|
||||
})
|
||||
})?;
|
||||
|
||||
profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd);
|
||||
let ((result, dep_node_index), diagnostics) = res;
|
||||
|
||||
tcx.dep_graph.read_index(dep_node_index);
|
||||
let value = QueryValue::new(result, dep_node_index, diagnostics);
|
||||
|
||||
return Ok((&tcx.maps
|
||||
.$name
|
||||
.borrow_mut()
|
||||
.map
|
||||
.entry(key)
|
||||
.or_insert(value)
|
||||
.value).clone());
|
||||
}
|
||||
|
||||
if !dep_node.kind.is_input() {
|
||||
use dep_graph::DepNodeColor;
|
||||
if let Some(DepNodeColor::Green(dep_node_index)) = tcx.dep_graph
|
||||
.node_color(&dep_node) {
|
||||
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
|
||||
tcx.dep_graph.read_index(dep_node_index);
|
||||
return Self::load_from_disk_and_cache_in_memory(tcx,
|
||||
key,
|
||||
span,
|
||||
dep_node_index)
|
||||
}
|
||||
|
||||
debug!("ty::queries::{}::try_get_with(key={:?}) - running try_mark_green",
|
||||
stringify!($name),
|
||||
key);
|
||||
|
||||
if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, &dep_node) {
|
||||
debug_assert!(tcx.dep_graph.is_green(dep_node_index));
|
||||
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
|
||||
tcx.dep_graph.read_index(dep_node_index);
|
||||
return Self::load_from_disk_and_cache_in_memory(tcx,
|
||||
key,
|
||||
span,
|
||||
dep_node_index)
|
||||
}
|
||||
}
|
||||
|
||||
match Self::force(tcx, key, span, dep_node) {
|
||||
Ok((result, dep_node_index)) => {
|
||||
tcx.dep_graph.read_index(dep_node_index);
|
||||
Ok(result)
|
||||
}
|
||||
Err(e) => Err(e)
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_result(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> $V {
|
||||
let provider = tcx.maps.providers[key.map_crate()].$name;
|
||||
provider(tcx.global_tcx(), key)
|
||||
}
|
||||
|
||||
fn load_from_disk_and_cache_in_memory(tcx: TyCtxt<'a, $tcx, 'lcx>,
|
||||
key: $K,
|
||||
span: Span,
|
||||
dep_node_index: DepNodeIndex)
|
||||
-> Result<$V, CycleError<'a, $tcx>>
|
||||
{
|
||||
debug_assert!(tcx.dep_graph.is_green(dep_node_index));
|
||||
|
||||
// We don't do any caching yet, so recompute
|
||||
let (result, diagnostics) = tcx.cycle_check(span, Query::$name(key), || {
|
||||
tcx.sess.diagnostic().track_diagnostics(|| {
|
||||
// The dep-graph for this computation is already in place
|
||||
tcx.dep_graph.with_ignore(|| {
|
||||
Self::compute_result(tcx, key)
|
||||
})
|
||||
})
|
||||
})?;
|
||||
profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd);
|
||||
let ((result, dep_node_index), diagnostics) = res;
|
||||
|
||||
tcx.dep_graph.read_index(dep_node_index);
|
||||
if tcx.sess.opts.debugging_opts.query_dep_graph {
|
||||
tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true);
|
||||
}
|
||||
|
||||
let value = QueryValue {
|
||||
value: result,
|
||||
index: dep_node_index,
|
||||
diagnostics: if diagnostics.len() == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(Box::new(QueryDiagnostics {
|
||||
diagnostics,
|
||||
emitted_diagnostics: Cell::new(true),
|
||||
}))
|
||||
},
|
||||
};
|
||||
let value = QueryValue::new(result, dep_node_index, diagnostics);
|
||||
|
||||
Ok(f(&tcx.maps
|
||||
Ok((&tcx.maps
|
||||
.$name
|
||||
.borrow_mut()
|
||||
.map
|
||||
.entry(key)
|
||||
.or_insert(value)
|
||||
.value))
|
||||
.value).clone())
|
||||
}
|
||||
|
||||
fn force(tcx: TyCtxt<'a, $tcx, 'lcx>,
|
||||
key: $K,
|
||||
span: Span,
|
||||
dep_node: DepNode)
|
||||
-> Result<($V, DepNodeIndex), CycleError<'a, $tcx>> {
|
||||
debug_assert!(tcx.dep_graph.node_color(&dep_node).is_none());
|
||||
|
||||
profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin);
|
||||
let res = tcx.cycle_check(span, Query::$name(key), || {
|
||||
tcx.sess.diagnostic().track_diagnostics(|| {
|
||||
tcx.dep_graph.with_task(dep_node,
|
||||
tcx,
|
||||
key,
|
||||
Self::compute_result)
|
||||
})
|
||||
})?;
|
||||
profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd);
|
||||
|
||||
let ((result, dep_node_index), diagnostics) = res;
|
||||
|
||||
if tcx.sess.opts.debugging_opts.query_dep_graph {
|
||||
tcx.dep_graph.mark_loaded_from_cache(dep_node_index, false);
|
||||
}
|
||||
|
||||
let value = QueryValue::new(result, dep_node_index, diagnostics);
|
||||
|
||||
Ok(((&tcx.maps
|
||||
.$name
|
||||
.borrow_mut()
|
||||
.map
|
||||
.entry(key)
|
||||
.or_insert(value)
|
||||
.value).clone(),
|
||||
dep_node_index))
|
||||
}
|
||||
|
||||
pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K)
|
||||
-> Result<$V, DiagnosticBuilder<'a>> {
|
||||
match Self::try_get_with(tcx, span, key, Clone::clone) {
|
||||
match Self::try_get_with(tcx, span, key) {
|
||||
Ok(e) => Ok(e),
|
||||
Err(e) => Err(tcx.report_cycle(e)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) {
|
||||
// Ignore dependencies, since we not reading the computed value
|
||||
let _task = tcx.dep_graph.in_ignore();
|
||||
|
||||
match Self::try_get_with(tcx, span, key, |_| ()) {
|
||||
Ok(()) => {}
|
||||
Err(e) => tcx.report_cycle(e).emit(),
|
||||
}
|
||||
}
|
||||
})*
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
@ -492,3 +602,264 @@ macro_rules! define_provider_struct {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>,
|
||||
dep_node: &DepNode)
|
||||
-> bool {
|
||||
use ty::maps::keys::Key;
|
||||
use hir::def_id::LOCAL_CRATE;
|
||||
|
||||
// We must avoid ever having to call force_from_dep_node() for a
|
||||
// DepNode::CodegenUnit:
|
||||
// Since we cannot reconstruct the query key of a DepNode::CodegenUnit, we
|
||||
// would always end up having to evaluate the first caller of the
|
||||
// `codegen_unit` query that *is* reconstructible. This might very well be
|
||||
// the `compile_codegen_unit` query, thus re-translating the whole CGU just
|
||||
// to re-trigger calling the `codegen_unit` query with the right key. At
|
||||
// that point we would already have re-done all the work we are trying to
|
||||
// avoid doing in the first place.
|
||||
// The solution is simple: Just explicitly call the `codegen_unit` query for
|
||||
// each CGU, right after partitioning. This way `try_mark_green` will always
|
||||
// hit the cache instead of having to go through `force_from_dep_node`.
|
||||
// This assertion makes sure, we actually keep applying the solution above.
|
||||
debug_assert!(dep_node.kind != DepKind::CodegenUnit,
|
||||
"calling force_from_dep_node() on DepKind::CodegenUnit");
|
||||
|
||||
if !dep_node.kind.can_reconstruct_query_key() {
return false
}

macro_rules! def_id {
() => {
if let Some(def_id) = dep_node.extract_def_id(tcx) {
def_id
} else {
// return from the whole function
return false
}
}
};

macro_rules! krate {
() => { (def_id!()).krate }
};

macro_rules! force {
($query:ident, $key:expr) => {
{
use $crate::util::common::{ProfileQueriesMsg, profq_msg};

// FIXME(eddyb) Get more valid Span's on queries.
// def_span guard is necessary to prevent a recursive loop,
// default_span calls the def_span query internally.
let span = if stringify!($query) != "def_span" {
$key.default_span(tcx)
} else {
::syntax_pos::DUMMY_SP
};

profq_msg!(tcx,
ProfileQueriesMsg::QueryBegin(
span.data(),
::ty::maps::QueryMsg::$query(profq_key!(tcx, $key))
)
);

match ::ty::maps::queries::$query::force(tcx, $key, span, *dep_node) {
Ok(_) => {},
Err(e) => {
tcx.report_cycle(e).emit();
}
}
}
}
};

// FIXME(#45015): We should try to move this boilerplate code into a macro
// somehow.
match dep_node.kind {
// These are inputs that are expected to be pre-allocated and that
// should therefore always be red or green already
DepKind::AllLocalTraitImpls |
DepKind::Krate |
DepKind::CrateMetadata |
DepKind::HirBody |
DepKind::Hir |

// These are anonymous nodes
DepKind::IsCopy |
DepKind::IsSized |
DepKind::IsFreeze |
DepKind::NeedsDrop |
DepKind::Layout |
DepKind::TraitSelect |
DepKind::ConstEval |

// We don't have enough information to reconstruct the query key of
// these
DepKind::InstanceSymbolName |
DepKind::MirShim |
DepKind::BorrowCheckKrate |
DepKind::Specializes |
DepKind::ImplementationsOfTrait |
DepKind::TypeParamPredicates |
DepKind::CodegenUnit |
DepKind::CompileCodegenUnit |

// These are just odd
DepKind::Null |
DepKind::WorkProduct => {
bug!("force_from_dep_node() - Encountered {:?}", dep_node.kind)
}

// These are not queries
DepKind::CoherenceCheckTrait |
DepKind::ItemVarianceConstraints => {
return false
}

DepKind::RegionScopeTree => { force!(region_scope_tree, def_id!()); }

DepKind::Coherence => { force!(crate_inherent_impls, LOCAL_CRATE); }
DepKind::CoherenceInherentImplOverlapCheck => {
force!(crate_inherent_impls_overlap_check, LOCAL_CRATE)
},
DepKind::PrivacyAccessLevels => { force!(privacy_access_levels, LOCAL_CRATE); }
DepKind::MirConstQualif => { force!(mir_const_qualif, def_id!()); }
DepKind::MirConst => { force!(mir_const, def_id!()); }
DepKind::MirValidated => { force!(mir_validated, def_id!()); }
DepKind::MirOptimized => { force!(optimized_mir, def_id!()); }

DepKind::BorrowCheck => { force!(borrowck, def_id!()); }
DepKind::MirBorrowCheck => { force!(mir_borrowck, def_id!()); }
DepKind::UnsafetyViolations => { force!(unsafety_violations, def_id!()); }
DepKind::Reachability => { force!(reachable_set, LOCAL_CRATE); }
DepKind::MirKeys => { force!(mir_keys, LOCAL_CRATE); }
DepKind::CrateVariances => { force!(crate_variances, LOCAL_CRATE); }
DepKind::AssociatedItems => { force!(associated_item, def_id!()); }
DepKind::TypeOfItem => { force!(type_of, def_id!()); }
DepKind::GenericsOfItem => { force!(generics_of, def_id!()); }
DepKind::PredicatesOfItem => { force!(predicates_of, def_id!()); }
DepKind::SuperPredicatesOfItem => { force!(super_predicates_of, def_id!()); }
DepKind::TraitDefOfItem => { force!(trait_def, def_id!()); }
DepKind::AdtDefOfItem => { force!(adt_def, def_id!()); }
DepKind::IsDefaultImpl => { force!(is_default_impl, def_id!()); }
DepKind::ImplTraitRef => { force!(impl_trait_ref, def_id!()); }
DepKind::ImplPolarity => { force!(impl_polarity, def_id!()); }
DepKind::ClosureKind => { force!(closure_kind, def_id!()); }
DepKind::FnSignature => { force!(fn_sig, def_id!()); }
DepKind::GenSignature => { force!(generator_sig, def_id!()); }
DepKind::CoerceUnsizedInfo => { force!(coerce_unsized_info, def_id!()); }
DepKind::ItemVariances => { force!(variances_of, def_id!()); }
DepKind::IsConstFn => { force!(is_const_fn, def_id!()); }
DepKind::IsForeignItem => { force!(is_foreign_item, def_id!()); }
DepKind::SizedConstraint => { force!(adt_sized_constraint, def_id!()); }
DepKind::DtorckConstraint => { force!(adt_dtorck_constraint, def_id!()); }
DepKind::AdtDestructor => { force!(adt_destructor, def_id!()); }
DepKind::AssociatedItemDefIds => { force!(associated_item_def_ids, def_id!()); }
DepKind::InherentImpls => { force!(inherent_impls, def_id!()); }
DepKind::TypeckBodiesKrate => { force!(typeck_item_bodies, LOCAL_CRATE); }
DepKind::TypeckTables => { force!(typeck_tables_of, def_id!()); }
DepKind::HasTypeckTables => { force!(has_typeck_tables, def_id!()); }
DepKind::SymbolName => { force!(def_symbol_name, def_id!()); }
DepKind::SpecializationGraph => { force!(specialization_graph_of, def_id!()); }
DepKind::ObjectSafety => { force!(is_object_safe, def_id!()); }
DepKind::TraitImpls => { force!(trait_impls_of, def_id!()); }

DepKind::ParamEnv => { force!(param_env, def_id!()); }
DepKind::DescribeDef => { force!(describe_def, def_id!()); }
DepKind::DefSpan => { force!(def_span, def_id!()); }
DepKind::LookupStability => { force!(lookup_stability, def_id!()); }
DepKind::LookupDeprecationEntry => {
force!(lookup_deprecation_entry, def_id!());
}
DepKind::ItemBodyNestedBodies => { force!(item_body_nested_bodies, def_id!()); }
DepKind::ConstIsRvaluePromotableToStatic => {
force!(const_is_rvalue_promotable_to_static, def_id!());
}
DepKind::ImplParent => { force!(impl_parent, def_id!()); }
DepKind::TraitOfItem => { force!(trait_of_item, def_id!()); }
DepKind::IsExportedSymbol => { force!(is_exported_symbol, def_id!()); }
DepKind::IsMirAvailable => { force!(is_mir_available, def_id!()); }
DepKind::ItemAttrs => { force!(item_attrs, def_id!()); }
DepKind::FnArgNames => { force!(fn_arg_names, def_id!()); }
DepKind::DylibDepFormats => { force!(dylib_dependency_formats, krate!()); }
DepKind::IsPanicRuntime => { force!(is_panic_runtime, krate!()); }
DepKind::IsCompilerBuiltins => { force!(is_compiler_builtins, krate!()); }
DepKind::HasGlobalAllocator => { force!(has_global_allocator, krate!()); }
DepKind::ExternCrate => { force!(extern_crate, def_id!()); }
DepKind::LintLevels => { force!(lint_levels, LOCAL_CRATE); }
DepKind::InScopeTraits => { force!(in_scope_traits_map, def_id!().index); }
DepKind::ModuleExports => { force!(module_exports, def_id!()); }
DepKind::IsSanitizerRuntime => { force!(is_sanitizer_runtime, krate!()); }
DepKind::IsProfilerRuntime => { force!(is_profiler_runtime, krate!()); }
DepKind::GetPanicStrategy => { force!(panic_strategy, krate!()); }
DepKind::IsNoBuiltins => { force!(is_no_builtins, krate!()); }
DepKind::ImplDefaultness => { force!(impl_defaultness, def_id!()); }
DepKind::ExportedSymbolIds => { force!(exported_symbol_ids, krate!()); }
DepKind::NativeLibraries => { force!(native_libraries, krate!()); }
DepKind::PluginRegistrarFn => { force!(plugin_registrar_fn, krate!()); }
DepKind::DeriveRegistrarFn => { force!(derive_registrar_fn, krate!()); }
DepKind::CrateDisambiguator => { force!(crate_disambiguator, krate!()); }
DepKind::CrateHash => { force!(crate_hash, krate!()); }
DepKind::OriginalCrateName => { force!(original_crate_name, krate!()); }

DepKind::AllTraitImplementations => {
force!(all_trait_implementations, krate!());
}

DepKind::IsDllimportForeignItem => {
force!(is_dllimport_foreign_item, def_id!());
}
DepKind::IsStaticallyIncludedForeignItem => {
force!(is_statically_included_foreign_item, def_id!());
}
DepKind::NativeLibraryKind => { force!(native_library_kind, def_id!()); }
DepKind::LinkArgs => { force!(link_args, LOCAL_CRATE); }

DepKind::NamedRegion => { force!(named_region_map, def_id!().index); }
DepKind::IsLateBound => { force!(is_late_bound_map, def_id!().index); }
DepKind::ObjectLifetimeDefaults => {
force!(object_lifetime_defaults_map, def_id!().index);
}

DepKind::Visibility => { force!(visibility, def_id!()); }
DepKind::DepKind => { force!(dep_kind, krate!()); }
DepKind::CrateName => { force!(crate_name, krate!()); }
DepKind::ItemChildren => { force!(item_children, def_id!()); }
DepKind::ExternModStmtCnum => { force!(extern_mod_stmt_cnum, def_id!()); }
DepKind::GetLangItems => { force!(get_lang_items, LOCAL_CRATE); }
DepKind::DefinedLangItems => { force!(defined_lang_items, krate!()); }
DepKind::MissingLangItems => { force!(missing_lang_items, krate!()); }
DepKind::ExternConstBody => { force!(extern_const_body, def_id!()); }
DepKind::VisibleParentMap => { force!(visible_parent_map, LOCAL_CRATE); }
DepKind::MissingExternCrateItem => {
force!(missing_extern_crate_item, krate!());
}
DepKind::UsedCrateSource => { force!(used_crate_source, krate!()); }
DepKind::PostorderCnums => { force!(postorder_cnums, LOCAL_CRATE); }
DepKind::HasCloneClosures => { force!(has_clone_closures, krate!()); }
DepKind::HasCopyClosures => { force!(has_copy_closures, krate!()); }

DepKind::Freevars => { force!(freevars, def_id!()); }
DepKind::MaybeUnusedTraitImport => {
force!(maybe_unused_trait_import, def_id!());
}
DepKind::MaybeUnusedExternCrates => { force!(maybe_unused_extern_crates, LOCAL_CRATE); }
DepKind::StabilityIndex => { force!(stability_index, LOCAL_CRATE); }
DepKind::AllCrateNums => { force!(all_crate_nums, LOCAL_CRATE); }
DepKind::ExportedSymbols => { force!(exported_symbols, krate!()); }
DepKind::CollectAndPartitionTranslationItems => {
force!(collect_and_partition_translation_items, LOCAL_CRATE);
}
DepKind::ExportName => { force!(export_name, def_id!()); }
DepKind::ContainsExternIndicator => {
force!(contains_extern_indicator, def_id!());
}
DepKind::IsTranslatedFunction => { force!(is_translated_function, def_id!()); }
DepKind::OutputFilenames => { force!(output_filenames, LOCAL_CRATE); }
}

true
}

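As a rough mental model of where `force_from_dep_node` fits, here is a self-contained sketch of red/green marking. Everything in it — the graph encoding, the names, the `force` callback — is invented for illustration and only mirrors the protocol, not rustc's data structures: a node can be marked green if all of its dependencies are green, and a node with a red dependency must be forced (recomputed), after which an unchanged fingerprint still lets it be marked green.

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq)]
enum Color { Red, Green }

struct PrevGraph {
    deps: HashMap<&'static str, Vec<&'static str>>, // node -> its dependencies
    fingerprints: HashMap<&'static str, u64>,       // node -> previous result hash
}

fn try_mark_green(
    node: &'static str,
    prev: &PrevGraph,
    colors: &mut HashMap<&'static str, Color>,
    force: &dyn Fn(&'static str) -> u64, // recompute a node, return its new hash
) -> Color {
    if let Some(&c) = colors.get(node) {
        return c; // already marked in this session
    }
    let mut color = Color::Green;
    for dep in prev.deps[node].iter().copied() {
        if try_mark_green(dep, prev, colors, force) == Color::Red {
            color = Color::Red;
            break;
        }
    }
    if color == Color::Red && force(node) == prev.fingerprints[node] {
        // Recomputing produced the same result as last time, so nodes
        // depending on this one may still be marked green.
        color = Color::Green;
    }
    colors.insert(node, color);
    color
}

fn main() {
    let prev = PrevGraph {
        deps: HashMap::from([("output", vec!["input"]), ("input", vec![])]),
        fingerprints: HashMap::from([("output", 42), ("input", 7)]),
    };
    // Seed a changed input as red, as the compiler would after re-hashing it.
    let mut colors = HashMap::from([("input", Color::Red)]);
    // Forcing "output" happens to yield the old fingerprint again, so its
    // cached result can still be reused downstream.
    assert!(try_mark_green("output", &prev, &mut colors, &|_| 42) == Color::Green);
}
```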
@ -1298,6 +1298,22 @@ impl<'tcx, T> ParamEnvAnd<'tcx, T> {
}
}

impl<'gcx, T> HashStable<StableHashingContext<'gcx>> for ParamEnvAnd<'gcx, T>
where T: HashStable<StableHashingContext<'gcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'gcx>,
hasher: &mut StableHasher<W>) {
let ParamEnvAnd {
ref param_env,
ref value
} = *self;

param_env.hash_stable(hcx, hasher);
value.hash_stable(hcx, hasher);
}
}

#[derive(Copy, Clone, Debug)]
pub struct Destructor {
/// The def-id of the destructor method
@ -642,8 +642,8 @@ pub fn phase_2_configure_and_expand<F>(sess: &Session,
);

let dep_graph = if sess.opts.build_dep_graph() {
let prev_dep_graph = time(time_passes, "load prev dep-graph (new)", || {
rustc_incremental::load_dep_graph_new(sess)
let prev_dep_graph = time(time_passes, "load prev dep-graph", || {
rustc_incremental::load_dep_graph(sess)
});

DepGraph::new(prev_dep_graph)
@ -1052,9 +1052,9 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
tx,
output_filenames,
|tcx| {
time(time_passes,
"load_dep_graph",
|| rustc_incremental::load_dep_graph(tcx));
// Do some initialization of the DepGraph that can only be done with the
// tcx available.
rustc_incremental::dep_graph_tcx_init(tcx);

time(time_passes,
"stability checking",
@ -209,7 +209,7 @@ fn check_paths<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
let query = tcx.dep_graph.query();
for &(_, source_def_id, ref source_dep_node) in if_this_changed {
let dependents = query.transitive_successors(source_dep_node);
let dependents = query.transitive_predecessors(source_dep_node);
for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
if !dependents.contains(&target_dep_node) {
tcx.sess.span_err(
@ -32,10 +32,11 @@ mod persist;

pub use assert_dep_graph::assert_dep_graph;
pub use persist::load_dep_graph;
pub use persist::load_dep_graph_new;
pub use persist::dep_graph_tcx_init;
pub use persist::save_dep_graph;
pub use persist::save_trans_partition;
pub use persist::save_work_products;
pub use persist::in_incr_comp_dir;
pub use persist::prepare_session_directory;
pub use persist::finalize_session_directory;
pub use persist::delete_workproduct_files;
@ -10,78 +10,11 @@

//! The data that we will serialize and deserialize.

use rustc::dep_graph::{DepNode, WorkProduct, WorkProductId};
use rustc::dep_graph::{WorkProduct, WorkProductId};
use rustc::hir::def_id::DefIndex;
use rustc::hir::map::DefPathHash;
use rustc::ich::Fingerprint;
use rustc::middle::cstore::EncodedMetadataHash;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};

/// Data for use when recompiling the **current crate**.
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedDepGraph {
/// The set of all DepNodes in the graph
pub nodes: IndexVec<DepNodeIndex, DepNode>,
/// For each DepNode, stores the list of edges originating from that
/// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
/// which holds the actual DepNodeIndices of the target nodes.
pub edge_list_indices: IndexVec<DepNodeIndex, (u32, u32)>,
/// A flattened list of all edge targets in the graph. Edge sources are
/// implicit in edge_list_indices.
pub edge_list_data: Vec<DepNodeIndex>,

/// These are output nodes that have no incoming edges. We track
/// these separately so that when we reload all edges, we don't
/// lose track of these nodes.
pub bootstrap_outputs: Vec<DepNode>,

/// These are hashes of two things:
/// - the HIR nodes in this crate
/// - the metadata nodes from dependent crates we use
///
/// In each case, we store a hash summarizing the contents of
/// those items as they were at the time we did this compilation.
/// In the case of HIR nodes, this hash is derived by walking the
/// HIR itself. In the case of metadata nodes, the hash is loaded
/// from saved state.
///
/// When we do the next compile, we will load these back up and
/// compare them against the hashes we see at that time, which
/// will tell us what has changed, either in this crate or in some
/// crate that we depend on.
///
/// Because they will be reloaded, we don't store the DefId (which
/// will be different when we next compile) related to each node,
/// but rather the `DefPathIndex`. This can then be retraced
/// to find the current def-id.
pub hashes: Vec<(DepNodeIndex, Fingerprint)>,
}

impl SerializedDepGraph {
pub fn edge_targets_from(&self, source: DepNodeIndex) -> &[DepNodeIndex] {
let targets = self.edge_list_indices[source];
&self.edge_list_data[targets.0 as usize .. targets.1 as usize]
}
}

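The `[start, end)` scheme described above is a standard compressed adjacency list. A tiny free-standing illustration with plain `usize` indices in place of `DepNodeIndex` (the graph and numbers are made up):

```rust
// Adjacency for 3 nodes: 0 -> {1, 2}, 1 -> {2}, 2 -> {}.
fn edge_targets_from<'a>(
    edge_list_indices: &[(u32, u32)],
    edge_list_data: &'a [usize],
    source: usize,
) -> &'a [usize] {
    // Each node stores a half-open [start, end) range into the flat edge list.
    let (start, end) = edge_list_indices[source];
    &edge_list_data[start as usize..end as usize]
}

fn main() {
    let edge_list_indices = [(0u32, 2), (2, 3), (3, 3)];
    let edge_list_data = [1usize, 2, 2];

    assert_eq!(edge_targets_from(&edge_list_indices, &edge_list_data, 0), &[1, 2][..]);
    assert_eq!(edge_targets_from(&edge_list_indices, &edge_list_data, 1), &[2][..]);
    assert!(edge_targets_from(&edge_list_indices, &edge_list_data, 2).is_empty());
}
```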
/// The index of a DepNode in the SerializedDepGraph::nodes array.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Debug,
RustcEncodable, RustcDecodable)]
pub struct DepNodeIndex(pub u32);

impl Idx for DepNodeIndex {
#[inline]
fn new(idx: usize) -> Self {
assert!(idx <= ::std::u32::MAX as usize);
DepNodeIndex(idx as u32)
}

#[inline]
fn index(self) -> usize {
self.0 as usize
}
}

#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedWorkProduct {
@ -122,7 +122,7 @@ impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> {
let current_fingerprint = self.tcx.dep_graph.fingerprint_of(&dep_node);
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);

if current_fingerprint == prev_fingerprint {
if Some(current_fingerprint) == prev_fingerprint {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
@ -136,7 +136,7 @@ impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> {
let current_fingerprint = self.tcx.dep_graph.fingerprint_of(&dep_node);
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);

if current_fingerprint != prev_fingerprint {
if Some(current_fingerprint) != prev_fingerprint {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
@ -129,7 +129,6 @@ use std::__rand::{thread_rng, Rng};

const LOCK_FILE_EXT: &'static str = ".lock";
const DEP_GRAPH_FILENAME: &'static str = "dep-graph.bin";
const DEP_GRAPH_NEW_FILENAME: &'static str = "dep-graph-new.bin";
const WORK_PRODUCTS_FILENAME: &'static str = "work-products.bin";
const METADATA_HASHES_FILENAME: &'static str = "metadata.bin";
@ -143,10 +142,6 @@ pub fn dep_graph_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME)
}

pub fn dep_graph_path_new(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, DEP_GRAPH_NEW_FILENAME)
}

pub fn work_products_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME)
}
@ -10,63 +10,68 @@

//! Code to save/load the dep-graph from files.

use rustc::dep_graph::{DepNode, WorkProductId, DepKind, PreviousDepGraph};
use rustc::dep_graph::{PreviousDepGraph, SerializedDepGraph};
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc::util::nodemap::DefIdMap;
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_serialize::Decodable as RustcDecodable;
use rustc_serialize::opaque::Decoder;
use std::path::{Path};
use std::path::Path;

use super::data::*;
use super::fs::*;
use super::file_format;
use super::work_product;

// The key is a dirty node. The value is **some** base-input that we
// can blame it on.
pub type DirtyNodes = FxHashMap<DepNodeIndex, DepNodeIndex>;
pub fn dep_graph_tcx_init<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
if !tcx.dep_graph.is_fully_enabled() {
return
}

/// If we are in incremental mode, and a previous dep-graph exists,
/// then load up those nodes/edges that are still valid into the
/// dep-graph for this session. (This is assumed to be running very
/// early in compilation, before we've really done any work, but
/// actually it doesn't matter all that much.) See `README.md` for
/// a more general overview.
pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
tcx.allocate_metadata_dep_nodes();
tcx.precompute_in_scope_traits_hashes();
if tcx.sess.incr_session_load_dep_graph() {
let _ignore = tcx.dep_graph.in_ignore();
load_dep_graph_if_exists(tcx);
}
}

fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let dep_graph_path = dep_graph_path(tcx.sess);
let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) {
Some(p) => p,
None => return // no file
};
if tcx.sess.incr_comp_session_dir_opt().is_none() {
// If we are only building with -Zquery-dep-graph but without an actual
// incr. comp. session directory, we exit here. Otherwise we'd fail
// when trying to load work products.
return
}

let work_products_path = work_products_path(tcx.sess);
let work_products_data = match load_data(tcx.sess, &work_products_path) {
Some(p) => p,
None => return // no file
};
if let Some(work_products_data) = load_data(tcx.sess, &work_products_path) {
// Decode the list of work_products
let mut work_product_decoder = Decoder::new(&work_products_data[..], 0);
let work_products: Vec<SerializedWorkProduct> =
RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
let msg = format!("Error decoding `work-products` from incremental \
compilation session directory: {}", e);
tcx.sess.fatal(&msg[..])
});

match decode_dep_graph(tcx, &dep_graph_data, &work_products_data) {
Ok(dirty_nodes) => dirty_nodes,
Err(err) => {
tcx.sess.warn(
&format!("decoding error in dep-graph from `{}` and `{}`: {}",
dep_graph_path.display(),
work_products_path.display(),
err));
for swp in work_products {
let mut all_files_exist = true;
for &(_, ref file_name) in swp.work_product.saved_files.iter() {
let path = in_incr_comp_dir_sess(tcx.sess, file_name);
if !path.exists() {
all_files_exist = false;

if tcx.sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: could not find file for work \
product: {}", path.display());
}
}
}

if all_files_exist {
debug!("reconcile_work_products: all files for {:?} exist", swp);
tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product);
} else {
debug!("reconcile_work_products: some file for {:?} does not exist", swp);
delete_dirty_work_product(tcx, swp);
}
}
}
}
@ -94,201 +99,6 @@ fn load_data(sess: &Session, path: &Path) -> Option<Vec<u8>> {
None
}

/// Check if a DepNode from the previous dep-graph refers to something that
/// still exists in the current compilation session. Only works for DepNode
/// variants that represent inputs (HIR and imported Metadata).
fn does_still_exist(tcx: TyCtxt, dep_node: &DepNode) -> bool {
match dep_node.kind {
DepKind::Hir |
DepKind::HirBody |
DepKind::InScopeTraits |
DepKind::CrateMetadata => {
dep_node.extract_def_id(tcx).is_some()
}
_ => {
bug!("unexpected Input DepNode: {:?}", dep_node)
}
}
}

/// Decode the dep graph and load the edges/nodes that are still clean
/// into `tcx.dep_graph`.
pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
dep_graph_data: &[u8],
work_products_data: &[u8])
-> Result<(), String>
{
// Decode the list of work_products
let mut work_product_decoder = Decoder::new(work_products_data, 0);
let work_products = <Vec<SerializedWorkProduct>>::decode(&mut work_product_decoder)?;

// Deserialize the directory and dep-graph.
let mut dep_graph_decoder = Decoder::new(dep_graph_data, 0);
let prev_commandline_args_hash = u64::decode(&mut dep_graph_decoder)?;

if prev_commandline_args_hash != tcx.sess.opts.dep_tracking_hash() {
if tcx.sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: completely ignoring cache because of \
differing commandline arguments");
}
// We can't reuse the cache, purge it.
debug!("decode_dep_graph: differing commandline arg hashes");
for swp in work_products {
delete_dirty_work_product(tcx, swp);
}

// No need to do any further work
return Ok(());
}

let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?;

// Compute the set of nodes from the old graph where some input
// has changed or been removed.
let dirty_raw_nodes = initial_dirty_nodes(tcx,
&serialized_dep_graph.nodes,
&serialized_dep_graph.hashes);
let dirty_raw_nodes = transitive_dirty_nodes(&serialized_dep_graph,
dirty_raw_nodes);

// Recreate the edges in the graph that are still clean.
let mut clean_work_products = FxHashSet();
let mut dirty_work_products = FxHashSet(); // incomplete; just used to suppress debug output
for (source, targets) in serialized_dep_graph.edge_list_indices.iter_enumerated() {
let target_begin = targets.0 as usize;
let target_end = targets.1 as usize;

for &target in &serialized_dep_graph.edge_list_data[target_begin .. target_end] {
process_edge(tcx,
source,
target,
&serialized_dep_graph.nodes,
&dirty_raw_nodes,
&mut clean_work_products,
&mut dirty_work_products,
&work_products);
}
}

// Recreate bootstrap outputs, which are outputs that have no incoming edges
// (and hence cannot be dirty).
for bootstrap_output in &serialized_dep_graph.bootstrap_outputs {
if let DepKind::WorkProduct = bootstrap_output.kind {
let wp_id = WorkProductId::from_fingerprint(bootstrap_output.hash);
clean_work_products.insert(wp_id);
}

tcx.dep_graph.add_node_directly(*bootstrap_output);
}

// Add in work-products that are still clean, and delete those that are
// dirty.
reconcile_work_products(tcx, work_products, &clean_work_products);

Ok(())
}

/// Computes which of the original set of def-ids are dirty. Stored in
/// a bit vector where the index is the DefPathIndex.
fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
nodes: &IndexVec<DepNodeIndex, DepNode>,
serialized_hashes: &[(DepNodeIndex, Fingerprint)])
-> DirtyNodes {
let mut dirty_nodes = FxHashMap();

for &(dep_node_index, prev_hash) in serialized_hashes {
let dep_node = nodes[dep_node_index];
if does_still_exist(tcx, &dep_node) {
let current_hash = tcx.dep_graph.fingerprint_of(&dep_node);

if current_hash == prev_hash {
debug!("initial_dirty_nodes: {:?} is clean (hash={:?})",
dep_node,
current_hash);
continue;
}

if tcx.sess.opts.debugging_opts.incremental_dump_hash {
println!("node {:?} is dirty as hash is {:?}, was {:?}",
dep_node,
current_hash,
prev_hash);
}

debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}",
dep_node,
current_hash,
prev_hash);
} else {
if tcx.sess.opts.debugging_opts.incremental_dump_hash {
println!("node {:?} is dirty as it was removed", dep_node);
}

debug!("initial_dirty_nodes: {:?} is dirty as it was removed", dep_node);
}
dirty_nodes.insert(dep_node_index, dep_node_index);
}

dirty_nodes
}

fn transitive_dirty_nodes(serialized_dep_graph: &SerializedDepGraph,
mut dirty_nodes: DirtyNodes)
-> DirtyNodes
{
let mut stack: Vec<(DepNodeIndex, DepNodeIndex)> = vec![];
stack.extend(dirty_nodes.iter().map(|(&s, &b)| (s, b)));
while let Some((source, blame)) = stack.pop() {
// we know the source is dirty (because of the node `blame`)...
debug_assert!(dirty_nodes.contains_key(&source));

// ...so we dirty all the targets (with the same blame)
for &target in serialized_dep_graph.edge_targets_from(source) {
if !dirty_nodes.contains_key(&target) {
dirty_nodes.insert(target, blame);
stack.push((target, blame));
}
}
}
dirty_nodes
}

/// Go through the list of work-products produced in the previous run.
/// Delete any whose nodes have been found to be dirty or which are
/// otherwise no longer applicable.
fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
work_products: Vec<SerializedWorkProduct>,
clean_work_products: &FxHashSet<WorkProductId>) {
debug!("reconcile_work_products({:?})", work_products);
for swp in work_products {
if !clean_work_products.contains(&swp.id) {
debug!("reconcile_work_products: dep-node for {:?} is dirty", swp);
delete_dirty_work_product(tcx, swp);
} else {
let mut all_files_exist = true;
for &(_, ref file_name) in swp.work_product.saved_files.iter() {
let path = in_incr_comp_dir_sess(tcx.sess, file_name);
if !path.exists() {
all_files_exist = false;

if tcx.sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: could not find file for \
up-to-date work product: {}", path.display());
}
}
}

if all_files_exist {
debug!("reconcile_work_products: all files for {:?} exist", swp);
tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product);
} else {
debug!("reconcile_work_products: some file for {:?} does not exist", swp);
delete_dirty_work_product(tcx, swp);
}
}
}
}

fn delete_dirty_work_product(tcx: TyCtxt,
swp: SerializedWorkProduct) {
debug!("delete_dirty_work_product({:?})", swp);
@ -353,90 +163,14 @@ pub fn load_prev_metadata_hashes(tcx: TyCtxt) -> DefIdMap<Fingerprint> {
output
}

fn process_edge<'a, 'tcx, 'edges>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
source: DepNodeIndex,
target: DepNodeIndex,
nodes: &IndexVec<DepNodeIndex, DepNode>,
dirty_raw_nodes: &DirtyNodes,
clean_work_products: &mut FxHashSet<WorkProductId>,
dirty_work_products: &mut FxHashSet<WorkProductId>,
work_products: &[SerializedWorkProduct])
{
// If the target is dirty, skip the edge. If this is an edge
// that targets a work-product, we can print the blame
// information now.
if let Some(&blame) = dirty_raw_nodes.get(&target) {
let target = nodes[target];
if let DepKind::WorkProduct = target.kind {
if tcx.sess.opts.debugging_opts.incremental_info {
let wp_id = WorkProductId::from_fingerprint(target.hash);

if dirty_work_products.insert(wp_id) {
// Try to reconstruct the human-readable version of the
// DepNode. This cannot be done for things that were
// removed.
let blame = nodes[blame];
let blame_str = if let Some(def_id) = blame.extract_def_id(tcx) {
format!("{:?}({})",
blame.kind,
tcx.def_path(def_id).to_string(tcx))
} else {
format!("{:?}", blame)
};

let wp = work_products.iter().find(|swp| swp.id == wp_id).unwrap();

eprintln!("incremental: module {:?} is dirty because \
{:?} changed or was removed",
wp.work_product.cgu_name,
blame_str);
}
}
}
return;
}

// At this point we have asserted that the target is clean -- otherwise, we
// would have hit the return above. We can do some further consistency
// checks based on this fact:

// We should never have an edge where the target is clean but the source
// was dirty. Otherwise something was wrong with the dirtying pass above:
debug_assert!(!dirty_raw_nodes.contains_key(&source));

// We also never should encounter an edge going from a removed input to a
// clean target because removing the input would have dirtied the input
// node and transitively dirtied the target.
debug_assert!(match nodes[source].kind {
DepKind::Hir | DepKind::HirBody | DepKind::CrateMetadata => {
does_still_exist(tcx, &nodes[source])
}
_ => true,
});

if !dirty_raw_nodes.contains_key(&target) {
let target = nodes[target];
let source = nodes[source];
tcx.dep_graph.add_edge_directly(source, target);

if let DepKind::WorkProduct = target.kind {
let wp_id = WorkProductId::from_fingerprint(target.hash);
clean_work_products.insert(wp_id);
}
}
}

pub fn load_dep_graph_new(sess: &Session) -> PreviousDepGraph {
use rustc::dep_graph::SerializedDepGraph as SerializedDepGraphNew;

let empty = PreviousDepGraph::new(SerializedDepGraphNew::new());
pub fn load_dep_graph(sess: &Session) -> PreviousDepGraph {
let empty = PreviousDepGraph::new(SerializedDepGraph::new());

if sess.opts.incremental.is_none() {
return empty
}

if let Some(bytes) = load_data(sess, &dep_graph_path_new(sess)) {
if let Some(bytes) = load_data(sess, &dep_graph_path(sess)) {
let mut decoder = Decoder::new(&bytes, 0);
let prev_commandline_args_hash = u64::decode(&mut decoder)
.expect("Error reading commandline arg hash from cached dep-graph");
@ -453,7 +187,7 @@ pub fn load_dep_graph_new(sess: &Session) -> PreviousDepGraph {
return empty
}

let dep_graph = SerializedDepGraphNew::decode(&mut decoder)
let dep_graph = SerializedDepGraph::decode(&mut decoder)
.expect("Error reading cached dep-graph");

PreviousDepGraph::new(dep_graph)
@ -16,7 +16,6 @@ mod data;
mod dirty_clean;
mod fs;
mod load;
mod preds;
mod save;
mod work_product;
mod file_format;
@ -25,7 +24,8 @@ pub use self::fs::prepare_session_directory;
pub use self::fs::finalize_session_directory;
pub use self::fs::in_incr_comp_dir;
pub use self::load::load_dep_graph;
pub use self::load::load_dep_graph_new;
pub use self::load::dep_graph_tcx_init;
pub use self::save::save_dep_graph;
pub use self::save::save_work_products;
pub use self::work_product::save_trans_partition;
pub use self::work_product::delete_workproduct_files;
@ -1,48 +0,0 @@
Graph compression

The graph compression algorithm is intended to reduce the size of the
dependency graph so it can be saved, while preserving everything we
care about. In particular, given a set of input/output nodes in the
graph (which must be disjoint), we ensure that the set of input nodes
that can reach a given output node does not change, although the
intermediate nodes may change in various ways. In short, the output
nodes are intended to be the ones whose existence we care about when
we start up, because they have some associated data that we will try
to re-use (and hence if they are dirty, we have to throw that data
away). The other intermediate nodes don't really matter so much.

### Overview

The algorithm works as follows:

1. Do a single walk of the graph to construct a DAG
   - in this walk, we identify and unify all cycles, electing a representative "head" node
   - this is done using the union-find implementation
   - this code is found in the `classify` module
2. The result from this walk is a `Dag`:
   - the set of SCCs, represented by the union-find table
   - a set of edges in the new DAG, represented by:
     - a vector of parent nodes for each child node
     - a vector of cross-edges
   - once these are canonicalized, some of these edges may turn out to be cyclic edges
     (i.e., an edge A -> A where A is the head of some SCC)
3. We pass this `Dag` into the construct code, which then creates a
   new graph. This graph has a smaller set of indices which includes
   *at least* the inputs/outputs from the original graph, but may have
   other nodes as well, if keeping them reduces the overall size of
   the graph.
   - This code is found in the `construct` module.

### Some notes

The input graph is assumed to have *read-by* edges. i.e., `A -> B`
means that the task B reads data from A. But the DAG defined by
classify is expressed in terms of *reads-from* edges, which are the
inverse. So `A -> B` is the same as `B -rf-> A`. *reads-from* edges
are more natural since we want to walk from the outputs to the inputs,
effectively. When we construct the final graph, we reverse these edges
back into the *read-by* edges common elsewhere.

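Step 1 of the overview leans on a union-find table to merge each cycle into a single representative head. For reference, here is a minimal stand-alone version of that building block — generic union-find with path compression, not the `rustc_data_structures` implementation being deleted here:

```rust
// Tiny union-find: parents[i] == i means i is a root, i.e. a cycle head.
struct UnionFind {
    parents: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        UnionFind { parents: (0..n).collect() }
    }

    fn find(&mut self, i: usize) -> usize {
        if self.parents[i] != i {
            let root = self.find(self.parents[i]);
            self.parents[i] = root; // path compression
        }
        self.parents[i]
    }

    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        if ra != rb {
            self.parents[ra] = rb;
        }
    }
}

fn main() {
    // Nodes 0..5; suppose the walk discovered the cycle {1, 2, 3}.
    let mut uf = UnionFind::new(5);
    uf.union(1, 2);
    uf.union(2, 3);
    assert_eq!(uf.find(1), uf.find(3)); // same cycle head
    assert_ne!(uf.find(0), uf.find(1)); // node 0 is not in the cycle
}
```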
@ -1,151 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! First phase. Detect cycles and cross-edges.

use super::*;

#[cfg(test)]
mod test;

pub struct Classify<'a, 'g: 'a, N: 'g, I: 'a, O: 'a>
where N: Debug + Clone + 'g,
I: Fn(&N) -> bool,
O: Fn(&N) -> bool,
{
r: &'a mut GraphReduce<'g, N, I, O>,
stack: Vec<NodeIndex>,
colors: Vec<Color>,
dag: Dag,
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum Color {
// not yet visited
White,

// visiting; usize is index on stack
Grey(usize),

// finished visiting
Black,
}

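The `Grey(usize)` variant records the node's position on the DFS stack, so an edge into a grey node pinpoints the whole cycle: every node from that stack index upward participates. A compact, self-contained sketch of that detection step, using a toy adjacency list rather than the compiler's graph types:

```rust
#[derive(Clone, Copy)]
enum Color { White, Grey(usize), Black }

fn open(
    node: usize,
    edges: &[Vec<usize>],
    colors: &mut Vec<Color>,
    stack: &mut Vec<usize>,
    cycles: &mut Vec<Vec<usize>>,
) {
    colors[node] = Color::Grey(stack.len());
    stack.push(node);
    for &child in &edges[node] {
        match colors[child] {
            Color::White => open(child, edges, colors, stack, cycles),
            // Back-edge: everything on the stack from `idx` upward is one cycle.
            Color::Grey(idx) => cycles.push(stack[idx..].to_vec()),
            Color::Black => {} // cross-edge; a real pass would record it
        }
    }
    stack.pop();
    colors[node] = Color::Black;
}

fn main() {
    // 0 -> 1 -> 2 -> 0 is a cycle; 2 -> 3 leaves it.
    let edges = vec![vec![1], vec![2], vec![0, 3], vec![]];
    let mut colors = vec![Color::White; edges.len()];
    let (mut stack, mut cycles) = (Vec::new(), Vec::new());
    open(0, &edges, &mut colors, &mut stack, &mut cycles);
    assert_eq!(cycles, vec![vec![0, 1, 2]]);
}
```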
impl<'a, 'g, N, I, O> Classify<'a, 'g, N, I, O>
where N: Debug + Clone + 'g,
I: Fn(&N) -> bool,
O: Fn(&N) -> bool,
{
pub(super) fn new(r: &'a mut GraphReduce<'g, N, I, O>) -> Self {
Classify {
r,
colors: vec![Color::White; r.in_graph.len_nodes()],
stack: vec![],
dag: Dag {
parents: (0..r.in_graph.len_nodes()).map(|i| NodeIndex(i)).collect(),
cross_edges: vec![],
input_nodes: vec![],
output_nodes: vec![],
},
}
}

pub(super) fn walk(mut self) -> Dag {
for (index, node) in self.r.in_graph.all_nodes().iter().enumerate() {
if (self.r.is_output)(&node.data) {
let index = NodeIndex(index);
self.dag.output_nodes.push(index);
match self.colors[index.0] {
Color::White => self.open(index),
Color::Grey(_) => panic!("grey node but have not yet started a walk"),
Color::Black => (), // already visited, skip
}
}
}

// At this point we've identified all the cycles, and we've
// constructed a spanning tree over the original graph
// (encoded in `self.parents`) as well as a list of
// cross-edges that reflect additional edges from the DAG.
//
// If we converted each node to its `cycle-head` (a
// representative choice from each SCC, basically) and then
// take the union of `self.parents` and `self.cross_edges`
// (after canonicalization), that is basically our DAG.
//
// Note that both of those may well contain trivial `X -rf-> X`
// cycle edges after canonicalization, though. e.g., if you
// have a graph `{A -rf-> B, B -rf-> A}`, we will have unioned A and
// B, but A will also be B's parent (or vice versa), and hence
// when we canonicalize the parent edge it would become `A -rf->
// A` (or `B -rf-> B`).
self.dag
}

fn open(&mut self, node: NodeIndex) {
let index = self.stack.len();
self.stack.push(node);
self.colors[node.0] = Color::Grey(index);
for child in self.r.inputs(node) {
self.walk_edge(node, child);
}
self.stack.pop().unwrap();
self.colors[node.0] = Color::Black;

if (self.r.is_input)(&self.r.in_graph.node_data(node)) {
// base inputs should have no inputs
assert!(self.r.inputs(node).next().is_none());
debug!("input: `{:?}`", self.r.in_graph.node_data(node));
self.dag.input_nodes.push(node);
}
}

fn walk_edge(&mut self, parent: NodeIndex, child: NodeIndex) {
debug!("walk_edge: {:?} -rf-> {:?}, {:?}",
self.r.in_graph.node_data(parent),
self.r.in_graph.node_data(child),
self.colors[child.0]);

// Ignore self-edges, just in case they exist.
if child == parent {
return;
}

match self.colors[child.0] {
Color::White => {
// Not yet visited this node; start walking it.
assert_eq!(self.dag.parents[child.0], child);
self.dag.parents[child.0] = parent;
self.open(child);
}

Color::Grey(stack_index) => {
// Back-edge; unify everything on stack between here and `stack_index`
// since we are all participating in a cycle
assert!(self.stack[stack_index] == child);

for &n in &self.stack[stack_index..] {
debug!("cycle `{:?}` and `{:?}`",
self.r.in_graph.node_data(n),
self.r.in_graph.node_data(parent));
self.r.mark_cycle(n, parent);
}
}

Color::Black => {
// Cross-edge, record and ignore
self.dag.cross_edges.push((parent, child));
debug!("cross-edge `{:?} -rf-> {:?}`",
self.r.in_graph.node_data(parent),
self.r.in_graph.node_data(child));
}
}
}
}
@ -1,94 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use super::*;

#[test]
fn detect_cycles() {
let (graph, nodes) = graph! {
A -> C0,
A -> C1,
B -> C1,
C0 -> C1,
C1 -> C0,
C0 -> D,
C1 -> E,
};
let inputs = ["A", "B"];
let outputs = ["D", "E"];
let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
Classify::new(&mut reduce).walk();

assert!(!reduce.in_cycle(nodes("A"), nodes("C0")));
assert!(!reduce.in_cycle(nodes("B"), nodes("C0")));
assert!(reduce.in_cycle(nodes("C0"), nodes("C1")));
assert!(!reduce.in_cycle(nodes("D"), nodes("C0")));
assert!(!reduce.in_cycle(nodes("E"), nodes("C0")));
assert!(!reduce.in_cycle(nodes("E"), nodes("A")));
}

/// Regr test for a bug where we forgot to pop nodes off of the stack
/// as we were walking. In this case, because edges are pushed to the front
/// of the list, we would visit OUT, then A, then IN, and then close IN (but forget
/// to pop it). Then visit B, C, and then A, which would mark everything from A to C
/// as a cycle. But since we failed to pop IN, the stack was `OUT, A, IN, B, C`, so
/// that marked C and IN as being in a cycle.
#[test]
fn edge_order1() {
let (graph, nodes) = graph! {
A -> C,
C -> B,
B -> A,
IN -> B,
IN -> A,
A -> OUT,
};
let inputs = ["IN"];
let outputs = ["OUT"];
let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
Classify::new(&mut reduce).walk();

// A, B, and C are mutually in a cycle, but IN/OUT are not participating.
let names = ["A", "B", "C", "IN", "OUT"];
let cycle_names = ["A", "B", "C"];
for &i in &names {
for &j in names.iter().filter(|&&j| j != i) {
let in_cycle = cycle_names.contains(&i) && cycle_names.contains(&j);
assert_eq!(reduce.in_cycle(nodes(i), nodes(j)), in_cycle,
"cycle status for nodes {} and {} is incorrect",
i, j);
}
}
}

/// Same as `edge_order1` but in reverse order so as to detect a failure
/// if we were to enqueue edges onto the end of the list instead.
#[test]
fn edge_order2() {
let (graph, nodes) = graph! {
A -> OUT,
IN -> A,
IN -> B,
B -> A,
C -> B,
A -> C,
};
let inputs = ["IN"];
let outputs = ["OUT"];
let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
Classify::new(&mut reduce).walk();

assert!(reduce.in_cycle(nodes("B"), nodes("C")));

assert!(!reduce.in_cycle(nodes("IN"), nodes("A")));
assert!(!reduce.in_cycle(nodes("IN"), nodes("B")));
assert!(!reduce.in_cycle(nodes("IN"), nodes("C")));
assert!(!reduce.in_cycle(nodes("IN"), nodes("OUT")));
}
@ -1,223 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Second phase. Construct new graph. The previous phase has
//! converted the input graph into a DAG by detecting and unifying
//! cycles. It provides us with the following (which is a
//! representation of the DAG):
//!
//! - SCCs, in the form of a union-find repr that can convert each node to
//! its *cycle head* (an arbitrarily chosen representative from the cycle)
//! - a vector of *leaf nodes*, just a convenience
//! - a vector of *parents* for each node (in some cases, nodes have no parents,
//! or their parent is another member of the same cycle; in that case, the vector
//! will be stored `v[i] == i`, after canonicalization)
//! - a vector of *cross edges*, meaning add'l edges between graph nodes beyond
//! the parents.

use rustc_data_structures::fx::FxHashMap;

use super::*;

pub(super) fn construct_graph<'g, N, I, O>(r: &mut GraphReduce<'g, N, I, O>, dag: Dag)
-> Reduction<'g, N>
where N: Debug + Clone, I: Fn(&N) -> bool, O: Fn(&N) -> bool,
{
let Dag { parents: old_parents, input_nodes, output_nodes, cross_edges } = dag;
let in_graph = r.in_graph;

debug!("construct_graph");

// Create a canonical list of edges; this includes both parent and
// cross-edges. We store this in `(target -> Vec<source>)` form.
// We call the first edge to any given target its "parent".
let mut edges = FxHashMap();
let old_parent_edges = old_parents.iter().cloned().zip((0..).map(NodeIndex));
for (source, target) in old_parent_edges.chain(cross_edges) {
debug!("original edge `{:?} -rf-> {:?}`",
in_graph.node_data(source),
in_graph.node_data(target));
let source = r.cycle_head(source);
let target = r.cycle_head(target);
if source != target {
let v = edges.entry(target).or_insert(vec![]);
if !v.contains(&source) {
debug!("edge `{:?} -rf-> {:?}` is edge #{} with that target",
in_graph.node_data(source),
in_graph.node_data(target),
v.len());
v.push(source);
}
}
}
let parent = |ni: NodeIndex| -> NodeIndex {
edges[&ni][0]
};

// `retain_map`: a map of those nodes that we will want to
// *retain* in the ultimate graph; the key is the node index in
// the old graph, the value is the node index in the new
// graph. These are nodes in the following categories:
//
// - inputs
// - work-products
// - targets of a cross-edge
//
// The first two categories hopefully make sense. We want the
// inputs so we can compare hashes later. We want the
// work-products so we can tell precisely when a given
// work-product is invalidated. But the last one isn't strictly
// needed; we keep cross-target edges so as to minimize the total
// graph size.
//
// Consider a graph like:
//
// WP0 -rf-> Y
// WP1 -rf-> Y
// Y -rf-> INPUT0
// Y -rf-> INPUT1
// Y -rf-> INPUT2
// Y -rf-> INPUT3
//
// Now if we were to remove Y, we would have a total of 8 edges: both WP0 and WP1
// depend on INPUT0...INPUT3. As it is, we have 6 edges.
//
// NB: The current rules are not optimal. For example, given this
// input graph:
//
// OUT0 -rf-> X
// OUT1 -rf-> X
// X -rf-> INPUT0
//
// we will preserve X because it has two "consumers" (OUT0 and
// OUT1). We could as easily skip it, but we'd have to tally up
// the number of input nodes that it (transitively) reaches, and I
// was too lazy to do so. This is the unit test `suboptimal`.

let mut retain_map = FxHashMap();
let mut new_graph = Graph::new();

{
// Start by adding start-nodes and inputs.
let retained_nodes = output_nodes.iter().chain(&input_nodes).map(|&n| r.cycle_head(n));

// Next add in targets of cross-edges. Due to the canonicalization,
// some of these may be self-edges or may duplicate the parent
// edges, so ignore those.
|
||||
let retained_nodes = retained_nodes.chain(
|
||||
edges.iter()
|
||||
.filter(|&(_, ref sources)| sources.len() > 1)
|
||||
.map(|(&target, _)| target));
|
||||
|
||||
// Now create the new graph, adding in the entries from the map.
|
||||
for n in retained_nodes {
|
||||
retain_map.entry(n)
|
||||
.or_insert_with(|| {
|
||||
let data = in_graph.node_data(n);
|
||||
debug!("retaining node `{:?}`", data);
|
||||
new_graph.add_node(data)
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Given a cycle-head `ni`, converts it to the closest parent that has
|
||||
// been retained in the output graph.
|
||||
let retained_parent = |mut ni: NodeIndex| -> NodeIndex {
|
||||
loop {
|
||||
debug!("retained_parent({:?})", in_graph.node_data(ni));
|
||||
match retain_map.get(&ni) {
|
||||
Some(&v) => return v,
|
||||
None => ni = parent(ni),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Now add in the edges into the graph.
|
||||
for (&target, sources) in &edges {
|
||||
if let Some(&r_target) = retain_map.get(&target) {
|
||||
debug!("adding edges that target `{:?}`", in_graph.node_data(target));
|
||||
for &source in sources {
|
||||
debug!("new edge `{:?} -rf-> {:?}`",
|
||||
in_graph.node_data(source),
|
||||
in_graph.node_data(target));
|
||||
let r_source = retained_parent(source);
|
||||
|
||||
// NB. In the input graph, we have `a -> b` if b
|
||||
// **reads from** a. But in the terminology of this
|
||||
// code, we would describe that edge as `b -> a`,
|
||||
// because we have edges *from* outputs *to* inputs.
|
||||
// Therefore, when we create our new graph, we have to
|
||||
// reverse the edge.
|
||||
new_graph.add_edge(r_target, r_source, ());
|
||||
}
|
||||
} else {
|
||||
assert_eq!(sources.len(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
// One complication. In some cases, output nodes *may* participate in
|
||||
// cycles. An example:
|
||||
//
|
||||
// [HIR0] [HIR1]
|
||||
// | |
|
||||
// v v
|
||||
// TypeckClosureBody(X) -> ItemSignature(X::SomeClosureInX)
|
||||
// | ^ | |
|
||||
// | +-------------------------+ |
|
||||
// | |
|
||||
// v v
|
||||
// Foo Bar
|
||||
//
|
||||
// In these cases, the output node may not wind up as the head
|
||||
// of the cycle, in which case it would be absent from the
|
||||
// final graph. We don't wish this to happen, therefore we go
|
||||
// over the list of output nodes again and check for any that
|
||||
// are not their own cycle-head. If we find such a node, we
|
||||
// add it to the graph now with an edge from the cycle head.
|
||||
// So the graph above could get transformed into this:
|
||||
//
|
||||
// [HIR0, HIR1]
|
||||
// |
|
||||
// v
|
||||
// TypeckClosureBody(X) ItemSignature(X::SomeClosureInX)
|
||||
// ^ | |
|
||||
// +-------------------------+ |
|
||||
// v
|
||||
// [Foo, Bar]
|
||||
//
|
||||
// (Note that all the edges here are "read-by" edges, not
|
||||
// "reads-from" edges.)
|
||||
for &output_node in &output_nodes {
|
||||
let head = r.cycle_head(output_node);
|
||||
if output_node == head {
|
||||
assert!(retain_map.contains_key(&output_node));
|
||||
} else {
|
||||
assert!(!retain_map.contains_key(&output_node));
|
||||
let output_data = in_graph.node_data(output_node);
|
||||
let new_node = new_graph.add_node(output_data);
|
||||
let new_head_node = retain_map[&head];
|
||||
new_graph.add_edge(new_head_node, new_node, ());
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, prepare a list of the input node indices as found in
|
||||
// the new graph. Note that since all input nodes are leaves in
|
||||
// the graph, they should never participate in a cycle.
|
||||
let input_nodes =
|
||||
input_nodes.iter()
|
||||
.map(|&n| {
|
||||
assert_eq!(r.cycle_head(n), n, "input node participating in a cycle");
|
||||
retain_map[&n]
|
||||
})
|
||||
.collect();
|
||||
|
||||
Reduction { graph: new_graph, input_nodes: input_nodes }
|
||||
}
@ -1,43 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc_data_structures::graph::NodeIndex;
use rustc_data_structures::unify::UnifyKey;

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DagId {
    index: u32,
}

impl DagId {
    pub fn from_input_index(n: NodeIndex) -> Self {
        DagId { index: n.0 as u32 }
    }

    pub fn as_input_index(&self) -> NodeIndex {
        NodeIndex(self.index as usize)
    }
}

impl UnifyKey for DagId {
    type Value = ();

    fn index(&self) -> u32 {
        self.index
    }

    fn from_index(u: u32) -> Self {
        DagId { index: u }
    }

    fn tag(_: Option<Self>) -> &'static str {
        "DagId"
    }
}
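The `UnifyKey` impl above is what lets the graph reducer collapse strongly connected components cheaply: `union` merges two `DagId`s into one equivalence class, and `find` returns the class representative, which plays the role of the cycle head. As a hedged, standalone illustration of the same idea (plain runnable Rust with path compression, not the compiler's `UnificationTable`):

    // Minimal standalone union-find; `find` is the analogue of `cycle_head`.
    struct UnionFind {
        parent: Vec<usize>, // parent[i] == i means i is its own representative
    }

    impl UnionFind {
        fn new(n: usize) -> Self {
            UnionFind { parent: (0..n).collect() }
        }

        fn find(&mut self, i: usize) -> usize {
            if self.parent[i] != i {
                let root = self.find(self.parent[i]);
                self.parent[i] = root; // path compression
            }
            self.parent[i]
        }

        fn union(&mut self, a: usize, b: usize) {
            let (ra, rb) = (self.find(a), self.find(b));
            if ra != rb {
                self.parent[rb] = ra;
            }
        }
    }

    fn main() {
        let mut uf = UnionFind::new(3);
        uf.union(0, 1); // nodes 0 and 1 were found to sit on one cycle
        assert_eq!(uf.find(0), uf.find(1)); // same cycle head
        assert_ne!(uf.find(0), uf.find(2)); // node 2 is untouched
    }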
@ -1,125 +0,0 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Graph compression. See `README.md`.

use rustc_data_structures::graph::{Graph, NodeIndex};
use rustc_data_structures::unify::UnificationTable;
use std::fmt::Debug;

#[cfg(test)]
#[macro_use]
mod test_macro;

mod construct;

mod classify;
use self::classify::Classify;

mod dag_id;
use self::dag_id::DagId;

#[cfg(test)]
mod test;

pub fn reduce_graph<N, I, O>(graph: &Graph<N, ()>,
                             is_input: I,
                             is_output: O) -> Reduction<N>
    where N: Debug + Clone,
          I: Fn(&N) -> bool,
          O: Fn(&N) -> bool,
{
    GraphReduce::new(graph, is_input, is_output).compute()
}

pub struct Reduction<'q, N> where N: 'q + Debug + Clone {
    pub graph: Graph<&'q N, ()>,
    pub input_nodes: Vec<NodeIndex>,
}

struct GraphReduce<'q, N, I, O>
    where N: 'q + Debug + Clone,
          I: Fn(&N) -> bool,
          O: Fn(&N) -> bool,
{
    in_graph: &'q Graph<N, ()>,
    unify: UnificationTable<DagId>,
    is_input: I,
    is_output: O,
}

struct Dag {
    // The "parent" of a node is the node which reached it during the
    // initial DFS. To encode the case of "no parent" (i.e., for the
    // roots of the walk), we make `parents[i] == i` to start, which
    // turns out to be convenient.
    parents: Vec<NodeIndex>,

    // Additional edges beyond the parents.
    cross_edges: Vec<(NodeIndex, NodeIndex)>,

    // Nodes which we found that are considered "outputs"
    output_nodes: Vec<NodeIndex>,

    // Nodes which we found that are considered "inputs"
    input_nodes: Vec<NodeIndex>,
}

#[derive(Copy, Clone, PartialEq, Eq, Hash)]
struct DagNode {
    in_index: NodeIndex
}

impl<'q, N, I, O> GraphReduce<'q, N, I, O>
    where N: Debug + Clone,
          I: Fn(&N) -> bool,
          O: Fn(&N) -> bool,
{
    fn new(in_graph: &'q Graph<N, ()>, is_input: I, is_output: O) -> Self {
        let mut unify = UnificationTable::new();

        // create a set of unification keys whose indices
        // correspond to the indices from the input graph
        for i in 0..in_graph.len_nodes() {
            let k = unify.new_key(());
            assert!(k == DagId::from_input_index(NodeIndex(i)));
        }

        GraphReduce { in_graph, unify, is_input, is_output }
    }

    fn compute(mut self) -> Reduction<'q, N> {
        let dag = Classify::new(&mut self).walk();
        construct::construct_graph(&mut self, dag)
    }

    fn inputs(&self, in_node: NodeIndex) -> impl Iterator<Item = NodeIndex> + 'q {
        self.in_graph.predecessor_nodes(in_node)
    }

    fn mark_cycle(&mut self, in_node1: NodeIndex, in_node2: NodeIndex) {
        let dag_id1 = DagId::from_input_index(in_node1);
        let dag_id2 = DagId::from_input_index(in_node2);
        self.unify.union(dag_id1, dag_id2);
    }

    /// Convert a dag-id into its cycle head representative. This will
    /// be a no-op unless `in_node` participates in a cycle, in which
    /// case a distinct node *may* be returned.
    fn cycle_head(&mut self, in_node: NodeIndex) -> NodeIndex {
        let i = DagId::from_input_index(in_node);
        self.unify.find(i).as_input_index()
    }

    #[cfg(test)]
    fn in_cycle(&mut self, ni1: NodeIndex, ni2: NodeIndex) -> bool {
        self.cycle_head(ni1) == self.cycle_head(ni2)
    }
}
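A hedged usage sketch of the `reduce_graph` entry point above, written with the `graph!` helper macro that appears later in this diff (illustrative only; it assumes the `rustc_data_structures` graph API used throughout this module, and the tests below exercise it in depth):

    // Everything strictly between inputs and outputs may be compressed away.
    let (graph, _lookup) = graph! {
        A -> B,
        B -> C,
    };
    let reduction = reduce_graph(&graph,
                                 |n| *n == "A",   // input nodes
                                 |n| *n == "C");  // output nodes
    // The reduced graph keeps A and C; the intermediate B survives only
    // if it is still needed to keep the edge count down.
    assert_eq!(reduction.input_nodes.len(), 1);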
@ -1,259 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use super::*;

fn reduce(graph: &Graph<&'static str, ()>,
          inputs: &[&'static str],
          outputs: &[&'static str],
          expected: &[&'static str])
{
    let reduce = GraphReduce::new(&graph,
                                  |n| inputs.contains(n),
                                  |n| outputs.contains(n));
    let result = reduce.compute();
    let mut edges: Vec<String> =
        result.graph
              .all_edges()
              .iter()
              .map(|edge| format!("{} -> {}",
                                  result.graph.node_data(edge.source()),
                                  result.graph.node_data(edge.target())))
              .collect();
    edges.sort();
    println!("{:#?}", edges);
    assert_eq!(edges.len(), expected.len());
    for (expected, actual) in expected.iter().zip(&edges) {
        assert_eq!(expected, actual);
    }
}

#[test]
fn test1() {
    //  +---------------+
    //  |               |
    //  |      +--------|------+
    //  |      |        v      v
    // [A] -> [C0] -> [C1]    [D]
    //        [  ] <- [  ] -> [E]
    //                 ^
    // [B] ------------+
    let (graph, _nodes) = graph! {
        A -> C0,
        A -> C1,
        B -> C1,
        C0 -> C1,
        C1 -> C0,
        C0 -> D,
        C1 -> E,
    };

    // [A] -> [C1] -> [D]
    // [B] -> [  ] -> [E]
    reduce(&graph, &["A", "B"], &["D", "E"], &[
        "A -> C1",
        "B -> C1",
        "C1 -> D",
        "C1 -> E",
    ]);
}

#[test]
fn test2() {
    //  +---------------+
    //  |               |
    //  |      +--------|------+
    //  |      |        v      v
    // [A] -> [C0] -> [C1]    [D] -> [E]
    //        [  ] <- [  ]
    //                 ^
    // [B] ------------+
    let (graph, _nodes) = graph! {
        A -> C0,
        A -> C1,
        B -> C1,
        C0 -> C1,
        C1 -> C0,
        C0 -> D,
        D -> E,
    };

    // [A] -> [D] -> [E]
    // [B] -> [  ]
    reduce(&graph, &["A", "B"], &["D", "E"], &[
        "A -> D",
        "B -> D",
        "D -> E",
    ]);
}

#[test]
fn test2b() {
    // Variant on test2 in which [B] is not
    // considered an input.
    let (graph, _nodes) = graph! {
        A -> C0,
        A -> C1,
        B -> C1,
        C0 -> C1,
        C1 -> C0,
        C0 -> D,
        D -> E,
    };

    // [A] -> [D] -> [E]
    reduce(&graph, &["A"], &["D", "E"], &[
        "A -> D",
        "D -> E",
    ]);
}

#[test]
fn test3() {

    // Edges going *downwards*, so 0, 1 and 2 are inputs,
    // while 7, 8, and 9 are outputs.
    //
    //     0     1   2
    //     |      \ /
    //     3---+   |
    //     |   |   |
    //     |   |   |
    //     4   5   6
    //      \ / \ / \
    //       |   |   |
    //       7   8   9
    //
    // Here the end result removes node 4, instead encoding an edge
    // from n3 -> n7, but keeps nodes 5 and 6, as they are common
    // inputs to nodes 8/9.

    let (graph, _nodes) = graph! {
        n0 -> n3,
        n3 -> n4,
        n3 -> n5,
        n4 -> n7,
        n5 -> n7,
        n5 -> n8,
        n1 -> n6,
        n2 -> n6,
        n6 -> n8,
        n6 -> n9,
    };

    reduce(&graph, &["n0", "n1", "n2"], &["n7", "n8", "n9"], &[
        "n0 -> n3",
        "n1 -> n6",
        "n2 -> n6",
        "n3 -> n5",
        "n3 -> n7",
        "n5 -> n7",
        "n5 -> n8",
        "n6 -> n8",
        "n6 -> n9"
    ]);
}

#[test]
fn test_cached_dfs_cyclic() {

    //    0       1 <---- 2       3
    //    ^       |       ^       ^
    //    |       v       |       |
    //    4 ----> 5 ----> 6 ----> 7
    //    ^       ^       ^       ^
    //    |       |       |       |
    //    8       9      10      11

    let (graph, _nodes) = graph! {
        // edges from above diagram, in columns, top-to-bottom:
        n4 -> n0,
        n8 -> n4,
        n4 -> n5,
        n1 -> n5,
        n9 -> n5,
        n2 -> n1,
        n5 -> n6,
        n6 -> n2,
        n10 -> n6,
        n6 -> n7,
        n7 -> n3,
        n11 -> n7,
    };

    //    0       1  2            3
    //    ^       ^ /             ^
    //    |       |/              |
    //    4 ----> 5 --------------+
    //    ^       ^ \             |
    //    |       |  \            |
    //    8       9  10          11

    reduce(&graph, &["n8", "n9", "n10", "n11"], &["n0", "n1", "n2", "n3"], &[
        "n10 -> n5",
        "n11 -> n3",
        "n4 -> n0",
        "n4 -> n5",
        "n5 -> n1",
        "n5 -> n2",
        "n5 -> n3",
        "n8 -> n4",
        "n9 -> n5"
    ]);
}

/// Demonstrates the case where we don't reduce as much as we could.
#[test]
fn suboptimal() {
    let (graph, _nodes) = graph! {
        INPUT0 -> X,
        X -> OUTPUT0,
        X -> OUTPUT1,
    };

    reduce(&graph, &["INPUT0"], &["OUTPUT0", "OUTPUT1"], &[
        "INPUT0 -> X",
        "X -> OUTPUT0",
        "X -> OUTPUT1"
    ]);
}

#[test]
fn test_cycle_output() {
    //  +---------------+
    //  |               |
    //  |      +--------|------+
    //  |      |        v      v
    // [A] -> [C0] <-> [C1] <- [D]
    //                  +----> [E]
    //                          ^
    // [B] ---------------------+
    let (graph, _nodes) = graph! {
        A -> C0,
        A -> C1,
        B -> E,
        C0 -> C1,
        C1 -> C0,
        C0 -> D,
        C1 -> E,
        D -> C1,
    };

    // [A] -> [C0] --> [D]
    //          +----> [E]
    //                  ^
    // [B] -------------+
    reduce(&graph, &["A", "B"], &["D", "E"], &[
        "A -> C0",
        "B -> E",
        "C0 -> D",
        "C0 -> E",
    ]);
}
@ -1,39 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

macro_rules! graph {
    ($( $source:ident -> $target:ident, )*) => {
        {
            use $crate::rustc_data_structures::graph::{Graph, NodeIndex};
            use $crate::rustc_data_structures::fx::FxHashMap;

            let mut graph = Graph::new();
            let mut nodes: FxHashMap<&'static str, NodeIndex> = FxHashMap();

            for &name in &[ $(stringify!($source), stringify!($target)),* ] {
                let name: &'static str = name;
                nodes.entry(name)
                     .or_insert_with(|| graph.add_node(name));
            }

            $(
                {
                    let source = nodes[&stringify!($source)];
                    let target = nodes[&stringify!($target)];
                    graph.add_edge(source, target, ());
                }
            )*

            let f = move |name: &'static str| -> NodeIndex { nodes[&name] };

            (graph, f)
        }
    }
}
@ -1,108 +0,0 @@
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind};
use rustc::ich::Fingerprint;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::{Graph, NodeIndex};

mod compress;

/// A data-structure that makes it easy to enumerate the hashable
/// predecessors of any given dep-node.
pub struct Predecessors<'query> {
    // A reduced version of the input graph that contains fewer nodes.
    // This is intended to keep all of the base inputs (i.e., HIR
    // nodes) and all of the "work-products" we may care about
    // later. Other nodes may be retained if it keeps the overall size
    // of the graph down.
    pub reduced_graph: Graph<&'query DepNode, ()>,

    // These are output nodes that have no incoming edges. We have to
    // track these specially because, when we load the data back up
    // again, we want to make sure and recreate these nodes (we want
    // to recreate the nodes where all incoming edges are clean; but
    // since we ordinarily just serialize edges, we wind up just
    // forgetting that bootstrap outputs even exist in that case.)
    pub bootstrap_outputs: Vec<&'query DepNode>,

    // For the inputs (hir/foreign-metadata), we include hashes.
    pub hashes: FxHashMap<&'query DepNode, Fingerprint>,
}

impl<'q> Predecessors<'q> {
    pub fn new(tcx: TyCtxt, query: &'q DepGraphQuery) -> Self {
        // Find the set of "start nodes". These are nodes that we will
        // possibly query later.
        let is_output = |node: &DepNode| -> bool {
            match node.kind {
                DepKind::WorkProduct => true,
                DepKind::CrateMetadata => {
                    // We do *not* create dep-nodes for the current crate's
                    // metadata anymore, just for metadata that we import/read
                    // from other crates.
                    debug_assert!(!node.extract_def_id(tcx).unwrap().is_local());
                    false
                }
                // if -Z query-dep-graph is passed, save more extended data
                // to enable better unit testing
                DepKind::TypeckTables => tcx.sess.opts.debugging_opts.query_dep_graph,

                _ => false,
            }
        };

        // Reduce the graph to the most important nodes.
        let compress::Reduction { graph, input_nodes } =
            compress::reduce_graph(&query.graph,
                                   |n| n.kind.is_input(),
                                   |n| is_output(n));

        let mut hashes = FxHashMap();
        for input_index in input_nodes {
            let input = *graph.node_data(input_index);
            debug!("computing hash for input node `{:?}`", input);
            hashes.entry(input)
                  .or_insert_with(|| tcx.dep_graph.fingerprint_of(&input));
        }

        if tcx.sess.opts.debugging_opts.query_dep_graph {
            // Not all inputs might have been reachable from an output node,
            // but we still want their hash for our unit tests.
            let hir_nodes = query.graph.all_nodes().iter().filter_map(|node| {
                match node.data.kind {
                    DepKind::Hir => Some(&node.data),
                    _ => None,
                }
            });

            for node in hir_nodes {
                hashes.entry(node)
                      .or_insert_with(|| tcx.dep_graph.fingerprint_of(&node));
            }
        }

        let bootstrap_outputs: Vec<&'q DepNode> =
            (0 .. graph.len_nodes())
                .map(NodeIndex)
                .filter(|&n| graph.incoming_edges(n).next().is_none())
                .map(|n| *graph.node_data(n))
                .filter(|n| is_output(n))
                .collect();

        Predecessors {
            reduced_graph: graph,
            bootstrap_outputs,
            hashes,
        }
    }
}
@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::dep_graph::{DepGraph, DepNode};
use rustc::dep_graph::DepGraph;
use rustc::hir::def_id::DefId;
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
@ -18,8 +18,6 @@ use rustc::ty::TyCtxt;
use rustc::util::common::time;
use rustc::util::nodemap::DefIdMap;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::Encoder;
use std::io::{self, Cursor, Write};
@ -27,7 +25,6 @@ use std::fs::{self, File};
use std::path::PathBuf;

use super::data::*;
use super::preds::*;
use super::fs::*;
use super::dirty_clean;
use super::file_format;
@ -55,9 +52,6 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,

    let mut current_metadata_hashes = FxHashMap();

    // IMPORTANT: We are saving the metadata hashes *before* the dep-graph,
    // since metadata-encoding might add new entries to the
    // DefIdDirectory (which is saved in the dep-graph file).
    if sess.opts.debugging_opts.incremental_cc ||
       sess.opts.debugging_opts.query_dep_graph {
        save_in(sess,
@ -69,24 +63,10 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          e));
    }

    time(sess.time_passes(), "persist dep-graph (old)", || {
        let query = tcx.dep_graph.query();

        if tcx.sess.opts.debugging_opts.incremental_info {
            eprintln!("incremental: {} nodes in dep-graph", query.graph.len_nodes());
            eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges());
        }

        let preds = Predecessors::new(tcx, &query);
    time(sess.time_passes(), "persist dep-graph", || {
        save_in(sess,
                dep_graph_path(sess),
                |e| encode_dep_graph(tcx, &preds, e));
    });

    time(sess.time_passes(), "persist dep-graph (new)", || {
        save_in(sess,
                dep_graph_path_new(sess),
                |e| encode_dep_graph_new(tcx, e));
                |e| encode_dep_graph(tcx, e));
    });

    dirty_clean::check_dirty_clean_annotations(tcx);
@ -182,9 +162,9 @@ fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
    }
}

fn encode_dep_graph_new(tcx: TyCtxt,
                        encoder: &mut Encoder)
                        -> io::Result<()> {
fn encode_dep_graph(tcx: TyCtxt,
                    encoder: &mut Encoder)
                    -> io::Result<()> {
    // First encode the commandline arguments hash
    tcx.sess.opts.dep_tracking_hash().encode(encoder)?;

@ -195,118 +175,12 @@ fn encode_dep_graph_new(tcx: TyCtxt,
    Ok(())
}

pub fn encode_dep_graph(tcx: TyCtxt,
                        preds: &Predecessors,
                        encoder: &mut Encoder)
                        -> io::Result<()> {
    // First encode the commandline arguments hash
    tcx.sess.opts.dep_tracking_hash().encode(encoder)?;

    // NB: We rely on this Vec being indexable by reduced_graph's NodeIndex.
    let mut nodes: IndexVec<DepNodeIndex, DepNode> = preds
        .reduced_graph
        .all_nodes()
        .iter()
        .map(|node| node.data.clone())
        .collect();

    let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
    let mut edge_list_data = Vec::with_capacity(preds.reduced_graph.len_edges());

    for node_index in 0 .. nodes.len() {
        let start = edge_list_data.len() as u32;

        for target in preds.reduced_graph.successor_nodes(graph::NodeIndex(node_index)) {
            edge_list_data.push(DepNodeIndex::new(target.node_id()));
        }

        let end = edge_list_data.len() as u32;
        debug_assert_eq!(node_index, edge_list_indices.len());
        edge_list_indices.push((start, end));
    }

    // Let's make sure we had no overflow there.
    assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
    // Check that we have a consistent number of edges.
    assert_eq!(edge_list_data.len(), preds.reduced_graph.len_edges());

    let bootstrap_outputs = preds.bootstrap_outputs
                                 .iter()
                                 .map(|dep_node| (**dep_node).clone())
                                 .collect();

    // Next, build the map of content hashes. To this end, we need to transform
    // the (DepNode -> Fingerprint) map that we have into a
    // (DepNodeIndex -> Fingerprint) map. This may necessitate adding nodes back
    // to the dep-graph that have been filtered out during reduction.
    let content_hashes = {
        // We have to build a (DepNode -> DepNodeIndex) map. We over-allocate a
        // little because we expect some more nodes to be added.
        let capacity = (nodes.len() * 120) / 100;
        let mut node_to_index = FxHashMap::with_capacity_and_hasher(capacity,
                                                                    Default::default());
        // Add the nodes we already have in the graph.
        node_to_index.extend(nodes.iter_enumerated()
                                  .map(|(index, &node)| (node, index)));

        let mut content_hashes = Vec::with_capacity(preds.hashes.len());

        for (&&dep_node, &hash) in preds.hashes.iter() {
            let dep_node_index = *node_to_index
                .entry(dep_node)
                .or_insert_with(|| {
                    // There is no DepNodeIndex for this DepNode yet. This
                    // happens when the DepNode got filtered out during graph
                    // reduction. Since we have a content hash for the DepNode,
                    // we add it back to the graph.
                    let next_index = nodes.len();
                    nodes.push(dep_node);

                    debug_assert_eq!(next_index, edge_list_indices.len());
                    // Push an empty list of edges
                    edge_list_indices.push((0,0));

                    DepNodeIndex::new(next_index)
                });

            content_hashes.push((dep_node_index, hash));
        }

        content_hashes
    };

    let graph = SerializedDepGraph {
        nodes,
        edge_list_indices,
        edge_list_data,
        bootstrap_outputs,
        hashes: content_hashes,
    };

    // Encode the graph data.
    graph.encode(encoder)?;

    if tcx.sess.opts.debugging_opts.incremental_info {
        eprintln!("incremental: {} nodes in reduced dep-graph", graph.nodes.len());
        eprintln!("incremental: {} edges in serialized dep-graph", graph.edge_list_data.len());
        eprintln!("incremental: {} hashes in serialized dep-graph", graph.hashes.len());
    }

    if tcx.sess.opts.debugging_opts.incremental_dump_hash {
        for (dep_node, hash) in &preds.hashes {
            println!("ICH for {:?} is {}", dep_node, hash);
        }
    }

    Ok(())
}
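The `(start, end)` pairs written above form a standard flattened adjacency list. As a hedged, standalone sketch of how a consumer walks the serialized form (plain `u32` indices stand in for `DepNodeIndex`; the function name is hypothetical):

    // Flattened adjacency list: one (start, end) range per node into a
    // single shared edge array.
    fn successors(edge_list_indices: &[(u32, u32)],
                  edge_list_data: &[u32],
                  node: usize) -> &[u32] {
        let (start, end) = edge_list_indices[node];
        &edge_list_data[start as usize .. end as usize]
    }

    fn main() {
        // Node 0 -> {1, 2}, node 1 -> {}, node 2 -> {1}.
        let indices = [(0u32, 2u32), (2, 2), (2, 3)];
        let data = [1u32, 2, 1];
        assert_eq!(successors(&indices, &data, 0), &[1, 2]);
        assert!(successors(&indices, &data, 1).is_empty());
        assert_eq!(successors(&indices, &data, 2), &[1]);
    }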

pub fn encode_metadata_hashes(tcx: TyCtxt,
                              svh: Svh,
                              metadata_hashes: &EncodedMetadataHashes,
                              current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>,
                              encoder: &mut Encoder)
                              -> io::Result<()> {
fn encode_metadata_hashes(tcx: TyCtxt,
                          svh: Svh,
                          metadata_hashes: &EncodedMetadataHashes,
                          current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>,
                          encoder: &mut Encoder)
                          -> io::Result<()> {
    assert_eq!(metadata_hashes.hashes.len(),
        metadata_hashes.hashes.iter().map(|x| (x.def_index, ())).collect::<FxHashMap<_,_>>().len());

@ -338,8 +212,8 @@ pub fn encode_metadata_hashes(tcx: TyCtxt,
    Ok(())
}

pub fn encode_work_products(dep_graph: &DepGraph,
                            encoder: &mut Encoder) -> io::Result<()> {
fn encode_work_products(dep_graph: &DepGraph,
                        encoder: &mut Encoder) -> io::Result<()> {
    let work_products: Vec<_> = dep_graph
        .work_products()
        .iter()

@ -21,11 +21,9 @@ use std::fs as std_fs;
pub fn save_trans_partition(sess: &Session,
                            dep_graph: &DepGraph,
                            cgu_name: &str,
                            partition_hash: u64,
                            files: &[(OutputType, PathBuf)]) {
    debug!("save_trans_partition({:?},{},{:?})",
    debug!("save_trans_partition({:?},{:?})",
           cgu_name,
           partition_hash,
           files);
    if sess.opts.incremental.is_none() {
        return;
@ -57,7 +55,6 @@ pub fn save_trans_partition(sess: &Session,

    let work_product = WorkProduct {
        cgu_name: cgu_name.to_string(),
        input_hash: partition_hash,
        saved_files,
    };

@ -13,7 +13,7 @@ use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::mir::Mir;
use rustc::mir::transform::{MirPassIndex, MirSuite, MirSource,
                            MIR_CONST, MIR_VALIDATED, MIR_OPTIMIZED};
use rustc::ty::{self, TyCtxt};
use rustc::ty::TyCtxt;
use rustc::ty::maps::Providers;
use rustc::ty::steal::Steal;
use rustc::hir;
@ -21,7 +21,7 @@ use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::util::nodemap::DefIdSet;
use std::rc::Rc;
use syntax::ast;
use syntax_pos::{DUMMY_SP, Span};
use syntax_pos::Span;
use transform;

pub mod add_validation;
@ -114,11 +114,10 @@ fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx
    let source = MirSource::from_local_def_id(tcx, def_id);
    if let MirSource::Const(_) = source {
        // Ensure that we compute the `mir_const_qualif` for constants at
        // this point, before we steal the mir-const result. We don't
        // directly need the result or `mir_const_qualif`, so we can just force it.
        ty::queries::mir_const_qualif::force(tcx, DUMMY_SP, def_id);
        // this point, before we steal the mir-const result.
        let _ = tcx.mir_const_qualif(def_id);
    }
    ty::queries::unsafety_violations::force(tcx, DUMMY_SP, def_id);
    let _ = tcx.unsafety_violations(def_id);

    let mut mir = tcx.mir_const(def_id).steal();
    transform::run_suite(tcx, source, MIR_VALIDATED, &mut mir);
@ -128,8 +127,8 @@ fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx
fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Mir<'tcx> {
    // (Mir-)Borrowck uses `mir_validated`, so we have to force it to
    // execute before we can steal.
    ty::queries::mir_borrowck::force(tcx, DUMMY_SP, def_id);
    ty::queries::borrowck::force(tcx, DUMMY_SP, def_id);
    let _ = tcx.mir_borrowck(def_id);
    let _ = tcx.borrowck(def_id);

    let mut mir = tcx.mir_validated(def_id).steal();
    let source = MirSource::from_local_def_id(tcx, def_id);
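The `let _ = ...` lines above replace explicit `ty::queries::...::force` calls: under red/green, simply reading a query result executes it if needed. A hedged, abridged sketch of why the ordering matters (not standalone; `mir_const` yields a stolen-once `Steal` value that panics if read after being stolen):

    // Force any query that still needs the un-stolen MIR to run *first*...
    let _ = tcx.mir_const_qualif(def_id);
    // ...because after this point the mir-const result is gone.
    let mut mir = tcx.mir_const(def_id).steal();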

@ -27,47 +27,32 @@
//! the HIR doesn't change as a result of the annotations, which might
//! perturb the reuse results.

use rustc::dep_graph::{DepNode, DepConstructor};
use rustc::ty::TyCtxt;
use syntax::ast;

use {ModuleSource, ModuleTranslation};

use rustc::ich::{ATTR_PARTITION_REUSED, ATTR_PARTITION_TRANSLATED};

const MODULE: &'static str = "module";
const CFG: &'static str = "cfg";

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Disposition { Reused, Translated }
enum Disposition { Reused, Translated }

impl ModuleTranslation {
    pub fn disposition(&self) -> (String, Disposition) {
        let disposition = match self.source {
            ModuleSource::Preexisting(_) => Disposition::Reused,
            ModuleSource::Translated(_) => Disposition::Translated,
        };

        (self.name.clone(), disposition)
    }
}

pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                              modules: &[(String, Disposition)]) {
pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
    let _ignore = tcx.dep_graph.in_ignore();

    if tcx.sess.opts.incremental.is_none() {
        return;
    }

    let ams = AssertModuleSource { tcx: tcx, modules: modules };
    let ams = AssertModuleSource { tcx };
    for attr in &tcx.hir.krate().attrs {
        ams.check_attr(attr);
    }
}

struct AssertModuleSource<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    modules: &'a [(String, Disposition)],
    tcx: TyCtxt<'a, 'tcx, 'tcx>
}

impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> {
@ -86,32 +71,31 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> {
        }

        let mname = self.field(attr, MODULE);
        let mtrans = self.modules.iter().find(|&&(ref name, _)| name == mname.as_str());
        let mtrans = match mtrans {
            Some(m) => m,
            None => {
                debug!("module name `{}` not found amongst:", mname);
                for &(ref name, ref disposition) in self.modules {
                    debug!("module named `{}` with disposition {:?}",
                           name,
                           disposition);

        let dep_node = DepNode::new(self.tcx,
                                    DepConstructor::CompileCodegenUnit(mname.as_str()));

        if let Some(loaded_from_cache) = self.tcx.dep_graph.was_loaded_from_cache(&dep_node) {
            match (disposition, loaded_from_cache) {
                (Disposition::Reused, false) => {
                    self.tcx.sess.span_err(
                        attr.span,
                        &format!("expected module named `{}` to be Reused but is Translated",
                                 mname));
                }
                (Disposition::Translated, true) => {
                    self.tcx.sess.span_err(
                        attr.span,
                        &format!("expected module named `{}` to be Translated but is Reused",
                                 mname));
                }
                (Disposition::Reused, true) |
                (Disposition::Translated, false) => {
                    // These are what we would expect.
                }

                self.tcx.sess.span_err(
                    attr.span,
                    &format!("no module named `{}`", mname));
                return;
            }
        };

        let mtrans_disposition = mtrans.1;
        if disposition != mtrans_disposition {
            self.tcx.sess.span_err(
                attr.span,
                &format!("expected module named `{}` to be {:?} but is {:?}",
                         mname,
                         disposition,
                         mtrans_disposition));
        } else {
            self.tcx.sess.span_err(attr.span, &format!("no module named `{}`", mname));
        }
    }
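For context, the checker above consumes crate-level annotations of the following shape (the same attributes appear in the incremental test files further down in this diff); under red/green, the expected disposition is validated against `was_loaded_from_cache` for the module's `CompileCodegenUnit` dep-node instead of against a recorded disposition list:

    #![rustc_partition_reused(module="change_symbol_export_status", cfg="rpass2")]
    #![rustc_partition_translated(module="change_symbol_export_status-mod1", cfg="rpass2")]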

@ -77,11 +77,7 @@ pub fn provide_local(providers: &mut Providers) {
    };

    providers.is_exported_symbol = |tcx, id| {
        // FIXME(#42293) needs red/green to not break a bunch of incremental
        // tests
        tcx.dep_graph.with_ignore(|| {
            tcx.exported_symbol_ids(id.krate).contains(&id)
        })
        tcx.exported_symbol_ids(id.krate).contains(&id)
    };

    providers.exported_symbols = |tcx, cnum| {

@ -884,7 +884,6 @@ fn copy_module_artifacts_into_incr_comp_cache(sess: &Session,
            save_trans_partition(sess,
                                 dep_graph,
                                 &module.name,
                                 module.symbol_name_hash,
                                 &files);
        }
    }
@ -1134,7 +1133,6 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem)
        name: module_name,
        kind: ModuleKind::Regular,
        pre_existing: true,
        symbol_name_hash: mtrans.symbol_name_hash,
        emit_bc: config.emit_bc,
        emit_obj: config.emit_obj,
    }))

@ -28,7 +28,7 @@ use super::ModuleSource;
use super::ModuleTranslation;
use super::ModuleKind;

use assert_module_sources::{self, Disposition};
use assert_module_sources;
use back::link;
use back::symbol_export;
use back::write::{self, OngoingCrateTranslation, create_target_machine};
@ -41,7 +41,7 @@ use rustc::middle::trans::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::maps::Providers;
use rustc::dep_graph::{DepNode, DepKind};
use rustc::dep_graph::{DepNode, DepKind, DepConstructor};
use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry};
use rustc::session::config::{self, NoDebugInfo};
@ -78,7 +78,6 @@ use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet};
use CrateInfo;

use std::any::Any;
use std::cell::RefCell;
use std::ffi::{CStr, CString};
use std::str;
use std::sync::Arc;
@ -904,7 +903,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
    let metadata_module = ModuleTranslation {
        name: link::METADATA_MODULE_NAME.to_string(),
        llmod_id: llmod_id.to_string(),
        symbol_name_hash: 0, // we always rebuild metadata, at least for now
        source: ModuleSource::Translated(ModuleLlvm {
            llcx: metadata_llcx,
            llmod: metadata_llmod,
@ -947,6 +945,17 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
        shared_ccx.tcx().collect_and_partition_translation_items(LOCAL_CRATE).1;
    let codegen_units = (*codegen_units).clone();

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in &codegen_units {
            tcx.codegen_unit(cgu.name().clone());
        }
    }

    let ongoing_translation = write::start_async_translation(
        tcx,
        time_graph.clone(),
@ -972,7 +981,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
        Some(ModuleTranslation {
            name: link::ALLOCATOR_MODULE_NAME.to_string(),
            llmod_id: llmod_id.to_string(),
            symbol_name_hash: 0, // we always rebuild allocator shims
            source: ModuleSource::Translated(modules),
            kind: ModuleKind::Allocator,
        })
@ -1004,6 +1012,50 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
        ongoing_translation.wait_for_signal_to_translate_item();
        ongoing_translation.check_for_errors(tcx.sess);

        // First, if incremental compilation is enabled, we try to re-use the
        // codegen unit from the cache.
        if tcx.dep_graph.is_fully_enabled() {
            let cgu_id = cgu.work_product_id();

            // Check whether there is a previous work-product we can
            // re-use. Not only must the file exist, and the inputs not
            // be dirty, but the hash of the symbols we will generate must
            // be the same.
            if let Some(buf) = tcx.dep_graph.previous_work_product(&cgu_id) {
                let dep_node = &DepNode::new(tcx,
                    DepConstructor::CompileCodegenUnit(cgu.name().clone()));

                // We try to mark the DepNode::CompileCodegenUnit green. If we
                // succeed it means that none of the dependencies has changed
                // and we can safely re-use.
                if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, dep_node) {
                    // Append ".rs" to LLVM module identifier.
                    //
                    // LLVM code generator emits a ".file filename" directive
                    // for ELF backends. Value of the "filename" is set as the
                    // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
                    // crashes if the module identifier is same as other symbols
                    // such as a function name in the module.
                    // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
                    let llmod_id = format!("{}.rs", cgu.name());

                    let module = ModuleTranslation {
                        name: cgu.name().to_string(),
                        source: ModuleSource::Preexisting(buf),
                        kind: ModuleKind::Regular,
                        llmod_id,
                    };
                    tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true);
                    write::submit_translated_module_to_llvm(tcx, module, 0);
                    // Continue to next cgu, this one is done.
                    continue
                }
            } else {
                // This can happen if files were deleted from the cache
                // directory for some reason. We just re-compile then.
            }
        }
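In short, the reuse path above makes two checks before skipping translation of a CGU. A hedged, abridged sketch using the same `DepGraph` methods (not compilable on its own):

    // 1. A work-product (the cached object file) must still exist, and
    // 2. the CGU's dep-node must be provable green, i.e. no dependency changed.
    if let Some(buf) = tcx.dep_graph.previous_work_product(&cgu_id) {
        if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, dep_node) {
            tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true);
            // re-use `buf` as a ModuleSource::Preexisting module
        } else {
            // some input turned red: re-translate this CGU
        }
    } else {
        // cache files are gone: re-translate this CGU
    }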

        let _timing_guard = time_graph.as_ref().map(|time_graph| {
            time_graph.start(write::TRANS_WORKER_TIMELINE,
                             write::TRANS_WORK_PACKAGE_KIND,
@ -1024,9 +1076,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                             total_trans_time);

    if tcx.sess.opts.incremental.is_some() {
        DISPOSITIONS.with(|d| {
            assert_module_sources::assert_module_sources(tcx, &d.borrow());
        });
        assert_module_sources::assert_module_sources(tcx);
    }

    symbol_names_test::report_symbol_names(tcx);
@ -1061,10 +1111,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
    ongoing_translation
}

// FIXME(#42293) hopefully once red/green is enabled we're testing everything
// via a method that doesn't require this!
thread_local!(static DISPOSITIONS: RefCell<Vec<(String, Disposition)>> = Default::default());

fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                       metadata_incr_hashes: EncodedMetadataHashes,
                                       link_meta: LinkMeta) {
@ -1288,38 +1334,19 @@ impl CrateInfo {
}

fn is_translated_function(tcx: TyCtxt, id: DefId) -> bool {
    // FIXME(#42293) needs red/green tracking to avoid failing a bunch of
    // existing tests
    tcx.dep_graph.with_ignore(|| {
        let (all_trans_items, _) =
            tcx.collect_and_partition_translation_items(LOCAL_CRATE);
        all_trans_items.contains(&id)
    })
    let (all_trans_items, _) =
        tcx.collect_and_partition_translation_items(LOCAL_CRATE);
    all_trans_items.contains(&id)
}

fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                  cgu: InternedString) -> Stats {
    // FIXME(#42293) needs red/green tracking to avoid failing a bunch of
    // existing tests
    let cgu = tcx.dep_graph.with_ignore(|| {
        tcx.codegen_unit(cgu)
    });
    let cgu = tcx.codegen_unit(cgu);

    let start_time = Instant::now();
    let dep_node = cgu.work_product_dep_node();
    let ((stats, module), _) =
        tcx.dep_graph.with_task(dep_node,
                                tcx,
                                cgu,
                                module_translation);
    let (stats, module) = module_translation(tcx, cgu);
    let time_to_translate = start_time.elapsed();

    if tcx.sess.opts.incremental.is_some() {
        DISPOSITIONS.with(|d| {
            d.borrow_mut().push(module.disposition());
        });
    }

    // We assume that the cost to run LLVM on a CGU is proportional to
    // the time we needed for translating it.
    let cost = time_to_translate.as_secs() * 1_000_000_000 +
@ -1336,8 +1363,6 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                       -> (Stats, ModuleTranslation)
{
    let cgu_name = cgu.name().to_string();
    let cgu_id = cgu.work_product_id();
    let symbol_name_hash = cgu.compute_symbol_name_hash(tcx);

    // Append ".rs" to LLVM module identifier.
    //
@ -1349,40 +1374,6 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
    // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
    let llmod_id = format!("{}.rs", cgu.name());

    // Check whether there is a previous work-product we can
    // re-use. Not only must the file exist, and the inputs not
    // be dirty, but the hash of the symbols we will generate must
    // be the same.
    let previous_work_product =
        tcx.dep_graph.previous_work_product(&cgu_id).and_then(|work_product| {
            if work_product.input_hash == symbol_name_hash {
                debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
                Some(work_product)
            } else {
                if tcx.sess.opts.debugging_opts.incremental_info {
                    eprintln!("incremental: CGU `{}` invalidated because of \
                               changed partitioning hash.",
                              cgu.name());
                }
                debug!("trans_reuse_previous_work_products: \
                        not reusing {:?} because hash changed to {:?}",
                       work_product, symbol_name_hash);
                None
            }
        });

    if let Some(buf) = previous_work_product {
        // Don't need to translate this module.
        let module = ModuleTranslation {
            llmod_id: llmod_id,
            name: cgu_name,
            symbol_name_hash,
            source: ModuleSource::Preexisting(buf.clone()),
            kind: ModuleKind::Regular,
        };
        return (Stats::default(), module);
    }

    // Instantiate translation items without filling out definitions yet...
    let scx = SharedCrateContext::new(tcx);
    let lcx = LocalCrateContext::new(&scx, cgu, &llmod_id);
@ -1448,7 +1439,6 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,

    ModuleTranslation {
        name: cgu_name,
        symbol_name_hash,
        source: ModuleSource::Translated(llvm_module),
        kind: ModuleKind::Regular,
        llmod_id,

@ -156,17 +156,14 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
        }
    }

    // FIXME(#42293) we should actually track this, but fails too many tests
    // today.
    tcx.dep_graph.with_ignore(|| {
        if ccx.use_dll_storage_attrs() &&
            tcx.is_dllimport_foreign_item(instance_def_id)
        {
            unsafe {
                llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
            }
    if ccx.use_dll_storage_attrs() &&
        tcx.is_dllimport_foreign_item(instance_def_id)
    {
        unsafe {
            llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
        }
    });
    }

    llfn
};

@ -296,26 +296,22 @@ pub fn collect_crate_translation_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                                 mode: TransItemCollectionMode)
                                                 -> (FxHashSet<TransItem<'tcx>>,
                                                     InliningMap<'tcx>) {
    // We are not tracking dependencies of this pass as it has to be re-executed
    // every time no matter what.
    tcx.dep_graph.with_ignore(|| {
        let roots = collect_roots(tcx, mode);
    let roots = collect_roots(tcx, mode);

        debug!("Building translation item graph, beginning at roots");
        let mut visited = FxHashSet();
        let mut recursion_depths = DefIdMap();
        let mut inlining_map = InliningMap::new();
    debug!("Building translation item graph, beginning at roots");
    let mut visited = FxHashSet();
    let mut recursion_depths = DefIdMap();
    let mut inlining_map = InliningMap::new();

        for root in roots {
            collect_items_rec(tcx,
                              root,
                              &mut visited,
                              &mut recursion_depths,
                              &mut inlining_map);
        }
    for root in roots {
        collect_items_rec(tcx,
                          root,
                          &mut visited,
                          &mut recursion_depths,
                          &mut inlining_map);
    }

        (visited, inlining_map)
    })
    (visited, inlining_map)
}

// Find all non-generic items by walking the HIR. These items serve as roots to

@ -231,17 +231,13 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
        g
    };

    // FIXME(#42293) we should actually track this, but fails too many tests
    // today.
    ccx.tcx().dep_graph.with_ignore(|| {
        if ccx.use_dll_storage_attrs() && ccx.tcx().is_dllimport_foreign_item(def_id) {
            // For foreign (native) libs we know the exact storage type to use.
            unsafe {
                llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
            }
    if ccx.use_dll_storage_attrs() && ccx.tcx().is_dllimport_foreign_item(def_id) {
        // For foreign (native) libs we know the exact storage type to use.
        unsafe {
            llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
        }
    });
    }

    ccx.instances().borrow_mut().insert(instance, g);
    ccx.statics().borrow_mut().insert(g, def_id);
    g

@ -204,7 +204,6 @@ pub struct ModuleTranslation {
    /// as the crate name and disambiguator.
    name: String,
    llmod_id: String,
    symbol_name_hash: u64,
    pub source: ModuleSource,
    pub kind: ModuleKind,
}
@ -238,7 +237,6 @@ impl ModuleTranslation {
            llmod_id: self.llmod_id,
            name: self.name.clone(),
            kind: self.kind,
            symbol_name_hash: self.symbol_name_hash,
            pre_existing,
            emit_obj,
            emit_bc,
@ -253,7 +251,6 @@ pub struct CompiledModule {
    pub llmod_id: String,
    pub object: PathBuf,
    pub kind: ModuleKind,
    pub symbol_name_hash: u64,
    pub pre_existing: bool,
    pub emit_obj: bool,
    pub emit_bc: bool,

@ -108,14 +108,11 @@ use rustc::dep_graph::{DepNode, WorkProductId};
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::middle::trans::{Linkage, Visibility};
use rustc::ich::Fingerprint;
use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER;
use rustc::ty::{self, TyCtxt, InstanceDef};
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::StableHasher;
use std::collections::hash_map::Entry;
use std::hash::Hash;
use syntax::ast::NodeId;
use syntax::symbol::{Symbol, InternedString};
use trans_item::{TransItem, TransItemExt, InstantiationMode};
@ -155,19 +152,6 @@ pub trait CodegenUnitExt<'tcx> {
        self.work_product_id().to_dep_node()
    }

    fn compute_symbol_name_hash<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> u64 {
        let mut state: StableHasher<Fingerprint> = StableHasher::new();
        let all_items = self.items_in_deterministic_order(tcx);
        for (item, (linkage, visibility)) in all_items {
            let symbol_name = item.symbol_name(tcx);
            symbol_name.len().hash(&mut state);
            symbol_name.hash(&mut state);
            linkage.hash(&mut state);
            visibility.hash(&mut state);
        }
        state.finish().to_smaller_hash()
    }

    fn items_in_deterministic_order<'a>(&self,
                                        tcx: TyCtxt<'a, 'tcx, 'tcx>)
                                        -> Vec<(TransItem<'tcx>,

@ -14,7 +14,7 @@
//! We walk the set of items and, for each member, generate new constraints.

use hir::def_id::DefId;
use rustc::dep_graph::{DepGraphSafe, DepKind};
use rustc::dep_graph::{DepGraphSafe, DepKind, DepNodeColor};
use rustc::ich::StableHashingContext;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TyCtxt};
@ -162,10 +162,22 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
        // See README.md for a detailed discussion
        // on dep-graph management.
        let dep_node = def_id.to_dep_node(tcx, DepKind::ItemVarianceConstraints);
        tcx.dep_graph.with_task(dep_node,
                                self,
                                def_id,
                                visit_item_task);

        if let Some(DepNodeColor::Green(_)) = tcx.dep_graph.node_color(&dep_node) {
            // If the corresponding node has already been marked as green, the
            // appropriate portion of the DepGraph has already been loaded from
            // the previous graph, so we don't do any dep-tracking. Since we
            // don't cache any values though, we still have to re-run the
            // computation.
            tcx.dep_graph.with_ignore(|| {
                self.build_constraints_for_item(def_id);
            });
        } else {
            tcx.dep_graph.with_task(dep_node,
                                    self,
                                    def_id,
                                    visit_item_task);
        }
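This green-or-task dispatch is the general pattern for passes that produce no cacheable value: a green node means the previous graph already supplied its incoming edges, so the pass only recomputes; any other color must re-execute as a task so its reads are recorded. Abridged from the code above (not standalone):

    if let Some(DepNodeColor::Green(_)) = tcx.dep_graph.node_color(&dep_node) {
        // Edges were restored from the previous graph: recompute untracked.
        tcx.dep_graph.with_ignore(|| self.build_constraints_for_item(def_id));
    } else {
        // Unknown/red: execute as a task so every read becomes an edge.
        tcx.dep_graph.with_task(dep_node, self, def_id, visit_item_task);
    }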

fn visit_item_task<'a, 'tcx>(ccx: &mut ConstraintContext<'a, 'tcx>,
                             def_id: DefId)

@ -15,7 +15,6 @@
// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph
// aux-build:point.rs
// ignore-test FIXME(#42293) this regressed in #44142 but should get fixed with red/green

#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]

@ -20,6 +20,7 @@
//[rpass1] rustc-env:RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER="l33t haxx0r rustc 2.1 LTS"

// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph

#![feature(rustc_attrs)]
#![rustc_partition_translated(module="cache_file_headers", cfg="rpass2")]

@ -12,8 +12,6 @@
// revisions:rpass1 rpass2
// compile-flags:-Z query-dep-graph

// ignore-test -- ignored until red/green restores cross-crate tracking fidelity

#![feature(rustc_attrs)]

extern crate a;

@ -15,8 +15,6 @@
// compile-flags: -Z query-dep-graph
// aux-build:point.rs

// ignore-test -- ignored until red/green restores cross-crate tracking fidelity

#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]
#![allow(dead_code)]

@ -15,8 +15,6 @@
// compile-flags: -Z query-dep-graph
// aux-build:point.rs

// ignore-test -- ignored until red/green restores cross-crate tracking fidelity

#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]
#![allow(dead_code)]

@ -9,13 +9,13 @@
// except according to those terms.

// revisions: rpass1 rpass2
// compile-flags: -Zquery-dep-graph

#![feature(rustc_attrs)]
#![allow(private_no_mangle_fns)]

#![rustc_partition_reused(module="change_symbol_export_status", cfg="rpass2")]
#![rustc_partition_translated(module="change_symbol_export_status-mod1", cfg="rpass2")]

#![rustc_partition_reused(module="change_symbol_export_status-mod2", cfg="rpass2")]

// This test case makes sure that a change in symbol visibility is detected by
// our dependency tracking. We do this by changing a module's visibility to
@ -37,6 +37,11 @@ mod mod1 {
    pub fn foo() {}
}

pub mod mod2 {
    #[no_mangle]
    pub fn bar() {}
}

fn main() {
    mod1::foo();
}

@ -12,6 +12,7 @@
// the cache while changing an untracked one doesn't.

// revisions:rpass1 rpass2 rpass3
// compile-flags: -Z query-dep-graph

#![feature(rustc_attrs)]

@ -12,6 +12,7 @@
// equal example.

// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph

#![feature(rustc_attrs)]
#![rustc_partition_reused(module="issue_35593", cfg="rpass2")]

@ -12,6 +12,8 @@
// dep-node.

// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph

#![feature(rustc_attrs)]

@ -11,8 +11,6 @@
// revisions:rpass1 rpass2 rpass3
// compile-flags: -Z query-dep-graph -g -Zincremental-cc
// aux-build:extern_crate.rs
// ignore-test FIXME(#42293) this regressed in #44142 but should get fixed with red/green

// This test case makes sure that we detect if paths emitted into debuginfo
// are changed, even when the change happens in an external crate.

@ -17,8 +17,7 @@
#![feature(rustc_attrs)]
#![crate_type = "bin"]

// FIXME(#42293) this regressed in #44142 but should get fixed with red/green
// #![rustc_partition_reused(module="main", cfg="rpass2")]
#![rustc_partition_reused(module="main", cfg="rpass2")]

extern crate a;

@ -18,8 +18,6 @@
// no-prefer-dynamic
// compile-flags: -Z query-dep-graph

// ignore-test -- ignored until red/green restores cross-crate tracking fidelity

#![feature(rustc_attrs)]

extern crate a;

@ -13,6 +13,7 @@
// `y` module entirely (but not the `x` module).

// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph

#![feature(rustc_attrs)]

@ -12,8 +12,6 @@
// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph

// ignore-test -- ignored until red/green restores cross-crate tracking fidelity

#![feature(rustc_attrs)]

extern crate a;

@ -12,8 +12,6 @@
// revisions:rpass1 rpass2 rpass3
// compile-flags: -Z query-dep-graph

// ignore-test -- ignored until red/green restores cross-crate tracking fidelity

#![feature(rustc_attrs)]

extern crate a;