rustc_query_system: explicitly register reused dep nodes
Register nodes that we've reused from the previous session explicitly with `OnDiskCache`. Previously, we relied on this happening as a side effect of accessing the nodes in the `PreviousDepGraph`. For the sake of performance and to avoid unintended side effects, register them explicitly.
Commit 7795801902 in rust-lang/rust (parent: eb4fc71dc9)
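As a rough illustration of the new shape (a minimal sketch with toy stand-in types, not the real rustc API): lookup in the previous graph is now a pure accessor, and a separate, explicit pass walks the node colors and registers every reused node with the cache.

use std::collections::HashSet;

// Toy stand-in for the real `DepNode<K>`.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DepNode(u64);

// Toy stand-in for `OnDiskCache`; `reused` plays the role of the
// registration table the real cache maintains.
#[derive(Default)]
struct OnDiskCache {
    reused: HashSet<DepNode>,
}

struct PreviousDepGraph {
    nodes: Vec<DepNode>,
}

impl PreviousDepGraph {
    // After the change: a pure accessor with no hidden side effects.
    fn index_to_node(&self, index: usize) -> DepNode {
        self.nodes[index]
    }
}

struct DepGraph {
    previous: PreviousDepGraph,
    // `Some(index)` models a red/green (reused) node, `None` an untouched one.
    colors: Vec<Option<usize>>,
}

impl DepGraph {
    // After the change: one explicit pass over all reused nodes, mirroring
    // the role of the new `DepGraph::register_reused_dep_nodes`.
    fn register_reused_dep_nodes(&self, cache: &mut OnDiskCache) {
        for color in &self.colors {
            if let Some(index) = color {
                cache.reused.insert(self.previous.index_to_node(*index));
            }
        }
    }
}

fn main() {
    let graph = DepGraph {
        previous: PreviousDepGraph { nodes: vec![DepNode(1), DepNode(2)] },
        colors: vec![Some(0), None],
    };
    let mut cache = OnDiskCache::default();
    graph.register_reused_dep_nodes(&mut cache);
    assert!(cache.reused.contains(&DepNode(1)));
    assert!(!cache.reused.contains(&DepNode(2)));
}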
@@ -5,7 +5,7 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
-use rustc_hir::def_id::{DefPathHash, LocalDefId};
+use rustc_hir::def_id::LocalDefId;
 
 mod dep_node;
 
@@ -91,9 +91,9 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     type DepKind = DepKind;
     type StableHashingContext = StableHashingContext<'tcx>;
 
-    fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
+    fn register_reused_dep_node(&self, dep_node: &DepNode) {
         if let Some(cache) = self.queries.on_disk_cache.as_ref() {
-            cache.register_reused_dep_path_hash(*self, hash)
+            cache.register_reused_dep_node(*self, dep_node)
         }
     }
 
@@ -1,4 +1,4 @@
-use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNode, DepNodeIndex, SerializedDepNodeIndex};
 use crate::mir::interpret::{AllocDecodingSession, AllocDecodingState};
 use crate::mir::{self, interpret};
 use crate::ty::codec::{OpaqueEncoder, RefDecodable, TyDecoder, TyEncoder};
@@ -264,6 +264,13 @@ impl<'sess> OnDiskCache<'sess> {
             (file_to_file_index, file_index_to_stable_id)
         };
 
+        // Register any dep nodes that we reused from the previous session,
+        // but didn't `DepNode::construct` in this session. This ensures
+        // that their `DefPathHash` to `RawDefId` mappings are registered
+        // in `latest_foreign_def_path_hashes` if necessary, since that
+        // normally happens in `DepNode::construct`.
+        tcx.dep_graph.register_reused_dep_nodes(tcx);
+
         // Load everything into memory so we can write it out to the on-disk
         // cache. The vast majority of cacheable query results should already
         // be in memory, so this should be a cheap operation.
@@ -467,7 +474,7 @@ impl<'sess> OnDiskCache<'sess> {
             .insert(hash, RawDefId { krate: def_id.krate.as_u32(), index: def_id.index.as_u32() });
     }
 
-    /// If the given `hash` still exists in the current compilation,
+    /// If the given `dep_node`'s hash still exists in the current compilation,
     /// calls `store_foreign_def_id` with its current `DefId`.
     ///
     /// Normally, `store_foreign_def_id_hash` can be called directly by
@@ -476,7 +483,15 @@ impl<'sess> OnDiskCache<'sess> {
     /// session, we only have the `DefPathHash` available. This method is used
    /// to ensure that any `DepNode` that we re-use has a `DefPathHash` -> `RawDefId` written
     /// out for usage in the next compilation session.
-    pub fn register_reused_dep_path_hash(&self, tcx: TyCtxt<'tcx>, hash: DefPathHash) {
+    pub fn register_reused_dep_node(&self, tcx: TyCtxt<'tcx>, dep_node: &DepNode) {
+        // For reused dep nodes, we only need to store the mapping if the node
+        // is one whose query key we can reconstruct from the hash. We use the
+        // mapping to aid that reconstruction in the next session. While we also
+        // use it to decode `DefId`s we encoded in the cache as `DefPathHashes`,
+        // they're already registered during `DefId` encoding.
+        if dep_node.kind.can_reconstruct_query_key() {
+            let hash = DefPathHash(dep_node.hash.into());
+
         // We can't simply copy the `RawDefId` from `foreign_def_path_hashes` to
         // `latest_foreign_def_path_hashes`, since the `RawDefId` might have
         // changed in the current compilation session (e.g. we've added/removed crates,
@@ -485,6 +500,7 @@ impl<'sess> OnDiskCache<'sess> {
             self.store_foreign_def_id_hash(def_id, hash);
         }
+        }
     }
 
     /// Returns the cached query result if there is something in the cache for
     /// the given `SerializedDepNodeIndex`; otherwise returns `None`.
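The hunks above revolve around the `DefPathHash` -> `RawDefId` table that gets written out for the next session. A minimal sketch of that bookkeeping, using simplified stand-in types (the real `DefPathHash` wraps a fingerprint and the real table lives inside `OnDiskCache`): the hash is stable across sessions, the raw ids are not, so the cache re-records the current raw ids for every hash it may need to decode next time.

use std::collections::HashMap;

// Stable across compilation sessions: derived from the def path,
// not from arbitrary numbering.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DefPathHash(u64);

// NOT stable across sessions: crate numbers and def indices can shift
// when crates are added or removed.
#[derive(Clone, Copy, Debug)]
struct RawDefId {
    krate: u32,
    index: u32,
}

#[derive(Default)]
struct Cache {
    // Plays the role of `latest_foreign_def_path_hashes` in the diff:
    // the table serialized for use by the *next* session.
    latest_foreign_def_path_hashes: HashMap<DefPathHash, RawDefId>,
}

impl Cache {
    // Records the hash -> current-raw-id mapping, analogous to
    // `store_foreign_def_id_hash` in the diff above.
    fn store_foreign_def_id_hash(&mut self, hash: DefPathHash, raw: RawDefId) {
        self.latest_foreign_def_path_hashes.insert(hash, raw);
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.store_foreign_def_id_hash(DefPathHash(0xABCD), RawDefId { krate: 3, index: 17 });
    assert_eq!(cache.latest_foreign_def_path_hashes.len(), 1);
}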
@@ -60,9 +60,8 @@ pub struct DepNode<K> {
 // * When a `DepNode::construct` is called, `arg.to_fingerprint()`
 //   is responsible for calling `OnDiskCache::store_foreign_def_id_hash`
 //   if needed
-// * When a `DepNode` is loaded from the `PreviousDepGraph`,
-//   then `PreviousDepGraph::index_to_node` is responsible for calling
-//   `tcx.register_reused_dep_path_hash`
+// * When we serialize the on-disk cache, `OnDiskCache::serialize` is
+//   responsible for calling `DepGraph::register_reused_dep_nodes`.
 //
 // FIXME: Enforce this by preventing manual construction of `DepNode`
 // (e.g. add a `_priv: ()` field)
@@ -554,7 +554,7 @@ impl<K: DepKind> DepGraph<K> {
         // We never try to mark eval_always nodes as green
         debug_assert!(!dep_node.kind.is_eval_always());
 
-        data.previous.debug_assert_eq(prev_dep_node_index, *dep_node);
+        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
 
         let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
 
@@ -572,7 +572,7 @@ impl<K: DepKind> DepGraph<K> {
                     "try_mark_previous_green({:?}) --- found dependency {:?} to \
                      be immediately green",
                     dep_node,
-                    data.previous.debug_dep_node(dep_dep_node_index),
+                    data.previous.index_to_node(dep_dep_node_index)
                 );
                 current_deps.push(node_index);
             }
@@ -585,12 +585,12 @@ impl<K: DepKind> DepGraph<K> {
                     "try_mark_previous_green({:?}) - END - dependency {:?} was \
                      immediately red",
                     dep_node,
-                    data.previous.debug_dep_node(dep_dep_node_index)
+                    data.previous.index_to_node(dep_dep_node_index)
                 );
                 return None;
             }
             None => {
-                let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index, tcx);
+                let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
 
                 // We don't know the state of this dependency. If it isn't
                 // an eval_always node, let's try to mark it green recursively.
@@ -801,7 +801,7 @@ impl<K: DepKind> DepGraph<K> {
         for prev_index in data.colors.values.indices() {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
-                    let dep_node = data.previous.index_to_node(prev_index, tcx);
+                    let dep_node = data.previous.index_to_node(prev_index);
                     tcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
@@ -813,6 +813,20 @@ impl<K: DepKind> DepGraph<K> {
         }
     }
 
+    // Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
+    pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+        let data = self.data.as_ref().unwrap();
+        for prev_index in data.colors.values.indices() {
+            match data.colors.get(prev_index) {
+                Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
+                    let dep_node = data.previous.index_to_node(prev_index);
+                    tcx.register_reused_dep_node(&dep_node);
+                }
+                None => {}
+            }
+        }
+    }
+
     fn next_virtual_depnode_index(&self) -> DepNodeIndex {
         let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
         DepNodeIndex::from_u32(index)
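One behavioral detail worth noting in the new pass: both red (recomputed) and green (successfully reused) nodes are registered, while uncolored nodes, which were never touched this session, are skipped. A toy illustration of that predicate, assuming a simplified `DepNodeColor` (the real `Green` variant carries a `DepNodeIndex`):

// Simplified stand-in for the real `DepNodeColor`.
#[derive(Clone, Copy)]
enum DepNodeColor {
    Red,
    Green(u32), // payload stands in for the real `DepNodeIndex`
}

// A node counts as "reused" if it was marked either red or green.
fn is_reused(color: Option<DepNodeColor>) -> bool {
    matches!(color, Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)))
}

fn main() {
    assert!(is_reused(Some(DepNodeColor::Red)));
    assert!(is_reused(Some(DepNodeColor::Green(7))));
    assert!(!is_reused(None)); // never touched this session
}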
@@ -15,7 +15,6 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
-use rustc_span::def_id::DefPathHash;
 
 use std::fmt;
 use std::hash::Hash;
@@ -33,7 +32,7 @@ pub trait DepContext: Copy {
     /// Try to force a dep node to execute and see if it's green.
     fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
 
-    fn register_reused_dep_path_hash(&self, hash: DefPathHash);
+    fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
 
     /// Return whether the current session is tainted by errors.
     fn has_errors_or_delayed_span_bugs(&self) -> bool;
@@ -1,9 +1,7 @@
 use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 use super::{DepKind, DepNode};
-use crate::dep_graph::DepContext;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_span::def_id::DefPathHash;
 
 #[derive(Debug, Encodable, Decodable)]
 pub struct PreviousDepGraph<K: DepKind> {
@@ -33,44 +31,7 @@ impl<K: DepKind> PreviousDepGraph<K> {
     }
 
     #[inline]
-    pub fn index_to_node<CTX: DepContext<DepKind = K>>(
-        &self,
-        dep_node_index: SerializedDepNodeIndex,
-        tcx: CTX,
-    ) -> DepNode<K> {
-        let dep_node = self.data.nodes[dep_node_index];
-        // We have just loaded a deserialized `DepNode` from the previous
-        // compilation session into the current one. If this was a foreign `DefId`,
-        // then we stored additional information in the incr comp cache when we
-        // initially created its fingerprint (see `DepNodeParams::to_fingerprint`).
-        // We won't be calling `to_fingerprint` again for this `DepNode` (we no longer
-        // have the original value), so we need to copy over this additional information
-        // from the old incremental cache into the new cache that we serialize
-        // at the end of this compilation session.
-        if dep_node.kind.can_reconstruct_query_key() {
-            tcx.register_reused_dep_path_hash(DefPathHash(dep_node.hash.into()));
-        }
-        dep_node
-    }
-
-    /// When debug assertions are enabled, asserts that the dep node at `dep_node_index` is equal to `dep_node`.
-    /// This method should be preferred over manually calling `index_to_node`.
-    /// Calls to `index_to_node` may affect global state, so gating a call
-    /// to `index_to_node` on debug assertions could cause behavior changes when debug assertions
-    /// are enabled.
-    #[inline]
-    pub fn debug_assert_eq(&self, dep_node_index: SerializedDepNodeIndex, dep_node: DepNode<K>) {
-        debug_assert_eq!(self.data.nodes[dep_node_index], dep_node);
-    }
-
-    /// Obtains a debug-printable version of the `DepNode`.
-    /// See `debug_assert_eq` for why this should be preferred over manually
-    /// calling `index_to_node`.
-    pub fn debug_dep_node(&self, dep_node_index: SerializedDepNodeIndex) -> impl std::fmt::Debug {
-        // We're returning the `DepNode` without calling `register_reused_dep_path_hash`,
-        // but the `impl Debug` return type means that it can only be used for debug
-        // printing. So, there's no risk of calls trying to create new dep nodes that
-        // have this node as a dependency.
+    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
         self.data.nodes[dep_node_index]
     }
 