Auto merge of #89978 - cjgillot:qarray, r=Mark-Simulacrum

Merge the two depkind vtables

Knowledge of `DepKind`s is managed using two arrays: one containing per-kind flags (is_anon, eval_always, fingerprint_style) and one containing function pointers (the forcing and loading code).

This PR merges the two arrays so as to reduce unneeded indirect calls and (hopefully) increase code locality.
r? `@ghost`
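
For illustration only — a minimal, self-contained sketch of the resulting shape (simplified stand-in types, not the actual rustc definitions; the real table is built at runtime and arena-allocated rather than `static`):

```rust
// Sketch: one merged `DepKindStruct`-style entry per kind, indexed by the
// enum discriminant, replacing two parallel arrays.
#[derive(Clone, Copy)]
enum DepKind {
    Null,
    TraitSelect,
}

struct DepKindStruct {
    is_anon: bool,
    is_eval_always: bool,
    // `None` replaces the old "stub function pointer" for kinds that
    // cannot be forced, so the indirect call is skipped entirely.
    force_from_dep_node: Option<fn(u64) -> bool>,
}

// A single table lookup now yields both the flags and the callbacks.
static DEP_KINDS: &[DepKindStruct] = &[
    DepKindStruct { is_anon: false, is_eval_always: false, force_from_dep_node: None },
    DepKindStruct { is_anon: true, is_eval_always: false, force_from_dep_node: None },
];

fn query_kind(kind: DepKind) -> &'static DepKindStruct {
    &DEP_KINDS[kind as usize]
}

fn main() {
    let data = query_kind(DepKind::TraitSelect);
    assert!(data.is_anon && !data.is_eval_always);
    // Only call through the pointer when a callback is actually registered.
    if let Some(force) = data.force_from_dep_node {
        let _ = force(0);
    }
}
```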
This commit is contained in:
bors 2021-10-20 17:57:35 +00:00
commit efd0483949
22 changed files with 421 additions and 539 deletions

@@ -4319,7 +4319,6 @@ dependencies = [
  "rustc_serialize",
  "rustc_session",
  "rustc_span",
- "tracing",
 ]
 
 [[package]]

@@ -224,7 +224,7 @@ pub(crate) fn run_aot(
         tcx,
         (backend_config.clone(), cgu.name()),
         module_codegen,
-        rustc_middle::dep_graph::hash_result,
+        Some(rustc_middle::dep_graph::hash_result),
     );
 
     if let Some((id, product)) = work_product {

@@ -59,7 +59,13 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
     let start_time = Instant::now();
 
     let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
-    let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
+    let (module, _) = tcx.dep_graph.with_task(
+        dep_node,
+        tcx,
+        cgu_name,
+        module_codegen,
+        Some(dep_graph::hash_result),
+    );
     let time_to_codegen = start_time.elapsed();
 
     drop(prof_timer);

@@ -113,8 +113,13 @@ pub fn compile_codegen_unit(
     let start_time = Instant::now();
 
     let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
-    let (module, _) =
-        tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
+    let (module, _) = tcx.dep_graph.with_task(
+        dep_node,
+        tcx,
+        cgu_name,
+        module_codegen,
+        Some(dep_graph::hash_result),
+    );
     let time_to_codegen = start_time.elapsed();
 
     // We assume that the cost to run LLVM on a CGU is proportional to

@@ -126,30 +126,36 @@ impl IfThisChanged<'tcx> {
             if attr.has_name(sym::rustc_if_this_changed) {
                 let dep_node_interned = self.argument(attr);
                 let dep_node = match dep_node_interned {
-                    None => DepNode::from_def_path_hash(def_path_hash, DepKind::hir_owner),
-                    Some(n) => match DepNode::from_label_string(&n.as_str(), def_path_hash) {
-                        Ok(n) => n,
-                        Err(()) => {
-                            self.tcx.sess.span_fatal(
-                                attr.span,
-                                &format!("unrecognized DepNode variant {:?}", n),
-                            );
+                    None => {
+                        DepNode::from_def_path_hash(self.tcx, def_path_hash, DepKind::hir_owner)
+                    }
+                    Some(n) => {
+                        match DepNode::from_label_string(self.tcx, &n.as_str(), def_path_hash) {
+                            Ok(n) => n,
+                            Err(()) => {
+                                self.tcx.sess.span_fatal(
+                                    attr.span,
+                                    &format!("unrecognized DepNode variant {:?}", n),
+                                );
+                            }
                         }
-                    },
+                    }
                 };
                 self.if_this_changed.push((attr.span, def_id.to_def_id(), dep_node));
             } else if attr.has_name(sym::rustc_then_this_would_need) {
                 let dep_node_interned = self.argument(attr);
                 let dep_node = match dep_node_interned {
-                    Some(n) => match DepNode::from_label_string(&n.as_str(), def_path_hash) {
-                        Ok(n) => n,
-                        Err(()) => {
-                            self.tcx.sess.span_fatal(
-                                attr.span,
-                                &format!("unrecognized DepNode variant {:?}", n),
-                            );
+                    Some(n) => {
+                        match DepNode::from_label_string(self.tcx, &n.as_str(), def_path_hash) {
+                            Ok(n) => n,
+                            Err(()) => {
+                                self.tcx.sess.span_fatal(
+                                    attr.span,
+                                    &format!("unrecognized DepNode variant {:?}", n),
+                                );
+                            }
                         }
-                    },
+                    }
                     None => {
                         self.tcx.sess.span_fatal(attr.span, "missing DepNode variant");
                     }

@@ -15,7 +15,7 @@
 use rustc_ast::{self as ast, Attribute, NestedMetaItem};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit;
 use rustc_hir::itemlikevisit::ItemLikeVisitor;
 use rustc_hir::Node as HirNode;
@@ -302,18 +302,6 @@ impl DirtyCleanVisitor<'tcx> {
         out
     }
 
-    fn dep_nodes<'l>(
-        &self,
-        labels: &'l Labels,
-        def_id: DefId,
-    ) -> impl Iterator<Item = DepNode> + 'l {
-        let def_path_hash = self.tcx.def_path_hash(def_id);
-        labels.iter().map(move |label| match DepNode::from_label_string(label, def_path_hash) {
-            Ok(dep_node) => dep_node,
-            Err(()) => unreachable!("label: {}", label),
-        })
-    }
-
     fn dep_node_str(&self, dep_node: &DepNode) -> String {
         if let Some(def_id) = dep_node.extract_def_id(self.tcx) {
             format!("{:?}({})", dep_node.kind, self.tcx.def_path_str(def_id))
@@ -345,16 +333,19 @@ impl DirtyCleanVisitor<'tcx> {
     }
 
     fn check_item(&mut self, item_id: LocalDefId, item_span: Span) {
+        let def_path_hash = self.tcx.def_path_hash(item_id.to_def_id());
         for attr in self.tcx.get_attrs(item_id.to_def_id()).iter() {
             let assertion = match self.assertion_maybe(item_id, attr) {
                 Some(a) => a,
                 None => continue,
            };
            self.checked_attrs.insert(attr.id);
-            for dep_node in self.dep_nodes(&assertion.clean, item_id.to_def_id()) {
+            for label in assertion.clean {
+                let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_clean(item_span, dep_node);
             }
-            for dep_node in self.dep_nodes(&assertion.dirty, item_id.to_def_id()) {
+            for label in assertion.dirty {
+                let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_dirty(item_span, dep_node);
             }
         }

@@ -838,6 +838,7 @@ pub fn create_global_ctxt<'tcx>(
         dep_graph,
         queries.on_disk_cache.as_ref().map(OnDiskCache::as_dyn),
         queries.as_dyn(),
+        rustc_query_impl::query_callbacks(arena),
         crate_name,
         outputs,
     )

@@ -100,6 +100,8 @@ macro_rules! arena_types {
             // This is used to decode the &'tcx [Span] for InlineAsm's line_spans.
             [decode] span: rustc_span::Span,
             [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,
+
+            [] dep_kind: rustc_middle::dep_graph::DepKindStruct,
         ], $tcx);
     )
 }

@@ -75,147 +75,73 @@ pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
 /// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual
 /// jump table instead of large matches.
 pub struct DepKindStruct {
-    /// Whether the DepNode has parameters (query keys).
-    pub(super) has_params: bool,
-
     /// Anonymous queries cannot be replayed from one compiler invocation to the next.
     /// When their result is needed, it is recomputed. They are useful for fine-grained
     /// dependency tracking, and caching within one compiler invocation.
-    pub(super) is_anon: bool,
+    pub is_anon: bool,
 
     /// Eval-always queries do not track their dependencies, and are always recomputed, even if
     /// their inputs have not changed since the last compiler invocation. The result is still
     /// cached within one compiler invocation.
-    pub(super) is_eval_always: bool,
+    pub is_eval_always: bool,
 
     /// Whether the query key can be recovered from the hashed fingerprint.
     /// See [DepNodeParams] trait for the behaviour of each key type.
-    // FIXME: Make this a simple boolean once DepNodeParams::fingerprint_style
-    // can be made a specialized associated const.
-    fingerprint_style: fn() -> FingerprintStyle,
-}
+    pub fingerprint_style: FingerprintStyle,
 
-impl std::ops::Deref for DepKind {
-    type Target = DepKindStruct;
-    fn deref(&self) -> &DepKindStruct {
-        &DEP_KINDS[*self as usize]
-    }
+    /// The red/green evaluation system will try to mark a specific DepNode in the
+    /// dependency graph as green by recursively trying to mark the dependencies of
+    /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
+    /// where we don't know if it is red or green and we therefore actually have
+    /// to recompute its value in order to find out. Since the only piece of
+    /// information that we have at that point is the `DepNode` we are trying to
+    /// re-evaluate, we need some way to re-run a query from just that. This is what
+    /// `force_from_dep_node()` implements.
+    ///
+    /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
+    /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
+    /// is usually constructed by computing a stable hash of the query-key that the
+    /// `DepNode` corresponds to. Consequently, it is not in general possible to go
+    /// back from hash to query-key (since hash functions are not reversible). For
+    /// this reason `force_from_dep_node()` is expected to fail from time to time
+    /// because we just cannot find out, from the `DepNode` alone, what the
+    /// corresponding query-key is and therefore cannot re-run the query.
+    ///
+    /// The system deals with this case letting `try_mark_green` fail which forces
+    /// the root query to be re-evaluated.
+    ///
+    /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
+    /// Fortunately, we can use some contextual information that will allow us to
+    /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
+    /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
+    /// valid `DefPathHash`. Since we also always build a huge table that maps every
+    /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
+    /// everything we need to re-run the query.
+    ///
+    /// Take the `mir_promoted` query as an example. Like many other queries, it
+    /// just has a single parameter: the `DefId` of the item it will compute the
+    /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
+    /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
+    /// is actually a `DefPathHash`, and can therefore just look up the corresponding
+    /// `DefId` in `tcx.def_path_hash_to_def_id`.
+    pub force_from_dep_node: Option<fn(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool>,
+
+    /// Invoke a query to put the on-disk cached value in memory.
+    pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'_>, DepNode)>,
 }
 
 impl DepKind {
     #[inline(always)]
-    pub fn fingerprint_style(&self) -> FingerprintStyle {
+    pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle {
         // Only fetch the DepKindStruct once.
-        let data: &DepKindStruct = &**self;
+        let data = tcx.query_kind(self);
         if data.is_anon {
             return FingerprintStyle::Opaque;
         }
 
-        (data.fingerprint_style)()
+        data.fingerprint_style
     }
 }
 
-// erase!() just makes tokens go away. It's used to specify which macro argument
-// is repeated (i.e., which sub-expression of the macro we are in) but don't need
-// to actually use any of the arguments.
-macro_rules! erase {
-    ($x:tt) => {{}};
-}
-
-macro_rules! is_anon_attr {
-    (anon) => {
-        true
-    };
-    ($attr:ident) => {
-        false
-    };
-}
-
-macro_rules! is_eval_always_attr {
-    (eval_always) => {
-        true
-    };
-    ($attr:ident) => {
-        false
-    };
-}
-
-macro_rules! contains_anon_attr {
-    ($(($attr:ident $($attr_args:tt)* )),*) => ({$(is_anon_attr!($attr) | )* false});
-}
-
-macro_rules! contains_eval_always_attr {
-    ($(($attr:ident $($attr_args:tt)* )),*) => ({$(is_eval_always_attr!($attr) | )* false});
-}
-
-#[allow(non_upper_case_globals)]
-pub mod dep_kind {
-    use super::*;
-    use crate::ty::query::query_keys;
-    use rustc_query_system::dep_graph::FingerprintStyle;
-
-    // We use this for most things when incr. comp. is turned off.
-    pub const Null: DepKindStruct = DepKindStruct {
-        has_params: false,
-        is_anon: false,
-        is_eval_always: false,
-        fingerprint_style: || FingerprintStyle::Unit,
-    };
-
-    pub const TraitSelect: DepKindStruct = DepKindStruct {
-        has_params: false,
-        is_anon: true,
-        is_eval_always: false,
-        fingerprint_style: || FingerprintStyle::Unit,
-    };
-
-    pub const CompileCodegenUnit: DepKindStruct = DepKindStruct {
-        has_params: true,
-        is_anon: false,
-        is_eval_always: false,
-        fingerprint_style: || FingerprintStyle::Opaque,
-    };
-
-    pub const CompileMonoItem: DepKindStruct = DepKindStruct {
-        has_params: true,
-        is_anon: false,
-        is_eval_always: false,
-        fingerprint_style: || FingerprintStyle::Opaque,
-    };
-
-    macro_rules! define_query_dep_kinds {
-        ($(
-            [$($attrs:tt)*]
-            $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
-        ,)*) => (
-            $(pub const $variant: DepKindStruct = {
-                const has_params: bool = $({ erase!($tuple_arg_ty); true } |)* false;
-                const is_anon: bool = contains_anon_attr!($($attrs)*);
-                const is_eval_always: bool = contains_eval_always_attr!($($attrs)*);
-
-                #[inline(always)]
-                fn fingerprint_style() -> rustc_query_system::dep_graph::FingerprintStyle {
-                    <query_keys::$variant<'_> as DepNodeParams<TyCtxt<'_>>>
-                        ::fingerprint_style()
-                }
-
-                DepKindStruct {
-                    has_params,
-                    is_anon,
-                    is_eval_always,
-                    fingerprint_style,
-                }
-            };)*
-        );
-    }
-
-    rustc_dep_node_append!([define_query_dep_kinds!][]);
-}
-
 macro_rules! define_dep_nodes {
     (<$tcx:tt>
     $(
@@ -225,12 +151,10 @@ macro_rules! define_dep_nodes {
     ) => (
         #[macro_export]
         macro_rules! make_dep_kind_array {
-            ($mod:ident) => {[ $(($mod::$variant),)* ]};
+            ($mod:ident) => {[ $($mod::$variant()),* ]};
         }
 
-        static DEP_KINDS: &[DepKindStruct] = &make_dep_kind_array!(dep_kind);
-
-        /// This enum serves as an index into the `DEP_KINDS` array.
+        /// This enum serves as an index into arrays built by `make_dep_kind_array`.
         #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
         #[allow(non_camel_case_types)]
         pub enum DepKind {
@@ -296,7 +220,7 @@ pub trait DepNodeExt: Sized {
     /// Construct a DepNode from the given DepKind and DefPathHash. This
     /// method will assert that the given DepKind actually requires a
     /// single DefId/DefPathHash parameter.
-    fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> Self;
+    fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self;
 
     /// Extracts the DefId corresponding to this DepNode. This will work
     /// if two conditions are met:
@@ -311,7 +235,11 @@ pub trait DepNodeExt: Sized {
     fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId>;
 
     /// Used in testing
-    fn from_label_string(label: &str, def_path_hash: DefPathHash) -> Result<Self, ()>;
+    fn from_label_string(
+        tcx: TyCtxt<'_>,
+        label: &str,
+        def_path_hash: DefPathHash,
+    ) -> Result<Self, ()>;
 
     /// Used in testing
     fn has_label_string(label: &str) -> bool;
@@ -321,8 +249,8 @@ impl DepNodeExt for DepNode {
     /// Construct a DepNode from the given DepKind and DefPathHash. This
     /// method will assert that the given DepKind actually requires a
     /// single DefId/DefPathHash parameter.
-    fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
-        debug_assert!(kind.fingerprint_style() == FingerprintStyle::DefPathHash);
+    fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
+        debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash);
         DepNode { kind, hash: def_path_hash.0.into() }
     }
 
@@ -337,31 +265,27 @@ impl DepNodeExt for DepNode {
     /// refers to something from the previous compilation session that
     /// has been removed.
     fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
-        if self.kind.fingerprint_style() == FingerprintStyle::DefPathHash {
-            Some(
-                tcx.on_disk_cache
-                    .as_ref()?
-                    .def_path_hash_to_def_id(tcx, DefPathHash(self.hash.into())),
-            )
+        if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash {
+            Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into())))
         } else {
             None
         }
     }
 
     /// Used in testing
-    fn from_label_string(label: &str, def_path_hash: DefPathHash) -> Result<DepNode, ()> {
+    fn from_label_string(
+        tcx: TyCtxt<'_>,
+        label: &str,
+        def_path_hash: DefPathHash,
+    ) -> Result<DepNode, ()> {
         let kind = dep_kind_from_label_string(label)?;
 
-        match kind.fingerprint_style() {
+        match kind.fingerprint_style(tcx) {
             FingerprintStyle::Opaque => Err(()),
-            FingerprintStyle::Unit => {
-                if !kind.has_params {
-                    Ok(DepNode::new_no_params(kind))
-                } else {
-                    Err(())
-                }
+            FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)),
+            FingerprintStyle::DefPathHash => {
+                Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind))
             }
-            FingerprintStyle::DefPathHash => Ok(DepNode::from_def_path_hash(def_path_hash, kind)),
         }
     }
 
@@ -377,10 +301,12 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for () {
         FingerprintStyle::Unit
     }
 
+    #[inline(always)]
     fn to_fingerprint(&self, _: TyCtxt<'tcx>) -> Fingerprint {
         Fingerprint::ZERO
     }
 
+    #[inline(always)]
     fn recover(_: TyCtxt<'tcx>, _: &DepNode) -> Option<Self> {
         Some(())
     }
@@ -392,14 +318,17 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for DefId {
         FingerprintStyle::DefPathHash
     }
 
+    #[inline(always)]
     fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
         tcx.def_path_hash(*self).0
     }
 
+    #[inline(always)]
     fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
         tcx.def_path_str(*self)
     }
 
+    #[inline(always)]
     fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
         dep_node.extract_def_id(tcx)
     }
@@ -411,14 +340,17 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
         FingerprintStyle::DefPathHash
     }
 
+    #[inline(always)]
    fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
         self.to_def_id().to_fingerprint(tcx)
     }
 
+    #[inline(always)]
     fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
         self.to_def_id().to_debug_str(tcx)
     }
 
+    #[inline(always)]
     fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
         dep_node.extract_def_id(tcx).map(|id| id.expect_local())
     }
@@ -430,15 +362,18 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
         FingerprintStyle::DefPathHash
     }
 
+    #[inline(always)]
     fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
         let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
         def_id.to_fingerprint(tcx)
     }
 
+    #[inline(always)]
     fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
         tcx.crate_name(*self).to_string()
     }
 
+    #[inline(always)]
     fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
         dep_node.extract_def_id(tcx).map(|id| id.krate)
     }
@@ -453,6 +388,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
     // We actually would not need to specialize the implementation of this
     // method but it's faster to combine the hashes than to instantiate a full
     // hashing context and stable-hashing state.
+    #[inline(always)]
     fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
         let (def_id_0, def_id_1) = *self;
 
@@ -462,6 +398,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
         def_path_hash_0.0.combine(def_path_hash_1.0)
     }
 
+    #[inline(always)]
     fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
         let (def_id_0, def_id_1) = *self;
 
@@ -478,6 +415,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
     // We actually would not need to specialize the implementation of this
     // method but it's faster to combine the hashes than to instantiate a full
     // hashing context and stable-hashing state.
+    #[inline(always)]
     fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
         let HirId { owner, local_id } = *self;

@@ -12,7 +12,7 @@ pub use rustc_query_system::dep_graph::{
     SerializedDepNodeIndex, WorkProduct, WorkProductId,
 };
 
-pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};
+pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt};
 crate use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
 
 pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
@@ -24,29 +24,8 @@ pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
 impl rustc_query_system::dep_graph::DepKind for DepKind {
     const NULL: Self = DepKind::Null;
 
-    #[inline(always)]
-    fn fingerprint_style(&self) -> rustc_query_system::dep_graph::FingerprintStyle {
-        DepKind::fingerprint_style(self)
-    }
-
-    #[inline(always)]
-    fn is_eval_always(&self) -> bool {
-        self.is_eval_always
-    }
-
-    #[inline(always)]
-    fn has_params(&self) -> bool {
-        self.has_params
-    }
-
     fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{:?}", node.kind)?;
-
-        if !node.kind.has_params && !node.kind.is_anon {
-            return Ok(());
-        }
-
-        write!(f, "(")?;
+        write!(f, "{:?}(", node.kind)?;
 
         ty::tls::with_opt(|opt_tcx| {
             if let Some(tcx) = opt_tcx {
@@ -110,4 +89,51 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     fn sess(&self) -> &Session {
         self.sess
     }
+
+    #[inline(always)]
+    fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle {
+        kind.fingerprint_style(*self)
+    }
+
+    #[inline(always)]
+    fn is_eval_always(&self, kind: DepKind) -> bool {
+        self.query_kind(kind).is_eval_always
+    }
+
+    fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool {
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+        // We must avoid ever having to call `force_from_dep_node()` for a
+        // `DepNode::codegen_unit`:
+        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+        // would always end up having to evaluate the first caller of the
+        // `codegen_unit` query that *is* reconstructible. This might very well be
+        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+        // to re-trigger calling the `codegen_unit` query with the right key. At
+        // that point we would already have re-done all the work we are trying to
+        // avoid doing in the first place.
+        // The solution is simple: Just explicitly call the `codegen_unit` query for
+        // each CGU, right after partitioning. This way `try_mark_green` will always
+        // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure, we actually keep applying the solution above.
+        debug_assert!(
+            dep_node.kind != DepKind::codegen_unit,
+            "calling force_from_dep_node() on DepKind::codegen_unit"
+        );
+
+        let cb = self.query_kind(dep_node.kind);
+        if let Some(f) = cb.force_from_dep_node {
+            f(*self, dep_node);
+            true
+        } else {
+            false
+        }
+    }
+
+    fn try_load_from_on_disk_cache(&self, dep_node: DepNode) {
+        let cb = self.query_kind(dep_node.kind);
+        if let Some(f) = cb.try_load_from_on_disk_cache {
+            f(*self, dep_node)
+        }
+    }
 }

@@ -1,7 +1,7 @@
 //! Type context book-keeping.
 
 use crate::arena::Arena;
-use crate::dep_graph::DepGraph;
+use crate::dep_graph::{DepGraph, DepKind, DepKindStruct};
 use crate::hir::place::Place as HirPlace;
 use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
 use crate::lint::{struct_lint_level, LintDiagnosticBuilder, LintLevelSource};
@@ -79,11 +79,6 @@ pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
     where
         Self: Sized;
 
-    /// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
-    /// session, if it still exists. This is used during incremental compilation to
-    /// turn a deserialized `DefPathHash` into its current `DefId`.
-    fn def_path_hash_to_def_id(&self, tcx: TyCtxt<'tcx>, def_path_hash: DefPathHash) -> DefId;
-
     fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>);
 
     fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: &mut FileEncoder) -> FileEncodeResult;
@@ -1016,6 +1011,7 @@ pub struct GlobalCtxt<'tcx> {
 
     pub queries: &'tcx dyn query::QueryEngine<'tcx>,
     pub query_caches: query::QueryCaches<'tcx>,
+    query_kinds: &'tcx [DepKindStruct],
 
     // Internal caches for metadata decoding. No need to track deps on this.
     pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
@@ -1149,6 +1145,7 @@ impl<'tcx> TyCtxt<'tcx> {
         dep_graph: DepGraph,
         on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
         queries: &'tcx dyn query::QueryEngine<'tcx>,
+        query_kinds: &'tcx [DepKindStruct],
         crate_name: &str,
         output_filenames: OutputFilenames,
     ) -> GlobalCtxt<'tcx> {
@@ -1175,6 +1172,7 @@ impl<'tcx> TyCtxt<'tcx> {
             on_disk_cache,
             queries,
             query_caches: query::QueryCaches::default(),
+            query_kinds,
             ty_rcache: Default::default(),
             pred_rcache: Default::default(),
             selection_cache: Default::default(),
@@ -1188,6 +1186,10 @@ impl<'tcx> TyCtxt<'tcx> {
         }
     }
 
+    crate fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct {
+        &self.query_kinds[k as usize]
+    }
+
     /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
     #[track_caller]
     pub fn ty_error(self) -> Ty<'tcx> {
@@ -1301,6 +1303,27 @@ impl<'tcx> TyCtxt<'tcx> {
         }
     }
 
+    /// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
+    /// session, if it still exists. This is used during incremental compilation to
+    /// turn a deserialized `DefPathHash` into its current `DefId`.
+    pub fn def_path_hash_to_def_id(self, hash: DefPathHash) -> DefId {
+        debug!("def_path_hash_to_def_id({:?})", hash);
+
+        let stable_crate_id = hash.stable_crate_id();
+
+        // If this is a DefPathHash from the local crate, we can look up the
+        // DefId in the tcx's `Definitions`.
+        if stable_crate_id == self.sess.local_stable_crate_id() {
+            self.untracked_resolutions.definitions.local_def_path_hash_to_def_id(hash).to_def_id()
+        } else {
+            // If this is a DefPathHash from an upstream crate, let the CrateStore map
+            // it to a DefId.
+            let cstore = &self.untracked_resolutions.cstore;
+            let cnum = cstore.stable_crate_id_to_crate_num(stable_crate_id);
+            cstore.def_path_hash_to_def_id(cnum, hash)
+        }
+    }
+
     pub fn def_path_debug_str(self, def_id: DefId) -> String {
         // We are explicitly not going through queries here in order to get
         // crate name and stable crate id since this code is called from debug!()

@@ -102,6 +102,10 @@ impl TyCtxt<'tcx> {
     }
 }
 
+/// Helper for `TyCtxtEnsure` to avoid a closure.
+#[inline(always)]
+fn noop<T>(_: &T) {}
+
 macro_rules! query_helper_param_ty {
     (DefId) => { impl IntoQueryParam<DefId> };
     ($K:ty) => { $K };
@@ -165,7 +169,7 @@ macro_rules! define_callbacks {
             #[inline(always)]
             pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
                 let key = key.into_query_param();
-                let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |_| {});
+                let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, noop);
 
                 let lookup = match cached {
                     Ok(()) => return,
@@ -192,9 +196,7 @@ macro_rules! define_callbacks {
             pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
             {
                 let key = key.into_query_param();
-                let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |value| {
-                    value.clone()
-                });
+                let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, Clone::clone);
 
                 let lookup = match cached {
                     Ok(value) => return value,
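
The two `try_get_cached` changes above swap per-call-site closures for the named function items `noop` and `Clone::clone`. A rough standalone model of that call shape (assumed names, with a `HashMap` standing in for the query cache):

```rust
use std::collections::HashMap;
use std::hash::Hash;

/// Stand-in for the `noop` helper added above: `ensure`-style lookups only
/// care that the value is present, not what it is.
fn noop<T>(_: &T) {}

/// Rough model of `try_get_cached`: on a hit, hand the cached value to `copy`.
fn try_get_cached<K: Hash + Eq, V, R>(
    cache: &HashMap<K, V>,
    key: &K,
    copy: fn(&V) -> R,
) -> Option<R> {
    cache.get(key).map(copy)
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(1u32, String::from("cached"));
    // `ensure` path: the cached value is discarded, we only test for presence.
    let hit: Option<()> = try_get_cached(&cache, &1, noop);
    assert!(hit.is_some());
    // Normal path: `Clone::clone` copies the value out, as in the diff above.
    let value = try_get_cached(&cache, &1, Clone::clone);
    assert_eq!(value.as_deref(), Some("cached"));
}
```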

@@ -9,7 +9,6 @@ doctest = false
 [dependencies]
 measureme = "10.0.0"
 rustc-rayon-core = "0.3.1"
-tracing = "0.1"
 rustc_ast = { path = "../rustc_ast" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }

@@ -13,13 +13,12 @@
 extern crate rustc_macros;
 #[macro_use]
 extern crate rustc_middle;
-#[macro_use]
-extern crate tracing;
 
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_errors::DiagnosticBuilder;
-use rustc_middle::dep_graph;
+use rustc_middle::arena::Arena;
+use rustc_middle::dep_graph::{self, DepKindStruct};
 use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
 use rustc_middle::ty::query::{Providers, QueryEngine};
 use rustc_middle::ty::{self, TyCtxt};
@@ -29,7 +28,6 @@ use rustc_span::Span;
 #[macro_use]
 mod plumbing;
 pub use plumbing::QueryCtxt;
-use plumbing::QueryStruct;
 use rustc_query_system::query::*;
 
 mod stats;

@@ -219,7 +219,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
             // Do this *before* we clone 'latest_foreign_def_path_hashes', since
             // loading existing queries may cause us to create new DepNodes, which
             // may in turn end up invoking `store_foreign_def_id_hash`
-            tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
+            tcx.dep_graph.exec_cache_promotions(tcx);
 
             *self.serialized_data.write() = None;
         }
@@ -358,23 +358,6 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
             Ok(())
         })
     }
-
-    fn def_path_hash_to_def_id(&self, tcx: TyCtxt<'tcx>, hash: DefPathHash) -> DefId {
-        debug!("def_path_hash_to_def_id({:?})", hash);
-
-        let stable_crate_id = hash.stable_crate_id();
-
-        // If this is a DefPathHash from the local crate, we can look up the
-        // DefId in the tcx's `Definitions`.
-        if stable_crate_id == tcx.sess.local_stable_crate_id() {
-            tcx.definitions_untracked().local_def_path_hash_to_def_id(hash).to_def_id()
-        } else {
-            // If this is a DefPathHash from an upstream crate, let the CrateStore map
-            // it to a DefId.
-            let cnum = tcx.cstore_untracked().stable_crate_id_to_crate_num(stable_crate_id);
-            tcx.cstore_untracked().def_path_hash_to_def_id(cnum, hash)
-        }
-    }
 }
 
 impl<'sess> OnDiskCache<'sess> {
@@ -764,7 +747,7 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
         // If we get to this point, then all of the query inputs were green,
         // which means that the definition with this hash is guaranteed to
         // still exist in the current compilation session.
-        Ok(d.tcx().on_disk_cache.as_ref().unwrap().def_path_hash_to_def_id(d.tcx(), def_path_hash))
+        Ok(d.tcx().def_path_hash_to_def_id(def_path_hash))
     }
 }

@@ -3,7 +3,7 @@
 //! manage the caches, and so forth.
 
 use crate::{on_disk_cache, queries, Queries};
-use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_query_system::dep_graph::HasDepContext;
@@ -53,36 +53,6 @@ impl QueryContext for QueryCtxt<'tcx> {
         self.queries.try_collect_active_jobs(**self)
     }
 
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.try_load_from_on_disk_cache)(*self, dep_node)
-    }
-
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure, we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.force_from_dep_node)(*self, dep_node)
-    }
-
     // Interactions with on_disk_cache
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
         self.queries
@@ -193,60 +163,6 @@ impl<'tcx> QueryCtxt<'tcx> {
     }
 }
 
-/// This struct stores metadata about each Query.
-///
-/// Information is retrieved by indexing the `QUERIES` array using the integer value
-/// of the `DepKind`. Overall, this allows to implement `QueryContext` using this manual
-/// jump table instead of large matches.
-pub struct QueryStruct {
-    /// The red/green evaluation system will try to mark a specific DepNode in the
-    /// dependency graph as green by recursively trying to mark the dependencies of
-    /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
-    /// where we don't know if it is red or green and we therefore actually have
-    /// to recompute its value in order to find out. Since the only piece of
-    /// information that we have at that point is the `DepNode` we are trying to
-    /// re-evaluate, we need some way to re-run a query from just that. This is what
-    /// `force_from_dep_node()` implements.
-    ///
-    /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
-    /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
-    /// is usually constructed by computing a stable hash of the query-key that the
-    /// `DepNode` corresponds to. Consequently, it is not in general possible to go
-    /// back from hash to query-key (since hash functions are not reversible). For
-    /// this reason `force_from_dep_node()` is expected to fail from time to time
-    /// because we just cannot find out, from the `DepNode` alone, what the
-    /// corresponding query-key is and therefore cannot re-run the query.
-    ///
-    /// The system deals with this case letting `try_mark_green` fail which forces
-    /// the root query to be re-evaluated.
-    ///
-    /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
-    /// Fortunately, we can use some contextual information that will allow us to
-    /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
-    /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
-    /// valid `DefPathHash`. Since we also always build a huge table that maps every
-    /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
-    /// everything we need to re-run the query.
-    ///
-    /// Take the `mir_promoted` query as an example. Like many other queries, it
-    /// just has a single parameter: the `DefId` of the item it will compute the
-    /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
-    /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
-    /// is actually a `DefPathHash`, and can therefore just look up the corresponding
-    /// `DefId` in `tcx.def_path_hash_to_def_id`.
-    ///
-    /// When you implement a new query, it will likely have a corresponding new
-    /// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
-    /// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
-    /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
-    /// add it to the "We don't have enough information to reconstruct..." group in
-    /// the match below.
-    pub(crate) force_from_dep_node: fn(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool,
-
-    /// Invoke a query to put the on-disk cached value in memory.
-    pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
-}
-
 macro_rules! handle_cycle_error {
     ([][$tcx: expr, $error:expr]) => {{
         $error.emit();
@@ -291,14 +207,14 @@ macro_rules! is_eval_always {
 }
 
 macro_rules! hash_result {
-    ([][$hcx:expr, $result:expr]) => {{
-        dep_graph::hash_result($hcx, &$result)
+    ([]) => {{
+        Some(dep_graph::hash_result)
     }};
-    ([(no_hash) $($rest:tt)*][$hcx:expr, $result:expr]) => {{
+    ([(no_hash) $($rest:tt)*]) => {{
         None
     }};
-    ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
-        hash_result!([$($modifiers)*][$($args)*])
+    ([$other:tt $($modifiers:tt)*]) => {
+        hash_result!([$($modifiers)*])
     };
 }
 
@@ -378,6 +294,7 @@ macro_rules! define_queries {
             const ANON: bool = is_anon!([$($modifiers)*]);
             const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
             const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
+            const HASH_RESULT: Option<fn(&mut StableHashingContext<'_>, &Self::Value) -> Fingerprint> = hash_result!([$($modifiers)*]);
 
             type Cache = query_storage::$name<$tcx>;
 
@@ -406,13 +323,6 @@ macro_rules! define_queries {
                 }
             }
 
-            fn hash_result(
-                _hcx: &mut StableHashingContext<'_>,
-                _result: &Self::Value
-            ) -> Option<Fingerprint> {
-                hash_result!([$($modifiers)*][_hcx, _result])
-            }
-
             fn handle_cycle_error(
                 tcx: QueryCtxt<'tcx>,
                 mut error: DiagnosticBuilder<'_>,
@@ -421,7 +331,7 @@ macro_rules! define_queries {
             }
         })*
 
-        #[allow(non_upper_case_globals)]
+        #[allow(nonstandard_style)]
        pub mod query_callbacks {
             use super::*;
             use rustc_middle::dep_graph::DepNode;
@@ -431,68 +341,101 @@ macro_rules! define_queries {
             use rustc_query_system::dep_graph::FingerprintStyle;
 
             // We use this for most things when incr. comp. is turned off.
-            pub const Null: QueryStruct = QueryStruct {
-                force_from_dep_node: |_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node),
-                try_load_from_on_disk_cache: |_, _| {},
-            };
+            pub fn Null() -> DepKindStruct {
+                DepKindStruct {
+                    is_anon: false,
+                    is_eval_always: false,
+                    fingerprint_style: FingerprintStyle::Unit,
+                    force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
+                    try_load_from_on_disk_cache: None,
+                }
+            }
 
-            pub const TraitSelect: QueryStruct = QueryStruct {
-                force_from_dep_node: |_, _| false,
-                try_load_from_on_disk_cache: |_, _| {},
-            };
+            pub fn TraitSelect() -> DepKindStruct {
+                DepKindStruct {
+                    is_anon: true,
+                    is_eval_always: false,
+                    fingerprint_style: FingerprintStyle::Unit,
+                    force_from_dep_node: None,
+                    try_load_from_on_disk_cache: None,
+                }
+            }
 
-            pub const CompileCodegenUnit: QueryStruct = QueryStruct {
-                force_from_dep_node: |_, _| false,
-                try_load_from_on_disk_cache: |_, _| {},
-            };
+            pub fn CompileCodegenUnit() -> DepKindStruct {
+                DepKindStruct {
+                    is_anon: false,
+                    is_eval_always: false,
+                    fingerprint_style: FingerprintStyle::Opaque,
+                    force_from_dep_node: None,
+                    try_load_from_on_disk_cache: None,
+                }
+            }
 
-            pub const CompileMonoItem: QueryStruct = QueryStruct {
-                force_from_dep_node: |_, _| false,
-                try_load_from_on_disk_cache: |_, _| {},
-            };
+            pub fn CompileMonoItem() -> DepKindStruct {
+                DepKindStruct {
+                    is_anon: false,
+                    is_eval_always: false,
+                    fingerprint_style: FingerprintStyle::Opaque,
+                    force_from_dep_node: None,
+                    try_load_from_on_disk_cache: None,
+                }
+            }
 
-            $(pub const $name: QueryStruct = {
-                const is_anon: bool = is_anon!([$($modifiers)*]);
+            $(pub fn $name()-> DepKindStruct {
+                let is_anon = is_anon!([$($modifiers)*]);
+                let is_eval_always = is_eval_always!([$($modifiers)*]);
 
-                #[inline(always)]
-                fn fingerprint_style() -> FingerprintStyle {
-                    <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>
-                        ::fingerprint_style()
+                let fingerprint_style =
+                    <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::fingerprint_style();
+
+                if is_anon || !fingerprint_style.reconstructible() {
+                    return DepKindStruct {
+                        is_anon,
+                        is_eval_always,
+                        fingerprint_style,
+                        force_from_dep_node: None,
+                        try_load_from_on_disk_cache: None,
+                    }
                 }
 
-                fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<query_keys::$name<'tcx>> {
-                    <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node)
+                #[inline(always)]
+                fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> Option<query_keys::$name<'tcx>> {
+                    <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, &dep_node)
                 }
 
-                fn force_from_dep_node(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool {
-                    force_query::<queries::$name<'_>, _>(tcx, dep_node)
-                }
-
-                fn try_load_from_on_disk_cache(tcx: QueryCtxt<'_>, dep_node: &DepNode) {
-                    if is_anon {
-                        return
+                fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool {
+                    if let Some(key) = recover(tcx, dep_node) {
+                        let tcx = QueryCtxt::from_tcx(tcx);
+                        force_query::<queries::$name<'_>, _>(tcx, key, dep_node);
+                        true
+                    } else {
+                        false
                     }
+                }
 
-                    if !fingerprint_style().reconstructible() {
-                        return
-                    }
+                fn try_load_from_on_disk_cache(tcx: TyCtxt<'_>, dep_node: DepNode) {
+                    debug_assert!(tcx.dep_graph.is_green(&dep_node));
 
-                    debug_assert!(tcx.dep_graph.is_green(dep_node));
-
-                    let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
+                    let key = recover(tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
+                    let tcx = QueryCtxt::from_tcx(tcx);
                     if queries::$name::cache_on_disk(tcx, &key, None) {
                         let _ = tcx.$name(key);
                     }
                 }
 
-                QueryStruct {
-                    force_from_dep_node,
-                    try_load_from_on_disk_cache,
+                DepKindStruct {
+                    is_anon,
+                    is_eval_always,
+                    fingerprint_style,
+                    force_from_dep_node: Some(force_from_dep_node),
+                    try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache),
                 }
-            };)*
+            })*
         }
 
-        static QUERY_CALLBACKS: &[QueryStruct] = &make_dep_kind_array!(query_callbacks);
+        pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct] {
+            arena.alloc_from_iter(make_dep_kind_array!(query_callbacks))
+        }
     }
 }
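
`query_callbacks` now builds the table at startup and hands out an arena-allocated slice instead of a `static`, since the entries come from ordinary functions. A tiny model of that shape (assumed names; a `Vec` standing in for the arena):

```rust
// Each former `const` table entry is now a function returning the entry.
fn null() -> &'static str {
    "Null"
}
fn trait_select() -> &'static str {
    "TraitSelect"
}

// Models `arena.alloc_from_iter(make_dep_kind_array!(query_callbacks))`:
// build the whole jump table once and hand out a slice borrowed from it.
fn query_callbacks(arena: &mut Vec<&'static str>) -> &[&'static str] {
    arena.extend([null(), trait_select()]);
    arena.as_slice()
}

fn main() {
    let mut arena = Vec::new();
    let table = query_callbacks(&mut arena);
    // The table is indexed by the `DepKind` discriminant, as before.
    assert_eq!(table[1], "TraitSelect");
}
```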

@@ -60,8 +60,11 @@ impl<K: DepKind> DepNode<K> {
     /// Creates a new, parameterless DepNode. This method will assert
     /// that the DepNode corresponding to the given DepKind actually
     /// does not require any parameters.
-    pub fn new_no_params(kind: K) -> DepNode<K> {
-        debug_assert!(!kind.has_params());
+    pub fn new_no_params<Ctxt>(tcx: Ctxt, kind: K) -> DepNode<K>
+    where
+        Ctxt: super::DepContext<DepKind = K>,
+    {
+        debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
         DepNode { kind, hash: Fingerprint::ZERO.into() }
     }
 
@@ -75,7 +78,7 @@ impl<K: DepKind> DepNode<K> {
 
         #[cfg(debug_assertions)]
         {
-            if !kind.fingerprint_style().reconstructible()
+            if !tcx.fingerprint_style(kind).reconstructible()
                 && (tcx.sess().opts.debugging_opts.incremental_info
                     || tcx.sess().opts.debugging_opts.query_dep_graph)
             {
@@ -121,11 +124,12 @@ impl<Ctxt: DepContext, T> DepNodeParams<Ctxt> for T
 where
     T: for<'a> HashStable<StableHashingContext<'a>> + fmt::Debug,
 {
-    #[inline]
+    #[inline(always)]
     default fn fingerprint_style() -> FingerprintStyle {
         FingerprintStyle::Opaque
     }
 
+    #[inline(always)]
     default fn to_fingerprint(&self, tcx: Ctxt) -> Fingerprint {
         let mut hcx = tcx.create_stable_hashing_context();
         let mut hasher = StableHasher::new();
@@ -135,10 +139,12 @@ where
         hasher.finish()
     }
 
+    #[inline(always)]
     default fn to_debug_str(&self, _: Ctxt) -> String {
         format!("{:?}", *self)
     }
 
+    #[inline(always)]
     default fn recover(_: Ctxt, _: &DepNode<Ctxt::DepKind>) -> Option<Self> {
         None
     }

@ -33,12 +33,6 @@ pub struct DepGraph<K: DepKind> {
/// each task has a `DepNodeIndex` that uniquely identifies it. This unique /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
/// ID is used for self-profiling. /// ID is used for self-profiling.
virtual_dep_node_index: Lrc<AtomicU32>, virtual_dep_node_index: Lrc<AtomicU32>,
/// The cached event id for profiling node interning. This saves us
/// from having to look up the event id every time we intern a node
/// which may incur too much overhead.
/// This will be None if self-profiling is disabled.
node_intern_event_id: Option<EventId>,
} }
rustc_index::newtype_index! { rustc_index::newtype_index! {
@ -96,14 +90,13 @@ struct DepGraphData<K: DepKind> {
dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>, dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
} }
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint> pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where where
R: for<'a> HashStable<StableHashingContext<'a>>, R: for<'a> HashStable<StableHashingContext<'a>>,
{ {
let mut stable_hasher = StableHasher::new(); let mut stable_hasher = StableHasher::new();
result.hash_stable(hcx, &mut stable_hasher); result.hash_stable(hcx, &mut stable_hasher);
stable_hasher.finish()
Some(stable_hasher.finish())
} }
impl<K: DepKind> DepGraph<K> { impl<K: DepKind> DepGraph<K> {
@ -117,8 +110,13 @@ impl<K: DepKind> DepGraph<K> {
) -> DepGraph<K> { ) -> DepGraph<K> {
let prev_graph_node_count = prev_graph.node_count(); let prev_graph_node_count = prev_graph.node_count();
let current = let current = CurrentDepGraph::new(
CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats); profiler,
prev_graph_node_count,
encoder,
record_graph,
record_stats,
);
// Instantiate a dependy-less node only once for anonymous queries. // Instantiate a dependy-less node only once for anonymous queries.
let _green_node_index = current.intern_new_node( let _green_node_index = current.intern_new_node(
@ -129,10 +127,6 @@ impl<K: DepKind> DepGraph<K> {
); );
debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE); debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
let node_intern_event_id = profiler
.get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
.map(EventId::from_label);
DepGraph { DepGraph {
data: Some(Lrc::new(DepGraphData { data: Some(Lrc::new(DepGraphData {
previous_work_products: prev_work_products, previous_work_products: prev_work_products,
@ -143,16 +137,11 @@ impl<K: DepKind> DepGraph<K> {
colors: DepNodeColorMap::new(prev_graph_node_count), colors: DepNodeColorMap::new(prev_graph_node_count),
})), })),
virtual_dep_node_index: Lrc::new(AtomicU32::new(0)), virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
node_intern_event_id,
} }
} }
pub fn new_disabled() -> DepGraph<K> { pub fn new_disabled() -> DepGraph<K> {
DepGraph { DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
data: None,
virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
node_intern_event_id: None,
}
} }
/// Returns `true` if we are actually building the full dep-graph, and `false` otherwise. /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
@ -215,7 +204,7 @@ impl<K: DepKind> DepGraph<K> {
cx: Ctxt, cx: Ctxt,
arg: A, arg: A,
task: fn(Ctxt, A) -> R, task: fn(Ctxt, A) -> R,
hash_result: fn(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>, hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) { ) -> (R, DepNodeIndex) {
if self.is_fully_enabled() { if self.is_fully_enabled() {
self.with_task_impl(key, cx, arg, task, hash_result) self.with_task_impl(key, cx, arg, task, hash_result)
@ -234,7 +223,7 @@ impl<K: DepKind> DepGraph<K> {
cx: Ctxt, cx: Ctxt,
arg: A, arg: A,
task: fn(Ctxt, A) -> R, task: fn(Ctxt, A) -> R,
hash_result: fn(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>, hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) { ) -> (R, DepNodeIndex) {
// This function is only called when the graph is enabled. // This function is only called when the graph is enabled.
let data = self.data.as_ref().unwrap(); let data = self.data.as_ref().unwrap();
@ -253,7 +242,7 @@ impl<K: DepKind> DepGraph<K> {
key key
); );
let task_deps = if key.kind.is_eval_always() { let task_deps = if cx.dep_context().is_eval_always(key.kind) {
None None
} else { } else {
Some(Lock::new(TaskDeps { Some(Lock::new(TaskDeps {
@ -268,15 +257,14 @@ impl<K: DepKind> DepGraph<K> {
let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads); let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
let dcx = cx.dep_context(); let dcx = cx.dep_context();
let mut hcx = dcx.create_stable_hashing_context();
let hashing_timer = dcx.profiler().incr_result_hashing(); let hashing_timer = dcx.profiler().incr_result_hashing();
let current_fingerprint = hash_result(&mut hcx, &result); let current_fingerprint = hash_result.map(|f| {
let mut hcx = dcx.create_stable_hashing_context();
f(&mut hcx, &result)
});
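This `hash_result.map(...)` call is the payoff of changing the signature from `fn(..) -> Option<Fingerprint>` to `Option<fn(..) -> Fingerprint>`: whether a query hashes its result is now known statically from the vtable, so the `StableHashingContext` is only constructed when a hash function is actually present. A hedged sketch with stand-in types (not the rustc definitions):

    struct Fingerprint(u64);
    struct StableHashingContext; // stand-in: the real context is costly to build

    fn run(
        hash_result: Option<fn(&mut StableHashingContext, &str) -> Fingerprint>,
        result: &str,
    ) -> Option<Fingerprint> {
        // The context is built lazily, inside the map, only if hashing happens.
        hash_result.map(|f| {
            let mut hcx = StableHashingContext;
            f(&mut hcx, result)
        })
    }

    fn demo() {
        // No hash function: no hashing context is ever created.
        assert!(run(None, "anon query result").is_none());
        // With a hash function: the closure coerces to a plain fn pointer.
        let _ = run(Some(|_hcx, s| Fingerprint(s.len() as u64)), "hashed");
    }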
let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks; let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
// Get timer for profiling `DepNode` interning
let node_intern_timer =
self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
// Intern the new `DepNode`. // Intern the new `DepNode`.
let (dep_node_index, prev_and_color) = data.current.intern_node( let (dep_node_index, prev_and_color) = data.current.intern_node(
dcx.profiler(), dcx.profiler(),
@ -286,7 +274,6 @@ impl<K: DepKind> DepGraph<K> {
current_fingerprint, current_fingerprint,
print_status, print_status,
); );
drop(node_intern_timer);
hashing_timer.finish_with_query_invocation_id(dep_node_index.into()); hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
@ -315,7 +302,7 @@ impl<K: DepKind> DepGraph<K> {
where where
OP: FnOnce() -> R, OP: FnOnce() -> R,
{ {
debug_assert!(!dep_kind.is_eval_always()); debug_assert!(!cx.is_eval_always(dep_kind));
if let Some(ref data) = self.data { if let Some(ref data) = self.data {
let task_deps = Lock::new(TaskDeps::default()); let task_deps = Lock::new(TaskDeps::default());
@ -492,7 +479,7 @@ impl<K: DepKind> DepGraph<K> {
tcx: Ctxt, tcx: Ctxt,
dep_node: &DepNode<K>, dep_node: &DepNode<K>,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!dep_node.kind.is_eval_always()); debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
// Return None if the dep graph is disabled // Return None if the dep graph is disabled
let data = self.data.as_ref()?; let data = self.data.as_ref()?;
@ -552,7 +539,7 @@ impl<K: DepKind> DepGraph<K> {
// We don't know the state of this dependency. If it isn't // We don't know the state of this dependency. If it isn't
// an eval_always node, let's try to mark it green recursively. // an eval_always node, let's try to mark it green recursively.
if !dep_dep_node.kind.is_eval_always() { if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
debug!( debug!(
"try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \ "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
is unknown, trying to mark it green", is unknown, trying to mark it green",
@ -575,7 +562,7 @@ impl<K: DepKind> DepGraph<K> {
"try_mark_previous_green({:?}) --- trying to force dependency {:?}", "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
dep_node, dep_dep_node dep_node, dep_dep_node
); );
if !tcx.try_force_from_dep_node(dep_dep_node) { if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
// The DepNode could not be forced. // The DepNode could not be forced.
debug!( debug!(
"try_mark_previous_green({:?}) - END - dependency {:?} could not be forced", "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
@ -642,7 +629,7 @@ impl<K: DepKind> DepGraph<K> {
} }
// We never try to mark eval_always nodes as green // We never try to mark eval_always nodes as green
debug_assert!(!dep_node.kind.is_eval_always()); debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
@ -740,8 +727,7 @@ impl<K: DepKind> DepGraph<K> {
// //
// This method will only load queries that will end up in the disk cache. // This method will only load queries that will end up in the disk cache.
// Other queries will not be executed. // Other queries will not be executed.
pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) { pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
let tcx = qcx.dep_context();
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap(); let data = self.data.as_ref().unwrap();
@ -749,7 +735,7 @@ impl<K: DepKind> DepGraph<K> {
match data.colors.get(prev_index) { match data.colors.get(prev_index) {
Some(DepNodeColor::Green(_)) => { Some(DepNodeColor::Green(_)) => {
let dep_node = data.previous.index_to_node(prev_index); let dep_node = data.previous.index_to_node(prev_index);
qcx.try_load_from_on_disk_cache(&dep_node); tcx.try_load_from_on_disk_cache(dep_node);
} }
None | Some(DepNodeColor::Red) => { None | Some(DepNodeColor::Red) => {
// We can skip red nodes because a node can only be marked // We can skip red nodes because a node can only be marked
@ -876,10 +862,17 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
/// debugging and only active with `debug_assertions`. /// debugging and only active with `debug_assertions`.
total_read_count: AtomicU64, total_read_count: AtomicU64,
total_duplicate_read_count: AtomicU64, total_duplicate_read_count: AtomicU64,
/// The cached event id for profiling node interning. This saves us
/// from having to look up the event id every time we intern a node,
/// which may incur too much overhead.
/// This will be None if self-profiling is disabled.
node_intern_event_id: Option<EventId>,
} }
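Storing the cached `EventId` in `CurrentDepGraph` (rather than on `DepGraph`, where this commit removes it) keeps the id next to `intern_node`, the one place that uses it. The pattern is: pay for the profiler string lookup once at construction, then copy a small `Option` on every intern. A toy sketch, with the profiler API reduced to a stub (the real `SelfProfilerRef` methods differ):

    #[derive(Clone, Copy)]
    struct EventId(u32);

    struct Profiler;
    impl Profiler {
        // Stub: returns None when self-profiling is disabled.
        fn get_or_alloc_cached_string(&self, _label: &str) -> Option<EventId> {
            None
        }
    }

    struct CurrentGraph {
        // Looked up once; every later intern just copies this Option.
        node_intern_event_id: Option<EventId>,
    }

    impl CurrentGraph {
        fn new(profiler: &Profiler) -> CurrentGraph {
            let node_intern_event_id =
                profiler.get_or_alloc_cached_string("incr_comp_intern_dep_graph_node");
            CurrentGraph { node_intern_event_id }
        }
    }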
impl<K: DepKind> CurrentDepGraph<K> { impl<K: DepKind> CurrentDepGraph<K> {
fn new( fn new(
profiler: &SelfProfilerRef,
prev_graph_node_count: usize, prev_graph_node_count: usize,
encoder: FileEncoder, encoder: FileEncoder,
record_graph: bool, record_graph: bool,
@ -908,6 +901,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200; let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
let node_intern_event_id = profiler
.get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
.map(EventId::from_label);
CurrentDepGraph { CurrentDepGraph {
encoder: Steal::new(GraphEncoder::new( encoder: Steal::new(GraphEncoder::new(
encoder, encoder,
@ -927,6 +924,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
forbidden_edge, forbidden_edge,
total_read_count: AtomicU64::new(0), total_read_count: AtomicU64::new(0),
total_duplicate_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0),
node_intern_event_id,
} }
} }
@ -970,6 +968,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) { ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
let print_status = cfg!(debug_assertions) && print_status; let print_status = cfg!(debug_assertions) && print_status;
// Get timer for profiling `DepNode` interning
let _node_intern_timer =
self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
if let Some(prev_index) = prev_graph.node_to_index_opt(&key) { if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
// Determine the color and index of the new `DepNode`. // Determine the color and index of the new `DepNode`.
if let Some(fingerprint) = fingerprint { if let Some(fingerprint) = fingerprint {

View File

@ -32,6 +32,17 @@ pub trait DepContext: Copy {
/// Access the compiler session. /// Access the compiler session.
fn sess(&self) -> &Session; fn sess(&self) -> &Session;
/// Return whether this kind always requires evaluation.
fn is_eval_always(&self, kind: Self::DepKind) -> bool;
fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;
/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: DepNode<Self::DepKind>) -> bool;
/// Load data from the on-disk cache.
fn try_load_from_on_disk_cache(&self, dep_node: DepNode<Self::DepKind>);
} }
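With these four methods on `DepContext`, one context type can answer every per-`DepKind` question, which makes it natural to back them all with a single per-kind table of flags and function pointers. A hedged sketch of that shape; `QueryKindInfo` and `QUERY_TABLE` are invented names for illustration, with `u64` standing in for the real `DepNode` type:

    struct QueryKindInfo {
        is_anon: bool,
        is_eval_always: bool,
        // None for kinds that cannot be forced or have no disk cache entry.
        force_from_dep_node: Option<fn(u64) -> bool>,
        try_load_from_on_disk_cache: Option<fn(u64)>,
    }

    // One array indexed by DepKind replaces two parallel vtables,
    // so a single indexed load fetches everything about a kind.
    static QUERY_TABLE: &[QueryKindInfo] = &[QueryKindInfo {
        is_anon: false,
        is_eval_always: true,
        force_from_dep_node: None,
        try_load_from_on_disk_cache: None,
    }];

    fn is_eval_always(kind: usize) -> bool {
        QUERY_TABLE[kind].is_eval_always
    }

    fn try_force_from_dep_node(kind: usize, dep_node: u64) -> bool {
        match QUERY_TABLE[kind].force_from_dep_node {
            Some(force) => force(dep_node),
            None => false,
        }
    }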
pub trait HasDepContext: Copy { pub trait HasDepContext: Copy {
@ -51,7 +62,7 @@ impl<T: DepContext> HasDepContext for T {
} }
/// Describes the contents of the fingerprint generated by a given query. /// Describes the contents of the fingerprint generated by a given query.
#[derive(PartialEq, Eq, Copy, Clone)] #[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum FingerprintStyle { pub enum FingerprintStyle {
/// The fingerprint is actually a DefPathHash. /// The fingerprint is actually a DefPathHash.
DefPathHash, DefPathHash,
@ -75,12 +86,6 @@ impl FingerprintStyle {
pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static { pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
const NULL: Self; const NULL: Self;
/// Return whether this kind always requires evaluation.
fn is_eval_always(&self) -> bool;
/// Return whether this kind requires additional parameters to be executed.
fn has_params(&self) -> bool;
/// Implementation of `std::fmt::Debug` for `DepNode`. /// Implementation of `std::fmt::Debug` for `DepNode`.
fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result; fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
@ -93,6 +98,4 @@ pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder>
fn read_deps<OP>(op: OP) fn read_deps<OP>(op: OP)
where where
OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps<Self>>>); OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps<Self>>>);
fn fingerprint_style(&self) -> FingerprintStyle;
} }

View File

@ -24,7 +24,8 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
pub dep_kind: CTX::DepKind, pub dep_kind: CTX::DepKind,
pub eval_always: bool, pub eval_always: bool,
pub hash_result: fn(&mut StableHashingContext<'_>, &V) -> Option<Fingerprint>, pub compute: fn(CTX::DepContext, K) -> V,
pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V, pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
pub cache_on_disk: fn(CTX, &K, Option<&V>) -> bool, pub cache_on_disk: fn(CTX, &K, Option<&V>) -> bool,
pub try_load_from_disk: fn(CTX, SerializedDepNodeIndex) -> Option<V>, pub try_load_from_disk: fn(CTX, SerializedDepNodeIndex) -> Option<V>,
@ -38,12 +39,8 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
DepNode::construct(tcx, self.dep_kind, key) DepNode::construct(tcx, self.dep_kind, key)
} }
pub(crate) fn hash_result( pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
&self, (self.compute)(tcx, key)
hcx: &mut StableHashingContext<'_>,
value: &V,
) -> Option<Fingerprint> {
(self.hash_result)(hcx, value)
} }
pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool { pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
@ -59,6 +56,9 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
const ANON: bool; const ANON: bool;
const EVAL_ALWAYS: bool; const EVAL_ALWAYS: bool;
const DEP_KIND: CTX::DepKind; const DEP_KIND: CTX::DepKind;
const HASH_RESULT: Option<
fn(hcx: &mut StableHashingContext<'_>, result: &Self::Value) -> Fingerprint,
>;
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>; type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
@ -75,9 +75,6 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
// Don't use this method to compute query results, instead use the methods on TyCtxt // Don't use this method to compute query results, instead use the methods on TyCtxt
fn compute_fn(tcx: CTX, key: &Self::Key) -> fn(CTX::DepContext, Self::Key) -> Self::Value; fn compute_fn(tcx: CTX, key: &Self::Key) -> fn(CTX::DepContext, Self::Key) -> Self::Value;
fn hash_result(hcx: &mut StableHashingContext<'_>, result: &Self::Value)
-> Option<Fingerprint>;
fn handle_cycle_error(tcx: CTX, diag: DiagnosticBuilder<'_>) -> Self::Value; fn handle_cycle_error(tcx: CTX, diag: DiagnosticBuilder<'_>) -> Self::Value;
} }
@ -95,7 +92,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryAccessors<CTX> {
} }
pub(crate) trait QueryVtableExt<CTX: QueryContext, K, V> { pub(crate) trait QueryVtableExt<CTX: QueryContext, K, V> {
const VTABLE: QueryVtable<CTX, K, V>; fn make_vtable(tcx: CTX, key: &K) -> QueryVtable<CTX, K, V>;
} }
impl<CTX, Q> QueryVtableExt<CTX, Q::Key, Q::Value> for Q impl<CTX, Q> QueryVtableExt<CTX, Q::Key, Q::Value> for Q
@ -103,13 +100,16 @@ where
CTX: QueryContext, CTX: QueryContext,
Q: QueryDescription<CTX>, Q: QueryDescription<CTX>,
{ {
const VTABLE: QueryVtable<CTX, Q::Key, Q::Value> = QueryVtable { fn make_vtable(tcx: CTX, key: &Q::Key) -> QueryVtable<CTX, Q::Key, Q::Value> {
anon: Q::ANON, QueryVtable {
dep_kind: Q::DEP_KIND, anon: Q::ANON,
eval_always: Q::EVAL_ALWAYS, dep_kind: Q::DEP_KIND,
hash_result: Q::hash_result, eval_always: Q::EVAL_ALWAYS,
handle_cycle_error: Q::handle_cycle_error, hash_result: Q::HASH_RESULT,
cache_on_disk: Q::cache_on_disk, compute: Q::compute_fn(tcx, key),
try_load_from_disk: Q::try_load_from_disk, handle_cycle_error: Q::handle_cycle_error,
}; cache_on_disk: Q::cache_on_disk,
try_load_from_disk: Q::try_load_from_disk,
}
}
} }
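`make_vtable` replaces the old `const VTABLE` because the new `compute` field comes from `Q::compute_fn(tcx, key)`, which may pick a different provider per call; a value depending on `tcx` and `key` cannot live in a `const`. Building the vtable on demand keeps the compute pointer and the query flags travelling together. A reduced sketch of the shape, with simplified types:

    struct Vtable<K, V> {
        anon: bool,
        eval_always: bool,
        compute: fn(K) -> V,
    }

    // A boolean stands in for whatever per-call information selects
    // the provider in the real compiler.
    fn make_vtable(use_local_provider: bool) -> Vtable<u32, u32> {
        let compute: fn(u32) -> u32 =
            if use_local_provider { |k| k } else { |k| k + 1 };
        Vtable { anon: false, eval_always: false, compute }
    }

    fn demo() {
        let vtable = make_vtable(true);
        assert_eq!((vtable.compute)(7), 7);
    }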

View File

@ -14,7 +14,7 @@ pub use self::caches::{
mod config; mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription}; pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex}; use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec; use rustc_data_structures::thin_vec::ThinVec;
@ -122,12 +122,6 @@ pub trait QueryContext: HasDepContext {
fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>; fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
/// Load data from the on-disk cache.
fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
/// Load side effects associated to the node in the previous session. /// Load side effects associated to the node in the previous session.
fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects; fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;

View File

@ -2,7 +2,7 @@
//! generate the actual methods on tcx which find and execute the provider, //! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth. //! manage the caches, and so forth.
use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams}; use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::query::caches::QueryCache; use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt}; use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{ use crate::query::job::{
@ -382,7 +382,6 @@ fn try_execute_query<CTX, C>(
lookup: QueryLookup, lookup: QueryLookup,
dep_node: Option<DepNode<CTX::DepKind>>, dep_node: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> (C::Stored, Option<DepNodeIndex>) ) -> (C::Stored, Option<DepNodeIndex>)
where where
C: QueryCache, C: QueryCache,
@ -398,7 +397,7 @@ where
query.dep_kind, query.dep_kind,
) { ) {
TryGetJob::NotYetStarted(job) => { TryGetJob::NotYetStarted(job) => {
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id, compute); let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
let result = job.complete(cache, result, dep_node_index); let result = job.complete(cache, result, dep_node_index);
(result, Some(dep_node_index)) (result, Some(dep_node_index))
} }
@ -429,7 +428,6 @@ fn execute_job<CTX, K, V>(
mut dep_node_opt: Option<DepNode<CTX::DepKind>>, mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, K, V>, query: &QueryVtable<CTX, K, V>,
job_id: QueryJobId<CTX::DepKind>, job_id: QueryJobId<CTX::DepKind>,
compute: fn(CTX::DepContext, K) -> V,
) -> (V, DepNodeIndex) ) -> (V, DepNodeIndex)
where where
K: Clone + DepNodeParams<CTX::DepContext>, K: Clone + DepNodeParams<CTX::DepContext>,
@ -441,7 +439,7 @@ where
// Fast path for when incr. comp. is off. // Fast path for when incr. comp. is off.
if !dep_graph.is_fully_enabled() { if !dep_graph.is_fully_enabled() {
let prof_timer = tcx.dep_context().profiler().query_provider(); let prof_timer = tcx.dep_context().profiler().query_provider();
let result = tcx.start_query(job_id, None, || compute(*tcx.dep_context(), key)); let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
let dep_node_index = dep_graph.next_virtual_depnode_index(); let dep_node_index = dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into()); prof_timer.finish_with_query_invocation_id(dep_node_index.into());
return (result, dep_node_index); return (result, dep_node_index);
@ -455,7 +453,7 @@ where
// The diagnostics for this query will be promoted to the current session during // The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here. // `try_mark_green()`, so we can ignore them here.
if let Some(ret) = tcx.start_query(job_id, None, || { if let Some(ret) = tcx.start_query(job_id, None, || {
try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query, compute) try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
}) { }) {
return ret; return ret;
} }
@ -467,14 +465,14 @@ where
let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || { let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
if query.anon { if query.anon {
return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || { return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
compute(*tcx.dep_context(), key) query.compute(*tcx.dep_context(), key)
}); });
} }
// `to_dep_node` is expensive for some `DepKind`s. // `to_dep_node` is expensive for some `DepKind`s.
let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key)); let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
dep_graph.with_task(dep_node, *tcx.dep_context(), key, compute, query.hash_result) dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
}); });
prof_timer.finish_with_query_invocation_id(dep_node_index.into()); prof_timer.finish_with_query_invocation_id(dep_node_index.into());
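With `compute` now on the vtable, anonymous and named queries draw their provider from the same place; only the dep-graph bookkeeping differs between the two branches above. A reduced sketch of that dispatch, with the `with_*` helpers as stand-ins for the real dep-graph methods:

    fn with_anon_task<R>(op: impl FnOnce() -> R) -> (R, u32) {
        (op(), 0) // stand-in: real code records a node keyed by its edges
    }

    fn with_task<R>(op: impl FnOnce() -> R) -> (R, u32) {
        (op(), 1) // stand-in: real code interns the named DepNode
    }

    fn execute_job(anon: bool, compute: fn(u32) -> u32, key: u32) -> (u32, u32) {
        if anon {
            // Anonymous queries need no reconstructible DepNode.
            with_anon_task(|| compute(key))
        } else {
            // Named queries intern a DepNode and may hash the result.
            with_task(|| compute(key))
        }
    }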
@ -498,7 +496,6 @@ fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
key: &K, key: &K,
dep_node: &DepNode<CTX::DepKind>, dep_node: &DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, K, V>, query: &QueryVtable<CTX, K, V>,
compute: fn(CTX::DepContext, K) -> V,
) -> Option<(V, DepNodeIndex)> ) -> Option<(V, DepNodeIndex)>
where where
K: Clone, K: Clone,
@ -520,14 +517,6 @@ where
let result = query.try_load_from_disk(tcx, prev_dep_node_index); let result = query.try_load_from_disk(tcx, prev_dep_node_index);
prof_timer.finish_with_query_invocation_id(dep_node_index.into()); prof_timer.finish_with_query_invocation_id(dep_node_index.into());
// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!dep_node.kind.fingerprint_style().reconstructible() || result.is_some(),
"missing on-disk cache entry for {:?}",
dep_node
);
if let Some(result) = result { if let Some(result) = result {
// If `-Zincremental-verify-ich` is specified, re-hash results from // If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint. // the cache and make sure that they have the expected fingerprint.
@ -537,6 +526,14 @@ where
return Some((result, dep_node_index)); return Some((result, dep_node_index));
} }
// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
"missing on-disk cache entry for {:?}",
dep_node
);
} }
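Note that the `debug_assert!` did not just move textually: the successful-load branch now returns first, so the assertion only runs when the cached result was actually missing, which lets it drop the old `|| result.is_some()` disjunct. The two shapes are equivalent; a compact sketch:

    fn check_old(reconstructible: bool, result: Option<u32>) {
        // Old shape: assert before inspecting the result.
        debug_assert!(!reconstructible || result.is_some());
    }

    fn check_new(reconstructible: bool, result: Option<u32>) {
        if result.is_some() {
            return; // happy path: the assert below is never reached
        }
        // New shape: only reachable when the load failed.
        debug_assert!(!reconstructible, "missing on-disk cache entry");
    }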
// We could not load a result from the on-disk cache, so // We could not load a result from the on-disk cache, so
@ -544,7 +541,7 @@ where
let prof_timer = tcx.dep_context().profiler().query_provider(); let prof_timer = tcx.dep_context().profiler().query_provider();
// The dep-graph for this computation is already in-place. // The dep-graph for this computation is already in-place.
let result = dep_graph.with_ignore(|| compute(*tcx.dep_context(), key.clone())); let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));
prof_timer.finish_with_query_invocation_id(dep_node_index.into()); prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@ -577,12 +574,12 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
); );
debug!("BEGIN verify_ich({:?})", dep_node); debug!("BEGIN verify_ich({:?})", dep_node);
let mut hcx = tcx.create_stable_hashing_context(); let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
let mut hcx = tcx.create_stable_hashing_context();
let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO); f(&mut hcx, result)
debug!("END verify_ich({:?})", dep_node); });
let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node); let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
debug!("END verify_ich({:?})", dep_node);
if Some(new_hash) != old_hash { if Some(new_hash) != old_hash {
let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name { let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
@ -665,41 +662,6 @@ where
} }
} }
#[inline(never)]
fn force_query_impl<CTX, C>(
tcx: CTX,
state: &QueryState<CTX::DepKind, C::Key>,
cache: &QueryCacheStore<C>,
key: C::Key,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> bool
where
C: QueryCache,
C::Key: DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
debug_assert!(!query.anon);
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
let cached = cache.cache.lookup(cache, &key, |_, index| {
if unlikely!(tcx.dep_context().profiler().enabled()) {
tcx.dep_context().profiler().query_cache_hit(index.into());
}
});
let lookup = match cached {
Ok(()) => return true,
Err(lookup) => lookup,
};
let _ =
try_execute_query(tcx, state, cache, DUMMY_SP, key, lookup, Some(dep_node), query, compute);
true
}
pub enum QueryMode { pub enum QueryMode {
Get, Get,
Ensure, Ensure,
@ -717,9 +679,9 @@ where
Q::Key: DepNodeParams<CTX::DepContext>, Q::Key: DepNodeParams<CTX::DepContext>,
CTX: QueryContext, CTX: QueryContext,
{ {
let query = &Q::VTABLE; let query = Q::make_vtable(tcx, &key);
let dep_node = if let QueryMode::Ensure = mode { let dep_node = if let QueryMode::Ensure = mode {
let (must_run, dep_node) = ensure_must_run(tcx, &key, query); let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
if !must_run { if !must_run {
return None; return None;
} }
@ -729,7 +691,6 @@ where
}; };
debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
let compute = Q::compute_fn(tcx, &key);
let (result, dep_node_index) = try_execute_query( let (result, dep_node_index) = try_execute_query(
tcx, tcx,
Q::query_state(tcx), Q::query_state(tcx),
@ -738,8 +699,7 @@ where
key, key,
lookup, lookup,
dep_node, dep_node,
query, &query,
compute,
); );
if let Some(dep_node_index) = dep_node_index { if let Some(dep_node_index) = dep_node_index {
tcx.dep_context().dep_graph().read_index(dep_node_index) tcx.dep_context().dep_graph().read_index(dep_node_index)
@ -747,34 +707,29 @@ where
Some(result) Some(result)
} }
pub fn force_query<Q, CTX>(tcx: CTX, dep_node: &DepNode<CTX::DepKind>) -> bool pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
where where
Q: QueryDescription<CTX>, Q: QueryDescription<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>, Q::Key: DepNodeParams<CTX::DepContext>,
CTX: QueryContext, CTX: QueryContext,
{ {
if Q::ANON { assert!(!Q::ANON);
return false;
}
if !<Q::Key as DepNodeParams<CTX::DepContext>>::fingerprint_style().reconstructible() { // We may be concurrently trying both execute and force a query.
return false; // Ensure that only one of them runs the query.
} let cache = Q::query_cache(tcx);
let cached = cache.cache.lookup(cache, &key, |_, index| {
if unlikely!(tcx.dep_context().profiler().enabled()) {
tcx.dep_context().profiler().query_cache_hit(index.into());
}
});
let Some(key) = let lookup = match cached {
<Q::Key as DepNodeParams<CTX::DepContext>>::recover(*tcx.dep_context(), &dep_node) Ok(()) => return,
else { Err(lookup) => lookup,
return false;
}; };
let compute = Q::compute_fn(tcx, &key); let query = Q::make_vtable(tcx, &key);
force_query_impl( let state = Q::query_state(tcx);
tcx, try_execute_query(tcx, state, cache, DUMMY_SP, key, lookup, Some(dep_node), &query);
Q::query_state(tcx),
Q::query_cache(tcx),
key,
*dep_node,
&Q::VTABLE,
compute,
)
} }