Auto merge of #89978 - cjgillot:qarray, r=Mark-Simulacrum
Merge the two depkind vtables

Knowledge of `DepKind`s is managed using two arrays containing flags (is_anon, eval_always, fingerprint_style) and function pointers (forcing and loading code). This PR aims at merging the two arrays so as to reduce unneeded indirect calls and (hopefully) increase code locality.

r? `@ghost`
This commit is contained in: efd0483949
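
The diff below is easier to follow with a concrete picture of the layout the PR moves to: one table entry per `DepKind` that carries both the flags and the optional forcing/loading function pointers, indexed by the enum discriminant. The following is a minimal, self-contained sketch of that idea only; `Ctxt`, the example kinds and `force_hir_owner` are illustrative stand-ins, not the real rustc types.

// Sketch of the merged "vtable": flags and optional callbacks in one struct.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FingerprintStyle {
    DefPathHash,
    Unit,
    Opaque,
}

pub struct Ctxt; // stand-in for TyCtxt

#[derive(Clone, Copy, Debug)]
pub struct DepNode(pub DepKind);

pub struct DepKindStruct {
    pub is_anon: bool,
    pub is_eval_always: bool,
    pub fingerprint_style: FingerprintStyle,
    // Only kinds whose query key can be reconstructed get forcing/loading code;
    // the rest store `None`, so callers skip the indirect call entirely.
    pub force_from_dep_node: Option<fn(&Ctxt, DepNode) -> bool>,
    pub try_load_from_on_disk_cache: Option<fn(&Ctxt, DepNode)>,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub enum DepKind {
    Null = 0,
    hir_owner = 1,
}

fn force_hir_owner(_tcx: &Ctxt, dep_node: DepNode) -> bool {
    println!("re-running the query behind {:?}", dep_node);
    true
}

// One array replaces the former flag array plus the callback array.
static DEP_KINDS: &[DepKindStruct] = &[
    DepKindStruct {
        is_anon: false,
        is_eval_always: false,
        fingerprint_style: FingerprintStyle::Unit,
        force_from_dep_node: None,
        try_load_from_on_disk_cache: None,
    },
    DepKindStruct {
        is_anon: false,
        is_eval_always: false,
        fingerprint_style: FingerprintStyle::DefPathHash,
        force_from_dep_node: Some(force_hir_owner),
        try_load_from_on_disk_cache: None,
    },
];

fn query_kind(kind: DepKind) -> &'static DepKindStruct {
    // The enum discriminant indexes the single table.
    &DEP_KINDS[kind as usize]
}

fn try_force_from_dep_node(tcx: &Ctxt, dep_node: DepNode) -> bool {
    // A single lookup yields both the flags and the forcing code.
    match query_kind(dep_node.0).force_from_dep_node {
        Some(force) => force(tcx, dep_node),
        None => false,
    }
}

fn main() {
    assert!(!try_force_from_dep_node(&Ctxt, DepNode(DepKind::Null)));
    assert!(try_force_from_dep_node(&Ctxt, DepNode(DepKind::hir_owner)));
}
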
@@ -4319,7 +4319,6 @@ dependencies = [
"rustc_serialize",
"rustc_session",
"rustc_span",
"tracing",
]

[[package]]
@@ -224,7 +224,7 @@ pub(crate) fn run_aot(
tcx,
(backend_config.clone(), cgu.name()),
module_codegen,
rustc_middle::dep_graph::hash_result,
Some(rustc_middle::dep_graph::hash_result),
);

if let Some((id, product)) = work_product {
@@ -59,7 +59,13 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
let start_time = Instant::now();

let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
let (module, _) = tcx.dep_graph.with_task(
dep_node,
tcx,
cgu_name,
module_codegen,
Some(dep_graph::hash_result),
);
let time_to_codegen = start_time.elapsed();
drop(prof_timer);
@@ -113,8 +113,13 @@ pub fn compile_codegen_unit(
let start_time = Instant::now();

let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let (module, _) =
tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
let (module, _) = tcx.dep_graph.with_task(
dep_node,
tcx,
cgu_name,
module_codegen,
Some(dep_graph::hash_result),
);
let time_to_codegen = start_time.elapsed();

// We assume that the cost to run LLVM on a CGU is proportional to
@@ -126,30 +126,36 @@ impl IfThisChanged<'tcx> {
if attr.has_name(sym::rustc_if_this_changed) {
let dep_node_interned = self.argument(attr);
let dep_node = match dep_node_interned {
None => DepNode::from_def_path_hash(def_path_hash, DepKind::hir_owner),
Some(n) => match DepNode::from_label_string(&n.as_str(), def_path_hash) {
Ok(n) => n,
Err(()) => {
self.tcx.sess.span_fatal(
attr.span,
&format!("unrecognized DepNode variant {:?}", n),
);
None => {
DepNode::from_def_path_hash(self.tcx, def_path_hash, DepKind::hir_owner)
}
Some(n) => {
match DepNode::from_label_string(self.tcx, &n.as_str(), def_path_hash) {
Ok(n) => n,
Err(()) => {
self.tcx.sess.span_fatal(
attr.span,
&format!("unrecognized DepNode variant {:?}", n),
);
}
}
},
}
};
self.if_this_changed.push((attr.span, def_id.to_def_id(), dep_node));
} else if attr.has_name(sym::rustc_then_this_would_need) {
let dep_node_interned = self.argument(attr);
let dep_node = match dep_node_interned {
Some(n) => match DepNode::from_label_string(&n.as_str(), def_path_hash) {
Ok(n) => n,
Err(()) => {
self.tcx.sess.span_fatal(
attr.span,
&format!("unrecognized DepNode variant {:?}", n),
);
Some(n) => {
match DepNode::from_label_string(self.tcx, &n.as_str(), def_path_hash) {
Ok(n) => n,
Err(()) => {
self.tcx.sess.span_fatal(
attr.span,
&format!("unrecognized DepNode variant {:?}", n),
);
}
}
},
}
None => {
self.tcx.sess.span_fatal(attr.span, "missing DepNode variant");
}
@@ -15,7 +15,7 @@
use rustc_ast::{self as ast, Attribute, NestedMetaItem};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit;
use rustc_hir::itemlikevisit::ItemLikeVisitor;
use rustc_hir::Node as HirNode;
@@ -302,18 +302,6 @@ impl DirtyCleanVisitor<'tcx> {
out
}

fn dep_nodes<'l>(
&self,
labels: &'l Labels,
def_id: DefId,
) -> impl Iterator<Item = DepNode> + 'l {
let def_path_hash = self.tcx.def_path_hash(def_id);
labels.iter().map(move |label| match DepNode::from_label_string(label, def_path_hash) {
Ok(dep_node) => dep_node,
Err(()) => unreachable!("label: {}", label),
})
}

fn dep_node_str(&self, dep_node: &DepNode) -> String {
if let Some(def_id) = dep_node.extract_def_id(self.tcx) {
format!("{:?}({})", dep_node.kind, self.tcx.def_path_str(def_id))
@@ -345,16 +333,19 @@ impl DirtyCleanVisitor<'tcx> {
}

fn check_item(&mut self, item_id: LocalDefId, item_span: Span) {
let def_path_hash = self.tcx.def_path_hash(item_id.to_def_id());
for attr in self.tcx.get_attrs(item_id.to_def_id()).iter() {
let assertion = match self.assertion_maybe(item_id, attr) {
Some(a) => a,
None => continue,
};
self.checked_attrs.insert(attr.id);
for dep_node in self.dep_nodes(&assertion.clean, item_id.to_def_id()) {
for label in assertion.clean {
let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
self.assert_clean(item_span, dep_node);
}
for dep_node in self.dep_nodes(&assertion.dirty, item_id.to_def_id()) {
for label in assertion.dirty {
let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
self.assert_dirty(item_span, dep_node);
}
}
@@ -838,6 +838,7 @@ pub fn create_global_ctxt<'tcx>(
dep_graph,
queries.on_disk_cache.as_ref().map(OnDiskCache::as_dyn),
queries.as_dyn(),
rustc_query_impl::query_callbacks(arena),
crate_name,
outputs,
)
@@ -100,6 +100,8 @@ macro_rules! arena_types {
// This is used to decode the &'tcx [Span] for InlineAsm's line_spans.
[decode] span: rustc_span::Span,
[decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,

[] dep_kind: rustc_middle::dep_graph::DepKindStruct,
], $tcx);
)
}
@@ -75,147 +75,73 @@ pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
/// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual
/// jump table instead of large matches.
pub struct DepKindStruct {
/// Whether the DepNode has parameters (query keys).
pub(super) has_params: bool,

/// Anonymous queries cannot be replayed from one compiler invocation to the next.
/// When their result is needed, it is recomputed. They are useful for fine-grained
/// dependency tracking, and caching within one compiler invocation.
pub(super) is_anon: bool,
pub is_anon: bool,

/// Eval-always queries do not track their dependencies, and are always recomputed, even if
/// their inputs have not changed since the last compiler invocation. The result is still
/// cached within one compiler invocation.
pub(super) is_eval_always: bool,
pub is_eval_always: bool,

/// Whether the query key can be recovered from the hashed fingerprint.
/// See [DepNodeParams] trait for the behaviour of each key type.
// FIXME: Make this a simple boolean once DepNodeParams::fingerprint_style
// can be made a specialized associated const.
fingerprint_style: fn() -> FingerprintStyle,
}
pub fingerprint_style: FingerprintStyle,

impl std::ops::Deref for DepKind {
type Target = DepKindStruct;
fn deref(&self) -> &DepKindStruct {
&DEP_KINDS[*self as usize]
}
/// The red/green evaluation system will try to mark a specific DepNode in the
/// dependency graph as green by recursively trying to mark the dependencies of
/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
/// where we don't know if it is red or green and we therefore actually have
/// to recompute its value in order to find out. Since the only piece of
/// information that we have at that point is the `DepNode` we are trying to
/// re-evaluate, we need some way to re-run a query from just that. This is what
/// `force_from_dep_node()` implements.
///
/// In the general case, a `DepNode` consists of a `DepKind` and an opaque
/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
/// is usually constructed by computing a stable hash of the query-key that the
/// `DepNode` corresponds to. Consequently, it is not in general possible to go
/// back from hash to query-key (since hash functions are not reversible). For
/// this reason `force_from_dep_node()` is expected to fail from time to time
/// because we just cannot find out, from the `DepNode` alone, what the
/// corresponding query-key is and therefore cannot re-run the query.
///
/// The system deals with this case letting `try_mark_green` fail which forces
/// the root query to be re-evaluated.
///
/// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
/// Fortunately, we can use some contextual information that will allow us to
/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
/// valid `DefPathHash`. Since we also always build a huge table that maps every
/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
/// everything we need to re-run the query.
///
/// Take the `mir_promoted` query as an example. Like many other queries, it
/// just has a single parameter: the `DefId` of the item it will compute the
/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`.
pub force_from_dep_node: Option<fn(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool>,

/// Invoke a query to put the on-disk cached value in memory.
pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'_>, DepNode)>,
}

impl DepKind {
#[inline(always)]
pub fn fingerprint_style(&self) -> FingerprintStyle {
pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle {
// Only fetch the DepKindStruct once.
let data: &DepKindStruct = &**self;
let data = tcx.query_kind(self);
if data.is_anon {
return FingerprintStyle::Opaque;
}

(data.fingerprint_style)()
data.fingerprint_style
}
}

// erase!() just makes tokens go away. It's used to specify which macro argument
// is repeated (i.e., which sub-expression of the macro we are in) but don't need
// to actually use any of the arguments.
macro_rules! erase {
($x:tt) => {{}};
}

macro_rules! is_anon_attr {
(anon) => {
true
};
($attr:ident) => {
false
};
}

macro_rules! is_eval_always_attr {
(eval_always) => {
true
};
($attr:ident) => {
false
};
}

macro_rules! contains_anon_attr {
($(($attr:ident $($attr_args:tt)* )),*) => ({$(is_anon_attr!($attr) | )* false});
}

macro_rules! contains_eval_always_attr {
($(($attr:ident $($attr_args:tt)* )),*) => ({$(is_eval_always_attr!($attr) | )* false});
}

#[allow(non_upper_case_globals)]
pub mod dep_kind {
use super::*;
use crate::ty::query::query_keys;
use rustc_query_system::dep_graph::FingerprintStyle;

// We use this for most things when incr. comp. is turned off.
pub const Null: DepKindStruct = DepKindStruct {
has_params: false,
is_anon: false,
is_eval_always: false,

fingerprint_style: || FingerprintStyle::Unit,
};

pub const TraitSelect: DepKindStruct = DepKindStruct {
has_params: false,
is_anon: true,
is_eval_always: false,

fingerprint_style: || FingerprintStyle::Unit,
};

pub const CompileCodegenUnit: DepKindStruct = DepKindStruct {
has_params: true,
is_anon: false,
is_eval_always: false,

fingerprint_style: || FingerprintStyle::Opaque,
};

pub const CompileMonoItem: DepKindStruct = DepKindStruct {
has_params: true,
is_anon: false,
is_eval_always: false,

fingerprint_style: || FingerprintStyle::Opaque,
};

macro_rules! define_query_dep_kinds {
($(
[$($attrs:tt)*]
$variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
,)*) => (
$(pub const $variant: DepKindStruct = {
const has_params: bool = $({ erase!($tuple_arg_ty); true } |)* false;
const is_anon: bool = contains_anon_attr!($($attrs)*);
const is_eval_always: bool = contains_eval_always_attr!($($attrs)*);

#[inline(always)]
fn fingerprint_style() -> rustc_query_system::dep_graph::FingerprintStyle {
<query_keys::$variant<'_> as DepNodeParams<TyCtxt<'_>>>
::fingerprint_style()
}

DepKindStruct {
has_params,
is_anon,
is_eval_always,
fingerprint_style,
}
};)*
);
}

rustc_dep_node_append!([define_query_dep_kinds!][]);
}

macro_rules! define_dep_nodes {
(<$tcx:tt>
$(
@@ -225,12 +151,10 @@ macro_rules! define_dep_nodes {
) => (
#[macro_export]
macro_rules! make_dep_kind_array {
($mod:ident) => {[ $(($mod::$variant),)* ]};
($mod:ident) => {[ $($mod::$variant()),* ]};
}

static DEP_KINDS: &[DepKindStruct] = &make_dep_kind_array!(dep_kind);

/// This enum serves as an index into the `DEP_KINDS` array.
/// This enum serves as an index into arrays built by `make_dep_kind_array`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
#[allow(non_camel_case_types)]
pub enum DepKind {
@@ -296,7 +220,7 @@ pub trait DepNodeExt: Sized {
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> Self;
fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self;

/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
@@ -311,7 +235,11 @@ pub trait DepNodeExt: Sized {
fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId>;

/// Used in testing
fn from_label_string(label: &str, def_path_hash: DefPathHash) -> Result<Self, ()>;
fn from_label_string(
tcx: TyCtxt<'_>,
label: &str,
def_path_hash: DefPathHash,
) -> Result<Self, ()>;

/// Used in testing
fn has_label_string(label: &str) -> bool;
@@ -321,8 +249,8 @@ impl DepNodeExt for DepNode {
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
debug_assert!(kind.fingerprint_style() == FingerprintStyle::DefPathHash);
fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash);
DepNode { kind, hash: def_path_hash.0.into() }
}
@@ -337,31 +265,27 @@ impl DepNodeExt for DepNode {
/// refers to something from the previous compilation session that
/// has been removed.
fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
if self.kind.fingerprint_style() == FingerprintStyle::DefPathHash {
Some(
tcx.on_disk_cache
.as_ref()?
.def_path_hash_to_def_id(tcx, DefPathHash(self.hash.into())),
)
if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash {
Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into())))
} else {
None
}
}

/// Used in testing
fn from_label_string(label: &str, def_path_hash: DefPathHash) -> Result<DepNode, ()> {
fn from_label_string(
tcx: TyCtxt<'_>,
label: &str,
def_path_hash: DefPathHash,
) -> Result<DepNode, ()> {
let kind = dep_kind_from_label_string(label)?;

match kind.fingerprint_style() {
match kind.fingerprint_style(tcx) {
FingerprintStyle::Opaque => Err(()),
FingerprintStyle::Unit => {
if !kind.has_params {
Ok(DepNode::new_no_params(kind))
} else {
Err(())
}
FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)),
FingerprintStyle::DefPathHash => {
Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind))
}
FingerprintStyle::DefPathHash => Ok(DepNode::from_def_path_hash(def_path_hash, kind)),
}
}
@@ -377,10 +301,12 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for () {
FingerprintStyle::Unit
}

#[inline(always)]
fn to_fingerprint(&self, _: TyCtxt<'tcx>) -> Fingerprint {
Fingerprint::ZERO
}

#[inline(always)]
fn recover(_: TyCtxt<'tcx>, _: &DepNode) -> Option<Self> {
Some(())
}
@@ -392,14 +318,17 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for DefId {
FingerprintStyle::DefPathHash
}

#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
tcx.def_path_hash(*self).0
}

#[inline(always)]
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
tcx.def_path_str(*self)
}

#[inline(always)]
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
dep_node.extract_def_id(tcx)
}
@@ -411,14 +340,17 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
FingerprintStyle::DefPathHash
}

#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
self.to_def_id().to_fingerprint(tcx)
}

#[inline(always)]
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
self.to_def_id().to_debug_str(tcx)
}

#[inline(always)]
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
dep_node.extract_def_id(tcx).map(|id| id.expect_local())
}
@@ -430,15 +362,18 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
FingerprintStyle::DefPathHash
}

#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
def_id.to_fingerprint(tcx)
}

#[inline(always)]
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
tcx.crate_name(*self).to_string()
}

#[inline(always)]
fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
dep_node.extract_def_id(tcx).map(|id| id.krate)
}
@@ -453,6 +388,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
// We actually would not need to specialize the implementation of this
// method but it's faster to combine the hashes than to instantiate a full
// hashing context and stable-hashing state.
#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let (def_id_0, def_id_1) = *self;

@@ -462,6 +398,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
def_path_hash_0.0.combine(def_path_hash_1.0)
}

#[inline(always)]
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
let (def_id_0, def_id_1) = *self;

@@ -478,6 +415,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
// We actually would not need to specialize the implementation of this
// method but it's faster to combine the hashes than to instantiate a full
// hashing context and stable-hashing state.
#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let HirId { owner, local_id } = *self;
@@ -12,7 +12,7 @@ pub use rustc_query_system::dep_graph::{
SerializedDepNodeIndex, WorkProduct, WorkProductId,
};

pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};
pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt};
crate use dep_node::{make_compile_codegen_unit, make_compile_mono_item};

pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
@@ -24,29 +24,8 @@ pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
impl rustc_query_system::dep_graph::DepKind for DepKind {
const NULL: Self = DepKind::Null;

#[inline(always)]
fn fingerprint_style(&self) -> rustc_query_system::dep_graph::FingerprintStyle {
DepKind::fingerprint_style(self)
}

#[inline(always)]
fn is_eval_always(&self) -> bool {
self.is_eval_always
}

#[inline(always)]
fn has_params(&self) -> bool {
self.has_params
}

fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", node.kind)?;

if !node.kind.has_params && !node.kind.is_anon {
return Ok(());
}

write!(f, "(")?;
write!(f, "{:?}(", node.kind)?;

ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
@@ -110,4 +89,51 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
fn sess(&self) -> &Session {
self.sess
}

#[inline(always)]
fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle {
kind.fingerprint_style(*self)
}

#[inline(always)]
fn is_eval_always(&self, kind: DepKind) -> bool {
self.query_kind(kind).is_eval_always
}

fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool {
debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);

// We must avoid ever having to call `force_from_dep_node()` for a
// `DepNode::codegen_unit`:
// Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
// would always end up having to evaluate the first caller of the
// `codegen_unit` query that *is* reconstructible. This might very well be
// the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
// to re-trigger calling the `codegen_unit` query with the right key. At
// that point we would already have re-done all the work we are trying to
// avoid doing in the first place.
// The solution is simple: Just explicitly call the `codegen_unit` query for
// each CGU, right after partitioning. This way `try_mark_green` will always
// hit the cache instead of having to go through `force_from_dep_node`.
// This assertion makes sure, we actually keep applying the solution above.
debug_assert!(
dep_node.kind != DepKind::codegen_unit,
"calling force_from_dep_node() on DepKind::codegen_unit"
);

let cb = self.query_kind(dep_node.kind);
if let Some(f) = cb.force_from_dep_node {
f(*self, dep_node);
true
} else {
false
}
}

fn try_load_from_on_disk_cache(&self, dep_node: DepNode) {
let cb = self.query_kind(dep_node.kind);
if let Some(f) = cb.try_load_from_on_disk_cache {
f(*self, dep_node)
}
}
}
@@ -1,7 +1,7 @@
//! Type context book-keeping.

use crate::arena::Arena;
use crate::dep_graph::DepGraph;
use crate::dep_graph::{DepGraph, DepKind, DepKindStruct};
use crate::hir::place::Place as HirPlace;
use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
use crate::lint::{struct_lint_level, LintDiagnosticBuilder, LintLevelSource};
@@ -79,11 +79,6 @@ pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
where
Self: Sized;

/// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
/// session, if it still exists. This is used during incremental compilation to
/// turn a deserialized `DefPathHash` into its current `DefId`.
fn def_path_hash_to_def_id(&self, tcx: TyCtxt<'tcx>, def_path_hash: DefPathHash) -> DefId;

fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>);

fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: &mut FileEncoder) -> FileEncodeResult;
@@ -1016,6 +1011,7 @@ pub struct GlobalCtxt<'tcx> {

pub queries: &'tcx dyn query::QueryEngine<'tcx>,
pub query_caches: query::QueryCaches<'tcx>,
query_kinds: &'tcx [DepKindStruct],

// Internal caches for metadata decoding. No need to track deps on this.
pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
@@ -1149,6 +1145,7 @@ impl<'tcx> TyCtxt<'tcx> {
dep_graph: DepGraph,
on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
queries: &'tcx dyn query::QueryEngine<'tcx>,
query_kinds: &'tcx [DepKindStruct],
crate_name: &str,
output_filenames: OutputFilenames,
) -> GlobalCtxt<'tcx> {
@@ -1175,6 +1172,7 @@ impl<'tcx> TyCtxt<'tcx> {
on_disk_cache,
queries,
query_caches: query::QueryCaches::default(),
query_kinds,
ty_rcache: Default::default(),
pred_rcache: Default::default(),
selection_cache: Default::default(),
@@ -1188,6 +1186,10 @@ impl<'tcx> TyCtxt<'tcx> {
}
}

crate fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct {
&self.query_kinds[k as usize]
}

/// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
#[track_caller]
pub fn ty_error(self) -> Ty<'tcx> {
@@ -1301,6 +1303,27 @@ impl<'tcx> TyCtxt<'tcx> {
}
}

/// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
/// session, if it still exists. This is used during incremental compilation to
/// turn a deserialized `DefPathHash` into its current `DefId`.
pub fn def_path_hash_to_def_id(self, hash: DefPathHash) -> DefId {
debug!("def_path_hash_to_def_id({:?})", hash);

let stable_crate_id = hash.stable_crate_id();

// If this is a DefPathHash from the local crate, we can look up the
// DefId in the tcx's `Definitions`.
if stable_crate_id == self.sess.local_stable_crate_id() {
self.untracked_resolutions.definitions.local_def_path_hash_to_def_id(hash).to_def_id()
} else {
// If this is a DefPathHash from an upstream crate, let the CrateStore map
// it to a DefId.
let cstore = &self.untracked_resolutions.cstore;
let cnum = cstore.stable_crate_id_to_crate_num(stable_crate_id);
cstore.def_path_hash_to_def_id(cnum, hash)
}
}

pub fn def_path_debug_str(self, def_id: DefId) -> String {
// We are explicitly not going through queries here in order to get
// crate name and stable crate id since this code is called from debug!()
@@ -102,6 +102,10 @@ impl TyCtxt<'tcx> {
}
}

/// Helper for `TyCtxtEnsure` to avoid a closure.
#[inline(always)]
fn noop<T>(_: &T) {}

macro_rules! query_helper_param_ty {
(DefId) => { impl IntoQueryParam<DefId> };
($K:ty) => { $K };
@@ -165,7 +169,7 @@ macro_rules! define_callbacks {
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
let key = key.into_query_param();
let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |_| {});
let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, noop);

let lookup = match cached {
Ok(()) => return,
@@ -192,9 +196,7 @@ macro_rules! define_callbacks {
pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
{
let key = key.into_query_param();
let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, |value| {
value.clone()
});
let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, Clone::clone);

let lookup = match cached {
Ok(value) => return value,
@@ -9,7 +9,6 @@ doctest = false
[dependencies]
measureme = "10.0.0"
rustc-rayon-core = "0.3.1"
tracing = "0.1"
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
@@ -13,13 +13,12 @@
extern crate rustc_macros;
#[macro_use]
extern crate rustc_middle;
#[macro_use]
extern crate tracing;

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::DiagnosticBuilder;
use rustc_middle::dep_graph;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::{self, DepKindStruct};
use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
use rustc_middle::ty::query::{Providers, QueryEngine};
use rustc_middle::ty::{self, TyCtxt};
@@ -29,7 +28,6 @@ use rustc_span::Span;
#[macro_use]
mod plumbing;
pub use plumbing::QueryCtxt;
use plumbing::QueryStruct;
use rustc_query_system::query::*;

mod stats;
@@ -219,7 +219,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
// Do this *before* we clone 'latest_foreign_def_path_hashes', since
// loading existing queries may cause us to create new DepNodes, which
// may in turn end up invoking `store_foreign_def_id_hash`
tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
tcx.dep_graph.exec_cache_promotions(tcx);

*self.serialized_data.write() = None;
}
@@ -358,23 +358,6 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
Ok(())
})
}

fn def_path_hash_to_def_id(&self, tcx: TyCtxt<'tcx>, hash: DefPathHash) -> DefId {
debug!("def_path_hash_to_def_id({:?})", hash);

let stable_crate_id = hash.stable_crate_id();

// If this is a DefPathHash from the local crate, we can look up the
// DefId in the tcx's `Definitions`.
if stable_crate_id == tcx.sess.local_stable_crate_id() {
tcx.definitions_untracked().local_def_path_hash_to_def_id(hash).to_def_id()
} else {
// If this is a DefPathHash from an upstream crate, let the CrateStore map
// it to a DefId.
let cnum = tcx.cstore_untracked().stable_crate_id_to_crate_num(stable_crate_id);
tcx.cstore_untracked().def_path_hash_to_def_id(cnum, hash)
}
}
}

impl<'sess> OnDiskCache<'sess> {
@@ -764,7 +747,7 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
// If we get to this point, then all of the query inputs were green,
// which means that the definition with this hash is guaranteed to
// still exist in the current compilation session.
Ok(d.tcx().on_disk_cache.as_ref().unwrap().def_path_hash_to_def_id(d.tcx(), def_path_hash))
Ok(d.tcx().def_path_hash_to_def_id(def_path_hash))
}
}
@@ -3,7 +3,7 @@
//! manage the caches, and so forth.

use crate::{on_disk_cache, queries, Queries};
use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
use rustc_middle::ty::tls::{self, ImplicitCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_query_system::dep_graph::HasDepContext;
@@ -53,36 +53,6 @@ impl QueryContext for QueryCtxt<'tcx> {
self.queries.try_collect_active_jobs(**self)
}

fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
(cb.try_load_from_on_disk_cache)(*self, dep_node)
}

fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);

// We must avoid ever having to call `force_from_dep_node()` for a
// `DepNode::codegen_unit`:
// Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
// would always end up having to evaluate the first caller of the
// `codegen_unit` query that *is* reconstructible. This might very well be
// the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
// to re-trigger calling the `codegen_unit` query with the right key. At
// that point we would already have re-done all the work we are trying to
// avoid doing in the first place.
// The solution is simple: Just explicitly call the `codegen_unit` query for
// each CGU, right after partitioning. This way `try_mark_green` will always
// hit the cache instead of having to go through `force_from_dep_node`.
// This assertion makes sure, we actually keep applying the solution above.
debug_assert!(
dep_node.kind != DepKind::codegen_unit,
"calling force_from_dep_node() on DepKind::codegen_unit"
);

let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
(cb.force_from_dep_node)(*self, dep_node)
}

// Interactions with on_disk_cache
fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
self.queries
@@ -193,60 +163,6 @@ impl<'tcx> QueryCtxt<'tcx> {
}
}

/// This struct stores metadata about each Query.
///
/// Information is retrieved by indexing the `QUERIES` array using the integer value
/// of the `DepKind`. Overall, this allows to implement `QueryContext` using this manual
/// jump table instead of large matches.
pub struct QueryStruct {
/// The red/green evaluation system will try to mark a specific DepNode in the
/// dependency graph as green by recursively trying to mark the dependencies of
/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
/// where we don't know if it is red or green and we therefore actually have
/// to recompute its value in order to find out. Since the only piece of
/// information that we have at that point is the `DepNode` we are trying to
/// re-evaluate, we need some way to re-run a query from just that. This is what
/// `force_from_dep_node()` implements.
///
/// In the general case, a `DepNode` consists of a `DepKind` and an opaque
/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
/// is usually constructed by computing a stable hash of the query-key that the
/// `DepNode` corresponds to. Consequently, it is not in general possible to go
/// back from hash to query-key (since hash functions are not reversible). For
/// this reason `force_from_dep_node()` is expected to fail from time to time
/// because we just cannot find out, from the `DepNode` alone, what the
/// corresponding query-key is and therefore cannot re-run the query.
///
/// The system deals with this case letting `try_mark_green` fail which forces
/// the root query to be re-evaluated.
///
/// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
/// Fortunately, we can use some contextual information that will allow us to
/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
/// valid `DefPathHash`. Since we also always build a huge table that maps every
/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
/// everything we need to re-run the query.
///
/// Take the `mir_promoted` query as an example. Like many other queries, it
/// just has a single parameter: the `DefId` of the item it will compute the
/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`.
///
/// When you implement a new query, it will likely have a corresponding new
/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
/// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
/// add it to the "We don't have enough information to reconstruct..." group in
/// the match below.
pub(crate) force_from_dep_node: fn(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool,

/// Invoke a query to put the on-disk cached value in memory.
pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
}

macro_rules! handle_cycle_error {
([][$tcx: expr, $error:expr]) => {{
$error.emit();
@@ -291,14 +207,14 @@ macro_rules! is_eval_always {
}

macro_rules! hash_result {
([][$hcx:expr, $result:expr]) => {{
dep_graph::hash_result($hcx, &$result)
([]) => {{
Some(dep_graph::hash_result)
}};
([(no_hash) $($rest:tt)*][$hcx:expr, $result:expr]) => {{
([(no_hash) $($rest:tt)*]) => {{
None
}};
([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
hash_result!([$($modifiers)*][$($args)*])
([$other:tt $($modifiers:tt)*]) => {
hash_result!([$($modifiers)*])
};
}
@@ -378,6 +294,7 @@ macro_rules! define_queries {
const ANON: bool = is_anon!([$($modifiers)*]);
const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$name;
const HASH_RESULT: Option<fn(&mut StableHashingContext<'_>, &Self::Value) -> Fingerprint> = hash_result!([$($modifiers)*]);

type Cache = query_storage::$name<$tcx>;

@@ -406,13 +323,6 @@ macro_rules! define_queries {
}
}

fn hash_result(
_hcx: &mut StableHashingContext<'_>,
_result: &Self::Value
) -> Option<Fingerprint> {
hash_result!([$($modifiers)*][_hcx, _result])
}

fn handle_cycle_error(
tcx: QueryCtxt<'tcx>,
mut error: DiagnosticBuilder<'_>,
@@ -421,7 +331,7 @@ macro_rules! define_queries {
}
})*

#[allow(non_upper_case_globals)]
#[allow(nonstandard_style)]
pub mod query_callbacks {
use super::*;
use rustc_middle::dep_graph::DepNode;
@@ -431,68 +341,101 @@ macro_rules! define_queries {
use rustc_query_system::dep_graph::FingerprintStyle;

// We use this for most things when incr. comp. is turned off.
pub const Null: QueryStruct = QueryStruct {
force_from_dep_node: |_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node),
try_load_from_on_disk_cache: |_, _| {},
};
pub fn Null() -> DepKindStruct {
DepKindStruct {
is_anon: false,
is_eval_always: false,
fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
try_load_from_on_disk_cache: None,
}
}

pub const TraitSelect: QueryStruct = QueryStruct {
force_from_dep_node: |_, _| false,
try_load_from_on_disk_cache: |_, _| {},
};
pub fn TraitSelect() -> DepKindStruct {
DepKindStruct {
is_anon: true,
is_eval_always: false,
fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
}
}

pub const CompileCodegenUnit: QueryStruct = QueryStruct {
force_from_dep_node: |_, _| false,
try_load_from_on_disk_cache: |_, _| {},
};
pub fn CompileCodegenUnit() -> DepKindStruct {
DepKindStruct {
is_anon: false,
is_eval_always: false,
fingerprint_style: FingerprintStyle::Opaque,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
}
}

pub const CompileMonoItem: QueryStruct = QueryStruct {
force_from_dep_node: |_, _| false,
try_load_from_on_disk_cache: |_, _| {},
};
pub fn CompileMonoItem() -> DepKindStruct {
DepKindStruct {
is_anon: false,
is_eval_always: false,
fingerprint_style: FingerprintStyle::Opaque,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
}
}

$(pub const $name: QueryStruct = {
const is_anon: bool = is_anon!([$($modifiers)*]);
$(pub fn $name()-> DepKindStruct {
let is_anon = is_anon!([$($modifiers)*]);
let is_eval_always = is_eval_always!([$($modifiers)*]);

let fingerprint_style =
<query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::fingerprint_style();

if is_anon || !fingerprint_style.reconstructible() {
return DepKindStruct {
is_anon,
is_eval_always,
fingerprint_style,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
}
}

#[inline(always)]
fn fingerprint_style() -> FingerprintStyle {
<query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>
::fingerprint_style()
fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> Option<query_keys::$name<'tcx>> {
<query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, &dep_node)
}

fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<query_keys::$name<'tcx>> {
<query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, dep_node)
}

fn force_from_dep_node(tcx: QueryCtxt<'_>, dep_node: &DepNode) -> bool {
force_query::<queries::$name<'_>, _>(tcx, dep_node)
}

fn try_load_from_on_disk_cache(tcx: QueryCtxt<'_>, dep_node: &DepNode) {
if is_anon {
return
fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool {
if let Some(key) = recover(tcx, dep_node) {
let tcx = QueryCtxt::from_tcx(tcx);
force_query::<queries::$name<'_>, _>(tcx, key, dep_node);
true
} else {
false
}
}

if !fingerprint_style().reconstructible() {
return
}
fn try_load_from_on_disk_cache(tcx: TyCtxt<'_>, dep_node: DepNode) {
debug_assert!(tcx.dep_graph.is_green(&dep_node));

debug_assert!(tcx.dep_graph.is_green(dep_node));

let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
let key = recover(tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
let tcx = QueryCtxt::from_tcx(tcx);
if queries::$name::cache_on_disk(tcx, &key, None) {
let _ = tcx.$name(key);
}
}

QueryStruct {
force_from_dep_node,
try_load_from_on_disk_cache,
DepKindStruct {
is_anon,
is_eval_always,
fingerprint_style,
force_from_dep_node: Some(force_from_dep_node),
try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache),
}
};)*
})*
}

static QUERY_CALLBACKS: &[QueryStruct] = &make_dep_kind_array!(query_callbacks);
pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct] {
arena.alloc_from_iter(make_dep_kind_array!(query_callbacks))
}
}
}
@@ -60,8 +60,11 @@ impl<K: DepKind> DepNode<K> {
/// Creates a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters.
pub fn new_no_params(kind: K) -> DepNode<K> {
debug_assert!(!kind.has_params());
pub fn new_no_params<Ctxt>(tcx: Ctxt, kind: K) -> DepNode<K>
where
Ctxt: super::DepContext<DepKind = K>,
{
debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
DepNode { kind, hash: Fingerprint::ZERO.into() }
}

@@ -75,7 +78,7 @@ impl<K: DepKind> DepNode<K> {

#[cfg(debug_assertions)]
{
if !kind.fingerprint_style().reconstructible()
if !tcx.fingerprint_style(kind).reconstructible()
&& (tcx.sess().opts.debugging_opts.incremental_info
|| tcx.sess().opts.debugging_opts.query_dep_graph)
{
@@ -121,11 +124,12 @@ impl<Ctxt: DepContext, T> DepNodeParams<Ctxt> for T
where
T: for<'a> HashStable<StableHashingContext<'a>> + fmt::Debug,
{
#[inline]
#[inline(always)]
default fn fingerprint_style() -> FingerprintStyle {
FingerprintStyle::Opaque
}

#[inline(always)]
default fn to_fingerprint(&self, tcx: Ctxt) -> Fingerprint {
let mut hcx = tcx.create_stable_hashing_context();
let mut hasher = StableHasher::new();
@@ -135,10 +139,12 @@ where
hasher.finish()
}

#[inline(always)]
default fn to_debug_str(&self, _: Ctxt) -> String {
format!("{:?}", *self)
}

#[inline(always)]
default fn recover(_: Ctxt, _: &DepNode<Ctxt::DepKind>) -> Option<Self> {
None
}
@@ -33,12 +33,6 @@ pub struct DepGraph<K: DepKind> {
/// each task has a `DepNodeIndex` that uniquely identifies it. This unique
/// ID is used for self-profiling.
virtual_dep_node_index: Lrc<AtomicU32>,

/// The cached event id for profiling node interning. This saves us
/// from having to look up the event id every time we intern a node
/// which may incur too much overhead.
/// This will be None if self-profiling is disabled.
node_intern_event_id: Option<EventId>,
}

rustc_index::newtype_index! {
@@ -96,14 +90,13 @@ struct DepGraphData<K: DepKind> {
dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
R: for<'a> HashStable<StableHashingContext<'a>>,
{
let mut stable_hasher = StableHasher::new();
result.hash_stable(hcx, &mut stable_hasher);

Some(stable_hasher.finish())
stable_hasher.finish()
}

impl<K: DepKind> DepGraph<K> {
@@ -117,8 +110,13 @@ impl<K: DepKind> DepGraph<K> {
) -> DepGraph<K> {
let prev_graph_node_count = prev_graph.node_count();

let current =
CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats);
let current = CurrentDepGraph::new(
profiler,
prev_graph_node_count,
encoder,
record_graph,
record_stats,
);

// Instantiate a dependy-less node only once for anonymous queries.
let _green_node_index = current.intern_new_node(
@@ -129,10 +127,6 @@ impl<K: DepKind> DepGraph<K> {
);
debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

let node_intern_event_id = profiler
.get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
.map(EventId::from_label);

DepGraph {
data: Some(Lrc::new(DepGraphData {
previous_work_products: prev_work_products,
@@ -143,16 +137,11 @@ impl<K: DepKind> DepGraph<K> {
colors: DepNodeColorMap::new(prev_graph_node_count),
})),
virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
node_intern_event_id,
}
}

pub fn new_disabled() -> DepGraph<K> {
DepGraph {
data: None,
virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
node_intern_event_id: None,
}
DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
}

/// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
@@ -215,7 +204,7 @@ impl<K: DepKind> DepGraph<K> {
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
hash_result: fn(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) {
if self.is_fully_enabled() {
self.with_task_impl(key, cx, arg, task, hash_result)
@@ -234,7 +223,7 @@ impl<K: DepKind> DepGraph<K> {
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
hash_result: fn(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) {
// This function is only called when the graph is enabled.
let data = self.data.as_ref().unwrap();
@@ -253,7 +242,7 @@ impl<K: DepKind> DepGraph<K> {
key
);

let task_deps = if key.kind.is_eval_always() {
let task_deps = if cx.dep_context().is_eval_always(key.kind) {
None
} else {
Some(Lock::new(TaskDeps {
@@ -268,15 +257,14 @@ impl<K: DepKind> DepGraph<K> {
let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

let dcx = cx.dep_context();
let mut hcx = dcx.create_stable_hashing_context();
let hashing_timer = dcx.profiler().incr_result_hashing();
let current_fingerprint = hash_result(&mut hcx, &result);
let current_fingerprint = hash_result.map(|f| {
let mut hcx = dcx.create_stable_hashing_context();
f(&mut hcx, &result)
});

let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

// Get timer for profiling `DepNode` interning
let node_intern_timer =
self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
// Intern the new `DepNode`.
let (dep_node_index, prev_and_color) = data.current.intern_node(
dcx.profiler(),
@@ -286,7 +274,6 @@ impl<K: DepKind> DepGraph<K> {
current_fingerprint,
print_status,
);
drop(node_intern_timer);

hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

@@ -315,7 +302,7 @@ impl<K: DepKind> DepGraph<K> {
where
OP: FnOnce() -> R,
{
debug_assert!(!dep_kind.is_eval_always());
debug_assert!(!cx.is_eval_always(dep_kind));

if let Some(ref data) = self.data {
let task_deps = Lock::new(TaskDeps::default());
@@ -492,7 +479,7 @@ impl<K: DepKind> DepGraph<K> {
tcx: Ctxt,
dep_node: &DepNode<K>,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!dep_node.kind.is_eval_always());
debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));

// Return None if the dep graph is disabled
let data = self.data.as_ref()?;
@@ -552,7 +539,7 @@ impl<K: DepKind> DepGraph<K> {

// We don't know the state of this dependency. If it isn't
// an eval_always node, let's try to mark it green recursively.
if !dep_dep_node.kind.is_eval_always() {
if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
debug!(
"try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
is unknown, trying to mark it green",
@@ -575,7 +562,7 @@ impl<K: DepKind> DepGraph<K> {
"try_mark_previous_green({:?}) --- trying to force dependency {:?}",
dep_node, dep_dep_node
);
if !tcx.try_force_from_dep_node(dep_dep_node) {
if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
// The DepNode could not be forced.
debug!(
"try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
@@ -642,7 +629,7 @@ impl<K: DepKind> DepGraph<K> {
}

// We never try to mark eval_always nodes as green
debug_assert!(!dep_node.kind.is_eval_always());
debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));

debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

@@ -740,8 +727,7 @@ impl<K: DepKind> DepGraph<K> {
//
// This method will only load queries that will end up in the disk cache.
// Other queries will not be executed.
pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
let tcx = qcx.dep_context();
pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

let data = self.data.as_ref().unwrap();
@@ -749,7 +735,7 @@ impl<K: DepKind> DepGraph<K> {
match data.colors.get(prev_index) {
Some(DepNodeColor::Green(_)) => {
let dep_node = data.previous.index_to_node(prev_index);
qcx.try_load_from_on_disk_cache(&dep_node);
tcx.try_load_from_on_disk_cache(dep_node);
}
None | Some(DepNodeColor::Red) => {
// We can skip red nodes because a node can only be marked
@ -876,10 +862,17 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
|
||||
/// debugging and only active with `debug_assertions`.
|
||||
total_read_count: AtomicU64,
|
||||
total_duplicate_read_count: AtomicU64,
|
||||
|
||||
/// The cached event id for profiling node interning. This saves us
|
||||
/// from having to look up the event id every time we intern a node
|
||||
/// which may incur too much overhead.
|
||||
/// This will be None if self-profiling is disabled.
|
||||
node_intern_event_id: Option<EventId>,
|
||||
}
|
||||
|
||||
impl<K: DepKind> CurrentDepGraph<K> {
|
||||
fn new(
|
||||
profiler: &SelfProfilerRef,
|
||||
prev_graph_node_count: usize,
|
||||
encoder: FileEncoder,
|
||||
record_graph: bool,
|
||||
@ -908,6 +901,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
|
||||
|
||||
let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
|
||||
|
||||
let node_intern_event_id = profiler
|
||||
.get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
|
||||
.map(EventId::from_label);
|
||||
|
||||
CurrentDepGraph {
|
||||
encoder: Steal::new(GraphEncoder::new(
|
||||
encoder,
|
||||
@ -927,6 +924,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
|
||||
forbidden_edge,
|
||||
total_read_count: AtomicU64::new(0),
|
||||
total_duplicate_read_count: AtomicU64::new(0),
|
||||
node_intern_event_id,
|
||||
}
|
||||
}
|
||||
|
||||
@ -970,6 +968,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
|
||||
) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
|
||||
let print_status = cfg!(debug_assertions) && print_status;
|
||||
|
||||
// Get timer for profiling `DepNode` interning
|
||||
let _node_intern_timer =
|
||||
self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
|
||||
|
||||
if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
|
||||
// Determine the color and index of the new `DepNode`.
|
||||
if let Some(fingerprint) = fingerprint {
|
||||
|
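
The hunks above also introduce a cached profiling event id for dep-node interning: `CurrentDepGraph::new` resolves the id once via `get_or_alloc_cached_string`, and the interning path only has to `map` over an `Option<EventId>` to start a timer. A minimal, self-contained sketch of that pattern follows; `Profiler`, `EventId`, and `Graph` here are simplified stand-ins, not rustc's real `SelfProfilerRef` API.

    // Simplified stand-ins for the profiler types used in the diff above.
    #[derive(Clone, Copy)]
    struct EventId(u32);

    struct Profiler {
        enabled: bool,
    }

    impl Profiler {
        // Stand-in for `get_or_alloc_cached_string(..).map(EventId::from_label)`:
        // returns None when self-profiling is disabled.
        fn cached_event_id(&self, _label: &str) -> Option<EventId> {
            self.enabled.then(|| EventId(0))
        }

        // Stand-in for `generic_activity_with_event_id`: returns a timer guard.
        fn activity(&self, _id: EventId) -> ActivityGuard {
            ActivityGuard
        }
    }

    struct ActivityGuard; // would record the elapsed time on drop

    struct Graph {
        node_intern_event_id: Option<EventId>,
    }

    impl Graph {
        fn new(profiler: &Profiler) -> Self {
            // Look the id up once at construction instead of on every intern.
            Graph { node_intern_event_id: profiler.cached_event_id("intern_dep_graph_node") }
        }

        fn intern_node(&self, profiler: &Profiler) {
            // Cheap when profiling is off: `map` on `None` does nothing.
            let _timer = self.node_intern_event_id.map(|id| profiler.activity(id));
            // ... intern the node while the guard is alive ...
        }
    }

    fn main() {
        let profiler = Profiler { enabled: true };
        let graph = Graph::new(&profiler);
        graph.intern_node(&profiler);
    }

Resolving the label once at construction keeps the per-intern cost down to an `Option::map`, which is essentially free when self-profiling is disabled.
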
@@ -32,6 +32,17 @@ pub trait DepContext: Copy {

/// Access the compiler session.
fn sess(&self) -> &Session;

/// Return whether this kind always require evaluation.
fn is_eval_always(&self, kind: Self::DepKind) -> bool;

fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;

/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: DepNode<Self::DepKind>) -> bool;

/// Load data from the on-disk cache.
fn try_load_from_on_disk_cache(&self, dep_node: DepNode<Self::DepKind>);
}

pub trait HasDepContext: Copy {
@@ -51,7 +62,7 @@ impl<T: DepContext> HasDepContext for T {
}

/// Describes the contents of the fingerprint generated by a given query.
#[derive(PartialEq, Eq, Copy, Clone)]
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum FingerprintStyle {
/// The fingerprint is actually a DefPathHash.
DefPathHash,
@@ -75,12 +86,6 @@ impl FingerprintStyle {
pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
const NULL: Self;

/// Return whether this kind always require evaluation.
fn is_eval_always(&self) -> bool;

/// Return whether this kind requires additional parameters to be executed.
fn has_params(&self) -> bool;

/// Implementation of `std::fmt::Debug` for `DepNode`.
fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;

@@ -93,6 +98,4 @@ pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder>
fn read_deps<OP>(op: OP)
where
OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps<Self>>>);

fn fingerprint_style(&self) -> FingerprintStyle;
}
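
The hunks above move `is_eval_always` and `fingerprint_style` off the `DepKind` trait and onto `DepContext`, alongside `try_force_from_dep_node` and `try_load_from_on_disk_cache`, so all per-kind questions are answered through the context. A rough, self-contained sketch of that shape; `DepKindInfo`, `Context`, and the other types below are invented for illustration and are not the compiler's actual definitions.

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum FingerprintStyle {
        DefPathHash,
        Opaque,
    }

    // One record per kind, holding both the flags and a function pointer.
    struct DepKindInfo {
        is_eval_always: bool,
        fingerprint_style: FingerprintStyle,
        // Stand-in for the per-kind forcing/loading code.
        force_from_dep_node: Option<fn() -> bool>,
    }

    #[derive(Clone, Copy)]
    struct DepKind(usize); // index into the table

    struct Context<'a> {
        dep_kind_infos: &'a [DepKindInfo],
    }

    impl<'a> Context<'a> {
        fn is_eval_always(&self, kind: DepKind) -> bool {
            self.dep_kind_infos[kind.0].is_eval_always
        }

        fn fingerprint_style(&self, kind: DepKind) -> FingerprintStyle {
            self.dep_kind_infos[kind.0].fingerprint_style
        }

        fn try_force_from_dep_node(&self, kind: DepKind) -> bool {
            self.dep_kind_infos[kind.0].force_from_dep_node.map_or(false, |force| force())
        }
    }

    fn main() {
        let infos = [DepKindInfo {
            is_eval_always: false,
            fingerprint_style: FingerprintStyle::Opaque,
            force_from_dep_node: None,
        }];
        let cx = Context { dep_kind_infos: &infos };
        assert!(!cx.is_eval_always(DepKind(0)));
        assert_eq!(cx.fingerprint_style(DepKind(0)), FingerprintStyle::Opaque);
        assert!(!cx.try_force_from_dep_node(DepKind(0)));
    }

With this shape, the `DepGraph` code earlier in this diff only needs `tcx.dep_context()` to decide whether a dependency is eval_always or can be forced.
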
@@ -24,7 +24,8 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
pub dep_kind: CTX::DepKind,
pub eval_always: bool,

pub hash_result: fn(&mut StableHashingContext<'_>, &V) -> Option<Fingerprint>,
pub compute: fn(CTX::DepContext, K) -> V,
pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
pub cache_on_disk: fn(CTX, &K, Option<&V>) -> bool,
pub try_load_from_disk: fn(CTX, SerializedDepNodeIndex) -> Option<V>,
@@ -38,12 +39,8 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
DepNode::construct(tcx, self.dep_kind, key)
}

pub(crate) fn hash_result(
&self,
hcx: &mut StableHashingContext<'_>,
value: &V,
) -> Option<Fingerprint> {
(self.hash_result)(hcx, value)
pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
(self.compute)(tcx, key)
}

pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
@@ -59,6 +56,9 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
const ANON: bool;
const EVAL_ALWAYS: bool;
const DEP_KIND: CTX::DepKind;
const HASH_RESULT: Option<
fn(hcx: &mut StableHashingContext<'_>, result: &Self::Value) -> Fingerprint,
>;

type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;

@@ -75,9 +75,6 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
// Don't use this method to compute query results, instead use the methods on TyCtxt
fn compute_fn(tcx: CTX, key: &Self::Key) -> fn(CTX::DepContext, Self::Key) -> Self::Value;

fn hash_result(hcx: &mut StableHashingContext<'_>, result: &Self::Value)
-> Option<Fingerprint>;

fn handle_cycle_error(tcx: CTX, diag: DiagnosticBuilder<'_>) -> Self::Value;
}

@@ -95,7 +92,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryAccessors<CTX> {
}

pub(crate) trait QueryVtableExt<CTX: QueryContext, K, V> {
const VTABLE: QueryVtable<CTX, K, V>;
fn make_vtable(tcx: CTX, key: &K) -> QueryVtable<CTX, K, V>;
}

impl<CTX, Q> QueryVtableExt<CTX, Q::Key, Q::Value> for Q
@@ -103,13 +100,16 @@ where
CTX: QueryContext,
Q: QueryDescription<CTX>,
{
const VTABLE: QueryVtable<CTX, Q::Key, Q::Value> = QueryVtable {
anon: Q::ANON,
dep_kind: Q::DEP_KIND,
eval_always: Q::EVAL_ALWAYS,
hash_result: Q::hash_result,
handle_cycle_error: Q::handle_cycle_error,
cache_on_disk: Q::cache_on_disk,
try_load_from_disk: Q::try_load_from_disk,
};
fn make_vtable(tcx: CTX, key: &Q::Key) -> QueryVtable<CTX, Q::Key, Q::Value> {
QueryVtable {
anon: Q::ANON,
dep_kind: Q::DEP_KIND,
eval_always: Q::EVAL_ALWAYS,
hash_result: Q::HASH_RESULT,
compute: Q::compute_fn(tcx, key),
handle_cycle_error: Q::handle_cycle_error,
cache_on_disk: Q::cache_on_disk,
try_load_from_disk: Q::try_load_from_disk,
}
}
}
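
Above, the associated `const VTABLE` is replaced by a `make_vtable(tcx, key)` constructor: the vtable now carries a `compute` function pointer chosen per key, and `hash_result` becomes an `Option` so queries that skip result hashing store `None`. A simplified, self-contained sketch of how such a vtable is consumed; `Vtable`, `HashCtx`, and `Fingerprint` below are stand-ins, not the real `QueryVtable` types.

    // Stand-ins for the hashing types; the point is the shape of the vtable.
    struct Fingerprint(u64);
    struct HashCtx;

    struct Vtable<K, V> {
        compute: fn(K) -> V,
        hash_result: Option<fn(&mut HashCtx, &V) -> Fingerprint>,
    }

    impl<K, V> Vtable<K, V> {
        // Mirrors `QueryVtable::compute`: forwards to the stored function pointer.
        fn compute(&self, key: K) -> V {
            (self.compute)(key)
        }
    }

    fn run_query<K, V>(vtable: &Vtable<K, V>, key: K) -> (V, Option<Fingerprint>) {
        let value = vtable.compute(key);
        // Hash only if the query opted in; otherwise this stays None.
        let hash = vtable.hash_result.map(|f| f(&mut HashCtx, &value));
        (value, hash)
    }

    fn double(x: u32) -> u32 {
        x * 2
    }

    fn hash_u32(_hcx: &mut HashCtx, v: &u32) -> Fingerprint {
        Fingerprint(*v as u64)
    }

    fn main() {
        let vtable = Vtable { compute: double, hash_result: Some(hash_u32) };
        let (value, hash) = run_query(&vtable, 21u32);
        assert_eq!(value, 42);
        assert!(hash.is_some());
    }

Making `hash_result` an `Option<fn>` moves the "hash or don't" decision to vtable construction, so call sites reduce to a `map`/`map_or` over the option, as seen later in this diff.
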
@@ -14,7 +14,7 @@ pub use self::caches::{
mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};

use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};

use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
@@ -122,12 +122,6 @@ pub trait QueryContext: HasDepContext {

fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;

/// Load data from the on-disk cache.
fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);

/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;

/// Load side effects associated to the node in the previous session.
fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;

@@ -2,7 +2,7 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{
@@ -382,7 +382,6 @@ fn try_execute_query<CTX, C>(
lookup: QueryLookup,
dep_node: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> (C::Stored, Option<DepNodeIndex>)
where
C: QueryCache,
@@ -398,7 +397,7 @@ where
query.dep_kind,
) {
TryGetJob::NotYetStarted(job) => {
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id, compute);
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
let result = job.complete(cache, result, dep_node_index);
(result, Some(dep_node_index))
}
@@ -429,7 +428,6 @@ fn execute_job<CTX, K, V>(
mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, K, V>,
job_id: QueryJobId<CTX::DepKind>,
compute: fn(CTX::DepContext, K) -> V,
) -> (V, DepNodeIndex)
where
K: Clone + DepNodeParams<CTX::DepContext>,
@@ -441,7 +439,7 @@ where
// Fast path for when incr. comp. is off.
if !dep_graph.is_fully_enabled() {
let prof_timer = tcx.dep_context().profiler().query_provider();
let result = tcx.start_query(job_id, None, || compute(*tcx.dep_context(), key));
let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
let dep_node_index = dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
return (result, dep_node_index);
@@ -455,7 +453,7 @@ where
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
if let Some(ret) = tcx.start_query(job_id, None, || {
try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query, compute)
try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
}) {
return ret;
}
@@ -467,14 +465,14 @@ where
let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
if query.anon {
return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
compute(*tcx.dep_context(), key)
query.compute(*tcx.dep_context(), key)
});
}

// `to_dep_node` is expensive for some `DepKind`s.
let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));

dep_graph.with_task(dep_node, *tcx.dep_context(), key, compute, query.hash_result)
dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
});

prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -498,7 +496,6 @@ fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
key: &K,
dep_node: &DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, K, V>,
compute: fn(CTX::DepContext, K) -> V,
) -> Option<(V, DepNodeIndex)>
where
K: Clone,
@@ -520,14 +517,6 @@ where
let result = query.try_load_from_disk(tcx, prev_dep_node_index);
prof_timer.finish_with_query_invocation_id(dep_node_index.into());

// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!dep_node.kind.fingerprint_style().reconstructible() || result.is_some(),
"missing on-disk cache entry for {:?}",
dep_node
);

if let Some(result) = result {
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
@@ -537,6 +526,14 @@ where

return Some((result, dep_node_index));
}

// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
"missing on-disk cache entry for {:?}",
dep_node
);
}

// We could not load a result from the on-disk cache, so
@@ -544,7 +541,7 @@ where
let prof_timer = tcx.dep_context().profiler().query_provider();

// The dep-graph for this computation is already in-place.
let result = dep_graph.with_ignore(|| compute(*tcx.dep_context(), key.clone()));
let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));

prof_timer.finish_with_query_invocation_id(dep_node_index.into());

@@ -577,12 +574,12 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
);

debug!("BEGIN verify_ich({:?})", dep_node);
let mut hcx = tcx.create_stable_hashing_context();

let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
debug!("END verify_ich({:?})", dep_node);

let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
let mut hcx = tcx.create_stable_hashing_context();
f(&mut hcx, result)
});
let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
debug!("END verify_ich({:?})", dep_node);

if Some(new_hash) != old_hash {
let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
@@ -665,41 +662,6 @@ where
}
}

#[inline(never)]
fn force_query_impl<CTX, C>(
tcx: CTX,
state: &QueryState<CTX::DepKind, C::Key>,
cache: &QueryCacheStore<C>,
key: C::Key,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> bool
where
C: QueryCache,
C::Key: DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
debug_assert!(!query.anon);

// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
let cached = cache.cache.lookup(cache, &key, |_, index| {
if unlikely!(tcx.dep_context().profiler().enabled()) {
tcx.dep_context().profiler().query_cache_hit(index.into());
}
});

let lookup = match cached {
Ok(()) => return true,
Err(lookup) => lookup,
};

let _ =
try_execute_query(tcx, state, cache, DUMMY_SP, key, lookup, Some(dep_node), query, compute);
true
}

pub enum QueryMode {
Get,
Ensure,
@@ -717,9 +679,9 @@ where
Q::Key: DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
let query = &Q::VTABLE;
let query = Q::make_vtable(tcx, &key);
let dep_node = if let QueryMode::Ensure = mode {
let (must_run, dep_node) = ensure_must_run(tcx, &key, query);
let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
if !must_run {
return None;
}
@@ -729,7 +691,6 @@ where
};

debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
let compute = Q::compute_fn(tcx, &key);
let (result, dep_node_index) = try_execute_query(
tcx,
Q::query_state(tcx),
@@ -738,8 +699,7 @@ where
key,
lookup,
dep_node,
query,
compute,
&query,
);
if let Some(dep_node_index) = dep_node_index {
tcx.dep_context().dep_graph().read_index(dep_node_index)
@@ -747,34 +707,29 @@ where
Some(result)
}

pub fn force_query<Q, CTX>(tcx: CTX, dep_node: &DepNode<CTX::DepKind>) -> bool
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
where
Q: QueryDescription<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
if Q::ANON {
return false;
}
assert!(!Q::ANON);

if !<Q::Key as DepNodeParams<CTX::DepContext>>::fingerprint_style().reconstructible() {
return false;
}
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
let cache = Q::query_cache(tcx);
let cached = cache.cache.lookup(cache, &key, |_, index| {
if unlikely!(tcx.dep_context().profiler().enabled()) {
tcx.dep_context().profiler().query_cache_hit(index.into());
}
});

let Some(key) =
<Q::Key as DepNodeParams<CTX::DepContext>>::recover(*tcx.dep_context(), &dep_node)
else {
return false;
let lookup = match cached {
Ok(()) => return,
Err(lookup) => lookup,
};

let compute = Q::compute_fn(tcx, &key);
force_query_impl(
tcx,
Q::query_state(tcx),
Q::query_cache(tcx),
key,
*dep_node,
&Q::VTABLE,
compute,
)
let query = Q::make_vtable(tcx, &key);
let state = Q::query_state(tcx);
try_execute_query(tcx, state, cache, DUMMY_SP, key, lookup, Some(dep_node), &query);
}
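
In the final hunks, `force_query_impl` is folded into `force_query`: the caller now supplies the key together with the `DepNode`, the query cache is checked first (only one of a concurrent execute/force pair should run the query), and a miss goes straight into `try_execute_query` with `Some(dep_node)`. A compressed sketch of that control flow; the `HashMap`-backed cache and the function signatures here are invented for illustration, not the real query machinery.

    use std::collections::HashMap;

    // Invented stand-ins: a "dep node" identifying the work, and a trivial cache.
    #[derive(Clone, Copy, Debug)]
    struct DepNode(u32);

    struct QueryCache {
        map: HashMap<u32, String>,
    }

    fn try_execute_query(cache: &mut QueryCache, key: u32, dep_node: Option<DepNode>) -> String {
        // In the real code this also sets up the dep-graph task; here we just compute.
        let value = format!("value for {key} (node {:?})", dep_node);
        cache.map.insert(key, value.clone());
        value
    }

    // The key is handed in by the caller, so forcing is: check the cache,
    // otherwise execute with the known dep node.
    fn force_query(cache: &mut QueryCache, key: u32, dep_node: DepNode) {
        if cache.map.contains_key(&key) {
            return; // someone else already executed or forced it
        }
        let _ = try_execute_query(cache, key, Some(dep_node));
    }

    fn main() {
        let mut cache = QueryCache { map: HashMap::new() };
        force_query(&mut cache, 7, DepNode(7));
        assert!(cache.map.contains_key(&7));
    }
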