Invoke callbacks from rustc_middle.

Camille GILLOT 2021-10-16 20:24:08 +02:00
parent b09de95fab
commit 602d3cbce3
9 changed files with 42 additions and 47 deletions

@@ -4319,7 +4319,6 @@ dependencies = [
  "rustc_serialize",
  "rustc_session",
  "rustc_span",
- "tracing",
 ]

 [[package]]

@@ -99,4 +99,34 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     fn is_eval_always(&self, kind: DepKind) -> bool {
         self.query_kind(kind).is_eval_always
     }
+
+    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+        // We must avoid ever having to call `force_from_dep_node()` for a
+        // `DepNode::codegen_unit`:
+        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+        // would always end up having to evaluate the first caller of the
+        // `codegen_unit` query that *is* reconstructible. This might very well be
+        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+        // to re-trigger calling the `codegen_unit` query with the right key. At
+        // that point we would already have re-done all the work we are trying to
+        // avoid doing in the first place.
+        //
+        // The solution is simple: Just explicitly call the `codegen_unit` query for
+        // each CGU, right after partitioning. This way `try_mark_green` will always
+        // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure we actually keep applying the solution above.
+        debug_assert!(
+            dep_node.kind != DepKind::codegen_unit,
+            "calling force_from_dep_node() on DepKind::codegen_unit"
+        );
+
+        let cb = self.query_kind(dep_node.kind);
+        (cb.force_from_dep_node)(*self, dep_node)
+    }
+
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
+        let cb = self.query_kind(dep_node.kind);
+        (cb.try_load_from_on_disk_cache)(*self, dep_node)
+    }
 }
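
The dispatch added above goes through a table of per-DepKind callbacks that `TyCtxt::query_kind` looks up, so the generic dep-graph machinery never needs to know concrete query types. Below is a minimal, self-contained sketch of that pattern; every name in it (`Tcx`, `DepKindStruct`, `TABLE`, the no-op callbacks) is illustrative, not rustc's real definition.

    // Illustrative only: a fn-pointer table indexed by DepKind,
    // mirroring how `query_kind` dispatches in this commit.

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum DepKind {
        CodegenUnit,
        TypeOf,
    }

    #[derive(Debug)]
    struct DepNode {
        kind: DepKind,
    }

    // One entry per DepKind; plain fn pointers keep the table `static`-friendly.
    struct DepKindStruct {
        is_eval_always: bool,
        force_from_dep_node: fn(Tcx, &DepNode) -> bool,
        try_load_from_on_disk_cache: fn(Tcx, &DepNode),
    }

    #[derive(Clone, Copy)]
    struct Tcx(&'static [DepKindStruct]);

    impl Tcx {
        // Counterpart of `TyCtxt::query_kind`: index the table by kind.
        fn query_kind(self, kind: DepKind) -> &'static DepKindStruct {
            &self.0[kind as usize]
        }

        fn try_force_from_dep_node(self, dep_node: &DepNode) -> bool {
            // Same guard as above: codegen_unit nodes must never be forced.
            assert!(dep_node.kind != DepKind::CodegenUnit);
            let cb = self.query_kind(dep_node.kind);
            (cb.force_from_dep_node)(self, dep_node)
        }
    }

    fn force_noop(_tcx: Tcx, node: &DepNode) -> bool {
        println!("forcing {node:?}");
        true
    }

    fn load_noop(_tcx: Tcx, _node: &DepNode) {}

    static TABLE: [DepKindStruct; 2] = [
        // DepKind::CodegenUnit
        DepKindStruct { is_eval_always: false, force_from_dep_node: force_noop, try_load_from_on_disk_cache: load_noop },
        // DepKind::TypeOf
        DepKindStruct { is_eval_always: false, force_from_dep_node: force_noop, try_load_from_on_disk_cache: load_noop },
    ];

    fn main() {
        let tcx = Tcx(&TABLE);
        assert!(!tcx.query_kind(DepKind::CodegenUnit).is_eval_always);
        assert!(tcx.try_force_from_dep_node(&DepNode { kind: DepKind::TypeOf }));
    }

Because the table entries are plain fn pointers rather than trait objects, the lookup stays a single indexed load, which is the same design choice that lets rustc keep this dispatch on the query hot path.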

@@ -9,7 +9,6 @@ doctest = false
 [dependencies]
 measureme = "10.0.0"
 rustc-rayon-core = "0.3.1"
-tracing = "0.1"
 rustc_ast = { path = "../rustc_ast" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }

@@ -13,8 +13,6 @@
 extern crate rustc_macros;
 #[macro_use]
 extern crate rustc_middle;
-#[macro_use]
-extern crate tracing;

 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};

@@ -219,7 +219,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
         // Do this *before* we clone 'latest_foreign_def_path_hashes', since
         // loading existing queries may cause us to create new DepNodes, which
         // may in turn end up invoking `store_foreign_def_id_hash`
-        tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
+        tcx.dep_graph.exec_cache_promotions(tcx);

         *self.serialized_data.write() = None;
     }

@@ -3,7 +3,7 @@
 //! manage the caches, and so forth.

 use crate::{on_disk_cache, queries, Queries};
-use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_query_system::dep_graph::HasDepContext;
@@ -53,36 +53,6 @@ impl QueryContext for QueryCtxt<'tcx> {
         self.queries.try_collect_active_jobs(**self)
     }

-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.try_load_from_on_disk_cache)(**self, dep_node)
-    }
-
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        //
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.force_from_dep_node)(**self, dep_node)
-    }
-
     // Interactions with on_disk_cache
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
         self.queries

@@ -576,7 +576,7 @@ impl<K: DepKind> DepGraph<K> {
                     "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
                     dep_node, dep_dep_node
                 );
-                if !tcx.try_force_from_dep_node(dep_dep_node) {
+                if !tcx.dep_context().try_force_from_dep_node(dep_dep_node) {
                     // The DepNode could not be forced.
                     debug!(
                         "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
@@ -741,8 +741,7 @@ impl<K: DepKind> DepGraph<K> {
     //
     // This method will only load queries that will end up in the disk cache.
     // Other queries will not be executed.
-    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
-        let tcx = qcx.dep_context();
+    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
         let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

         let data = self.data.as_ref().unwrap();
@@ -750,7 +749,7 @@ impl<K: DepKind> DepGraph<K> {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
                     let dep_node = data.previous.index_to_node(prev_index);
-                    qcx.try_load_from_on_disk_cache(&dep_node);
+                    tcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
                     // We can skip red nodes because a node can only be marked
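
Because `try_force_from_dep_node` and `try_load_from_on_disk_cache` now live on `DepContext`, `exec_cache_promotions` can demand the weaker bound and take `tcx` directly, which is why the `QueryCtxt::from_tcx(tcx)` wrapper disappears from `on_disk_cache` above. A hedged before/after sketch of just this bound change, with both traits trimmed to the single method that matters here (the real traits carry many more items):

    // Illustrative traits only, not rustc's definitions.
    trait DepContext: Copy {
        fn try_load_from_on_disk_cache(&self);
    }

    trait QueryContext: Copy {
        type Dep: DepContext;
        fn dep_context(&self) -> &Self::Dep;
    }

    // Before: callers had to wrap the context into a full QueryContext,
    // and the method immediately unwrapped it again.
    fn exec_cache_promotions_before<Q: QueryContext>(qcx: Q) {
        let tcx = qcx.dep_context();
        tcx.try_load_from_on_disk_cache();
    }

    // After: the DepContext bound is sufficient, so `tcx` is passed as-is.
    fn exec_cache_promotions_after<C: DepContext>(tcx: C) {
        tcx.try_load_from_on_disk_cache();
    }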

@@ -37,6 +37,12 @@ pub trait DepContext: Copy {
     fn is_eval_always(&self, kind: Self::DepKind) -> bool;

     fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;
+
+    /// Try to force a dep node to execute and see if it's green.
+    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+
+    /// Load data from the on-disk cache.
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
 }

 pub trait HasDepContext: Copy {
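
To make the two new obligations concrete, here is a hypothetical implementor on a toy context. Everything below (`ToyCtxt`, the simplified `DepNode`, the `forceable` set) is invented for illustration; the real `DepContext` also requires an associated `DepKind`, a profiler, and session accessors, all elided here:

    use std::collections::HashSet;

    // Toy stand-in for a dep node; rustc's carries a kind and a fingerprint.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct DepNode(u32);

    // The two methods this commit adds, on a trimmed-down trait.
    trait DepContext: Copy {
        /// Try to force a dep node to execute and see if it's green.
        fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool;
        /// Load data from the on-disk cache.
        fn try_load_from_on_disk_cache(&self, dep_node: &DepNode);
    }

    #[derive(Clone, Copy)]
    struct ToyCtxt<'a> {
        // Nodes whose query key we can reconstruct, and thus force.
        forceable: &'a HashSet<DepNode>,
    }

    impl DepContext for ToyCtxt<'_> {
        fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
            self.forceable.contains(dep_node)
        }

        fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
            println!("promoting {dep_node:?} from the previous session");
        }
    }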

@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};

-use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};

 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
@@ -122,12 +122,6 @@ pub trait QueryContext: HasDepContext {
     fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;

-    /// Load data from the on-disk cache.
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
-
-    /// Try to force a dep node to execute and see if it's green.
-    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
-
     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;