Decouple QueryContext from DepContext.
commit 49c1b07a9e (parent 6f04883023)
@@ -48,7 +48,7 @@ impl QueryContext for TyCtxt<'tcx> {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        compute: impl FnOnce(Self) -> R,
+        compute: impl FnOnce() -> R,
     ) -> R {
         // The `TyCtxt` stored in TLS has the same global interner lifetime
         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
@@ -65,7 +65,7 @@ impl QueryContext for TyCtxt<'tcx> {
 
             // Use the `ImplicitCtxt` while we execute the query.
             tls::enter_context(&new_icx, |_| {
-                rustc_data_structures::stack::ensure_sufficient_stack(|| compute(*self))
+                rustc_data_structures::stack::ensure_sufficient_stack(compute)
             })
         })
     }
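The hunks above drop the `Self` argument from the `compute` closure: `start_query` already holds the context as `&self`, so callers capture it instead of having it threaded back in, and `ensure_sufficient_stack(compute)` no longer needs an adapter closure. A minimal illustrative sketch of that pattern (the `Ctx` type and its method names are invented for illustration, not part of the commit):

// Illustrative only: a toy context showing why `FnOnce(Self) -> R`
// can become `FnOnce() -> R` when the caller already owns the context.
#[derive(Clone, Copy)]
struct Ctx {
    value: u32,
}

impl Ctx {
    // Old shape: the closure receives the context back as an argument.
    fn run_with_ctx<R>(self, compute: impl FnOnce(Self) -> R) -> R {
        compute(self)
    }

    // New shape: the closure takes no arguments; the caller captures
    // whatever context it needs from its environment.
    fn run_captured<R>(self, compute: impl FnOnce() -> R) -> R {
        compute()
    }
}

fn main() {
    let tcx = Ctx { value: 42 };

    // Before: `tcx` is handed back through the closure parameter.
    let a = tcx.run_with_ctx(|tcx| tcx.value + 1);

    // After: the closure simply captures `tcx` (it is `Copy`), which is
    // what lets `ensure_sufficient_stack(compute)` take the closure directly.
    let b = tcx.run_captured(|| tcx.value + 1);

    assert_eq!(a, b);
}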
@@ -23,7 +23,7 @@ use super::debug::EdgeFilter;
 use super::prev::PreviousDepGraph;
 use super::query::DepGraphQuery;
 use super::serialized::SerializedDepNodeIndex;
-use super::{DepContext, DepKind, DepNode, WorkProductId};
+use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
 
 #[derive(Clone)]
 pub struct DepGraph<K: DepKind> {
@@ -235,7 +235,7 @@ impl<K: DepKind> DepGraph<K> {
     /// `arg` parameter.
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
-    pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
+    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@@ -261,7 +261,7 @@ impl<K: DepKind> DepGraph<K> {
         )
     }
 
-    fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
+    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@@ -271,14 +271,15 @@ impl<K: DepKind> DepGraph<K> {
         hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
     ) -> (R, DepNodeIndex) {
         if let Some(ref data) = self.data {
+            let dcx = cx.dep_context();
             let task_deps = create_task(key).map(Lock::new);
             let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
             let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
 
-            let mut hcx = cx.create_stable_hashing_context();
+            let mut hcx = dcx.create_stable_hashing_context();
             let current_fingerprint = hash_result(&mut hcx, &result);
 
-            let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();
+            let print_status = cfg!(debug_assertions) && dcx.debug_dep_tasks();
 
             // Intern the new `DepNode`.
             let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
@@ -408,7 +409,7 @@ impl<K: DepKind> DepGraph<K> {
 
     /// Executes something within an "eval-always" task which is a task
     /// that runs whenever anything changes.
-    pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
+    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
@@ -63,6 +63,27 @@ pub trait DepContext: Copy {
     fn profiler(&self) -> &SelfProfilerRef;
 }
 
+pub trait HasDepContext: Copy {
+    type DepKind: self::DepKind;
+    type StableHashingContext;
+    type DepContext: self::DepContext<
+        DepKind = Self::DepKind,
+        StableHashingContext = Self::StableHashingContext,
+    >;
+
+    fn dep_context(&self) -> &Self::DepContext;
+}
+
+impl<T: DepContext> HasDepContext for T {
+    type DepKind = T::DepKind;
+    type StableHashingContext = T::StableHashingContext;
+    type DepContext = Self;
+
+    fn dep_context(&self) -> &Self::DepContext {
+        self
+    }
+}
+
 /// Describe the different families of dependency nodes.
 pub trait DepKind: Copy + fmt::Debug + Eq + Hash {
     const NULL: Self;
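The new `HasDepContext` trait above is the heart of the decoupling: generic code only asks for access to a `DepContext`, and the blanket impl keeps every existing `DepContext` working unchanged while leaving room for a wrapper type that merely contains one. A rough, self-contained sketch of the same pattern with toy types (`Graph` and `Wrapper` are invented for illustration and stand in for the real contexts; they are not rustc's actual APIs):

// Toy re-creation of the pattern: `DepContext` is the "real" capability,
// `HasDepContext` only promises access to one, and a blanket impl keeps
// every existing `DepContext` working unchanged.
trait DepContext: Copy {
    fn debug_dep_tasks(&self) -> bool;
}

trait HasDepContext: Copy {
    type DepContext: DepContext;
    fn dep_context(&self) -> &Self::DepContext;
}

// Blanket impl: anything that is a `DepContext` trivially has one (itself).
impl<T: DepContext> HasDepContext for T {
    type DepContext = Self;
    fn dep_context(&self) -> &Self::DepContext {
        self
    }
}

// A plain context, standing in for something like `TyCtxt`.
#[derive(Clone, Copy)]
struct Graph {
    verbose: bool,
}

impl DepContext for Graph {
    fn debug_dep_tasks(&self) -> bool {
        self.verbose
    }
}

// A wrapper that is *not* a `DepContext` itself but can hand one out.
// This is the kind of type the decoupling makes possible.
#[derive(Clone, Copy)]
struct Wrapper<'a> {
    graph: &'a Graph,
}

impl<'a> HasDepContext for Wrapper<'a> {
    type DepContext = Graph;
    fn dep_context(&self) -> &Self::DepContext {
        self.graph
    }
}

// Generic code only asks for `HasDepContext`, so both callers compile.
fn print_status<C: HasDepContext>(cx: C) -> bool {
    cx.dep_context().debug_dep_tasks()
}

fn main() {
    let graph = Graph { verbose: true };
    assert!(print_status(graph)); // via the blanket impl
    assert!(print_status(Wrapper { graph: &graph })); // via delegation
}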
@@ -33,9 +33,9 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
 }
 
 impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
-    pub(crate) fn to_dep_node(&self, tcx: CTX, key: &K) -> DepNode<CTX::DepKind>
+    pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
     where
-        K: crate::dep_graph::DepNodeParams<CTX>,
+        K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     {
         DepNode::construct(tcx, self.dep_kind, key)
     }
@@ -10,7 +10,8 @@ use std::num::NonZeroU32;
 
 #[cfg(parallel_compiler)]
 use {
-    super::QueryContext,
+    crate::dep_graph::DepContext,
+    crate::query::QueryContext,
     parking_lot::{Condvar, Mutex},
     rustc_data_structures::fx::FxHashSet,
     rustc_data_structures::stable_hasher::{HashStable, StableHasher},
@@ -432,7 +433,7 @@ where
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
-    let mut hcx = tcx.create_stable_hashing_context();
+    let mut hcx = tcx.dep_context().create_stable_hashing_context();
     queries
         .iter()
         .min_by_key(|v| {
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
-use crate::dep_graph::DepContext;
+use crate::dep_graph::HasDepContext;
 use crate::query::job::QueryMap;
 
 use rustc_data_structures::stable_hasher::HashStable;
@@ -23,7 +23,7 @@ use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
 use rustc_span::def_id::DefId;
 
-pub trait QueryContext: DepContext {
+pub trait QueryContext: HasDepContext {
     type Query: Clone + HashStable<Self::StableHashingContext>;
 
     fn incremental_verify_ich(&self) -> bool;
@@ -44,6 +44,6 @@ pub trait QueryContext: DepContext {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        compute: impl FnOnce(Self) -> R,
+        compute: impl FnOnce() -> R,
     ) -> R;
 }
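With `QueryContext` now extending `HasDepContext` rather than `DepContext`, shared query code can no longer call dep-graph and profiler methods directly on `tcx`; it has to go through `tcx.dep_context()` first, which is the mechanical rewrite the remaining hunks below apply. A small sketch of that call-site change using stand-in types (`Ctx`, `DepGraph`, and `read_index` are placeholders chosen for illustration, not the real rustc items):

// Stand-ins for the real types, just to show the call-site change.
struct DepGraph {
    nodes: Vec<&'static str>,
}

impl DepGraph {
    fn read_index(&self, index: usize) -> &'static str {
        self.nodes[index]
    }
}

trait HasDepContext {
    type DepContext;
    fn dep_context(&self) -> &Self::DepContext;
}

struct Ctx {
    graph: DepGraph,
}

impl Ctx {
    fn dep_graph(&self) -> &DepGraph {
        &self.graph
    }
}

impl HasDepContext for Ctx {
    type DepContext = Ctx;
    fn dep_context(&self) -> &Self::DepContext {
        self
    }
}

fn main() {
    let tcx = Ctx { graph: DepGraph { nodes: vec!["typeck", "mir_built"] } };

    // Before the commit, generic query code reached the graph directly:
    let direct = tcx.dep_graph().read_index(0);

    // After the commit, the same code goes through the dep context,
    // which may or may not be `tcx` itself:
    let via_dep_context = tcx.dep_context().dep_graph().read_index(0);

    assert_eq!(direct, via_dep_context);
}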
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepKind, DepNode};
+use crate::dep_graph::{DepContext, DepKind, DepNode};
 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
@@ -204,7 +204,7 @@ where
     // in another thread has completed. Record how long we wait in the
     // self-profiler.
     let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-        Some(tcx.profiler().query_blocked())
+        Some(tcx.dep_context().profiler().query_blocked())
     } else {
         None
     };
@@ -266,8 +266,8 @@ where
     let cached = cache
         .cache
         .lookup(cache, &key, |value, index| {
-            if unlikely!(tcx.profiler().enabled()) {
-                tcx.profiler().query_cache_hit(index.into());
+            if unlikely!(tcx.dep_context().profiler().enabled()) {
+                tcx.dep_context().profiler().query_cache_hit(index.into());
             }
             #[cfg(debug_assertions)]
             {
@@ -395,7 +395,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>(
 ) -> Result<R, QueryLookup>
 where
     C: QueryCache,
-    CTX: QueryContext,
+    CTX: DepContext,
     OnHit: FnOnce(&C::Stored) -> R,
 {
     cache.cache.lookup(cache, &key, |value, index| {
@@ -422,7 +422,7 @@ fn try_execute_query<CTX, C>(
 ) -> C::Stored
 where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
@@ -432,46 +432,51 @@ where
         TryGetJob::Cycle(result) => return result,
         #[cfg(parallel_compiler)]
         TryGetJob::JobCompleted((v, index)) => {
-            tcx.dep_graph().read_index(index);
+            tcx.dep_context().dep_graph().read_index(index);
             return v;
         }
     };
 
     // Fast path for when incr. comp. is off. `to_dep_node` is
     // expensive for some `DepKind`s.
-    if !tcx.dep_graph().is_fully_enabled() {
+    if !tcx.dep_context().dep_graph().is_fully_enabled() {
         let null_dep_node = DepNode::new_no_params(DepKind::NULL);
         return force_query_with_job(tcx, key, job, null_dep_node, query).0;
     }
 
     if query.anon {
-        let prof_timer = tcx.profiler().query_provider();
+        let prof_timer = tcx.dep_context().profiler().query_provider();
 
         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-            tcx.start_query(job.id, diagnostics, |tcx| {
-                tcx.dep_graph().with_anon_task(query.dep_kind, || query.compute(tcx, key))
+            tcx.start_query(job.id, diagnostics, || {
+                tcx.dep_context()
+                    .dep_graph()
+                    .with_anon_task(query.dep_kind, || query.compute(tcx, key))
             })
         });
 
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-        tcx.dep_graph().read_index(dep_node_index);
+        tcx.dep_context().dep_graph().read_index(dep_node_index);
 
        if unlikely!(!diagnostics.is_empty()) {
-            tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
+            tcx.dep_context().store_diagnostics_for_anon_node(dep_node_index, diagnostics);
        }
 
        return job.complete(result, dep_node_index);
    }
 
-    let dep_node = query.to_dep_node(tcx, &key);
+    let dep_node = query.to_dep_node(*tcx.dep_context(), &key);
 
    if !query.eval_always {
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
-        let loaded = tcx.start_query(job.id, None, |tcx| {
-            let marked = tcx.dep_graph().try_mark_green_and_read(tcx, &dep_node);
+        let loaded = tcx.start_query(job.id, None, || {
+            let marked = tcx
+                .dep_context()
+                .dep_graph()
+                .try_mark_green_and_read(*tcx.dep_context(), &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                (
                    load_from_disk_and_cache_in_memory(
@@ -492,7 +497,7 @@ where
     }
 
     let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query);
-    tcx.dep_graph().read_index(dep_node_index);
+    tcx.dep_context().dep_graph().read_index(dep_node_index);
     result
 }
 
@@ -510,11 +515,11 @@ where
     // Note this function can be called concurrently from the same query
     // We must ensure that this is handled correctly.
 
-    debug_assert!(tcx.dep_graph().is_green(dep_node));
+    debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));
 
     // First we try to load the result from the on-disk cache.
     let result = if query.cache_on_disk(tcx, &key, None) {
-        let prof_timer = tcx.profiler().incr_cache_loading();
+        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
         let result = query.try_load_from_disk(tcx, prev_dep_node_index);
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -536,10 +541,10 @@ where
     } else {
         // We could not load a result from the on-disk cache, so
         // recompute.
-        let prof_timer = tcx.profiler().query_provider();
+        let prof_timer = tcx.dep_context().profiler().query_provider();
 
        // The dep-graph for this computation is already in-place.
-        let result = tcx.dep_graph().with_ignore(|| query.compute(tcx, key));
+        let result = tcx.dep_context().dep_graph().with_ignore(|| query.compute(tcx, key));
 
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -549,7 +554,7 @@ where
     // If `-Zincremental-verify-ich` is specified, re-hash results from
     // the cache and make sure that they have the expected fingerprint.
     if unlikely!(tcx.incremental_verify_ich()) {
-        incremental_verify_ich(tcx, &result, dep_node, dep_node_index, query);
+        incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
     }
 
     result
@@ -558,7 +563,7 @@ where
 #[inline(never)]
 #[cold]
 fn incremental_verify_ich<CTX, K, V: Debug>(
-    tcx: CTX,
+    tcx: CTX::DepContext,
     result: &V,
     dep_node: &DepNode<CTX::DepKind>,
     dep_node_index: DepNodeIndex,
@@ -601,7 +606,7 @@ where
     // 2. Two distinct query keys get mapped to the same `DepNode`
     // (see for example #48923).
     assert!(
-        !tcx.dep_graph().dep_node_exists(&dep_node),
+        !tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
         "forcing query with already existing `DepNode`\n\
          - query-key: {:?}\n\
          - dep-node: {:?}",
@@ -609,12 +614,12 @@ where
        dep_node
    );
 
-    let prof_timer = tcx.profiler().query_provider();
+    let prof_timer = tcx.dep_context().profiler().query_provider();
 
    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-        tcx.start_query(job.id, diagnostics, |tcx| {
+        tcx.start_query(job.id, diagnostics, || {
            if query.eval_always {
-                tcx.dep_graph().with_eval_always_task(
+                tcx.dep_context().dep_graph().with_eval_always_task(
                    dep_node,
                    tcx,
                    key,
@@ -622,7 +627,13 @@ where
                    query.hash_result,
                )
            } else {
-                tcx.dep_graph().with_task(dep_node, tcx, key, query.compute, query.hash_result)
+                tcx.dep_context().dep_graph().with_task(
+                    dep_node,
+                    tcx,
+                    key,
+                    query.compute,
+                    query.hash_result,
+                )
            }
        })
    });
@@ -630,7 +641,7 @@ where
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
     if unlikely!(!diagnostics.is_empty()) && dep_node.kind != DepKind::NULL {
-        tcx.store_diagnostics(dep_node_index, diagnostics);
+        tcx.dep_context().store_diagnostics(dep_node_index, diagnostics);
     }
 
     let result = job.complete(result, dep_node_index);
@@ -651,7 +662,7 @@ fn get_query_impl<CTX, C>(
 where
     CTX: QueryContext,
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
 {
     try_execute_query(tcx, state, cache, span, key, lookup, query)
 }
@@ -665,9 +676,9 @@ where
 ///
 /// Note: The optimization is only available during incr. comp.
 #[inline(never)]
-fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
+fn ensure_must_run<CTX, K, V>(tcx: CTX::DepContext, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
 where
-    K: crate::dep_graph::DepNodeParams<CTX>,
+    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     if query.eval_always {
@@ -707,14 +718,14 @@ fn force_query_impl<CTX, C>(
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX>,
+    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
     let cached = cache.cache.lookup(cache, &key, |_, index| {
-        if unlikely!(tcx.profiler().enabled()) {
-            tcx.profiler().query_cache_hit(index.into());
+        if unlikely!(tcx.dep_context().profiler().enabled()) {
+            tcx.dep_context().profiler().query_cache_hit(index.into());
         }
         #[cfg(debug_assertions)]
         {
@@ -752,12 +763,12 @@ pub fn get_query<Q, CTX>(
 ) -> Option<Q::Stored>
 where
     Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let query = &Q::VTABLE;
     if let QueryMode::Ensure = mode {
-        if !ensure_must_run(tcx, &key, query) {
+        if !ensure_must_run(*tcx.dep_context(), &key, query) {
             return None;
         }
     }
@@ -771,7 +782,7 @@ where
 pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
 where
     Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX>,
+    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), key, span, dep_node, &Q::VTABLE)