Auto merge of #78780 - cjgillot:req, r=Mark-Simulacrum

Refactor query forcing

The control flow in those functions was very complex, with several layers of continuations.

I tried to simplify the implementation, while keeping essentially the same logic.
Now, all code paths go through `try_execute_query` for the actual query execution.
Communication with the `dep_graph` and the live caches is now the only difference between getting, ensuring, and forcing a query.
bors 2021-09-11 20:39:47 +00:00
commit 8c2b6ea37d
3 changed files with 282 additions and 388 deletions
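
In outline, the refactored flow looks like this. The sketch below is a toy model with stand-in types, not the real rustc APIs; it only shows how getting, ensuring, and forcing now funnel into one `try_execute_query` and differ only in how they talk to the dep-graph:

```rust
// Toy model of the refactored query plumbing. All names are illustrative
// stand-ins mirroring the patch; the real signatures live in
// compiler/rustc_query_system/src/query/plumbing.rs.

type DepNodeIndex = u32;
type DepNode = &'static str;

// Runs the query job (or handles cycles / concurrent completion) and returns
// the result plus the dep-node index, if one was created.
fn try_execute_query(key: u32, dep_node: Option<DepNode>) -> (String, Option<DepNodeIndex>) {
    let _ = dep_node; // `force` supplies it up front; otherwise it is built lazily
    (format!("result for {key}"), Some(key))
}

// Getting: execute, then record the read so the caller depends on this node.
fn get_query(key: u32) -> String {
    let (result, dep_node_index) = try_execute_query(key, None);
    if let Some(i) = dep_node_index {
        read_index(i);
    }
    result
}

// Ensuring: if the node is already green, only record the read; else execute.
fn ensure_query(key: u32) {
    match try_mark_green(key) {
        Some(i) => read_index(i),
        None => {
            try_execute_query(key, Some("dep_node"));
        }
    }
}

// Forcing: execute with a precomputed dep-node, without recording a read.
fn force_query(key: u32, dep_node: DepNode) {
    try_execute_query(key, Some(dep_node));
}

fn read_index(_: DepNodeIndex) {}
fn try_mark_green(_: u32) -> Option<DepNodeIndex> {
    None
}

fn main() {
    println!("{}", get_query(7));
    ensure_query(7);
    force_query(7, "example_node");
}
```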

compiler/rustc_query_system/src/dep_graph/graph.rs

@@ -11,6 +11,7 @@ use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use parking_lot::Mutex;
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;
@@ -208,82 +209,16 @@ impl<K: DepKind> DepGraph<K> {
/// `arg` parameter.
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
hash_result: fn(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
self.with_task_impl(
key,
cx,
arg,
task,
|_key| {
Some(TaskDeps {
#[cfg(debug_assertions)]
node: Some(_key),
reads: SmallVec::new(),
read_set: Default::default(),
phantom_data: PhantomData,
})
},
hash_result,
)
}
fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
if let Some(ref data) = self.data {
let dcx = cx.dep_context();
let task_deps = create_task(key).map(Lock::new);
let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
let mut hcx = dcx.create_stable_hashing_context();
let hashing_timer = dcx.profiler().incr_result_hashing();
let current_fingerprint = hash_result(&mut hcx, &result);
let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
// Get timer for profiling `DepNode` interning
let node_intern_timer = self
.node_intern_event_id
.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
// Intern the new `DepNode`.
let (dep_node_index, prev_and_color) = data.current.intern_node(
dcx.profiler(),
&data.previous,
key,
edges,
current_fingerprint,
print_status,
);
drop(node_intern_timer);
hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
if let Some((prev_index, color)) = prev_and_color {
debug_assert!(
data.colors.get(prev_index).is_none(),
"DepGraph::with_task() - Duplicate DepNodeColor \
insertion for {:?}",
key
);
data.colors.insert(prev_index, color);
}
(result, dep_node_index)
if self.is_fully_enabled() {
self.with_task_impl(key, cx, arg, task, hash_result)
} else {
// Incremental compilation is turned off. We just execute the task
// without tracking. We still provide a dep-node index that uniquely
@@ -293,6 +228,82 @@ impl<K: DepKind> DepGraph<K> {
}
}
fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
hash_result: fn(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
// This function is only called when the graph is enabled.
let data = self.data.as_ref().unwrap();
// If the following assertion triggers, it can have two reasons:
// 1. Something is wrong with DepNode creation, either here or
// in `DepGraph::try_mark_green()`.
// 2. Two distinct query keys get mapped to the same `DepNode`
// (see for example #48923).
assert!(
!self.dep_node_exists(&key),
"forcing query with already existing `DepNode`\n\
- query-key: {:?}\n\
- dep-node: {:?}",
arg,
key
);
let task_deps = if key.kind.is_eval_always() {
None
} else {
Some(Lock::new(TaskDeps {
#[cfg(debug_assertions)]
node: Some(key),
reads: SmallVec::new(),
read_set: Default::default(),
phantom_data: PhantomData,
}))
};
let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
let dcx = cx.dep_context();
let mut hcx = dcx.create_stable_hashing_context();
let hashing_timer = dcx.profiler().incr_result_hashing();
let current_fingerprint = hash_result(&mut hcx, &result);
let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
// Get timer for profiling `DepNode` interning
let node_intern_timer =
self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
// Intern the new `DepNode`.
let (dep_node_index, prev_and_color) = data.current.intern_node(
dcx.profiler(),
&data.previous,
key,
edges,
current_fingerprint,
print_status,
);
drop(node_intern_timer);
hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
if let Some((prev_index, color)) = prev_and_color {
debug_assert!(
data.colors.get(prev_index).is_none(),
"DepGraph::with_task() - Duplicate DepNodeColor \
insertion for {:?}",
key
);
data.colors.insert(prev_index, color);
}
(result, dep_node_index)
}
/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
@@ -357,19 +368,6 @@ impl<K: DepKind> DepGraph<K> {
}
}
/// Executes something within an "eval-always" task which is a task
/// that runs whenever anything changes.
pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
&self,
key: DepNode<K>,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
) -> (R, DepNodeIndex) {
self.with_task_impl(key, cx, arg, task, |_| None, hash_result)
}
#[inline]
pub fn read_index(&self, dep_node_index: DepNodeIndex) {
if let Some(ref data) = self.data {
@@ -484,22 +482,11 @@ impl<K: DepKind> DepGraph<K> {
None
}
/// Try to read a node index for the node dep_node.
/// Try to mark a node index for the node dep_node.
///
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,
dep_node: &DepNode<K>,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
debug_assert!(self.is_green(&dep_node));
self.read_index(dep_node_index);
(prev_index, dep_node_index)
})
}
pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
&self,
tcx: Ctxt,

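The upshot of the `graph.rs` changes: `with_task` is now the single tracked entry point, and eval-always tasks are expressed by skipping `TaskDeps` creation instead of going through a dedicated `with_eval_always_task`. A compressed sketch of that decision, with toy types standing in for the real ones:

```rust
// Toy sketch: an eval-always task gets no dependency tracking, because it
// re-runs unconditionally anyway. Types are illustrative stand-ins.

struct TaskDeps {
    reads: Vec<u32>,
}

fn with_task<R>(is_eval_always: bool, task: impl FnOnce() -> R) -> (R, Vec<u32>) {
    // Recording reads for an eval-always node would be wasted work.
    let deps = if is_eval_always { None } else { Some(TaskDeps { reads: Vec::new() }) };
    // The real code threads `deps` through thread-local state while `task` runs.
    let result = task();
    let edges = deps.map_or_else(Vec::new, |d| d.reads);
    (result, edges)
}

fn main() {
    let (r, edges) = with_task(false, || 40 + 2);
    println!("result={r}, edges={edges:?}");
}
```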
compiler/rustc_query_system/src/query/job.rs

@@ -143,6 +143,8 @@ impl<D> QueryJobId<D>
where
D: Copy + Clone + Eq + Hash,
{
#[cold]
#[inline(never)]
pub(super) fn find_cycle_in_stack(
&self,
query_map: QueryMap<D>,

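The only change to `job.rs` is marking cycle detection as a cold path. `#[cold]` and `#[inline(never)]` steer the optimizer so the rarely-taken error branch stays out of the callers' hot code; a generic illustration (not rustc code):

```rust
// Generic illustration of keeping an error path out-of-line.

#[cold]
#[inline(never)]
fn report_failure(code: u32) -> String {
    format!("operation failed with code {code}")
}

fn sum_checked(inputs: &[u32]) -> Result<u32, String> {
    let mut sum: u32 = 0;
    for &x in inputs {
        // `#[cold]` marks this branch as unlikely, and `#[inline(never)]`
        // keeps the call a compact out-of-line jump.
        match sum.checked_add(x) {
            Some(s) => sum = s,
            None => return Err(report_failure(x)),
        }
    }
    Ok(sum)
}

fn main() {
    println!("{:?}", sum_checked(&[1, 2, 3]));
}
```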
compiler/rustc_query_system/src/query/plumbing.rs

@@ -2,8 +2,7 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeParams};
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{
@@ -13,12 +12,12 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
#[cfg(parallel_compiler)]
use rustc_data_structures::profiling::TimingGuard;
use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
#[cfg(not(parallel_compiler))]
use rustc_errors::DiagnosticBuilder;
use rustc_errors::{Diagnostic, FatalError};
use rustc_errors::{DiagnosticBuilder, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
@@ -148,24 +147,21 @@ impl<D, K> Default for QueryState<D, K> {
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, D, C>
struct JobOwner<'tcx, D, K>
where
D: Copy + Clone + Eq + Hash,
C: QueryCache,
K: Eq + Hash + Clone,
{
state: &'tcx QueryState<D, C::Key>,
cache: &'tcx QueryCacheStore<C>,
key: C::Key,
state: &'tcx QueryState<D, K>,
key: K,
id: QueryJobId<D>,
}
#[cold]
#[inline(never)]
#[cfg(not(parallel_compiler))]
fn mk_cycle<CTX, V, R>(
tcx: CTX,
root: QueryJobId<CTX::DepKind>,
span: Span,
error: CycleError,
handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
@@ -174,20 +170,15 @@ where
V: std::fmt::Debug,
R: Clone,
{
let error: CycleError = root.find_cycle_in_stack(
tcx.try_collect_active_jobs().unwrap(),
&tcx.current_query_job(),
span,
);
let error = report_cycle(tcx.dep_context().sess(), error);
let value = handle_cycle_error(tcx, error);
cache.store_nocache(value)
}
impl<'tcx, D, C> JobOwner<'tcx, D, C>
impl<'tcx, D, K> JobOwner<'tcx, D, K>
where
D: Copy + Clone + Eq + Hash,
C: QueryCache,
K: Eq + Hash + Clone,
{
/// Either gets a `JobOwner` corresponding the query, allowing us to
/// start executing the query, or returns with the result of the query.
@@ -199,14 +190,13 @@ where
/// for some compile-time benchmarks.
#[inline(always)]
fn try_start<'b, CTX>(
tcx: CTX,
state: &'b QueryState<CTX::DepKind, C::Key>,
cache: &'b QueryCacheStore<C>,
tcx: &'b CTX,
state: &'b QueryState<CTX::DepKind, K>,
span: Span,
key: C::Key,
key: K,
lookup: QueryLookup,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> TryGetJob<'b, CTX::DepKind, C>
dep_kind: CTX::DepKind,
) -> TryGetJob<'b, CTX::DepKind, K>
where
CTX: QueryContext,
{
@@ -227,26 +217,24 @@ where
let key = entry.key().clone();
entry.insert(QueryResult::Started(job));
let global_id = QueryJobId::new(id, shard, query.dep_kind);
let owner = JobOwner { state, cache, id: global_id, key };
let global_id = QueryJobId::new(id, shard, dep_kind);
let owner = JobOwner { state, id: global_id, key };
return TryGetJob::NotYetStarted(owner);
}
Entry::Occupied(mut entry) => {
match entry.get_mut() {
#[cfg(not(parallel_compiler))]
QueryResult::Started(job) => {
let id = QueryJobId::new(job.id, shard, query.dep_kind);
let id = QueryJobId::new(job.id, shard, dep_kind);
drop(state_lock);
// If we are single-threaded we know that we have a cycle error,
// so we just return the error.
return TryGetJob::Cycle(mk_cycle(
tcx,
id,
return TryGetJob::Cycle(id.find_cycle_in_stack(
tcx.try_collect_active_jobs().unwrap(),
&tcx.current_query_job(),
span,
query.handle_cycle_error,
&cache.cache,
));
}
#[cfg(parallel_compiler)]
@@ -258,7 +246,6 @@ where
// Get the latch out
let latch = job.latch();
let key = entry.key().clone();
drop(state_lock);
@@ -266,30 +253,10 @@ where
// thread.
let result = latch.wait_on(tcx.current_query_job(), span);
if let Err(cycle) = result {
let cycle = report_cycle(tcx.dep_context().sess(), cycle);
let value = (query.handle_cycle_error)(tcx, cycle);
let value = cache.cache.store_nocache(value);
return TryGetJob::Cycle(value);
match result {
Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
Err(cycle) => TryGetJob::Cycle(cycle),
}
let cached = cache
.cache
.lookup(cache, &key, |value, index| {
if unlikely!(tcx.dep_context().profiler().enabled()) {
tcx.dep_context().profiler().query_cache_hit(index.into());
}
#[cfg(debug_assertions)]
{
cache.cache_hits.fetch_add(1, Ordering::Relaxed);
}
(value.clone(), index)
})
.unwrap_or_else(|_| panic!("value must be in cache after waiting"));
query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
return TryGetJob::JobCompleted(cached);
}
QueryResult::Poisoned => FatalError.raise(),
}
@@ -299,11 +266,18 @@ where
/// Completes the query by updating the query cache with the `result`,
/// signals the waiter and forgets the JobOwner, so it won't poison the query
fn complete(self, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
fn complete<C>(
self,
cache: &QueryCacheStore<C>,
result: C::Value,
dep_node_index: DepNodeIndex,
) -> C::Stored
where
C: QueryCache<Key = K>,
{
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
let state = self.state;
let cache = self.cache;
// Forget ourself so our destructor won't poison the query
mem::forget(self);
@@ -330,19 +304,10 @@ where
}
}
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
{
let diagnostics = Lock::new(ThinVec::new());
let result = f(Some(&diagnostics));
(result, diagnostics.into_inner())
}
impl<'tcx, D, C> Drop for JobOwner<'tcx, D, C>
impl<'tcx, D, K> Drop for JobOwner<'tcx, D, K>
where
D: Copy + Clone + Eq + Hash,
C: QueryCache,
K: Eq + Hash + Clone,
{
#[inline(never)]
#[cold]
@@ -373,22 +338,22 @@ pub(crate) struct CycleError {
}
/// The result of `try_start`.
enum TryGetJob<'tcx, D, C>
enum TryGetJob<'tcx, D, K>
where
D: Copy + Clone + Eq + Hash,
C: QueryCache,
K: Eq + Hash + Clone,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
NotYetStarted(JobOwner<'tcx, D, C>),
NotYetStarted(JobOwner<'tcx, D, K>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
/// if it succeeded or a cycle error if it failed.
#[cfg(parallel_compiler)]
JobCompleted((C::Stored, DepNodeIndex)),
JobCompleted(TimingGuard<'tcx>),
/// Trying to execute the query resulted in a cycle.
Cycle(C::Stored),
Cycle(CycleError),
}
/// Checks if the query is already computed and in the cache.
@@ -428,119 +393,146 @@ fn try_execute_query<CTX, C>(
span: Span,
key: C::Key,
lookup: QueryLookup,
dep_node: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> C::Stored
) -> (C::Stored, Option<DepNodeIndex>)
where
C: QueryCache,
C::Key: DepNodeParams<CTX::DepContext>,
C::Key: Clone + DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
tcx,
match JobOwner::<'_, CTX::DepKind, C::Key>::try_start(
&tcx,
state,
cache,
span,
key.clone(),
lookup,
query,
query.dep_kind,
) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted((v, index)) => {
tcx.dep_context().dep_graph().read_index(index);
return v;
TryGetJob::NotYetStarted(job) => {
let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id, compute);
let result = job.complete(cache, result, dep_node_index);
(result, Some(dep_node_index))
}
};
TryGetJob::Cycle(error) => {
let result = mk_cycle(tcx, error, query.handle_cycle_error, &cache.cache);
(result, None)
}
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted(query_blocked_prof_timer) => {
let (v, index) = cache
.cache
.lookup(cache, &key, |value, index| (value.clone(), index))
.unwrap_or_else(|_| panic!("value must be in cache after waiting"));
if unlikely!(tcx.dep_context().profiler().enabled()) {
tcx.dep_context().profiler().query_cache_hit(index.into());
}
#[cfg(debug_assertions)]
{
cache.cache_hits.fetch_add(1, Ordering::Relaxed);
}
query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
(v, Some(index))
}
}
}
fn execute_job<CTX, K, V>(
tcx: CTX,
key: K,
mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
query: &QueryVtable<CTX, K, V>,
job_id: QueryJobId<CTX::DepKind>,
compute: fn(CTX::DepContext, K) -> V,
) -> (V, DepNodeIndex)
where
K: Clone + DepNodeParams<CTX::DepContext>,
V: Debug,
CTX: QueryContext,
{
let dep_graph = tcx.dep_context().dep_graph();
// Fast path for when incr. comp. is off.
if !dep_graph.is_fully_enabled() {
let prof_timer = tcx.dep_context().profiler().query_provider();
let result = tcx.start_query(job.id, None, || compute(*tcx.dep_context(), key));
let result = tcx.start_query(job_id, None, || compute(*tcx.dep_context(), key));
let dep_node_index = dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
return job.complete(result, dep_node_index);
return (result, dep_node_index);
}
if query.anon {
let prof_timer = tcx.dep_context().profiler().query_provider();
if !query.anon && !query.eval_always {
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node =
dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
tcx.start_query(job.id, diagnostics, || {
dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
compute(*tcx.dep_context(), key)
})
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
dep_graph.read_index(dep_node_index);
let side_effects = QuerySideEffects { diagnostics };
if unlikely!(!side_effects.is_empty()) {
tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
}
return job.complete(result, dep_node_index);
}
let dep_node = query.to_dep_node(*tcx.dep_context(), &key);
if !query.eval_always {
// The diagnostics for this query will be
// promoted to the current session during
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
let loaded = tcx.start_query(job.id, None, || {
let marked = dep_graph.try_mark_green_and_read(tcx, &dep_node);
marked.map(|(prev_dep_node_index, dep_node_index)| {
(
load_from_disk_and_cache_in_memory(
tcx,
key.clone(),
prev_dep_node_index,
dep_node_index,
&dep_node,
query,
compute,
),
dep_node_index,
)
})
});
if let Some((result, dep_node_index)) = loaded {
return job.complete(result, dep_node_index);
if let Some(ret) = tcx.start_query(job_id, None, || {
try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query, compute)
}) {
return ret;
}
}
let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query, compute);
dep_graph.read_index(dep_node_index);
result
let prof_timer = tcx.dep_context().profiler().query_provider();
let diagnostics = Lock::new(ThinVec::new());
let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
if query.anon {
return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
compute(*tcx.dep_context(), key)
});
}
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
dep_graph.with_task(dep_node, *tcx.dep_context(), key, compute, query.hash_result)
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
let diagnostics = diagnostics.into_inner();
let side_effects = QuerySideEffects { diagnostics };
if unlikely!(!side_effects.is_empty()) {
if query.anon {
tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
} else {
tcx.store_side_effects(dep_node_index, side_effects);
}
}
(result, dep_node_index)
}
fn load_from_disk_and_cache_in_memory<CTX, K, V: Debug>(
fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
tcx: CTX,
key: K,
prev_dep_node_index: SerializedDepNodeIndex,
dep_node_index: DepNodeIndex,
key: &K,
dep_node: &DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, K, V>,
compute: fn(CTX::DepContext, K) -> V,
) -> V
) -> Option<(V, DepNodeIndex)>
where
K: Clone,
CTX: QueryContext,
V: Debug,
{
// Note this function can be called concurrently from the same query
// We must ensure that this is handled correctly.
debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));
let dep_graph = tcx.dep_context().dep_graph();
let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;
debug_assert!(dep_graph.is_green(dep_node));
// First we try to load the result from the on-disk cache.
let result = if query.cache_on_disk(tcx, &key, None) {
// Some things are never cached on disk.
if query.cache_on_disk(tcx, key, None) {
let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
let result = query.try_load_from_disk(tcx, prev_dep_node_index);
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -552,43 +544,39 @@ where
"missing on-disk cache entry for {:?}",
dep_node
);
result
} else {
// Some things are never cached on disk.
None
};
if let Some(result) = result {
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
if let Some(result) = result {
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
}
return Some((result, dep_node_index));
}
result
} else {
// We could not load a result from the on-disk cache, so
// recompute.
let prof_timer = tcx.dep_context().profiler().query_provider();
// The dep-graph for this computation is already in-place.
let result = tcx.dep_context().dep_graph().with_ignore(|| compute(*tcx.dep_context(), key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
// Verify that re-running the query produced a result with the expected hash
// This catches bugs in query implementations, turning them into ICEs.
// For example, a query might sort its result by `DefId` - since `DefId`s are
// not stable across compilation sessions, the result could end up getting sorted
// in a different order when the query is re-run, even though all of the inputs
// (e.g. `DefPathHash` values) were green.
//
// See issue #82920 for an example of a miscompilation that would get turned into
// an ICE by this check
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
result
}
// We could not load a result from the on-disk cache, so
// recompute.
let prof_timer = tcx.dep_context().profiler().query_provider();
// The dep-graph for this computation is already in-place.
let result = dep_graph.with_ignore(|| compute(*tcx.dep_context(), key.clone()));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
// Verify that re-running the query produced a result with the expected hash
// This catches bugs in query implementations, turning them into ICEs.
// For example, a query might sort its result by `DefId` - since `DefId`s are
// not stable across compilation sessions, the result could end up getting sorted
// in a different order when the query is re-run, even though all of the inputs
// (e.g. `DefPathHash` values) were green.
//
// See issue #82920 for an example of a miscompilation that would get turned into
// an ICE by this check
incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
Some((result, dep_node_index))
}
fn incremental_verify_ich<CTX, K, V: Debug>(
@@ -648,88 +636,6 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
}
}
fn force_query_with_job<C, CTX>(
tcx: CTX,
key: C::Key,
job: JobOwner<'_, CTX::DepKind, C>,
dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> (C::Stored, DepNodeIndex)
where
C: QueryCache,
CTX: QueryContext,
{
// If the following assertion triggers, it can have two reasons:
// 1. Something is wrong with DepNode creation, either here or
// in `DepGraph::try_mark_green()`.
// 2. Two distinct query keys get mapped to the same `DepNode`
// (see for example #48923).
assert!(
!tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
"forcing query with already existing `DepNode`\n\
- query-key: {:?}\n\
- dep-node: {:?}",
key,
dep_node
);
let prof_timer = tcx.dep_context().profiler().query_provider();
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
tcx.start_query(job.id, diagnostics, || {
if query.eval_always {
tcx.dep_context().dep_graph().with_eval_always_task(
dep_node,
*tcx.dep_context(),
key,
compute,
query.hash_result,
)
} else {
tcx.dep_context().dep_graph().with_task(
dep_node,
*tcx.dep_context(),
key,
compute,
query.hash_result,
)
}
})
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
let side_effects = QuerySideEffects { diagnostics };
if unlikely!(!side_effects.is_empty()) && dep_node.kind != DepKind::NULL {
tcx.store_side_effects(dep_node_index, side_effects);
}
let result = job.complete(result, dep_node_index);
(result, dep_node_index)
}
#[inline(never)]
fn get_query_impl<CTX, C>(
tcx: CTX,
state: &QueryState<CTX::DepKind, C::Key>,
cache: &QueryCacheStore<C>,
span: Span,
key: C::Key,
lookup: QueryLookup,
query: &QueryVtable<CTX, C::Key, C::Value>,
compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> C::Stored
where
CTX: QueryContext,
C: QueryCache,
C::Key: DepNodeParams<CTX::DepContext>,
{
try_execute_query(tcx, state, cache, span, key, lookup, query, compute)
}
/// Ensure that either this query has all green inputs or been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.
@@ -739,13 +645,17 @@ where
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
fn ensure_must_run<CTX, K, V>(
tcx: CTX,
key: &K,
query: &QueryVtable<CTX, K, V>,
) -> (bool, Option<DepNode<CTX::DepKind>>)
where
K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
CTX: QueryContext,
{
if query.eval_always {
return true;
return (true, None);
}
// Ensuring an anonymous query makes no sense
@@ -753,19 +663,21 @@ where
let dep_node = query.to_dep_node(*tcx.dep_context(), key);
match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
let dep_graph = tcx.dep_context().dep_graph();
match dep_graph.try_mark_green(tcx, &dep_node) {
None => {
// A None return from `try_mark_green_and_read` means that this is either
// A None return from `try_mark_green` means that this is either
// a new dep node or that the dep node has already been marked red.
// Either way, we can't call `dep_graph.read()` as we don't have the
// DepNodeIndex. We must invoke the query itself. The performance cost
// this introduces should be negligible as we'll immediately hit the
// in-memory cache, or another query down the line will.
true
(true, Some(dep_node))
}
Some((_, dep_node_index)) => {
dep_graph.read_index(dep_node_index);
tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
false
(false, None)
}
}
}
@@ -804,23 +716,8 @@ where
Err(lookup) => lookup,
};
let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
tcx,
state,
cache,
DUMMY_SP,
key.clone(),
lookup,
query,
) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return true,
#[cfg(parallel_compiler)]
TryGetJob::JobCompleted(_) => return true,
};
force_query_with_job(tcx, key, job, dep_node, query, compute);
let _ =
try_execute_query(tcx, state, cache, DUMMY_SP, key, lookup, Some(dep_node), query, compute);
true
}
@@ -842,25 +739,33 @@ where
CTX: QueryContext,
{
let query = &Q::VTABLE;
if let QueryMode::Ensure = mode {
if !ensure_must_run(tcx, &key, query) {
let dep_node = if let QueryMode::Ensure = mode {
let (must_run, dep_node) = ensure_must_run(tcx, &key, query);
if !must_run {
return None;
}
}
dep_node
} else {
None
};
debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
let compute = Q::compute_fn(tcx, &key);
let value = get_query_impl(
let (result, dep_node_index) = try_execute_query(
tcx,
Q::query_state(tcx),
Q::query_cache(tcx),
span,
key,
lookup,
dep_node,
query,
compute,
);
Some(value)
if let Some(dep_node_index) = dep_node_index {
tcx.dep_context().dep_graph().read_index(dep_node_index)
}
Some(result)
}
pub fn force_query<Q, CTX>(tcx: CTX, dep_node: &DepNode<CTX::DepKind>) -> bool