rust/compiler/rustc_query_system/src/query/plumbing.rs


//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
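//!
//! A rough sketch of the control flow, in terms of the free functions defined
//! below (illustrative only; `Q`, `CTX`, `tcx`, `span` and `key` stand in for a
//! concrete query, its context and its arguments):
//!
//! ```ignore (illustrative)
//! // A generated `tcx.$query(key)` method boils down to something like:
//! let result = get_query::<Q, CTX>(tcx, span, key, QueryMode::Get);
//! // `get_query` claims the job via `JobOwner::try_start`, computes or loads
//! // the result in `execute_job` / `try_load_from_disk_and_cache_in_memory`,
//! // and stores it into the query cache via `JobOwner::complete`.
//! ```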

use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVTable};
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
#[cfg(parallel_compiler)]
use rustc_data_structures::profiling::TimingGuard;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};

use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use std::ptr;

pub struct QueryState<K> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult {
    /// An already executing query. The query job can be used to await its completion.
    Started(QueryJob),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}

impl<K> QueryState<K>
where
    K: Eq + Hash + Clone + Debug,
{
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    pub fn try_collect_active_jobs<CTX: Copy>(
        &self,
        tcx: CTX,
        make_query: fn(CTX, K) -> QueryStackFrame,
        jobs: &mut QueryMap,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(tcx, k.clone());
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(tcx, k.clone());
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}

impl<K> Default for QueryState<K> {
    fn default() -> QueryState<K> {
        QueryState { active: Default::default() }
    }
}
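
// Note: `QueryState` only tracks queries that are currently in flight (or that
// panicked); completed results live in the per-query `QueryCache`, which is
// consulted separately via `try_get_cached` below.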

/// A type representing the responsibility to execute the job identified by the
/// `id` field. This will poison the relevant query if dropped.
struct JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    state: &'tcx QueryState<K>,
    key: K,
    id: QueryJobId,
}

#[cold]
#[inline(never)]
fn mk_cycle<CTX, V, R>(
    tcx: CTX,
    error: CycleError,
    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
    CTX: QueryContext,
    V: std::fmt::Debug,
    R: Clone,
{
    let error = report_cycle(tcx.dep_context().sess(), error);
    let value = handle_cycle_error(tcx, error);
    cache.store_nocache(value)
}

impl<'tcx, K> JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` has already been called and
    /// did not find the result in the cache.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'b, CTX>(
        tcx: &'b CTX,
        state: &'b QueryState<K>,
        span: Span,
        key: K,
    ) -> TryGetJob<'b, K>
    where
        CTX: QueryContext,
    {
        #[cfg(parallel_compiler)]
        let mut state_lock = state.active.get_shard_by_value(&key).lock();
        #[cfg(not(parallel_compiler))]
        let mut state_lock = state.active.lock();
        let lock = &mut *state_lock;

        match lock.entry(key) {
            Entry::Vacant(entry) => {
                let id = tcx.next_job_id();
                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

                let owner = JobOwner { state, id, key };
                return TryGetJob::NotYetStarted(owner);
            }
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    #[cfg(not(parallel_compiler))]
                    QueryResult::Started(job) => {
                        let id = job.id;
                        drop(state_lock);

                        // If we are single-threaded we know that we have a cycle error,
                        // so we just return the error.
                        return TryGetJob::Cycle(id.find_cycle_in_stack(
                            tcx.try_collect_active_jobs().unwrap(),
                            &tcx.current_query_job(),
                            span,
                        ));
                    }
                    #[cfg(parallel_compiler)]
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();

                        // Get the latch out.
                        let latch = job.latch();
                        drop(state_lock);

                        // With parallel queries we might just have to wait on some other
                        // thread.
                        let result = latch.wait_on(tcx.current_query_job(), span);

                        match result {
                            Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
                            Err(cycle) => TryGetJob::Cycle(cycle),
                        }
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
        }
    }

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiters, and forgets the `JobOwner` so it won't poison the query.
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored
    where
        C: QueryCache<Key = K>,
    {
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        let (job, result) = {
            let job = {
                #[cfg(parallel_compiler)]
                let mut lock = state.active.get_shard_by_value(&key).lock();
                #[cfg(not(parallel_compiler))]
                let mut lock = state.active.lock();

                match lock.remove(&key).unwrap() {
                    QueryResult::Started(job) => job,
                    QueryResult::Poisoned => panic!(),
                }
            };
            let result = cache.complete(key, result, dep_node_index);
            (job, result)
        };

        job.signal_complete();
        result
    }
}
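
// If `complete` is never reached (for example because the query provider
// panicked), the `Drop` impl below removes the active entry, re-inserts it as
// `Poisoned`, and signals completion so that any waiters resume instead of
// blocking forever; later attempts to start the same query will find the
// `Poisoned` entry and raise a fatal error.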

impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

#[derive(Clone)]
pub(crate) struct CycleError {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame)>,
    pub cycle: Vec<QueryInfo>,
}

/// The result of `try_start`.
enum TryGetJob<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, K>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted(TimingGuard<'tcx>),

    /// Trying to execute the query resulted in a cycle.
    Cycle(CycleError),
}

/// Checks if the query is already computed and in the cache.
/// On a hit, this registers a read of the cached dep-node index and calls
/// `on_hit` with a reference to the cached value; on a miss, it returns
/// `Err(())` so the caller can go on to compute the query.
#[inline]
pub fn try_get_cached<'a, CTX, C, R, OnHit>(
    tcx: CTX,
    cache: &'a C,
    key: &C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
) -> Result<R, ()>
where
    C: QueryCache,
    CTX: DepContext,
    OnHit: FnOnce(&C::Stored) -> R,
{
    cache.lookup(&key, |value, index| {
        if std::intrinsics::unlikely(tcx.profiler().enabled()) {
            tcx.profiler().query_cache_hit(index.into());
        }
        tcx.dep_graph().read_index(index);
        on_hit(value)
    })
}
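
// Illustrative usage (a hypothetical caller, not code from this file): consult
// the cache first and fall back to the slow path only on a miss, roughly along
// these lines:
//
//     match try_get_cached(tcx, cache, &key, |value| value.clone()) {
//         Ok(value) => value,
//         Err(()) => get_query::<Q, _>(tcx, span, key, QueryMode::Get).unwrap(),
//     }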

fn try_execute_query<CTX, C>(
    tcx: CTX,
    state: &QueryState<C::Key>,
    cache: &C,
    span: Span,
    key: C::Key,
    dep_node: Option<DepNode<CTX::DepKind>>,
    query: &QueryVTable<CTX, C::Key, C::Value>,
) -> (C::Stored, Option<DepNodeIndex>)
where
    C: QueryCache,
    C::Key: Clone + DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
        TryGetJob::NotYetStarted(job) => {
            let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
            let result = job.complete(cache, result, dep_node_index);
            (result, Some(dep_node_index))
        }
        TryGetJob::Cycle(error) => {
            let result = mk_cycle(tcx, error, query.handle_cycle_error, cache);
            (result, None)
        }
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted(query_blocked_prof_timer) => {
            let (v, index) = cache
                .lookup(&key, |value, index| (value.clone(), index))
                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

            if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
                tcx.dep_context().profiler().query_cache_hit(index.into());
            }
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
    }
}
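
// Note on `execute_job` below: it has three paths. With incremental compilation
// disabled it simply runs the provider; otherwise it first tries
// `try_load_from_disk_and_cache_in_memory` (for non-anonymous, non-`eval_always`
// queries), and only then runs the provider inside a dep-graph task, recording
// any emitted diagnostics as query side effects.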

fn execute_job<CTX, K, V>(
    tcx: CTX,
    key: K,
    mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
    query: &QueryVTable<CTX, K, V>,
    job_id: QueryJobId,
) -> (V, DepNodeIndex)
where
    K: Clone + DepNodeParams<CTX::DepContext>,
    V: Debug,
    CTX: QueryContext,
{
    let dep_graph = tcx.dep_context().dep_graph();

    // Fast path for when incr. comp. is off.
    if !dep_graph.is_fully_enabled() {
        let prof_timer = tcx.dep_context().profiler().query_provider();
        let result = tcx.start_query(job_id, query.depth_limit, None, || {
            query.compute(*tcx.dep_context(), key)
        });
        let dep_node_index = dep_graph.next_virtual_depnode_index();
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        return (result, dep_node_index);
    }

    if !query.anon && !query.eval_always {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = tcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
        }) {
            return ret;
        }
    }

    let prof_timer = tcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
            if query.anon {
                return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                    query.compute(*tcx.dep_context(), key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));

            dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if query.anon {
            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            tcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}
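
// Note on `try_load_from_disk_and_cache_in_memory` below: it only succeeds if
// the dep node can be marked green. On success, the result is either
// deserialized from the on-disk cache or recomputed with dependency tracking
// ignored (the dep-graph edges for this node are already in place), and its
// fingerprint may be checked against the previous session via
// `incremental_verify_ich`.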

fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
    tcx: CTX,
    key: &K,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVTable<CTX, K, V>,
) -> Option<(V, DepNodeIndex)>
where
    K: Clone,
    CTX: QueryContext,
    V: Debug,
{
    // Note this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    let dep_graph = tcx.dep_context().dep_graph();
    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;

    debug_assert!(dep_graph.is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if query.cache_on_disk {
        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();

        // The call to `with_query_deserialization` enforces that no new `DepNodes`
        // are created during deserialization. See the docs of that method for more
        // details.
        let result = dep_graph
            .with_query_deserialization(|| query.try_load_from_disk(tcx, prev_dep_node_index));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some(result) = result {
            if std::intrinsics::unlikely(
                tcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
            ) {
                dep_graph.mark_debug_loaded_from_disk(*dep_node)
            }

            let prev_fingerprint = tcx
                .dep_context()
                .dep_graph()
                .prev_fingerprint_of(dep_node)
                .unwrap_or(Fingerprint::ZERO);

            // If `-Zincremental-verify-ich` is specified, re-hash results from
            // the cache and make sure that they have the expected fingerprint.
            //
            // If not, we still seek to verify a subset of fingerprints loaded
            // from disk. Re-hashing results is fairly expensive, so we can't
            // currently afford to verify every hash. This subset should still
            // give us some coverage of potential bugs though.
            let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
            if std::intrinsics::unlikely(
                try_verify || tcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
            ) {
                incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
            }

            return Some((result, dep_node_index));
        }

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );
    }

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = tcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash.
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);

    Some((result, dep_node_index))
}

fn incremental_verify_ich<CTX, K, V: Debug>(
    tcx: CTX::DepContext,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVTable<CTX, K, V>,
) where
    CTX: QueryContext,
{
    assert!(
        tcx.dep_graph().is_green(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node,
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });
    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
    debug!("END verify_ich({:?})", dep_node);

    if Some(new_hash) != old_hash {
        incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result));
    }
}

// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
// currently not exposed publicly.
//
// The PR which added this attempted to use `&dyn Debug` instead, but that
// showed statistically significant worse compiler performance. It's not
// actually clear what the cause there was -- the code should be cold. If this
// can be replaced with `&dyn Debug` with no perf impact, then it probably
// should be.
extern "C" {
    type Opaque;
}

struct DebugArg<'a> {
    value: &'a Opaque,
    fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
}

impl<'a, T> From<&'a T> for DebugArg<'a>
where
    T: std::fmt::Debug,
{
    fn from(value: &'a T) -> DebugArg<'a> {
        DebugArg {
            value: unsafe { std::mem::transmute(value) },
            fmt: unsafe {
                std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
            },
        }
    }
}

impl std::fmt::Debug for DebugArg<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        (self.fmt)(self.value, f)
    }
}

// Note that this is marked #[cold] and intentionally takes the equivalent of
// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
// different implementations for LLVM to chew on (and filling up the final
// binary, too).
#[cold]
fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
    let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
        format!("`cargo clean -p {}` or `cargo clean`", crate_name)
    } else {
        "`cargo clean`".to_string()
    };

    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a re-entrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        sess.emit_err(crate::error::Reentrant);
    } else {
        sess.emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{:?}", dep_node),
        });
        panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns `true` (in the first tuple element) if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<CTX, K, V>(
    tcx: CTX,
    key: &K,
    query: &QueryVTable<CTX, K, V>,
) -> (bool, Option<DepNode<CTX::DepKind>>)
where
    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    if query.eval_always {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon);

    let dep_node = query.to_dep_node(*tcx.dep_context(), key);

    let dep_graph = tcx.dep_context().dep_graph();
    match dep_graph.try_mark_green(tcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            (true, Some(dep_node))
        }
        Some((_, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            (false, None)
        }
    }
}

#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure,
}

pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    let query = Q::make_vtable(tcx, &key);
    let dep_node = if let QueryMode::Ensure = mode {
        let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = try_execute_query(
        tcx,
        Q::query_state(tcx),
        Q::query_cache(tcx),
        span,
        key,
        dep_node,
        &query,
    );
    if let Some(dep_node_index) = dep_node_index {
        tcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}
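
// Illustrative use of `QueryMode::Ensure` (hypothetical caller, not code from
// this file): run a query purely for its side effects, skipping execution when
// all of its inputs are already green:
//
//     let _ = get_query::<Q, CTX>(tcx, DUMMY_SP, key, QueryMode::Ensure);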

pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    // We may be concurrently trying to both execute and force a query.
    // Ensure that only one of them runs the query.
    let cache = Q::query_cache(tcx);
    let cached = cache.lookup(&key, |_, index| {
        if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
            tcx.dep_context().profiler().query_cache_hit(index.into());
        }
    });

    match cached {
        Ok(()) => return,
        Err(()) => {}
    }

    let query = Q::make_vtable(tcx, &key);
    let state = Q::query_state(tcx);
    debug_assert!(!query.anon);

    try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
}