diff --git a/Cargo.lock b/Cargo.lock
index 4ae1ab2070e..c717a8f1a9d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4202,6 +4202,7 @@ dependencies = [
  "rustc_index",
  "rustc_macros",
  "rustc_serialize",
+ "rustc_session",
  "rustc_span",
  "smallvec 1.6.1",
  "tracing",
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index d862db2674e..c688b23be1d 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -2,6 +2,7 @@ use crate::ich::StableHashingContext;
 use crate::ty::{self, TyCtxt};
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
+use rustc_session::Session;
 
 #[macro_use]
 mod dep_node;
@@ -101,20 +102,18 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
         TyCtxt::create_stable_hashing_context(*self)
     }
 
-    fn debug_dep_tasks(&self) -> bool {
-        self.sess.opts.debugging_opts.dep_tasks
-    }
-    fn debug_dep_node(&self) -> bool {
-        self.sess.opts.debugging_opts.incremental_info
-            || self.sess.opts.debugging_opts.query_dep_graph
-    }
-
     #[inline]
     fn dep_graph(&self) -> &DepGraph {
         &self.dep_graph
     }
 
+    #[inline(always)]
     fn profiler(&self) -> &SelfProfilerRef {
         &self.prof
     }
+
+    #[inline(always)]
+    fn sess(&self) -> &Session {
+        self.sess
+    }
 }
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index 43dfe6892b1..e9314797fbd 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -17,19 +17,17 @@ extern crate rustc_middle;
 extern crate tracing;
 
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_errors::{Diagnostic, Handler, Level};
+use rustc_errors::{DiagnosticBuilder, Handler};
 use rustc_hir::def_id::CrateNum;
 use rustc_index::vec::IndexVec;
 use rustc_middle::dep_graph;
 use rustc_middle::ich::StableHashingContext;
 use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
 use rustc_middle::ty::query::{Providers, QueryEngine};
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_serialize::opaque;
 use rustc_span::{Span, DUMMY_SP};
-use std::mem;
 
 #[macro_use]
 mod plumbing;
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index d4093f281dd..37a176de941 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -2,22 +2,19 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use super::{queries, Query};
+use super::queries;
 use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::query::on_disk_cache;
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_query_system::dep_graph::HasDepContext;
-use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
-use rustc_query_system::query::{QueryContext, QueryDescription};
+use rustc_query_system::query::{QueryContext, QueryDescription, QueryJobId, QueryMap};
 
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder};
+use rustc_errors::Diagnostic;
 use rustc_serialize::opaque;
 use rustc_span::def_id::{DefId, LocalDefId};
-use rustc_span::Span;
 
 #[derive(Copy, Clone)]
 pub struct QueryCtxt<'tcx> {
@@ -45,15 +42,6 @@ impl HasDepContext for QueryCtxt<'tcx> {
 }
 
 impl QueryContext for QueryCtxt<'tcx> {
-    type Query = Query<'tcx>;
-
-    fn incremental_verify_ich(&self) -> bool {
-        self.sess.opts.debugging_opts.incremental_verify_ich
-    }
-
-    fn verbose(&self) -> bool {
-        self.sess.verbose()
-    }
-
     fn def_path_str(&self, def_id: DefId) -> String {
         self.tcx.def_path_str(def_id)
     }
@@ -62,11 +50,8 @@ impl QueryContext for QueryCtxt<'tcx> {
         tls::with_related_context(**self, |icx| icx.query)
     }
 
-    fn try_collect_active_jobs(
-        &self,
-    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
-    {
-        self.queries.try_collect_active_jobs()
+    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>> {
+        self.queries.try_collect_active_jobs(**self)
     }
 
     fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
@@ -132,14 +117,6 @@ impl QueryContext for QueryCtxt<'tcx> {
         (cb.force_from_dep_node)(*self, dep_node)
     }
 
-    fn has_errors_or_delayed_span_bugs(&self) -> bool {
-        self.sess.has_errors_or_delayed_span_bugs()
-    }
-
-    fn diagnostic(&self) -> &rustc_errors::Handler {
-        self.sess.diagnostic()
-    }
-
     // Interactions with on_disk_cache
     fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
         self.on_disk_cache
@@ -196,54 +173,6 @@ impl QueryContext for QueryCtxt<'tcx> {
 }
 
 impl<'tcx> QueryCtxt<'tcx> {
-    #[inline(never)]
-    #[cold]
-    pub(super) fn report_cycle(
-        self,
-        CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
-    ) -> DiagnosticBuilder<'tcx> {
-        assert!(!stack.is_empty());
-
-        let fix_span = |span: Span, query: &Query<'tcx>| {
-            self.sess.source_map().guess_head_span(query.default_span(*self, span))
-        };
-
-        // Disable naming impls with types in this path, since that
-        // sometimes cycles itself, leading to extra cycle errors.
-        // (And cycle errors around impls tend to occur during the
-        // collect/coherence phases anyhow.)
-        ty::print::with_forced_impl_filename_line(|| {
-            let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
-            let mut err = struct_span_err!(
-                self.sess,
-                span,
-                E0391,
-                "cycle detected when {}",
-                stack[0].query.describe(self)
-            );
-
-            for i in 1..stack.len() {
-                let query = &stack[i].query;
-                let span = fix_span(stack[(i + 1) % stack.len()].span, query);
-                err.span_note(span, &format!("...which requires {}...", query.describe(self)));
-            }
-
-            err.note(&format!(
-                "...which again requires {}, completing the cycle",
-                stack[0].query.describe(self)
-            ));
-
-            if let Some((span, query)) = usage {
-                err.span_note(
-                    fix_span(span, &query),
-                    &format!("cycle used when {}", query.describe(self)),
-                );
-            }
-
-            err
-        })
-    }
-
     pub(super) fn encode_query_results(
         self,
         encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>,
@@ -323,16 +252,16 @@ pub struct QueryStruct {
 
 macro_rules! handle_cycle_error {
     ([][$tcx: expr, $error:expr]) => {{
-        $tcx.report_cycle($error).emit();
+        $error.emit();
         Value::from_cycle_error($tcx)
     }};
     ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
-        $tcx.report_cycle($error).emit();
+        $error.emit();
        $tcx.sess.abort_if_errors();
         unreachable!()
     }};
     ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
-        $tcx.report_cycle($error).delay_as_bug();
+        $error.delay_as_bug();
         Value::from_cycle_error($tcx)
     }};
     ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
@@ -386,55 +315,40 @@ macro_rules! define_queries {
             input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
         }
 
-        #[allow(nonstandard_style)]
-        #[derive(Clone, Debug)]
-        pub enum Query<$tcx> {
-            $($(#[$attr])* $name(query_keys::$name<$tcx>)),*
-        }
+        mod make_query {
+            use super::*;
 
-        impl<$tcx> Query<$tcx> {
-            pub fn name(&self) -> &'static str {
-                match *self {
-                    $(Query::$name(_) => stringify!($name),)*
-                }
-            }
-
-            pub(crate) fn describe(&self, tcx: QueryCtxt<$tcx>) -> String {
-                let (r, name) = match *self {
-                    $(Query::$name(key) => {
-                        (queries::$name::describe(tcx, key), stringify!($name))
-                    })*
-                };
-                if tcx.sess.verbose() {
-                    format!("{} [{}]", r, name)
+            // Create an eponymous constructor for each query.
+            $(#[allow(nonstandard_style)] $(#[$attr])*
+            pub fn $name<$tcx>(tcx: QueryCtxt<$tcx>, key: query_keys::$name<$tcx>) -> QueryStackFrame {
+                let kind = dep_graph::DepKind::$name;
+                let name = stringify!($name);
+                let description = ty::print::with_forced_impl_filename_line(
+                    // Force filename-line mode to avoid invoking `type_of` query.
+                    || queries::$name::describe(tcx, key)
+                );
+                let description = if tcx.sess.verbose() {
+                    format!("{} [{}]", description, name)
                 } else {
-                    r
-                }
-            }
+                    description
+                };
+                let span = if kind == dep_graph::DepKind::def_span {
+                    // The `def_span` query is used to calculate `default_span`,
+                    // so exit to avoid infinite recursion.
+                    None
+                } else {
+                    Some(key.default_span(*tcx))
+                };
+                let hash = || {
+                    let mut hcx = tcx.create_stable_hashing_context();
+                    let mut hasher = StableHasher::new();
+                    std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher);
+                    key.hash_stable(&mut hcx, &mut hasher);
+                    hasher.finish::<u64>()
+                };
 
-            // FIXME(eddyb) Get more valid `Span`s on queries.
-            pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
-                if !span.is_dummy() {
-                    return span;
-                }
-                // The `def_span` query is used to calculate `default_span`,
-                // so exit to avoid infinite recursion.
-                if let Query::def_span(..) = *self {
-                    return span
-                }
-                match *self {
-                    $(Query::$name(key) => key.default_span(tcx),)*
-                }
-            }
-        }
-
-        impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
-            fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
-                mem::discriminant(self).hash_stable(hcx, hasher);
-                match *self {
-                    $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
-                }
-            }
+                QueryStackFrame::new(name, description, span, hash)
+            })*
         }
 
         #[allow(nonstandard_style)]
@@ -461,7 +375,9 @@ macro_rules! define_queries {
             type Cache = query_storage::$name<$tcx>;
 
             #[inline(always)]
-            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Query<$tcx>, Self::Key> {
+            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Self::Key>
+                where QueryCtxt<$tcx>: 'a
+            {
                 &tcx.queries.$name
             }
 
@@ -493,7 +409,7 @@ macro_rules! define_queries {
 
             fn handle_cycle_error(
                 tcx: QueryCtxt<'tcx>,
-                error: CycleError<Query<'tcx>>
+                mut error: DiagnosticBuilder<'_>,
             ) -> Self::Value {
                 handle_cycle_error!([$($modifiers)*][tcx, error])
             }
@@ -596,7 +512,6 @@ macro_rules! define_queries_struct {
             $($(#[$attr])*  $name: QueryState<
                 crate::dep_graph::DepKind,
-                Query<$tcx>,
                 query_keys::$name<$tcx>,
             >,)*
         }
@@ -614,14 +529,17 @@ macro_rules! define_queries_struct {
             }
 
             pub(crate) fn try_collect_active_jobs(
-                &self
-            ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, Query<$tcx>>>> {
-                let mut jobs = FxHashMap::default();
+                &$tcx self,
+                tcx: TyCtxt<$tcx>,
+            ) -> Option<QueryMap<crate::dep_graph::DepKind>> {
+                let tcx = QueryCtxt { tcx, queries: self };
+                let mut jobs = QueryMap::default();
 
                 $(
                     self.$name.try_collect_active_jobs(
-                        <queries::$name<$tcx> as QueryAccessors<QueryCtxt<$tcx>>>::DEP_KIND,
-                        Query::$name,
+                        tcx,
+                        dep_graph::DepKind::$name,
+                        make_query::$name,
                         &mut jobs,
                     )?;
                 )*
@@ -666,38 +584,8 @@ macro_rules! define_queries_struct {
                 handler: &Handler,
                 num_frames: Option<usize>,
             ) -> usize {
-                let query_map = self.try_collect_active_jobs();
-
-                let mut current_query = query;
-                let mut i = 0;
-
-                while let Some(query) = current_query {
-                    if Some(i) == num_frames {
-                        break;
-                    }
-                    let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query))
-                    {
-                        info
-                    } else {
-                        break;
-                    };
-                    let mut diag = Diagnostic::new(
-                        Level::FailureNote,
-                        &format!(
-                            "#{} [{}] {}",
-                            i,
-                            query_info.info.query.name(),
-                            query_info.info.query.describe(QueryCtxt { tcx, queries: self })
-                        ),
-                    );
-                    diag.span = tcx.sess.source_map().guess_head_span(query_info.info.span).into();
-                    handler.force_print_diagnostic(diag);
-
-                    current_query = query_info.job.parent;
-                    i += 1;
-                }
-
-                i
+                let qcx = QueryCtxt { tcx, queries: self };
+                rustc_query_system::query::print_query_stack(qcx, query, handler, num_frames)
             }
 
             $($(#[$attr])*
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index d18a2a6faed..7d3357f8fa2 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -16,6 +16,7 @@ rustc_errors = { path = "../rustc_errors" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_index = { path = "../rustc_index" }
 rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 parking_lot = "0.11"
 smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 1319a31b8f5..f55e2f777a2 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -87,7 +87,10 @@ impl<K: DepKind> DepNode<K> {
 
         #[cfg(debug_assertions)]
         {
-            if !kind.can_reconstruct_query_key() && tcx.debug_dep_node() {
+            if !kind.can_reconstruct_query_key()
+                && (tcx.sess().opts.debugging_opts.incremental_info
+                    || tcx.sess().opts.debugging_opts.query_dep_graph)
+            {
                 tcx.dep_graph().register_dep_node_debug_str(dep_node, || arg.to_debug_str(tcx));
             }
         }
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index f579052c106..0f25572170f 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -280,7 +280,7 @@ impl<K: DepKind> DepGraph<K> {
             let mut hcx = dcx.create_stable_hashing_context();
             let current_fingerprint = hash_result(&mut hcx, &result);
 
-            let print_status = cfg!(debug_assertions) && dcx.debug_dep_tasks();
+            let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
 
             // Intern the new `DepNode`.
             let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
@@ -731,7 +731,7 @@ impl<K: DepKind> DepGraph<K> {
                     return None;
                 }
                 None => {
-                    if !tcx.has_errors_or_delayed_span_bugs() {
+                    if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
                         panic!(
                             "try_mark_previous_green() - Forcing the DepNode \
                             should have set its color"
@@ -835,7 +835,7 @@ impl<K: DepKind> DepGraph<K> {
             // Promote the previous diagnostics to the current session.
             tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());
 
-            let handle = tcx.diagnostic();
+            let handle = tcx.dep_context().sess().diagnostic();
 
             for diagnostic in diagnostics {
                 handle.emit_diagnostic(&diagnostic);
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index a647381fb03..e8fb71be3e0 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -13,6 +13,7 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
+use rustc_session::Session;
 use std::fmt;
 use std::hash::Hash;
 
@@ -24,9 +25,6 @@ pub trait DepContext: Copy {
     /// Create a hashing context for hashing new results.
     fn create_stable_hashing_context(&self) -> Self::StableHashingContext;
 
-    fn debug_dep_tasks(&self) -> bool;
-    fn debug_dep_node(&self) -> bool;
-
     /// Access the DepGraph.
     fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
 
@@ -34,6 +32,9 @@
     /// Access the profiler.
     fn profiler(&self) -> &SelfProfilerRef;
+
+    /// Access the compiler session.
+    fn sess(&self) -> &Session;
 }
 
 pub trait HasDepContext: Copy {
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 3873b47d4d4..4e2515c3ac3 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -3,10 +3,10 @@
 use crate::dep_graph::DepNode;
 use crate::dep_graph::SerializedDepNodeIndex;
 use crate::query::caches::QueryCache;
-use crate::query::plumbing::CycleError;
 use crate::query::{QueryCacheStore, QueryContext, QueryState};
 
 use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_errors::DiagnosticBuilder;
 use std::fmt::Debug;
 use std::hash::Hash;
 
@@ -27,7 +27,7 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
     pub compute: fn(CTX, K) -> V,
 
     pub hash_result: fn(&mut CTX::StableHashingContext, &V) -> Option<Fingerprint>,
-    pub handle_cycle_error: fn(CTX, CycleError<CTX::Query>) -> V,
+    pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
     pub cache_on_disk: fn(CTX, &K, Option<&V>) -> bool,
     pub try_load_from_disk: fn(CTX, SerializedDepNodeIndex) -> Option<V>,
 }
@@ -52,8 +52,8 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
         (self.hash_result)(hcx, value)
     }
 
-    pub(crate) fn handle_cycle_error(&self, tcx: CTX, error: CycleError<CTX::Query>) -> V {
-        (self.handle_cycle_error)(tcx, error)
+    pub(crate) fn handle_cycle_error(&self, tcx: CTX, diag: DiagnosticBuilder<'_>) -> V {
+        (self.handle_cycle_error)(tcx, diag)
     }
 
     pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
@@ -73,7 +73,9 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
     type Cache: QueryCache;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Key>;
+    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, Self::Key>
+    where
+        CTX: 'a;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
     fn query_cache<'a>(tcx: CTX) -> &'a QueryCacheStore<Self::Cache>
@@ -88,7 +90,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
         result: &Self::Value,
     ) -> Option<Fingerprint>;
 
-    fn handle_cycle_error(tcx: CTX, error: CycleError<CTX::Query>) -> Self::Value;
+    fn handle_cycle_error(tcx: CTX, diag: DiagnosticBuilder<'_>) -> Self::Value;
 }
 
 pub trait QueryDescription<CTX: QueryContext>: QueryAccessors<CTX> {
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 0ecc2694a79..35a2ac865f2 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,6 +1,10 @@
+use crate::dep_graph::DepContext;
 use crate::query::plumbing::CycleError;
+use crate::query::{QueryContext, QueryStackFrame};
 
 use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
+use rustc_session::Session;
 use rustc_span::Span;
 
 use std::convert::TryFrom;
@@ -10,11 +14,9 @@ use std::num::NonZeroU32;
 
 #[cfg(parallel_compiler)]
 use {
-    crate::dep_graph::DepContext,
-    crate::query::QueryContext,
+    crate::dep_graph::DepKind,
     parking_lot::{Condvar, Mutex},
     rustc_data_structures::fx::FxHashSet,
-    rustc_data_structures::stable_hasher::{HashStable, StableHasher},
     rustc_data_structures::sync::Lock,
     rustc_data_structures::sync::Lrc,
     rustc_data_structures::{jobserver, OnDrop},
 };
 
 /// Represents a span and a query key.
 #[derive(Clone, Debug)]
-pub struct QueryInfo<Q> {
+pub struct QueryInfo {
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
-    pub query: Q,
+    pub query: QueryStackFrame,
 }
 
-pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
+pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
 
 /// A value uniquely identifying an active query job within a shard in the query cache.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
@@ -59,34 +61,34 @@ where
         QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
     }
 
-    fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
+    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
         map.get(&self).unwrap().info.query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
+    fn span(self, map: &QueryMap<D>) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
+    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
+    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
-pub struct QueryJobInfo<D, Q> {
-    pub info: QueryInfo<Q>,
-    pub job: QueryJob<D, Q>,
+pub struct QueryJobInfo<D> {
+    pub info: QueryInfo,
+    pub job: QueryJob<D>,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D, Q> {
+pub struct QueryJob<D> {
     pub id: QueryShardJobId,
 
     /// The span corresponding to the reason for which this query was required.
@@ -97,15 +99,14 @@ pub struct QueryJob<D> {
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D, Q>>,
+    latch: Option<QueryLatch<D>>,
 
-    dummy: PhantomData<QueryLatch<D, Q>>,
+    dummy: PhantomData<QueryLatch<D>>,
 }
 
-impl<D, Q> QueryJob<D, Q>
+impl<D> QueryJob<D>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     /// Creates a new query job.
     pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
@@ -120,7 +121,7 @@ where
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
+    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
 
     #[cfg(not(parallel_compiler))]
-    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
-        QueryLatch { id, dummy: PhantomData }
+    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
+        QueryLatch { id }
     }
 
     /// Signals to waiters that the query is complete.
@@ -148,23 +149,21 @@ where
 
 #[cfg(not(parallel_compiler))]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D, Q> {
+pub(super) struct QueryLatch<D> {
     id: QueryJobId<D>,
-    dummy: PhantomData<Q>,
 }
 
 #[cfg(not(parallel_compiler))]
-impl<D, Q> QueryLatch<D, Q>
+impl<D> QueryLatch<D>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D, Q>,
+        query_map: QueryMap<D>,
         current_job: &Option<QueryJobId<D>>,
         span: Span,
-    ) -> CycleError<Q> {
+    ) -> CycleError {
         // Find the waitee amongst `current_job` parents
         let mut cycle = Vec::new();
         let mut current_job = Option::clone(current_job);
@@ -198,15 +197,15 @@ where
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<D, Q> {
+struct QueryWaiter<D> {
     query: Option<QueryJobId<D>>,
     condvar: Condvar,
     span: Span,
-    cycle: Lock<Option<CycleError<Q>>>,
+    cycle: Lock<Option<CycleError>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D, Q> QueryWaiter<D, Q> {
+impl<D> QueryWaiter<D> {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -214,19 +213,19 @@ impl<D, Q> QueryWaiter<D, Q> {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D, Q> {
+struct QueryLatchInfo<D> {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
+    waiters: Vec<Lrc<QueryWaiter<D>>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D, Q> {
-    info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
+pub(super) struct QueryLatch<D> {
+    info: Lrc<Mutex<QueryLatchInfo<D>>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
+impl<D: Eq + Hash> QueryLatch<D> {
     fn new() -> Self {
         QueryLatch {
             info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -235,13 +234,13 @@ impl<D: Eq + Hash> QueryLatch<D> {
 }
 
 #[cfg(parallel_compiler)]
-impl<D, Q> QueryLatch<D, Q> {
+impl<D> QueryLatch<D> {
     /// Awaits for the query job to complete.
     pub(super) fn wait_on(
         &self,
         query: Option<QueryJobId<D>>,
         span: Span,
-    ) -> Result<(), CycleError<Q>> {
+    ) -> Result<(), CycleError> {
         let waiter =
             Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -256,7 +255,7 @@ impl<D> QueryLatch<D> {
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
+    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -290,7 +289,7 @@ impl<D> QueryLatch<D> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
+    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -312,14 +311,13 @@ type Waiter<D> = (QueryJobId<D>, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<D, Q, F>(
-    query_map: &QueryMap<D, Q>,
+fn visit_waiters<D, F>(
+    query_map: &QueryMap<D>,
     query: QueryJobId<D>,
     mut visit: F,
 ) -> Option<Option<Waiter<D>>>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
@@ -349,8 +347,8 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D, Q>(
-    query_map: &QueryMap<D, Q>,
+fn cycle_check<D>(
+    query_map: &QueryMap<D>,
     query: QueryJobId<D>,
     span: Span,
     stack: &mut Vec<(Span, QueryJobId<D>)>,
     visited: &mut FxHashSet<QueryJobId<D>>,
 ) -> Option<Option<Waiter<D>>>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     if !visited.insert(query) {
         return if let Some(p) = stack.iter().position(|q| q.1 == query) {
@@ -394,14 +391,13 @@ where
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D, Q>(
-    query_map: &QueryMap<D, Q>,
+fn connected_to_root<D>(
+    query_map: &QueryMap<D>,
     query: QueryJobId<D>,
     visited: &mut FxHashSet<QueryJobId<D>>,
 ) -> bool
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     // We already visited this or we're deliberately ignoring it
     if !visited.insert(query) {
@@ -421,30 +417,23 @@ where
 
 // Deterministically pick an query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, CTX, T, F>(
-    query_map: &QueryMap<CTX::DepKind, CTX::Query>,
-    tcx: CTX,
-    queries: &'a [T],
-    f: F,
-) -> &'a T
+fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
 where
-    CTX: QueryContext,
-    F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
+    D: Copy + Clone + Eq + Hash,
+    F: Fn(&T) -> (Span, QueryJobId<D>),
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
-    let mut hcx = tcx.dep_context().create_stable_hashing_context();
     queries
         .iter()
         .min_by_key(|v| {
             let (span, query) = f(v);
-            let mut stable_hasher = StableHasher::new();
-            query.query(query_map).hash_stable(&mut hcx, &mut stable_hasher);
+            let hash = query.query(query_map).hash;
             // Prefer entry points which have valid spans for nicer error messages
             // We add an integer to the tuple ensuring that entry points
             // with valid spans are picked first
             let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
-            (span_cmp, stable_hasher.finish::<u64>())
+            (span_cmp, hash)
         })
         .unwrap()
 }
@@ -455,11 +444,10 @@
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
 #[cfg(parallel_compiler)]
-fn remove_cycle<CTX: QueryContext>(
-    query_map: &QueryMap<CTX::DepKind, CTX::Query>,
-    jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
-    tcx: CTX,
+fn remove_cycle<D: DepKind>(
+    query_map: &QueryMap<D>,
+    jobs: &mut Vec<QueryJobId<D>>,
+    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -509,15 +497,15 @@ fn remove_cycle(
                     None
                 } else {
                     // Deterministically pick one of the waiters to show to the user
-                    let waiter = *pick_query(query_map, tcx, &waiters, |s| *s);
+                    let waiter = *pick_query(query_map, &waiters, |s| *s);
                     Some((span, query, Some(waiter)))
                 }
             }
         })
-        .collect::<Vec<(Span, QueryJobId<CTX::DepKind>, Option<(Span, QueryJobId<CTX::DepKind>)>)>>();
+        .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
 
     // Deterministically pick an entry point
-    let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1));
+    let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
 
     // Shift the stack so that our entry point is first
     let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
@@ -574,7 +562,7 @@ pub fn deadlock(tcx: CTX, registry: &rayon_core::Registry) {
     let mut found_cycle = false;
 
     while jobs.len() > 0 {
-        if remove_cycle(&query_map, &mut jobs, &mut wakelist, tcx) {
+        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
             found_cycle = true;
         }
     }
 
     on_panic.disable();
 }
+
+#[inline(never)]
+#[cold]
+pub(crate) fn report_cycle<'a>(
+    sess: &'a Session,
+    CycleError { usage, cycle: stack }: CycleError,
+) -> DiagnosticBuilder<'a> {
+    assert!(!stack.is_empty());
+
+    let fix_span = |span: Span, query: &QueryStackFrame| {
+        sess.source_map().guess_head_span(query.default_span(span))
+    };
+
+    let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
+    let mut err =
+        struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+    for i in 1..stack.len() {
+        let query = &stack[i].query;
+        let span = fix_span(stack[(i + 1) % stack.len()].span, query);
+        err.span_note(span, &format!("...which requires {}...", query.description));
+    }
+
+    err.note(&format!(
+        "...which again requires {}, completing the cycle",
+        stack[0].query.description
+    ));
+
+    if let Some((span, query)) = usage {
+        err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
+    }
+
+    err
+}
+
+pub fn print_query_stack<CTX: QueryContext>(
+    tcx: CTX,
+    mut current_query: Option<QueryJobId<CTX::DepKind>>,
+    handler: &Handler,
+    num_frames: Option<usize>,
+) -> usize {
+    // Be careful relying on global state here: this code is called from
+    // a panic hook, which means that the global `Handler` may be in a weird
+    // state if it was responsible for triggering the panic.
+    let mut i = 0;
+    let query_map = tcx.try_collect_active_jobs();
+
+    while let Some(query) = current_query {
+        if Some(i) == num_frames {
+            break;
+        }
+        let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
+            info
+        } else {
+            break;
+        };
+        let mut diag = Diagnostic::new(
+            Level::FailureNote,
+            &format!(
+                "#{} [{}] {}",
+                i, query_info.info.query.name, query_info.info.query.description
+            ),
+        );
+        diag.span =
+            tcx.dep_context().sess().source_map().guess_head_span(query_info.info.span).into();
+        handler.force_print_diagnostic(diag);
+
+        current_query = query_info.job.parent;
+        i += 1;
+    }
+
+    i
+}
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index c935e1b9c5c..aef8a13ccef 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -4,7 +4,7 @@ pub use self::plumbing::*;
 mod job;
 #[cfg(parallel_compiler)]
 pub use self::job::deadlock;
-pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap};
 
 mod caches;
 pub use self::caches::{
@@ -15,27 +15,62 @@
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
 use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
-use crate::query::job::QueryMap;
 
-use rustc_data_structures::stable_hasher::HashStable;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
 use rustc_span::def_id::DefId;
+use rustc_span::Span;
+
+/// Description of a frame in the query stack.
+///
+/// This is mostly used in case of cycles for error reporting.
+#[derive(Clone, Debug)]
+pub struct QueryStackFrame {
+    pub name: &'static str,
+    pub description: String,
+    span: Option<Span>,
+    /// This hash is used to deterministically pick
+    /// a query to remove cycles in the parallel compiler.
+    #[cfg(parallel_compiler)]
+    hash: u64,
+}
+
+impl QueryStackFrame {
+    #[inline]
+    pub fn new(
+        name: &'static str,
+        description: String,
+        span: Option<Span>,
+        _hash: impl FnOnce() -> u64,
+    ) -> Self {
+        Self {
+            name,
+            description,
+            span,
+            #[cfg(parallel_compiler)]
+            hash: _hash(),
+        }
+    }
+
+    // FIXME(eddyb) Get more valid `Span`s on queries.
+    #[inline]
+    pub fn default_span(&self, span: Span) -> Span {
+        if !span.is_dummy() {
+            return span;
+        }
+        self.span.unwrap_or(span)
+    }
+}
 
 pub trait QueryContext: HasDepContext {
-    type Query: Clone + HashStable<Self::StableHashingContext>;
-
-    fn incremental_verify_ich(&self) -> bool;
-    fn verbose(&self) -> bool;
-
     /// Get string representation from DefPath.
     fn def_path_str(&self, def_id: DefId) -> String;
 
     /// Get the query information from the TLS context.
     fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
+    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
 
     /// Load data from the on-disk cache.
     fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
@@ -43,12 +78,6 @@ pub trait QueryContext: HasDepContext {
     /// Try to force a dep node to execute and see if it's green.
     fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
 
-    /// Return whether the current session is tainted by errors.
-    fn has_errors_or_delayed_span_bugs(&self) -> bool;
-
-    /// Return the diagnostic handler.
-    fn diagnostic(&self) -> &rustc_errors::Handler;
-
     /// Load diagnostics associated to the node in the previous session.
     fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index bd22ee2c18b..0050670b338 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -6,8 +6,10 @@ use crate::dep_graph::{DepContext, DepKind, DepNode};
 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
-use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
-use crate::query::{QueryContext, QueryMap};
+use crate::query::job::{
+    report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
+};
+use crate::query::{QueryContext, QueryMap, QueryStackFrame};
 
 #[cfg(not(parallel_compiler))]
 use rustc_data_structures::cold_path;
@@ -81,37 +83,36 @@ impl<C: QueryCache> QueryCacheStore<C> {
     }
 }
 
-struct QueryStateShard<D, Q, K> {
-    active: FxHashMap<K, QueryResult<D, Q>>,
+struct QueryStateShard<D, K> {
+    active: FxHashMap<K, QueryResult<D>>,
 
     /// Used to generate unique ids for active jobs.
     jobs: u32,
 }
 
-impl<D, Q, K> Default for QueryStateShard<D, Q, K> {
-    fn default() -> QueryStateShard<D, Q, K> {
+impl<D, K> Default for QueryStateShard<D, K> {
+    fn default() -> QueryStateShard<D, K> {
         QueryStateShard { active: Default::default(), jobs: 0 }
     }
 }
 
-pub struct QueryState<D, Q, K> {
-    shards: Sharded<QueryStateShard<D, Q, K>>,
+pub struct QueryState<D, K> {
+    shards: Sharded<QueryStateShard<D, K>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D, Q> {
+enum QueryResult<D> {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D, Q>),
+    Started(QueryJob<D>),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<D, Q, K> QueryState<D, Q, K>
+impl<D, K> QueryState<D, K>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     K: Eq + Hash + Clone + Debug,
 {
     pub fn all_inactive(&self) -> bool {
         let shards = self.shards.lock_shards();
         shards.iter().all(|shard| shard.active.is_empty())
     }
 
-    pub fn try_collect_active_jobs(
+    pub fn try_collect_active_jobs<CTX: Copy>(
         &self,
+        tcx: CTX,
         kind: D,
-        make_query: fn(K) -> Q,
-        jobs: &mut QueryMap<D, Q>,
+        make_query: fn(CTX, K) -> QueryStackFrame,
+        jobs: &mut QueryMap<D>,
     ) -> Option<()> {
         // We use try_lock_shards here since we are called from the
         // deadlock handler, and this shouldn't be locked.
             shard.active.iter().filter_map(move |(k, v)| {
                 if let QueryResult::Started(ref job) = *v {
                     let id = QueryJobId::new(job.id, shard_id, kind);
-                    let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
+                    let info = QueryInfo { span: job.span, query: make_query(tcx, k.clone()) };
                     Some((id, QueryJobInfo { info, job: job.clone() }))
                 } else {
                     None
     }
 }
 
-impl<D, Q, K> Default for QueryState<D, Q, K> {
-    fn default() -> QueryState<D, Q, K> {
+impl<D, K> Default for QueryState<D, K> {
+    fn default() -> QueryState<D, K> {
         QueryState { shards: Default::default() }
     }
 }
 
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, D, Q, C>
+struct JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
-    state: &'tcx QueryState<D, Q, C::Key>,
+    state: &'tcx QueryState<D, C::Key>,
     cache: &'tcx QueryCacheStore<C>,
     key: C::Key,
     id: QueryJobId<D>,
 }
 
-impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
+impl<'tcx, D, C> JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     /// Either gets a `JobOwner` corresponding the query, allowing us to
@@ -182,13 +182,13 @@ where
     #[inline(always)]
     fn try_start<'b, CTX>(
         tcx: CTX,
-        state: &'b QueryState<CTX::DepKind, CTX::Query, C::Key>,
+        state: &'b QueryState<CTX::DepKind, C::Key>,
         cache: &'b QueryCacheStore<C>,
         span: Span,
         key: &C::Key,
         lookup: QueryLookup,
         query: &QueryVtable<CTX, C::Key, C::Value>,
-    ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
+    ) -> TryGetJob<'b, CTX::DepKind, C>
     where
         CTX: QueryContext,
     {
@@ -242,11 +242,12 @@ where
                 // so we just return the error.
                 #[cfg(not(parallel_compiler))]
                 return TryGetJob::Cycle(cold_path(|| {
-                    let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
+                    let error: CycleError = latch.find_cycle_in_stack(
                         tcx.try_collect_active_jobs().unwrap(),
                         &tcx.current_query_job(),
                         span,
                     );
+                    let error = report_cycle(tcx.dep_context().sess(), error);
                     let value = query.handle_cycle_error(tcx, error);
                     cache.cache.store_nocache(value)
                 }));
@@ -258,6 +259,7 @@ where
                 let result = latch.wait_on(tcx.current_query_job(), span);
 
                 if let Err(cycle) = result {
+                    let cycle = report_cycle(tcx.dep_context().sess(), cycle);
                     let value = query.handle_cycle_error(tcx, cycle);
                     let value = cache.cache.store_nocache(value);
                     return TryGetJob::Cycle(value);
@@ -327,10 +329,9 @@ where
     (result, diagnostics.into_inner())
 }
 
-impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
+impl<'tcx, D, C> Drop for JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     #[inline(never)]
@@ -355,21 +356,20 @@ where
 }
 
 #[derive(Clone)]
-pub struct CycleError<Q> {
+pub(crate) struct CycleError {
     /// The query and related span that uses the cycle.
-    pub usage: Option<(Span, Q)>,
-    pub cycle: Vec<QueryInfo<Q>>,
+    pub usage: Option<(Span, QueryStackFrame)>,
+    pub cycle: Vec<QueryInfo>,
 }
 
 /// The result of `try_start`.
-enum TryGetJob<'tcx, D, Q, C>
+enum TryGetJob<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
-    NotYetStarted(JobOwner<'tcx, D, Q, C>),
+    NotYetStarted(JobOwner<'tcx, D, C>),
 
     /// The query was already completed.
     /// Returns the result of the query and its dep-node index
@@ -413,7 +413,7 @@ where
 fn try_execute_query<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
@@ -425,7 +425,7 @@ where
     C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
-    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
         tcx, state, cache, span, &key, lookup, query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
@@ -550,7 +550,7 @@ where
 
     // If `-Zincremental-verify-ich` is specified, re-hash results from
     // the cache and make sure that they have the expected fingerprint.
-    if unlikely!(tcx.incremental_verify_ich()) {
+    if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
         incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query);
     }
 
@@ -589,7 +589,7 @@ fn incremental_verify_ich(
 fn force_query_with_job<C, CTX>(
     tcx: CTX,
     key: C::Key,
-    job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
+    job: JobOwner<'_, CTX::DepKind, C>,
     dep_node: DepNode<CTX::DepKind>,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) -> (C::Stored, DepNodeIndex)
@@ -649,7 +649,7 @@ where
 #[inline(never)]
 fn get_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
@@ -707,7 +707,7 @@ where
 #[inline(never)]
 fn force_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
     key: C::Key,
     span: Span,
@@ -735,7 +735,7 @@ fn force_query_impl<CTX, C>(
         Err(lookup) => lookup,
     };
 
-    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
         tcx, state, cache, span, &key, lookup, query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
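
// --- Editor's sketch 1 (not part of the patch above) ---------------------------------
// A simplified, self-contained illustration of the idea behind the new `QueryStackFrame`:
// a pre-rendered name/description/span replaces the old `Query<'tcx>` enum, so the
// generic query machinery no longer needs to know the concrete query type.
// All types below are illustrative stand-ins, not rustc's own definitions.

#[derive(Clone, Debug)]
struct Span(u32); // stand-in for rustc_span::Span

#[derive(Clone, Debug)]
struct QueryStackFrame {
    name: &'static str,
    description: String,
    span: Option<Span>,
    // Only needed to pick cycle entry points deterministically (parallel compiler).
    hash: u64,
}

impl QueryStackFrame {
    fn new(name: &'static str, description: String, span: Option<Span>, hash: impl FnOnce() -> u64) -> Self {
        Self { name, description, span, hash: hash() }
    }

    // Fall back to the frame's own span only when the caller has none.
    fn default_span(&self, span: Option<Span>) -> Option<Span> {
        span.or_else(|| self.span.clone())
    }
}

fn main() {
    // A provider-side constructor (like the generated `make_query::$name` functions)
    // renders the description eagerly and computes the hash lazily.
    let frame = QueryStackFrame::new(
        "type_of",
        "computing type of `foo::bar`".to_string(),
        Some(Span(42)),
        || 0xdead_beef, // hypothetical stable hash of (DepKind, key)
    );
    println!("#{} [{}] {} (hash {:x})", 0, frame.name, frame.description, frame.hash);
    assert!(frame.default_span(None).is_some());
}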
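// --- Editor's sketch 2 (not part of the patch above) ---------------------------------
// Illustrative sketch of why `report_cycle` can move into rustc_query_system: a cycle is
// just a list of frames carrying spans and descriptions, so the error text can be
// assembled without the `Query<'tcx>` enum or a TyCtxt. The diagnostics machinery is
// replaced here by plain strings; names and layout are assumptions, not rustc's API.

#[derive(Clone)]
struct QueryInfo {
    span: usize, // stand-in for a source span
    query_description: String,
}

fn report_cycle(stack: &[QueryInfo]) -> Vec<String> {
    assert!(!stack.is_empty());
    let mut lines = Vec::new();
    // The main message points at the frame *after* the first one, mirroring the real
    // code's `stack[1 % stack.len()].span`, so single-frame cycles still index safely.
    lines.push(format!(
        "error[E0391]: cycle detected when {} (at {})",
        stack[0].query_description,
        stack[1 % stack.len()].span
    ));
    for i in 1..stack.len() {
        lines.push(format!("note: ...which requires {}...", stack[i].query_description));
    }
    lines.push(format!(
        "note: ...which again requires {}, completing the cycle",
        stack[0].query_description
    ));
    lines
}

fn main() {
    let stack = vec![
        QueryInfo { span: 10, query_description: "computing type of `A`".into() },
        QueryInfo { span: 20, query_description: "computing type of `B`".into() },
    ];
    for line in report_cycle(&stack) {
        println!("{}", line);
    }
}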
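// --- Editor's sketch 3 (not part of the patch above) ---------------------------------
// Simplified stand-in for the new shared `print_query_stack`: starting from the
// currently active job, walk `parent` links and print one numbered frame per step,
// stopping early when a `num_frames` limit is given. The map and types are minimal
// assumptions for illustration only.

use std::collections::HashMap;

#[derive(Copy, Clone, PartialEq, Eq, Hash)]
struct JobId(u32);

struct JobInfo {
    description: String,
    parent: Option<JobId>,
}

fn print_query_stack(
    map: &HashMap<JobId, JobInfo>,
    mut current: Option<JobId>,
    num_frames: Option<usize>,
) -> usize {
    let mut i = 0;
    while let Some(id) = current {
        if Some(i) == num_frames {
            break;
        }
        let info = match map.get(&id) {
            Some(info) => info,
            None => break, // the job map may be unavailable, e.g. inside a panic hook
        };
        println!("#{} {}", i, info.description);
        current = info.parent;
        i += 1;
    }
    i
}

fn main() {
    let mut map = HashMap::new();
    map.insert(JobId(1), JobInfo { description: "[typeck] checking `main`".into(), parent: None });
    map.insert(JobId(2), JobInfo { description: "[type_of] computing type of `X`".into(), parent: Some(JobId(1)) });
    assert_eq!(print_query_stack(&map, Some(JobId(2)), None), 2);
}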