Move diagnostics out from QueryJob and optimize for the case with no diagnostics

This commit is contained in:
John Kåre Alsaker 2018-12-07 03:04:23 +01:00
parent b8c8f0bdf6
commit f21c08439a
5 changed files with 68 additions and 47 deletions

View File

@@ -696,7 +696,7 @@ impl DepGraph {
// Promote the previous diagnostics to the current session. // Promote the previous diagnostics to the current session.
tcx.queries.on_disk_cache tcx.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics.clone()); .store_diagnostics(dep_node_index, diagnostics.clone().into());
for diagnostic in diagnostics { for diagnostic in diagnostics {
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit(); DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();

View File

@@ -1673,6 +1673,7 @@ impl<'gcx> GlobalCtxt<'gcx> {
let new_icx = ty::tls::ImplicitCtxt { let new_icx = ty::tls::ImplicitCtxt {
tcx, tcx,
query: icx.query.clone(), query: icx.query.clone(),
diagnostics: icx.diagnostics,
layout_depth: icx.layout_depth, layout_depth: icx.layout_depth,
task_deps: icx.task_deps, task_deps: icx.task_deps,
}; };
@@ -1782,6 +1783,7 @@ pub mod tls {
use errors::{Diagnostic, TRACK_DIAGNOSTICS}; use errors::{Diagnostic, TRACK_DIAGNOSTICS};
use rustc_data_structures::OnDrop; use rustc_data_structures::OnDrop;
use rustc_data_structures::sync::{self, Lrc, Lock}; use rustc_data_structures::sync::{self, Lrc, Lock};
use rustc_data_structures::thin_vec::ThinVec;
use dep_graph::TaskDeps; use dep_graph::TaskDeps;
#[cfg(not(parallel_queries))] #[cfg(not(parallel_queries))]
@@ -1801,10 +1803,14 @@ pub mod tls {
/// by `enter_local` with a new local interner /// by `enter_local` with a new local interner
pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>, pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>,
/// The current query job, if any. This is updated by start_job in /// The current query job, if any. This is updated by JobOwner::start in
/// ty::query::plumbing when executing a query /// ty::query::plumbing when executing a query
pub query: Option<Lrc<query::QueryJob<'gcx>>>, pub query: Option<Lrc<query::QueryJob<'gcx>>>,
/// Where to store diagnostics for the current query job, if any.
/// This is updated by JobOwner::start in ty::query::plumbing when executing a query
pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
/// Used to prevent layout from recursing too deeply. /// Used to prevent layout from recursing too deeply.
pub layout_depth: usize, pub layout_depth: usize,
@@ -1870,8 +1876,9 @@ pub mod tls {
fn track_diagnostic(diagnostic: &Diagnostic) { fn track_diagnostic(diagnostic: &Diagnostic) {
with_context_opt(|icx| { with_context_opt(|icx| {
if let Some(icx) = icx { if let Some(icx) = icx {
if let Some(ref query) = icx.query { if let Some(ref diagnostics) = icx.diagnostics {
query.diagnostics.lock().push(diagnostic.clone()); let mut diagnostics = diagnostics.lock();
diagnostics.extend(Some(diagnostic.clone()));
} }
} }
}) })
@@ -1938,6 +1945,7 @@ pub mod tls {
let icx = ImplicitCtxt { let icx = ImplicitCtxt {
tcx, tcx,
query: None, query: None,
diagnostics: None,
layout_depth: 0, layout_depth: 0,
task_deps: None, task_deps: None,
}; };
@@ -1967,6 +1975,7 @@ pub mod tls {
}; };
let icx = ImplicitCtxt { let icx = ImplicitCtxt {
query: None, query: None,
diagnostics: None,
tcx, tcx,
layout_depth: 0, layout_depth: 0,
task_deps: None, task_deps: None,

View File

@@ -14,7 +14,6 @@ use ty::query::{
config::QueryDescription, config::QueryDescription,
}; };
use ty::context::TyCtxt; use ty::context::TyCtxt;
use errors::Diagnostic;
use std::process; use std::process;
use std::{fmt, ptr}; use std::{fmt, ptr};
@@ -54,9 +53,6 @@ pub struct QueryJob<'tcx> {
/// The parent query job which created this job and is implicitly waiting on it. /// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<Lrc<QueryJob<'tcx>>>, pub parent: Option<Lrc<QueryJob<'tcx>>>,
/// Diagnostic messages which are emitted while the query executes
pub diagnostics: Lock<Vec<Diagnostic>>,
/// The latch which is used to wait on this job /// The latch which is used to wait on this job
#[cfg(parallel_queries)] #[cfg(parallel_queries)]
latch: QueryLatch<'tcx>, latch: QueryLatch<'tcx>,
@@ -66,7 +62,6 @@ impl<'tcx> QueryJob<'tcx> {
/// Creates a new query job /// Creates a new query job
pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self { pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
QueryJob { QueryJob {
diagnostics: Lock::new(Vec::new()),
info, info,
parent, parent,
#[cfg(parallel_queries)] #[cfg(parallel_queries)]

View File

@@ -7,6 +7,7 @@ use ich::{CachingSourceMapView, Fingerprint};
use mir::{self, interpret}; use mir::{self, interpret};
use mir::interpret::{AllocDecodingSession, AllocDecodingState}; use mir::interpret::{AllocDecodingSession, AllocDecodingState};
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once}; use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once};
use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque, use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque,
@@ -341,11 +342,13 @@ impl<'sess> OnDiskCache<'sess> {
/// Store a diagnostic emitted during the current compilation session. /// Store a diagnostic emitted during the current compilation session.
/// Anything stored like this will be available via `load_diagnostics` in /// Anything stored like this will be available via `load_diagnostics` in
/// the next compilation session. /// the next compilation session.
#[inline(never)]
#[cold]
pub fn store_diagnostics(&self, pub fn store_diagnostics(&self,
dep_node_index: DepNodeIndex, dep_node_index: DepNodeIndex,
diagnostics: Vec<Diagnostic>) { diagnostics: ThinVec<Diagnostic>) {
let mut current_diagnostics = self.current_diagnostics.borrow_mut(); let mut current_diagnostics = self.current_diagnostics.borrow_mut();
let prev = current_diagnostics.insert(dep_node_index, diagnostics); let prev = current_diagnostics.insert(dep_node_index, diagnostics.into());
debug_assert!(prev.is_none()); debug_assert!(prev.is_none());
} }
@@ -367,16 +370,16 @@ impl<'sess> OnDiskCache<'sess> {
/// Since many anonymous queries can share the same `DepNode`, we aggregate /// Since many anonymous queries can share the same `DepNode`, we aggregate
/// them -- as opposed to regular queries where we assume that there is a /// them -- as opposed to regular queries where we assume that there is a
/// 1:1 relationship between query-key and `DepNode`. /// 1:1 relationship between query-key and `DepNode`.
#[inline(never)]
#[cold]
pub fn store_diagnostics_for_anon_node(&self, pub fn store_diagnostics_for_anon_node(&self,
dep_node_index: DepNodeIndex, dep_node_index: DepNodeIndex,
mut diagnostics: Vec<Diagnostic>) { diagnostics: ThinVec<Diagnostic>) {
let mut current_diagnostics = self.current_diagnostics.borrow_mut(); let mut current_diagnostics = self.current_diagnostics.borrow_mut();
let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| { let x = current_diagnostics.entry(dep_node_index).or_insert(Vec::new());
mem::replace(&mut diagnostics, Vec::new())
});
x.extend(diagnostics.into_iter()); x.extend(Into::<Vec<_>>::into(diagnostics));
} }
fn load_indexed<'tcx, T>(&self, fn load_indexed<'tcx, T>(&self,

View File

@@ -18,6 +18,7 @@ use util::common::{profq_msg, ProfileQueriesMsg, QueryMsg};
use rustc_data_structures::fx::{FxHashMap}; use rustc_data_structures::fx::{FxHashMap};
use rustc_data_structures::sync::{Lrc, Lock}; use rustc_data_structures::sync::{Lrc, Lock};
use rustc_data_structures::thin_vec::ThinVec;
use std::mem; use std::mem;
use std::ptr; use std::ptr;
use std::collections::hash_map::Entry; use std::collections::hash_map::Entry;
@@ -195,19 +196,21 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
pub(super) fn start<'lcx, F, R>( pub(super) fn start<'lcx, F, R>(
&self, &self,
tcx: TyCtxt<'_, 'tcx, 'lcx>, tcx: TyCtxt<'_, 'tcx, 'lcx>,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: F) compute: F)
-> (R, Vec<Diagnostic>) -> R
where where
F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R
{ {
// The TyCtxt stored in TLS has the same global interner lifetime // The TyCtxt stored in TLS has the same global interner lifetime
// as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes // as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes
// when accessing the ImplicitCtxt // when accessing the ImplicitCtxt
let r = tls::with_related_context(tcx, move |current_icx| { tls::with_related_context(tcx, move |current_icx| {
// Update the ImplicitCtxt to point to our new query job // Update the ImplicitCtxt to point to our new query job
let new_icx = tls::ImplicitCtxt { let new_icx = tls::ImplicitCtxt {
tcx: tcx.global_tcx(), tcx: tcx.global_tcx(),
query: Some(self.job.clone()), query: Some(self.job.clone()),
diagnostics,
layout_depth: current_icx.layout_depth, layout_depth: current_icx.layout_depth,
task_deps: current_icx.task_deps, task_deps: current_icx.task_deps,
}; };
@@ -216,13 +219,19 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
tls::enter_context(&new_icx, |_| { tls::enter_context(&new_icx, |_| {
compute(tcx) compute(tcx)
}) })
}); })
// Extract the diagnostic from the job
let diagnostics = mem::replace(&mut *self.job.diagnostics.lock(), Vec::new());
(r, diagnostics)
} }
}
#[inline(always)]
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
where
F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R
{
let diagnostics = Lock::new(ThinVec::new());
let result = f(Some(&diagnostics));
(result, diagnostics.into_inner())
} }
impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> { impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
@@ -402,20 +411,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
profq_msg!(self, ProfileQueriesMsg::ProviderBegin); profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_activity(Q::CATEGORY)); self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
let res = job.start(self, |tcx| { let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
tcx.dep_graph.with_anon_task(dep_node.kind, || { job.start(self, diagnostics, |tcx| {
Q::compute(tcx.global_tcx(), key) tcx.dep_graph.with_anon_task(dep_node.kind, || {
Q::compute(tcx.global_tcx(), key)
})
}) })
}); });
self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd); profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
let ((result, dep_node_index), diagnostics) = res;
self.dep_graph.read_index(dep_node_index); self.dep_graph.read_index(dep_node_index);
self.queries.on_disk_cache if unlikely!(!diagnostics.is_empty()) {
.store_diagnostics_for_anon_node(dep_node_index, diagnostics); self.queries.on_disk_cache
.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}
job.complete(&result, dep_node_index); job.complete(&result, dep_node_index);
@@ -487,7 +499,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// The diagnostics for this query have already been // The diagnostics for this query have already been
// promoted to the current session during // promoted to the current session during
// try_mark_green(), so we can ignore them here. // try_mark_green(), so we can ignore them here.
let (result, _) = job.start(self, |tcx| { let result = job.start(self, None, |tcx| {
// The dep-graph for this computation is already in // The dep-graph for this computation is already in
// place // place
tcx.dep_graph.with_ignore(|| { tcx.dep_graph.with_ignore(|| {
@@ -566,32 +578,34 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
profq_msg!(self, ProfileQueriesMsg::ProviderBegin); profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
self.sess.profiler(|p| p.start_activity(Q::CATEGORY)); self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
let res = job.start(self, |tcx| { let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
if dep_node.kind.is_eval_always() { job.start(self, diagnostics, |tcx| {
tcx.dep_graph.with_eval_always_task(dep_node, if dep_node.kind.is_eval_always() {
tcx, tcx.dep_graph.with_eval_always_task(dep_node,
key, tcx,
Q::compute) key,
} else { Q::compute)
tcx.dep_graph.with_task(dep_node, } else {
tcx, tcx.dep_graph.with_task(dep_node,
key, tcx,
Q::compute) key,
} Q::compute)
}
})
}); });
self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd); profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
let ((result, dep_node_index), diagnostics) = res;
if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) { if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
self.dep_graph.mark_loaded_from_cache(dep_node_index, false); self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
} }
if dep_node.kind != ::dep_graph::DepKind::Null { if dep_node.kind != ::dep_graph::DepKind::Null {
self.queries.on_disk_cache if unlikely!(!diagnostics.is_empty()) {
.store_diagnostics(dep_node_index, diagnostics); self.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics);
}
} }
job.complete(&result, dep_node_index); job.complete(&result, dep_node_index);