From b2dfd9680d9d59ce5dcc16118da1f3384ceb19ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?=
Date: Sat, 22 Dec 2018 18:59:03 +0100
Subject: [PATCH] Fix race condition when emitting stored diagnostics

---
 src/librustc/dep_graph/graph.rs | 84 ++++++++++++++++++++++++---------
 1 file changed, 63 insertions(+), 21 deletions(-)

diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs
index a9e80ddf4ba..63857d6c918 100644
--- a/src/librustc/dep_graph/graph.rs
+++ b/src/librustc/dep_graph/graph.rs
@@ -1,4 +1,4 @@
-use errors::DiagnosticBuilder;
+use errors::{Diagnostic, DiagnosticBuilder};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
@@ -9,6 +9,7 @@ use std::hash::Hash;
 use std::collections::hash_map::Entry;
 use ty::{self, TyCtxt};
 use util::common::{ProfileQueriesMsg, profq_msg};
+use parking_lot::{Mutex, Condvar};
 
 use ich::{StableHashingContext, StableHashingContextProvider, Fingerprint};
@@ -60,6 +61,12 @@ struct DepGraphData {
     colors: DepNodeColorMap,
 
+    /// A set of loaded diagnostics that have been emitted.
+    emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
+
+    /// Used to wait for diagnostics to be emitted.
+    emitted_diagnostics_cond_var: Condvar,
+
     /// When we load, there may be `.o` files, cached mir, or other such
     /// things available to us. If we find that they are not dirty, we
     /// load the path to the file storing those work-products here into
@@ -83,6 +90,8 @@ impl DepGraph {
             previous_work_products: prev_work_products,
             dep_node_debug: Default::default(),
             current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
+            emitted_diagnostics: Default::default(),
+            emitted_diagnostics_cond_var: Condvar::new(),
             previous: prev_graph,
             colors: DepNodeColorMap::new(prev_graph_node_count),
             loaded_from_cache: Default::default(),
@@ -718,28 +727,18 @@ impl DepGraph {
         };
 
         // ... emitting any stored diagnostic ...
-        if did_allocation {
-            // Only the thread which did the allocation emits the error messages
-            // FIXME: Ensure that these are printed before returning for all threads.
-            // Currently threads where did_allocation = false can continue on
-            // and emit other diagnostics before these diagnostics are emitted.
-            // Such diagnostics should be emitted after these.
-            // See https://github.com/rust-lang/rust/issues/48685
-            let diagnostics = tcx.queries.on_disk_cache
-                .load_diagnostics(tcx, prev_dep_node_index);
+        let diagnostics = tcx.queries.on_disk_cache
+            .load_diagnostics(tcx, prev_dep_node_index);
 
-            if diagnostics.len() > 0 {
-                let handle = tcx.sess.diagnostic();
-
-                // Promote the previous diagnostics to the current session.
-                tcx.queries.on_disk_cache
-                    .store_diagnostics(dep_node_index, diagnostics.clone().into());
-
-                for diagnostic in diagnostics {
-                    DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
-                }
-            }
-        }
+        if unlikely!(diagnostics.len() > 0) {
+            self.emit_diagnostics(
+                tcx,
+                data,
+                dep_node_index,
+                did_allocation,
+                diagnostics
+            );
+        }
 
         // ... and finally storing a "Green" entry in the color map.
@@ -755,6 +754,49 @@ impl DepGraph {
         Some(dep_node_index)
     }
 
+    /// Atomically emits some loaded diagnostics, assuming that this only gets called with
+    /// `did_allocation` set to true on one thread.
+    #[cold]
+    #[inline(never)]
+    fn emit_diagnostics<'tcx>(
+        &self,
+        tcx: TyCtxt<'_, 'tcx, 'tcx>,
+        data: &DepGraphData,
+        dep_node_index: DepNodeIndex,
+        did_allocation: bool,
+        diagnostics: Vec<Diagnostic>,
+    ) {
+        if did_allocation || !cfg!(parallel_queries) {
+            // Only the thread which did the allocation emits the error messages
+            let handle = tcx.sess.diagnostic();
+
+            // Promote the previous diagnostics to the current session.
+            tcx.queries.on_disk_cache
+                .store_diagnostics(dep_node_index, diagnostics.clone().into());
+
+            for diagnostic in diagnostics {
+                DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
+            }
+
+            #[cfg(parallel_queries)]
+            {
+                // Mark the diagnostics as emitted and wake up waiters
+                data.emitted_diagnostics.lock().insert(dep_node_index);
+                data.emitted_diagnostics_cond_var.notify_all();
+            }
+        } else {
+            // The other threads will wait for the diagnostics to be emitted
+
+            let mut emitted_diagnostics = data.emitted_diagnostics.lock();
+            loop {
+                if emitted_diagnostics.contains(&dep_node_index) {
+                    break;
+                }
+                data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
+            }
+        }
+    }
+
     // Returns true if the given node has been marked as green during the
     // current compilation session. Used in various assertions
     pub fn is_green(&self, dep_node: &DepNode) -> bool {
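
The synchronization in `emit_diagnostics` follows the usual parking_lot Mutex/Condvar
wait pattern: the one thread that did the allocation inserts the dep node index into the
shared set and calls `notify_all`, while every other thread re-checks the set under the
lock and waits otherwise. Below is a minimal standalone sketch of that pattern, not part
of the patch; it assumes only the parking_lot crate and the standard library, and the
`EmittedTracker` type, its fields, and `main` are illustrative names rather than anything
taken from rustc.

    use parking_lot::{Condvar, Mutex};
    use std::collections::HashSet;
    use std::sync::Arc;
    use std::thread;

    // Illustrative stand-in for the set + condvar pair the patch stores on DepGraphData.
    struct EmittedTracker {
        emitted: Mutex<HashSet<u32>>,
        cond_var: Condvar,
    }

    impl EmittedTracker {
        // Called by the single thread that actually emitted the diagnostics.
        fn mark_emitted(&self, index: u32) {
            self.emitted.lock().insert(index);
            self.cond_var.notify_all();
        }

        // Called by every other thread; blocks until `index` has been marked.
        fn wait_for(&self, index: u32) {
            let mut emitted = self.emitted.lock();
            while !emitted.contains(&index) {
                // `wait` releases the lock while blocking and re-acquires it
                // before returning, so the re-check of the set is race-free.
                self.cond_var.wait(&mut emitted);
            }
        }
    }

    fn main() {
        let tracker = Arc::new(EmittedTracker {
            emitted: Mutex::new(HashSet::new()),
            cond_var: Condvar::new(),
        });

        let waiter = {
            let tracker = Arc::clone(&tracker);
            thread::spawn(move || tracker.wait_for(42))
        };

        tracker.mark_emitted(42);
        waiter.join().unwrap();
    }

Waiting in a loop rather than with a single `wait` call keeps the check robust against
spurious wake-ups and against notifications issued for a different dep node index.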