rust/compiler/rustc_interface/src/queries.rs

use crate::errors::{FailedWritingFile, RustcErrorFatal, RustcErrorUnexpectedAnnotation};
use crate::interface::{Compiler, Result};
use crate::{errors, passes, util};
use rustc_ast as ast;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::CodegenResults;
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, OnceLock, WorkerLocal};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{StableCrateId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_incremental::setup_dep_graph;
use rustc_metadata::creader::CStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
use rustc_middle::ty::{GlobalCtxt, TyCtxt};
use rustc_serialize::opaque::FileEncodeResult;
use rustc_session::config::{self, CrateType, OutputFilenames, OutputType};
use rustc_session::cstore::Untracked;
use rustc_session::output::find_crate_name;
use rustc_session::Session;
use rustc_span::symbol::sym;
use std::any::Any;
use std::cell::{RefCell, RefMut};
use std::sync::Arc;
/// Represents the result of a query.
///
/// The result is produced lazily by the [`compute`] method and can be stolen
/// exactly once with the [`steal`] method.
///
/// [`steal`]: Steal::steal
/// [`compute`]: Self::compute
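///
/// A minimal usage sketch (illustrative only; `compute` is private to this
/// crate and the `u32` payload is an arbitrary stand-in):
///
/// ```ignore (illustrative)
/// let query: Query<u32> = Query::default();
/// // The closure runs only on the first call; later calls reuse the cached result.
/// let result = query.compute(|| Ok(42))?;
/// // Stealing moves the value out; stealing a second time would panic.
/// let value = result.steal();
/// assert_eq!(value, 42);
/// ```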
pub struct Query<T> {
/// `None` means no value has been computed yet.
result: RefCell<Option<Result<Steal<T>>>>,
}
impl<T> Query<T> {
fn compute<F: FnOnce() -> Result<T>>(&self, f: F) -> Result<QueryResult<'_, T>> {
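        // Run `f` only if no result has been cached yet, then narrow the `RefMut`
        // down to the stored `Steal<T>`. If the computation failed, copy out the
        // cached error instead of handing out a borrow.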
RefMut::filter_map(
self.result.borrow_mut(),
|r: &mut Option<Result<Steal<T>>>| -> Option<&mut Steal<T>> {
r.get_or_insert_with(|| f().map(Steal::new)).as_mut().ok()
},
)
.map_err(|r| *r.as_ref().unwrap().as_ref().map(|_| ()).unwrap_err())
.map(QueryResult)
}
}
pub struct QueryResult<'a, T>(RefMut<'a, Steal<T>>);
impl<'a, T> std::ops::Deref for QueryResult<'a, T> {
type Target = RefMut<'a, Steal<T>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<'a, T> std::ops::DerefMut for QueryResult<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<'a, 'tcx> QueryResult<'a, &'tcx GlobalCtxt<'tcx>> {
pub fn enter<T>(&mut self, f: impl FnOnce(TyCtxt<'tcx>) -> T) -> T {
(*self.0).get_mut().enter(f)
}
}
impl<T> Default for Query<T> {
fn default() -> Self {
Query { result: RefCell::new(None) }
}
}
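/// The lazily evaluated stages of a compilation session: parsing and the
/// construction of the global type context. Each stage is a [`Query`] that is
/// computed at most once and whose result later stages may steal.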
pub struct Queries<'tcx> {
compiler: &'tcx Compiler,
gcx_cell: OnceLock<GlobalCtxt<'tcx>>,
arena: WorkerLocal<Arena<'tcx>>,
hir_arena: WorkerLocal<rustc_hir::Arena<'tcx>>,
parse: Query<ast::Crate>,
// This just points to what's in `gcx_cell`.
gcx: Query<&'tcx GlobalCtxt<'tcx>>,
}
impl<'tcx> Queries<'tcx> {
pub fn new(compiler: &'tcx Compiler) -> Queries<'tcx> {
Queries {
compiler,
gcx_cell: OnceLock::new(),
arena: WorkerLocal::new(|_| Arena::default()),
hir_arena: WorkerLocal::new(|_| rustc_hir::Arena::default()),
parse: Default::default(),
gcx: Default::default(),
}
}
pub fn finish(&self) -> FileEncodeResult {
if let Some(gcx) = self.gcx_cell.get() { gcx.finish() } else { Ok(0) }
}
pub fn parse(&self) -> Result<QueryResult<'_, ast::Crate>> {
self.parse.compute(|| {
passes::parse(&self.compiler.sess).map_err(|parse_error| parse_error.emit())
})
}
pub fn global_ctxt(&'tcx self) -> Result<QueryResult<'_, &'tcx GlobalCtxt<'tcx>>> {
self.gcx.compute(|| {
let sess = &self.compiler.sess;
let mut krate = self.parse()?.steal();
rustc_builtin_macros::cmdline_attrs::inject(
&mut krate,
&sess.parse_sess,
&sess.opts.unstable_opts.crate_attr,
);
let pre_configured_attrs =
rustc_expand::config::pre_configure_attrs(sess, &krate.attrs);
// Parse `#![crate_name]` even if `--crate-name` was passed, so we can check that the two match.
let crate_name = find_crate_name(sess, &pre_configured_attrs);
let crate_types = util::collect_crate_types(sess, &pre_configured_attrs);
let stable_crate_id = StableCrateId::new(
crate_name,
crate_types.contains(&CrateType::Executable),
sess.opts.cg.metadata.clone(),
sess.cfg_version,
);
let outputs = util::build_output_filenames(&pre_configured_attrs, sess);
let dep_graph = setup_dep_graph(sess, crate_name, stable_crate_id)?;
let cstore = FreezeLock::new(Box::new(CStore::new(
self.compiler.codegen_backend.metadata_loader(),
stable_crate_id,
)) as _);
let definitions = FreezeLock::new(Definitions::new(stable_crate_id));
let source_span = AppendOnlyIndexVec::new();
let _id = source_span.push(krate.spans.inner_span);
debug_assert_eq!(_id, CRATE_DEF_ID);
let untracked = Untracked { cstore, source_span, definitions };
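            // Build the `GlobalCtxt` inside `gcx_cell`; it is handed the arenas and the
            // untracked state assembled above and lives for the rest of the session.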
let qcx = passes::create_global_ctxt(
self.compiler,
crate_types,
stable_crate_id,
dep_graph,
untracked,
&self.gcx_cell,
&self.arena,
&self.hir_arena,
);
qcx.enter(|tcx| {
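                // Feed the queries whose results are already known at this point: the
                // crate name, the enabled features, the pre-expansion AST, the output
                // filenames, and the `DefKind` of the crate root.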
let feed = tcx.feed_local_crate();
feed.crate_name(crate_name);
let feed = tcx.feed_unit_query();
feed.features_query(tcx.arena.alloc(rustc_expand::config::features(
sess,
&pre_configured_attrs,
crate_name,
)));
feed.crate_for_resolver(tcx.arena.alloc(Steal::new((krate, pre_configured_attrs))));
feed.output_filenames(Arc::new(outputs));
let feed = tcx.feed_local_crate_def_id();
feed.def_kind(DefKind::Mod);
});
Ok(qcx)
})
}
pub fn write_dep_info(&'tcx self) -> Result<()> {
self.global_ctxt()?.enter(|tcx| {
passes::write_dep_info(tcx);
});
Ok(())
}
/// Check for the `#[rustc_error]` annotation, which forces an error in codegen. This is used
/// to write UI tests that actually test that compilation succeeds without reporting
/// an error.
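    ///
    /// A sketch of what such a UI test's entry point might look like (illustrative;
    /// the attribute also accepts `delayed_bug_from_inside_query`, handled below):
    ///
    /// ```ignore (illustrative)
    /// #![feature(rustc_attrs)]
    ///
    /// #[rustc_error]
    /// fn main() {}
    /// ```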
fn check_for_rustc_errors_attr(tcx: TyCtxt<'_>) {
let Some((def_id, _)) = tcx.entry_fn(()) else { return };
for attr in tcx.get_attrs(def_id, sym::rustc_error) {
match attr.meta_item_list() {
// Check if there is a `#[rustc_error(delayed_bug_from_inside_query)]`.
Some(list)
if list.iter().any(|list_item| {
matches!(
list_item.ident().map(|i| i.name),
Some(sym::delayed_bug_from_inside_query)
)
}) =>
{
tcx.ensure().trigger_delayed_bug(def_id);
}
// Bare `#[rustc_error]`.
None => {
tcx.dcx().emit_fatal(RustcErrorFatal { span: tcx.def_span(def_id) });
}
// Some other attribute.
Some(_) => {
tcx.dcx()
.emit_warn(RustcErrorUnexpectedAnnotation { span: tcx.def_span(def_id) });
}
}
}
}
pub fn codegen_and_build_linker(&'tcx self) -> Result<Linker> {
self.global_ctxt()?.enter(|tcx| {
// Don't do code generation if there were any errors. Likewise if
// there were any delayed bugs, because codegen will likely cause
// more ICEs, obscuring the original problem.
if let Some(guar) = self.compiler.sess.dcx().has_errors_or_delayed_bugs() {
return Err(guar);
}
// Hook for UI tests.
Self::check_for_rustc_errors_attr(tcx);
let ongoing_codegen = passes::start_codegen(&*self.compiler.codegen_backend, tcx);
Ok(Linker {
dep_graph: tcx.dep_graph.clone(),
output_filenames: tcx.output_filenames(()).clone(),
crate_hash: if tcx.needs_crate_hash() {
Some(tcx.crate_hash(LOCAL_CRATE))
} else {
None
},
ongoing_codegen,
})
})
}
}
pub struct Linker {
dep_graph: DepGraph,
output_filenames: Arc<OutputFilenames>,
// Only present when incr. comp. is enabled.
crate_hash: Option<Svh>,
ongoing_codegen: Box<dyn Any>,
}
impl Linker {
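    /// Joins the ongoing codegen, persists incremental-compilation artifacts
    /// (work products and the finalized session directory), and then either
    /// serializes an `.rlink` file (under `-Z no-link`) or asks the codegen
    /// backend to link the final artifact.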
pub fn link(self, sess: &Session, codegen_backend: &dyn CodegenBackend) -> Result<()> {
let (codegen_results, work_products) =
codegen_backend.join_codegen(self.ongoing_codegen, sess, &self.output_filenames);
if let Some(guar) = sess.dcx().has_errors() {
return Err(guar);
}
sess.time("serialize_work_products", || {
rustc_incremental::save_work_product_index(sess, &self.dep_graph, work_products)
});
let prof = sess.prof.clone();
prof.generic_activity("drop_dep_graph").run(move || drop(self.dep_graph));
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it).
rustc_incremental::finalize_session_directory(sess, self.crate_hash);
if !sess
.opts
.output_types
.keys()
.any(|&i| i == OutputType::Exe || i == OutputType::Metadata)
{
return Ok(());
}
if sess.opts.unstable_opts.no_link {
let rlink_file = self.output_filenames.with_extension(config::RLINK_EXT);
CodegenResults::serialize_rlink(
sess,
&rlink_file,
&codegen_results,
&*self.output_filenames,
)
.map_err(|error| {
sess.dcx().emit_fatal(FailedWritingFile { path: &rlink_file, error })
})?;
return Ok(());
}
let _timer = sess.prof.verbose_generic_activity("link_crate");
codegen_backend.link(sess, codegen_results, &self.output_filenames)
}
}
impl Compiler {
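    /// Runs `f` with a fresh set of [`Queries`] and tears down the global
    /// context afterwards. A minimal sketch of the intended call pattern
    /// (illustrative; assumes a `compiler` obtained via
    /// `rustc_interface::interface::run_compiler` and default analysis/linking):
    ///
    /// ```ignore (illustrative)
    /// compiler.enter(|queries| {
    ///     queries.parse()?;
    ///     queries.global_ctxt()?.enter(|tcx| tcx.analysis(()))?;
    ///     let linker = queries.codegen_and_build_linker()?;
    ///     linker.link(&compiler.sess, &*compiler.codegen_backend)
    /// })
    /// ```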
pub fn enter<F, T>(&self, f: F) -> T
where
F: for<'tcx> FnOnce(&'tcx Queries<'tcx>) -> T,
{
// Must declare `_timer` first so that it is dropped after `queries`.
let mut _timer = None;
let queries = Queries::new(self);
let ret = f(&queries);
// NOTE: intentionally does not compute the global context if it hasn't been built yet,
// since that likely means there was a parse error.
if let Some(Ok(gcx)) = &mut *queries.gcx.result.borrow_mut() {
let gcx = gcx.get_mut();
// We assume that no queries are run past here. If there are new queries
// after this point, they'll show up as "<unknown>" in self-profiling data.
{
let _prof_timer =
queries.compiler.sess.prof.generic_activity("self_profile_alloc_query_strings");
gcx.enter(rustc_query_impl::alloc_self_profile_query_strings);
}
self.sess.time("serialize_dep_graph", || gcx.enter(rustc_incremental::save_dep_graph));
}
// The timer's lifetime spans the dropping of `queries`, which contains
// the global context.
_timer = Some(self.sess.timer("free_global_ctxt"));
if let Err((path, error)) = queries.finish() {
self.sess.dcx().emit_fatal(errors::FailedWritingFile { path: &path, error });
}
ret
}
}